function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
column_arrays
|
def column_arrays(self) -> list[np.ndarray]:
    """
    Return one ndarray per column, for consumption by the JSON C code.

    Faster than calling ``iget_values`` per column because each block's
    values are converted to an ndarray only once.

    Warning! This doesn't handle Copy-on-Write, so should be used with
    caution (current use case of consuming this in the JSON code is fine).
    """
    n_cols = len(self.items)
    out: list[np.ndarray | None] = [None] * n_cols
    for block in self.blocks:
        locs = block._mgr_locs
        arr = block.array_values._values_for_json()
        if arr.ndim != 1:
            # 2D block: row i of the block's values belongs to column locs[i].
            for row, col in enumerate(locs):
                out[col] = arr[row]
        else:
            # 1D block occupies exactly one column slot.
            out[locs[0]] = arr
    # Every column slot is filled by the loop above, so no None remains,
    # hence the ignore on the declared element type.
    return out  # type: ignore[return-value]
|
Used in the JSON C code to access column arrays.
This optimizes compared to using `iget_values` by converting each block's
values to a np.ndarray only once up front.
Warning! This doesn't handle Copy-on-Write, so should be used with
caution (current use case of consuming this in the JSON code is fine).
|
python
|
pandas/core/internals/managers.py
| 1,208
|
[
"self"
] |
list[np.ndarray]
| true
| 5
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
proxiedUserInterfaces
|
public static Class<?>[] proxiedUserInterfaces(Object proxy) {
    // All interfaces implemented by the proxy class, in declaration order.
    Class<?>[] allInterfaces = proxy.getClass().getInterfaces();
    // Count the framework interfaces the proxy factory appended at the end.
    int frameworkIfcCount = 0;
    if (proxy instanceof SpringProxy) {
        frameworkIfcCount++;
    }
    if (proxy instanceof Advised) {
        frameworkIfcCount++;
    }
    if (proxy instanceof DecoratingProxy) {
        frameworkIfcCount++;
    }
    // Framework interfaces come last, so drop them from the tail of the array.
    Class<?>[] userInterfaces = Arrays.copyOf(allInterfaces, allInterfaces.length - frameworkIfcCount);
    Assert.notEmpty(userInterfaces, "JDK proxy must implement one or more interfaces");
    return userInterfaces;
}
|
Extract the user-specified interfaces that the given proxy implements,
i.e. all non-Advised interfaces that the proxy implements.
@param proxy the proxy to analyze (usually a JDK dynamic proxy)
@return all user-specified interfaces that the proxy implements,
in the original order (never {@code null} or empty)
@see Advised
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AopProxyUtils.java
| 205
|
[
"proxy"
] | true
| 4
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
conformsTo
|
/**
 * Checks if `object` conforms to `source` by invoking the predicate
 * properties of `source` with the corresponding property values of `object`.
 *
 * @param {Object} object The object to inspect.
 * @param {Object} source The object of property predicates to conform to.
 * @returns {boolean} Returns `true` if `object` conforms, else `false`.
 */
function conformsTo(object, source) {
  if (source == null) {
    return true;
  }
  return baseConformsTo(object, source, keys(source));
}
|
Checks if `object` conforms to `source` by invoking the predicate
properties of `source` with the corresponding property values of `object`.
**Note:** This method is equivalent to `_.conforms` when `source` is
partially applied.
@static
@memberOf _
@since 4.14.0
@category Lang
@param {Object} object The object to inspect.
@param {Object} source The object of property predicates to conform to.
@returns {boolean} Returns `true` if `object` conforms, else `false`.
@example
var object = { 'a': 1, 'b': 2 };
_.conformsTo(object, { 'b': function(n) { return n > 1; } });
// => true
_.conformsTo(object, { 'b': function(n) { return n > 2; } });
// => false
|
javascript
|
lodash.js
| 11,255
|
[
"object",
"source"
] | false
| 2
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
length
|
def length(self) -> Index:
    """
    Return an Index with the length of each Interval.

    Each entry is the difference between the interval's `right` and `left`
    bounds, which is useful whenever interval size matters (e.g. in
    time-series or spatial analysis).

    See Also
    --------
    arrays.IntervalArray.left : Left endpoints as an Index.
    arrays.IntervalArray.right : Right endpoints as an Index.
    arrays.IntervalArray.mid : Midpoints as an Index.

    Examples
    --------
    >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
    >>> interv_arr.length
    Index([1, 4], dtype='int64')
    """
    left, right = self.left, self.right
    return right - left
|
Return an Index with entries denoting the length of each Interval.
The length of an interval is calculated as the difference between
its `right` and `left` bounds. This property is particularly useful
when working with intervals where the size of the interval is an important
attribute, such as in time-series analysis or spatial data analysis.
See Also
--------
arrays.IntervalArray.left : Return the left endpoints of each Interval in
the IntervalArray as an Index.
arrays.IntervalArray.right : Return the right endpoints of each Interval in
the IntervalArray as an Index.
arrays.IntervalArray.mid : Return the midpoint of each Interval in the
IntervalArray as an Index.
Examples
--------
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.length
Index([1, 4], dtype='int64')
|
python
|
pandas/core/arrays/interval.py
| 1,408
|
[
"self"
] |
Index
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
runtime
|
def runtime() -> str | None:
"""Determine the runtime type based on available backends.
Returns:
"CUDA" if CUDA is available, "HIP" if HIP is available, None otherwise.
"""
return "CUDA" if torch.version.cuda else "HIP" if torch.version.hip else None
|
Determine the runtime type based on available backends.
Returns:
"CUDA" if CUDA is available, "HIP" if HIP is available, None otherwise.
|
python
|
torch/_inductor/runtime/caching/context.py
| 168
|
[] |
str | None
| true
| 3
| 7.6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
parseTimeValue
|
/**
 * Parses a duration string with a unit suffix (e.g. "30s", "500ms", "2d") into a {@link TimeValue}.
 *
 * @param sValue Value to parse, which may be {@code null}.
 * @param defaultValue Value to return if {@code sValue} is {@code null}.
 * @param settingName Name of the parameter or setting; included in the exception message on invalid input,
 *        otherwise unused.
 * @return The {@link TimeValue} which the input string represents, or {@code defaultValue} if the input is {@code null}.
 */
public static TimeValue parseTimeValue(@Nullable String sValue, TimeValue defaultValue, String settingName) {
    settingName = Objects.requireNonNull(settingName);
    if (sValue == null) {
        return defaultValue;
    }
    // Unit suffixes are matched on the lower-cased, trimmed input; longer suffixes are
    // tested before shorter ones so that e.g. "ms" is not mistaken for "s".
    final String normalized = sValue.toLowerCase(Locale.ROOT).trim();
    if (normalized.endsWith("nanos")) {
        return TimeValue.timeValueNanos(parse(sValue, normalized, "nanos", settingName));
    } else if (normalized.endsWith("micros")) {
        return new TimeValue(parse(sValue, normalized, "micros", settingName), TimeUnit.MICROSECONDS);
    } else if (normalized.endsWith("ms")) {
        return TimeValue.timeValueMillis(parse(sValue, normalized, "ms", settingName));
    } else if (normalized.endsWith("s")) {
        return TimeValue.timeValueSeconds(parse(sValue, normalized, "s", settingName));
    } else if (sValue.endsWith("m")) {
        // parsing minutes should be case-sensitive as 'M' means "months", not "minutes"; this is the only special case.
        return TimeValue.timeValueMinutes(parse(sValue, normalized, "m", settingName));
    } else if (normalized.endsWith("h")) {
        return TimeValue.timeValueHours(parse(sValue, normalized, "h", settingName));
    } else if (normalized.endsWith("d")) {
        return new TimeValue(parse(sValue, normalized, "d", settingName), TimeUnit.DAYS);
    } else if (normalized.matches("-0*1")) {
        // "-1" (with any leading zeros) is the unit-less sentinel value.
        return TimeValue.MINUS_ONE;
    } else if (normalized.matches("0+")) {
        // A bare zero needs no unit.
        return TimeValue.ZERO;
    } else {
        // Missing units:
        throw new IllegalArgumentException(
            "failed to parse setting [" + settingName + "] with value [" + sValue + "] as a time value: unit is missing or unrecognized"
        );
    }
}
|
@param sValue Value to parse, which may be {@code null}.
@param defaultValue Value to return if {@code sValue} is {@code null}.
@param settingName Name of the parameter or setting. On invalid input, this value is included in the exception message. Otherwise,
this parameter is unused.
@return The {@link TimeValue} which the input string represents, or {@code defaultValue} if the input is {@code null}.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
| 376
|
[
"sValue",
"defaultValue",
"settingName"
] |
TimeValue
| true
| 11
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
toString
|
/**
 * Reads all characters from a {@link Readable} object into a {@link String}.
 * Does not close the {@code Readable}.
 *
 * @param r the object to read from
 * @return a string containing all the characters
 * @throws IOException if an I/O error occurs
 */
public static String toString(Readable r) throws IOException {
    return toStringBuilder(r).toString();
}
|
Reads all characters from a {@link Readable} object into a {@link String}. Does not close the
{@code Readable}.
<p><b>Java 25+ users:</b> If the input is a {@link Reader}, prefer {@link
Reader#readAllAsString()}.
@param r the object to read from
@return a string containing all the characters
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/CharStreams.java
| 162
|
[
"r"
] |
String
| true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
where
|
def where(condition, x=None, y=None, /):
    """
    where(condition, [x, y], /)

    Return elements chosen from `x` or `y` depending on `condition`.

    .. note::
        When only `condition` is provided, this is shorthand for
        ``np.asarray(condition).nonzero()``; prefer calling `nonzero`
        directly, as it behaves correctly for subclasses. The rest of this
        documentation covers only the three-argument case.

    Parameters
    ----------
    condition : array_like, bool
        Where True, yield `x`, otherwise yield `y`.
    x, y : array_like
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape.

    Returns
    -------
    out : ndarray
        An array with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    See Also
    --------
    choose
    nonzero : The function that is called when x and y are omitted

    Notes
    -----
    If all the arrays are 1-D, `where` is equivalent to::

        [xv if c else yv
         for c, xv, yv in zip(condition, x, y)]

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(10)
    >>> np.where(a < 5, a, 10*a)
    array([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90])

    The shapes of x, y, and the condition are broadcast together:

    >>> x, y = np.ogrid[:3, :4]
    >>> np.where(x < y, x, 10 + y)  # both x and 10+y are broadcast
    array([[10,  0,  0,  0],
           [10, 11,  1,  1],
           [10, 11, 12,  2]])

    >>> a = np.array([[0, 1, 2],
    ...               [0, 2, 4],
    ...               [0, 3, 6]])
    >>> np.where(a < 4, a, -1)  # -1 is broadcast
    array([[ 0,  1,  2],
           [ 0,  2, -1],
           [ 0,  3, -1]])
    """
    # Stub body used for documentation/dispatch purposes only.
    args = (condition, x, y)
    return args
|
where(condition, [x, y], /)
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
documentation covers only the case where all three arguments are
provided.
Parameters
----------
condition : array_like, bool
Where True, yield `x`, otherwise yield `y`.
x, y : array_like
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
See Also
--------
choose
nonzero : The function that is called when x and y are omitted
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> import numpy as np
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.where(a < 5, a, 10*a)
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
This can be used on multidimensional arrays too:
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = np.ogrid[:3, :4]
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]])
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0, 1, 2],
[ 0, 2, -1],
[ 0, 3, -1]])
|
python
|
numpy/_core/multiarray.py
| 404
|
[
"condition",
"x",
"y"
] | false
| 1
| 6.4
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
transformAndEmitBreakStatement
|
function transformAndEmitBreakStatement(node: BreakStatement): void {
    const labelText = node.label ? idText(node.label) : undefined;
    const target = findBreakTarget(labelText);
    if (target <= 0) {
        // invalid break without a containing loop, switch, or labeled statement. Leave the node as is, per #17875.
        emitStatement(node);
    }
    else {
        emitBreak(target, /*location*/ node);
    }
}
|
Transforms and emits a BreakStatement, emitting a break instruction when a
valid break target exists and otherwise leaving the statement as is.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/generators.ts
| 1,764
|
[
"node"
] | true
| 4
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
write_to_cache_file
|
def write_to_cache_file(param_name: str, param_value: str, check_allowed_values: bool = True) -> None:
    """
    Write a parameter value to the build cache.

    If asked, it also checks whether the value is allowed for the parameter
    and exits the process (exit code 1) instead of writing when it is not.

    :param param_name: name of the parameter
    :param param_value: new value for the parameter
    :param check_allowed_values: whether to fail if the parameter value is not allowed for that name.
    """
    allowed = False
    allowed_values = None
    if check_allowed_values:
        allowed, allowed_values = check_if_values_allowed(param_name, param_value)
    if allowed or not check_allowed_values:
        # Cache entries are dot-prefixed files named after the parameter.
        cache_path = Path(BUILD_CACHE_PATH, f".{param_name}")
        cache_path.parent.mkdir(parents=True, exist_ok=True)
        cache_path.write_text(param_value)
    else:
        get_console().print(f"[cyan]You have sent the {param_value} for {param_name}")
        get_console().print(f"[cyan]Allowed value for the {param_name} are {allowed_values}")
        get_console().print("[cyan]Provide one of the supported params. Write to cache dir failed")
        sys.exit(1)
|
Writes a value to the cache. If asked, it can also check whether the value is allowed for the parameter and exit
in case the value is not allowed for that parameter instead of writing it.
:param param_name: name of the parameter
:param param_value: new value for the parameter
:param check_allowed_values: whether to fail if the parameter value is not allowed for that name.
|
python
|
dev/breeze/src/airflow_breeze/utils/cache.py
| 51
|
[
"param_name",
"param_value",
"check_allowed_values"
] |
None
| true
| 5
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
postReadCleanup
|
/**
 * Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup
 * is not observed after a sufficient number of reads, try cleaning up from the read thread.
 */
void postReadCleanup() {
    // Cheap periodic trigger: cleanUp() runs only when the incremented read
    // counter masks to zero against DRAIN_THRESHOLD.
    if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
        cleanUp();
    }
}
|
Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup
is not observed after a sufficient number of reads, try cleaning up from the read thread.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 3,355
|
[] |
void
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
of
|
/**
 * Defends against a null stream.
 *
 * @param <E> the type of elements in the collection.
 * @param stream the stream, or null.
 * @return the stream itself, or {@link Stream#empty()} if the stream is null.
 */
private static <E> Stream<E> of(final Stream<E> stream) {
    if (stream == null) {
        return Stream.empty();
    }
    return stream;
}
|
Returns the stream or {@link Stream#empty()} if the stream is null.
@param <E> the type of elements in the collection.
@param stream the stream to stream or null.
@return the stream or {@link Stream#empty()} if the stream is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 723
|
[
"stream"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
visitExportSpecifier
|
function visitExportSpecifier(node: ExportSpecifier): VisitResult<ExportSpecifier> | undefined {
    // Elide an export specifier if it does not reference a value.
    if (node.isTypeOnly) {
        return undefined;
    }
    const referencesValue = compilerOptions.verbatimModuleSyntax || resolver.isValueAliasDeclaration(node);
    return referencesValue ? node : undefined;
}
|
Visits an export specifier, eliding it if it does not resolve to a value.
@param node The export specifier node.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,401
|
[
"node"
] | true
| 4
| 6.8
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
checkAssignmentMatchedSubscription
|
/**
 * Checks whether every assigned partition is covered by the current subscription:
 * the subscribed pattern when one is set, otherwise the explicit topic set.
 *
 * @param assignments the partitions received in the assignment
 * @return true if all assigned partitions match the subscription, false otherwise
 */
public synchronized boolean checkAssignmentMatchedSubscription(Collection<TopicPartition> assignments) {
    for (TopicPartition tp : assignments) {
        if (this.subscribedPattern == null) {
            // Explicit subscription: the partition's topic must be subscribed.
            if (!this.subscription.contains(tp.topic())) {
                log.info("Assigned partition {} for non-subscribed topic; subscription is {}", tp, this.subscription);
                return false;
            }
        } else if (!this.subscribedPattern.matcher(tp.topic()).matches()) {
            // Pattern subscription: the topic must match the regex.
            log.info("Assigned partition {} for non-subscribed topic regex pattern; subscription pattern is {}",
                tp,
                this.subscribedPattern);
            return false;
        }
    }
    return true;
}
|
Check if an assignment received while using the classic group protocol matches the subscription.
Note that this only considers the subscribedPattern because this functionality is only used under the
classic protocol, where subscribedRe2JPattern is not supported.
@return true if assignments matches subscription, otherwise false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 289
|
[
"assignments"
] | true
| 4
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
maybeLeaderEpoch
|
/**
 * Wraps a leader epoch in an Optional, treating the
 * {@code RecordBatch.NO_PARTITION_LEADER_EPOCH} sentinel as absent.
 *
 * @param leaderEpoch the raw leader epoch value
 * @return the epoch, or {@link Optional#empty()} for the sentinel value
 */
private Optional<Integer> maybeLeaderEpoch(final int leaderEpoch) {
    if (leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH) {
        return Optional.empty();
    }
    return Optional.of(leaderEpoch);
}
|
Wraps the given leader epoch in an Optional, mapping the
NO_PARTITION_LEADER_EPOCH sentinel to an empty Optional.
@return the epoch, or Optional.empty() for the sentinel value.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java
| 388
|
[
"leaderEpoch"
] | true
| 2
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
stream
|
/**
 * Converts the given {@link Stream} into a {@link FailableStream}.
 * Delegates directly to {@link #failableStream(Stream)}.
 *
 * @param <T> The streams element type.
 * @param stream The stream, which is being converted.
 * @return The {@link FailableStream}, which has been created by converting the stream.
 * @deprecated Use {@link #failableStream(Stream)}.
 */
@Deprecated
public static <T> FailableStream<T> stream(final Stream<T> stream) {
    return failableStream(stream);
}
|
Converts the given {@link Stream stream} into a {@link FailableStream}. This is basically a simplified, reduced
version of the {@link Stream} class, with the same underlying element stream, except that failable objects, like
{@link FailablePredicate}, {@link FailableFunction}, or {@link FailableConsumer} may be applied, instead of
{@link Predicate}, {@link Function}, or {@link Consumer}. The idea is to rewrite a code snippet like this:
<pre>
{@code
final List<O> list;
final Method m;
final Function<O, String> mapper = (o) -> {
try {
return (String) m.invoke(o);
} catch (Throwable t) {
throw Failable.rethrow(t);
}
};
final List<String> strList = list.stream().map(mapper).collect(Collectors.toList());
}
</pre>
as follows:
<pre>
{@code
final List<O> list;
final Method m;
final List<String> strList = Failable.stream(list.stream()).map((o) -> (String) m.invoke(o)).collect(Collectors.toList());
}
</pre>
While the second version may not be <em>quite</em> as efficient (because it depends on the creation of additional,
intermediate objects, of type FailableStream), it is much more concise, and readable, and meets the spirit of Lambdas
better than the first version.
@param <T> The streams element type.
@param stream The stream, which is being converted.
@return The {@link FailableStream}, which has been created by converting the stream.
@deprecated Use {@link #failableStream(Stream)}.
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 825
|
[
"stream"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
finalizeDeferredProperties
|
/**
 * Applies every recorded deferred property, first wrapping List and Map
 * values in their managed counterparts, then clears the pending set.
 */
private void finalizeDeferredProperties() {
    for (DeferredProperty deferred : this.deferredProperties.values()) {
        Object value = deferred.value;
        if (value instanceof List<?> list) {
            deferred.value = manageListIfNecessary(list);
        }
        else if (value instanceof Map<?, ?> map) {
            deferred.value = manageMapIfNecessary(map);
        }
        deferred.apply();
    }
    this.deferredProperties.clear();
}
|
Wraps any deferred List or Map property values in their managed
counterparts, applies each deferred property, and clears the pending set.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
| 434
|
[] |
void
| true
| 3
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_get_metadata_for_step
|
def _get_metadata_for_step(self, *, step_idx, step_params, all_params):
    """Get params (metadata) for the step at `step_idx`.
    This transforms the metadata up to this step if required, which is
    indicated by the `transform_input` parameter.
    If a param in `step_params` is included in the `transform_input` list,
    it will be transformed.
    Parameters
    ----------
    step_idx : int
        Index of the step in the pipeline.
    step_params : dict
        Parameters specific to the step. These are routed parameters, e.g.
        `routed_params[name]`. If a parameter name here is included in the
        `pipeline.transform_input`, then it will be transformed. Note that
        these parameters are *after* routing, so the aliases are already
        resolved.
    all_params : dict
        All parameters passed by the user. Here this is used to call
        `transform` on the slice of the pipeline itself.
    Returns
    -------
    dict
        Parameters to be passed to the step. The ones which should be
        transformed are transformed.
    """
    if (
        self.transform_input is None
        or not all_params
        or not step_params
        or step_idx == 0
    ):
        # we only need to process step_params if transform_input is set
        # and metadata is given by the user.
        return step_params
    # Slice of the pipeline up to (excluding) this step; its `transform`
    # output is what the metadata must be transformed through.
    sub_pipeline = self[:step_idx]
    sub_metadata_routing = get_routing_for_object(sub_pipeline)
    # here we get the metadata required by sub_pipeline.transform
    transform_params = {
        key: value
        for key, value in all_params.items()
        if key
        in sub_metadata_routing.consumes(
            method="transform", params=all_params.keys()
        )
    }
    transformed_params = dict()  # this is to be returned
    transformed_cache = dict()  # used to transform each param once
    # `step_params` is the output of `process_routing`, so it has a dict for each
    # method (e.g. fit, transform, predict), which are the args to be passed to
    # those methods. We need to transform the parameters which are in the
    # `transform_input`, before returning these dicts.
    for method, method_params in step_params.items():
        transformed_params[method] = Bunch()
        for param_name, param_value in method_params.items():
            # An example of `(param_name, param_value)` is
            # `('sample_weight', array([0.5, 0.5, ...]))`
            if param_name in self.transform_input:
                # This parameter now needs to be transformed by the sub_pipeline, to
                # this step. We cache these computations to avoid repeating them.
                transformed_params[method][param_name] = _cached_transform(
                    sub_pipeline,
                    cache=transformed_cache,
                    param_name=param_name,
                    param_value=param_value,
                    transform_params=transform_params,
                )
            else:
                # Parameters not listed in transform_input pass through unchanged.
                transformed_params[method][param_name] = param_value
    return transformed_params
|
Get params (metadata) for step `name`.
This transforms the metadata up to this step if required, which is
indicated by the `transform_input` parameter.
If a param in `step_params` is included in the `transform_input` list,
it will be transformed.
Parameters
----------
step_idx : int
Index of the step in the pipeline.
step_params : dict
Parameters specific to the step. These are routed parameters, e.g.
`routed_params[name]`. If a parameter name here is included in the
`pipeline.transform_input`, then it will be transformed. Note that
these parameters are *after* routing, so the aliases are already
resolved.
all_params : dict
All parameters passed by the user. Here this is used to call
`transform` on the slice of the pipeline itself.
Returns
-------
dict
Parameters to be passed to the step. The ones which should be
transformed are transformed.
|
python
|
sklearn/pipeline.py
| 434
|
[
"self",
"step_idx",
"step_params",
"all_params"
] | false
| 9
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_get_output_storages
|
def _get_output_storages(node: fx.Node) -> OrderedSet[StorageKey]:
    """
    Collect the storages backing a node's output value(s).

    Arbitrarily nested output structures are handled via pytree traversal.
    """
    val = node.meta.get("val")
    if val is None:
        return OrderedSet()
    found: OrderedSet[StorageKey] = OrderedSet()

    def record(tensor: torch._subclasses.FakeTensor) -> None:
        found.add(StorageKey(tensor.untyped_storage(), tensor.device))

    # tree_map_only visits only the FakeTensors inside nested containers.
    tree_map_only(torch._subclasses.FakeTensor, record, val)
    return found
|
Get all storages from a node's outputs.
Uses pytree to handle arbitrary nested structures.
|
python
|
torch/_inductor/fx_passes/memory_estimator.py
| 97
|
[
"node"
] |
OrderedSet[StorageKey]
| true
| 2
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
entrySet
|
/**
 * Returns the entry set, lazily creating it via {@code createEntrySet()} on
 * first access and caching it in {@code entrySet} for subsequent calls.
 */
@Override
public Set<Entry<K, V>> entrySet() {
    Set<Entry<K, V>> result = entrySet;
    return (result == null) ? entrySet = createEntrySet() : result;
}
|
Returns the entry set, lazily creating it via {@code createEntrySet()} on
first access and caching it for subsequent calls.
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 3,523
|
[] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
configure
|
/**
 * Applies common settings to the given registration: the async-supported
 * flag and, when any are present, the configured init parameters.
 *
 * @param registration the registration to configure
 */
protected void configure(D registration) {
    registration.setAsyncSupported(this.asyncSupported);
    if (!this.initParameters.isEmpty()) {
        registration.setInitParameters(this.initParameters);
    }
}
|
Configures the given registration with this bean's async-supported flag and,
when present, its init parameters.
@param registration the registration to configure
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/DynamicRegistrationBean.java
| 145
|
[
"registration"
] |
void
| true
| 2
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
parseKeywordAndNoDot
|
function parseKeywordAndNoDot(): TypeNode | undefined {
    // Parse the keyword as a type node, but reject it when followed by a dot,
    // since a dotted name is not a bare keyword type.
    const keywordNode = parseTokenNode<TypeNode>();
    if (token() === SyntaxKind.DotToken) {
        return undefined;
    }
    return keywordNode;
}
|
Parses the current keyword token as a type node, returning undefined when the
keyword is followed by a dot (a qualified name rather than a bare keyword type).
|
typescript
|
src/compiler/parser.ts
| 4,523
|
[] | true
| 2
| 6.64
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
timeoutPendingCalls
|
/**
 * Time out the elements in the pendingCalls list which are expired.
 *
 * @param processor The timeout processor.
 */
private void timeoutPendingCalls(TimeoutProcessor processor) {
    final int expired = processor.handleTimeouts(pendingCalls, "Timed out waiting for a node assignment.");
    if (expired > 0) {
        log.debug("Timed out {} pending calls.", expired);
    }
}
|
Time out the elements in the pendingCalls list which are expired.
@param processor The timeout processor.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,128
|
[
"processor"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
render_log_filename
|
def render_log_filename(
    self,
    ti: TaskInstance | TaskInstanceHistory,
    try_number: int | None = None,
    *,
    session: Session = NEW_SESSION,
) -> str:
    """
    Render the log attachment filename.

    :param ti: The task instance
    :param try_number: The task try number; all tries when None
    """
    dagrun = ti.get_dagrun(session=session)
    log_template = dagrun.get_log_template(session=session)
    effective_try = "all" if try_number is None else try_number
    return render_log_filename(
        ti=ti,
        try_number=effective_try,
        filename_template=log_template.filename,
    )
|
Render the log attachment filename.
:param ti: The task instance
:param try_number: The task try number
|
python
|
airflow-core/src/airflow/utils/log/log_reader.py
| 195
|
[
"self",
"ti",
"try_number",
"session"
] |
str
| true
| 2
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
set
|
def set(self, immutable=None, **options):
    """Merge arbitrary execution options into this signature.

    Equivalent to ``.options.update(…)``; *immutable*, when given, is first
    forwarded to ``set_immutable``.

    Returns:
        Signature: This is a chaining method call
        (i.e., it will return ``self``).
    """
    if immutable is not None:
        self.set_immutable(immutable)
    self.options.update(options)
    return self
|
Set arbitrary execution options (same as ``.options.update(…)``).
Returns:
Signature: This is a chaining method call
(i.e., it will return ``self``).
|
python
|
celery/canvas.py
| 538
|
[
"self",
"immutable"
] | false
| 2
| 6.8
|
celery/celery
| 27,741
|
unknown
| false
|
|
refresh_model_names_and_batch_sizes
|
def refresh_model_names_and_batch_sizes():
    """
    This function reads the HF Fx tracer supported models and finds the largest
    batch size that could fit on the GPU with PyTorch eager.
    The resulting data is written in huggingface_models_list.txt.
    Note - We only need to run this function if we believe that HF Fx tracer now
    supports more models.
    """
    import transformers.utils.fx as hf_fx
    family = {}
    lm_seen = set()
    family_seen = set()
    # Group supported classes by model family (the part before "For"), keeping
    # at most one representative class per task category within each family.
    for cls_name in hf_fx._SUPPORTED_MODELS:
        if "For" not in cls_name:
            continue
        model_cls = get_module_cls_by_model_name(cls_name)
        # TODO: AttributeError: '*Config' object has no attribute 'vocab_size'
        if model_cls in [
            CLIPModel,
            CLIPVisionModel,
            # SwinForImageClassification,
            # SwinForImageClassification,
            # SwinForMaskedImageModeling,
            # SwinModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
        ]:
            continue
        # TODO: AssertionError: Padding_idx must be within num_embeddings
        if model_cls in [MarianForCausalLM, MarianMTModel, MarianModel]:
            continue
        # TODO: "model is not supported yet" from HFTracer
        if model_cls in [HubertForSequenceClassification]:
            continue
        # TODO: shape mismatch in loss calculation
        if model_cls in [LxmertForQuestionAnswering]:
            continue
        family_name = cls_name.split("For")[0]
        if family_name not in family:
            family[family_name] = []
        # One language-modeling class per family; one classification/generation/QA
        # class per family; all image-classification classes are kept.
        if cls_name.endswith(("MaskedLM", "CausalLM")) and family_name not in lm_seen:
            family[family_name].append(cls_name)
            lm_seen.add(family_name)
        elif (
            cls_name.endswith(
                ("SequenceClassification", "ConditionalGeneration", "QuestionAnswering")
            )
            and family_name not in family_seen
        ):
            family[family_name].append(cls_name)
            family_seen.add(family_name)
        elif cls_name.endswith("ImageClassification"):
            family[family_name].append(cls_name)
    chosen_models = set()
    for members in family.values():
        chosen_models.update(set(members))
    # Add the EXTRA_MODELS
    chosen_models.update(set(EXTRA_MODELS.keys()))
    # Re-invoke this benchmark script per model to probe its largest batch size,
    # appending results to MODELS_FILENAME; failures are logged and skipped.
    for model_name in sorted(chosen_models):
        try:
            subprocess.check_call(
                [sys.executable]
                + sys.argv
                + ["--find-batch-sizes"]
                + [f"--only={model_name}"]
                + [f"--output={MODELS_FILENAME}"]
            )
        except subprocess.SubprocessError:
            log.warning(f"Failed to find suitable batch size for {model_name}")  # noqa: G004
|
This function reads the HF Fx tracer supported models and finds the largest
batch size that could fit on the GPU with PyTorch eager.
The resulting data is written in huggingface_models_list.txt.
Note - We only need to run this function if we believe that HF Fx tracer now
supports more models.
|
python
|
benchmarks/dynamo/huggingface.py
| 569
|
[] | false
| 15
| 6.16
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
toString
|
/**
 * @return Regular expression pattern compatible with RE2/J.
 */
@Override
public String toString() {
    return pattern;
}
|
@return Regular expression pattern compatible with RE2/J.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/SubscriptionPattern.java
| 44
|
[] |
String
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
validIndex
|
/**
 * Validates that the index is within the bounds of the argument
 * character sequence; otherwise throwing an exception. Delegates to the
 * message-taking overload with the default message.
 *
 * @param <T> the character sequence type.
 * @param chars the character sequence to check, validated not null by this method.
 * @param index the index to check.
 * @return the validated character sequence (never {@code null} for method chaining).
 * @throws NullPointerException if the character sequence is {@code null}.
 * @throws IndexOutOfBoundsException if the index is invalid.
 * @see #validIndex(CharSequence, int, String, Object...)
 */
public static <T extends CharSequence> T validIndex(final T chars, final int index) {
    return validIndex(chars, index, DEFAULT_VALID_INDEX_CHAR_SEQUENCE_EX_MESSAGE, Integer.valueOf(index));
}
|
Validates that the index is within the bounds of the argument
character sequence; otherwise throwing an exception.
<pre>Validate.validIndex(myStr, 2);</pre>
<p>If the character sequence is {@code null}, then the message
of the exception is "The validated object is
null".</p>
<p>If the index is invalid, then the message of the exception
is "The validated character sequence index is invalid: "
followed by the index.</p>
@param <T> the character sequence type.
@param chars the character sequence to check, validated not null by this method.
@param index the index to check.
@return the validated character sequence (never {@code null} for method chaining).
@throws NullPointerException if the character sequence is {@code null}.
@throws IndexOutOfBoundsException if the index is invalid.
@see #validIndex(CharSequence, int, String, Object...)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 1,114
|
[
"chars",
"index"
] |
T
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createDefaultEditors
|
private void createDefaultEditors() {
this.defaultEditors = new HashMap<>(64);
// Simple editors, without parameterization capabilities.
// The JDK does not contain a default editor for any of these target types.
this.defaultEditors.put(Charset.class, new CharsetEditor());
this.defaultEditors.put(Class.class, new ClassEditor());
this.defaultEditors.put(Class[].class, new ClassArrayEditor());
this.defaultEditors.put(Currency.class, new CurrencyEditor());
this.defaultEditors.put(File.class, new FileEditor());
this.defaultEditors.put(InputStream.class, new InputStreamEditor());
this.defaultEditors.put(InputSource.class, new InputSourceEditor());
this.defaultEditors.put(Locale.class, new LocaleEditor());
this.defaultEditors.put(Path.class, new PathEditor());
this.defaultEditors.put(Pattern.class, new PatternEditor());
this.defaultEditors.put(Properties.class, new PropertiesEditor());
this.defaultEditors.put(Reader.class, new ReaderEditor());
this.defaultEditors.put(Resource[].class, new ResourceArrayPropertyEditor());
this.defaultEditors.put(TimeZone.class, new TimeZoneEditor());
this.defaultEditors.put(URI.class, new URIEditor());
this.defaultEditors.put(URL.class, new URLEditor());
this.defaultEditors.put(UUID.class, new UUIDEditor());
this.defaultEditors.put(ZoneId.class, new ZoneIdEditor());
// Default instances of collection editors.
// Can be overridden by registering custom instances of those as custom editors.
this.defaultEditors.put(Collection.class, new CustomCollectionEditor(Collection.class));
this.defaultEditors.put(Set.class, new CustomCollectionEditor(Set.class));
this.defaultEditors.put(SortedSet.class, new CustomCollectionEditor(SortedSet.class));
this.defaultEditors.put(List.class, new CustomCollectionEditor(List.class));
this.defaultEditors.put(SortedMap.class, new CustomMapEditor(SortedMap.class));
// Default editors for primitive arrays.
this.defaultEditors.put(byte[].class, new ByteArrayPropertyEditor());
this.defaultEditors.put(char[].class, new CharArrayPropertyEditor());
// The JDK does not contain a default editor for char!
this.defaultEditors.put(char.class, new CharacterEditor(false));
this.defaultEditors.put(Character.class, new CharacterEditor(true));
// Spring's CustomBooleanEditor accepts more flag values than the JDK's default editor.
this.defaultEditors.put(boolean.class, new CustomBooleanEditor(false));
this.defaultEditors.put(Boolean.class, new CustomBooleanEditor(true));
// The JDK does not contain default editors for number wrapper types!
// Override JDK primitive number editors with our own CustomNumberEditor.
this.defaultEditors.put(byte.class, new CustomNumberEditor(Byte.class, false));
this.defaultEditors.put(Byte.class, new CustomNumberEditor(Byte.class, true));
this.defaultEditors.put(short.class, new CustomNumberEditor(Short.class, false));
this.defaultEditors.put(Short.class, new CustomNumberEditor(Short.class, true));
this.defaultEditors.put(int.class, new CustomNumberEditor(Integer.class, false));
this.defaultEditors.put(Integer.class, new CustomNumberEditor(Integer.class, true));
this.defaultEditors.put(long.class, new CustomNumberEditor(Long.class, false));
this.defaultEditors.put(Long.class, new CustomNumberEditor(Long.class, true));
this.defaultEditors.put(float.class, new CustomNumberEditor(Float.class, false));
this.defaultEditors.put(Float.class, new CustomNumberEditor(Float.class, true));
this.defaultEditors.put(double.class, new CustomNumberEditor(Double.class, false));
this.defaultEditors.put(Double.class, new CustomNumberEditor(Double.class, true));
this.defaultEditors.put(BigDecimal.class, new CustomNumberEditor(BigDecimal.class, true));
this.defaultEditors.put(BigInteger.class, new CustomNumberEditor(BigInteger.class, true));
// Only register config value editors if explicitly requested.
if (this.configValueEditorsActive) {
StringArrayPropertyEditor sae = new StringArrayPropertyEditor();
this.defaultEditors.put(String[].class, sae);
this.defaultEditors.put(short[].class, sae);
this.defaultEditors.put(int[].class, sae);
this.defaultEditors.put(long[].class, sae);
}
}
|
Actually register the default editors for this registry instance.
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyEditorRegistrySupport.java
| 212
|
[] |
void
| true
| 2
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
checkMergedBeanDefinition
|
protected void checkMergedBeanDefinition(RootBeanDefinition mbd, String beanName, @Nullable Object @Nullable [] args) {
if (mbd.isAbstract()) {
throw new BeanIsAbstractException(beanName);
}
}
|
Check the given merged bean definition,
potentially throwing validation exceptions.
@param mbd the merged bean definition to check
@param beanName the name of the bean
@param args the arguments for bean creation, if any
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,512
|
[
"mbd",
"beanName",
"args"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isLeaderKnownToHaveChanged
|
private boolean isLeaderKnownToHaveChanged(int nodeId, TopicIdPartition topicIdPartition) {
Optional<Node> leaderNode = metadata.currentLeader(topicIdPartition.topicPartition()).leader;
if (leaderNode.isPresent()) {
if (leaderNode.get().id() != nodeId) {
log.debug("Node {} is no longer the leader for partition {}, failing acknowledgements", nodeId, topicIdPartition);
return true;
}
} else {
log.debug("No leader found for partition {}", topicIdPartition);
metadata.requestUpdate(false);
return false;
}
return false;
}
|
The method checks whether the leader for a topicIdPartition has changed.
@param nodeId The previous leader for the partition.
@param topicIdPartition The TopicIdPartition to check.
@return Returns true if leader information is available and leader has changed.
If the leader information is not available or if the leader has not changed, it returns false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 752
|
[
"nodeId",
"topicIdPartition"
] | true
| 3
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
afterPropertiesSet
|
@Override
public void afterPropertiesSet() throws Exception {
prepare();
if (this.singleton) {
this.initialized = true;
this.singletonObject = invokeWithTargetException();
}
}
|
Set if a singleton should be created, or a new object on each
{@link #getObject()} request otherwise. Default is "true".
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/MethodInvokingFactoryBean.java
| 103
|
[] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getTypeForFactoryBean
|
protected ResolvableType getTypeForFactoryBean(String beanName, RootBeanDefinition mbd, boolean allowInit) {
try {
ResolvableType result = getTypeForFactoryBeanFromAttributes(mbd);
if (result != ResolvableType.NONE) {
return result;
}
}
catch (IllegalArgumentException ex) {
throw new BeanDefinitionStoreException(mbd.getResourceDescription(), beanName,
String.valueOf(ex.getMessage()));
}
if (allowInit && mbd.isSingleton()) {
try {
FactoryBean<?> factoryBean = doGetBean(FACTORY_BEAN_PREFIX + beanName, FactoryBean.class, null, true);
Class<?> objectType = getTypeForFactoryBean(factoryBean);
return (objectType != null ? ResolvableType.forClass(objectType) : ResolvableType.NONE);
}
catch (BeanCreationException ex) {
if (ex.contains(BeanCurrentlyInCreationException.class)) {
logger.trace(LogMessage.format("Bean currently in creation on FactoryBean type check: %s", ex));
}
else if (mbd.isLazyInit()) {
logger.trace(LogMessage.format("Bean creation exception on lazy FactoryBean type check: %s", ex));
}
else {
logger.debug(LogMessage.format("Bean creation exception on eager FactoryBean type check: %s", ex));
}
onSuppressedException(ex);
}
}
// FactoryBean type not resolvable
return ResolvableType.NONE;
}
|
Determine the bean type for the given FactoryBean definition, as far as possible.
Only called if there is no singleton instance registered for the target bean
already. The implementation is allowed to instantiate the target factory bean if
{@code allowInit} is {@code true} and the type cannot be determined another way;
otherwise it is restricted to introspecting signatures and related metadata.
<p>If no {@link FactoryBean#OBJECT_TYPE_ATTRIBUTE} is set on the bean definition
and {@code allowInit} is {@code true}, the default implementation will create
the FactoryBean via {@code getBean} to call its {@code getObjectType} method.
Subclasses are encouraged to optimize this, typically by inspecting the generic
signature of the factory bean class or the factory method that creates it.
If subclasses do instantiate the FactoryBean, they should consider trying the
{@code getObjectType} method without fully populating the bean. If this fails,
a full FactoryBean creation as performed by this implementation should be used
as fallback.
@param beanName the name of the bean
@param mbd the merged bean definition for the bean
@param allowInit if initialization of the FactoryBean is permitted if the type
cannot be determined another way
@return the type for the bean if determinable, otherwise {@code ResolvableType.NONE}
@since 5.2
@see org.springframework.beans.factory.FactoryBean#getObjectType()
@see #getBean(String)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,730
|
[
"beanName",
"mbd",
"allowInit"
] |
ResolvableType
| true
| 9
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
cjsEmplaceModuleCacheEntry
|
function cjsEmplaceModuleCacheEntry(filename, parent) {
// TODO: Do we want to keep hitting the user mutable CJS loader here?
let cjsMod = CJSModule._cache[filename];
if (cjsMod) {
return cjsMod;
}
cjsMod = new CJSModule(filename, parent);
cjsMod.filename = filename;
cjsMod.paths = CJSModule._nodeModulePaths(cjsMod.path);
cjsMod[kIsCachedByESMLoader] = true;
CJSModule._cache[filename] = cjsMod;
return cjsMod;
}
|
Get or create an entry in the CJS module cache for the given filename.
@param {string} filename CJS module filename
@param {CJSModule} parent The parent CJS module
@returns {CJSModule} the cached CJS module entry
|
javascript
|
lib/internal/modules/esm/translators.js
| 379
|
[
"filename",
"parent"
] | false
| 2
| 6.4
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
send_email_smtp
|
def send_email_smtp(
to: str | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
dryrun: bool = False,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
conn_id: str = "smtp_default",
from_email: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""
Send an email with html content.
:param to: Recipient email address or list of addresses.
:param subject: Email subject.
:param html_content: Email body in HTML format.
:param files: List of file paths to attach to the email.
:param dryrun: If True, the email will not be sent, but all other actions will be performed.
:param cc: Carbon copy recipient email address or list of addresses.
:param bcc: Blind carbon copy recipient email address or list of addresses.
:param mime_subtype: MIME subtype of the email.
:param mime_charset: MIME charset of the email.
:param conn_id: Connection ID of the SMTP server.
:param from_email: Sender email address.
:param custom_headers: Dictionary of custom headers to include in the email.
:param kwargs: Additional keyword arguments.
>>> send_email("test@example.com", "foo", "<b>Foo</b> bar", ["/dev/null"], dryrun=True)
"""
smtp_mail_from = conf.get("smtp", "SMTP_MAIL_FROM")
if smtp_mail_from is not None:
mail_from = smtp_mail_from
else:
if from_email is None:
raise ValueError(
"You should set from email - either by smtp/smtp_mail_from config or `from_email` parameter"
)
mail_from = from_email
msg, recipients = build_mime_message(
mail_from=mail_from,
to=to,
subject=subject,
html_content=html_content,
files=files,
cc=cc,
bcc=bcc,
mime_subtype=mime_subtype,
mime_charset=mime_charset,
custom_headers=custom_headers,
)
send_mime_email(e_from=mail_from, e_to=recipients, mime_msg=msg, conn_id=conn_id, dryrun=dryrun)
|
Send an email with html content.
:param to: Recipient email address or list of addresses.
:param subject: Email subject.
:param html_content: Email body in HTML format.
:param files: List of file paths to attach to the email.
:param dryrun: If True, the email will not be sent, but all other actions will be performed.
:param cc: Carbon copy recipient email address or list of addresses.
:param bcc: Blind carbon copy recipient email address or list of addresses.
:param mime_subtype: MIME subtype of the email.
:param mime_charset: MIME charset of the email.
:param conn_id: Connection ID of the SMTP server.
:param from_email: Sender email address.
:param custom_headers: Dictionary of custom headers to include in the email.
:param kwargs: Additional keyword arguments.
>>> send_email("test@example.com", "foo", "<b>Foo</b> bar", ["/dev/null"], dryrun=True)
|
python
|
airflow-core/src/airflow/utils/email.py
| 96
|
[
"to",
"subject",
"html_content",
"files",
"dryrun",
"cc",
"bcc",
"mime_subtype",
"mime_charset",
"conn_id",
"from_email",
"custom_headers"
] |
None
| true
| 4
| 8.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
setFrom
|
public void setFrom(String from, String personal) throws MessagingException, UnsupportedEncodingException {
Assert.notNull(from, "From address must not be null");
setFrom(getEncoding() != null ?
new InternetAddress(from, personal, getEncoding()) : new InternetAddress(from, personal));
}
|
Validate all given mail addresses.
<p>The default implementation simply delegates to {@link #validateAddress}
for each address.
@param addresses the addresses to validate
@throws AddressException if validation failed
@see #validateAddress(InternetAddress)
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 571
|
[
"from",
"personal"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
findResources
|
@Override
public Enumeration<URL> findResources(String name) throws IOException {
if (!this.hasJarUrls) {
return super.findResources(name);
}
Optimizations.enable(false);
try {
return new OptimizedEnumeration(super.findResources(name));
}
finally {
Optimizations.disable();
}
}
|
Create a new {@link LaunchedClassLoader} instance.
@param urls the URLs from which to load classes and resources
@param parent the parent class loader for delegation
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrlClassLoader.java
| 80
|
[
"name"
] | true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
read
|
@Override
public int read() throws IOException {
if (finished) {
return -1;
}
if (available() == 0) {
readBlock();
}
if (finished) {
return -1;
}
return decompressedBuffer.get() & 0xFF;
}
|
Decompresses (if necessary) buffered data, optionally computes and validates a XXHash32 checksum, and writes the
result to a buffer.
@throws IOException
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java
| 210
|
[] | true
| 4
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_split_by_backtick
|
def _split_by_backtick(s: str) -> list[tuple[bool, str]]:
"""
Splits a str into substrings along backtick characters (`).
Disregards backticks inside quotes.
Parameters
----------
s : str
The Python source code string.
Returns
-------
substrings: list[tuple[bool, str]]
List of tuples, where each tuple has two elements:
The first is a boolean indicating if the substring is backtick-quoted.
The second is the actual substring.
"""
substrings = []
substr: list[str] = [] # Will join into a string before adding to `substrings`
i = 0
parse_state = ParseState.DEFAULT
while i < len(s):
char = s[i]
match char:
case "`":
# start of a backtick-quoted string
if parse_state == ParseState.DEFAULT:
if substr:
substrings.append((False, "".join(substr)))
substr = [char]
i += 1
parse_state = ParseState.IN_BACKTICK
continue
elif parse_state == ParseState.IN_BACKTICK:
# escaped backtick inside a backtick-quoted string
next_char = s[i + 1] if (i != len(s) - 1) else None
if next_char == "`":
substr.append(char)
substr.append(next_char)
i += 2
continue
# end of the backtick-quoted string
else:
substr.append(char)
substrings.append((True, "".join(substr)))
substr = []
i += 1
parse_state = ParseState.DEFAULT
continue
case "'":
# start of a single-quoted string
if parse_state == ParseState.DEFAULT:
parse_state = ParseState.IN_SINGLE_QUOTE
# end of a single-quoted string
elif (parse_state == ParseState.IN_SINGLE_QUOTE) and (s[i - 1] != "\\"):
parse_state = ParseState.DEFAULT
case '"':
# start of a double-quoted string
if parse_state == ParseState.DEFAULT:
parse_state = ParseState.IN_DOUBLE_QUOTE
# end of a double-quoted string
elif (parse_state == ParseState.IN_DOUBLE_QUOTE) and (s[i - 1] != "\\"):
parse_state = ParseState.DEFAULT
substr.append(char)
i += 1
if substr:
substrings.append((False, "".join(substr)))
return substrings
|
Splits a str into substrings along backtick characters (`).
Disregards backticks inside quotes.
Parameters
----------
s : str
The Python source code string.
Returns
-------
substrings: list[tuple[bool, str]]
List of tuples, where each tuple has two elements:
The first is a boolean indicating if the substring is backtick-quoted.
The second is the actual substring.
|
python
|
pandas/core/computation/parsing.py
| 145
|
[
"s"
] |
list[tuple[bool, str]]
| true
| 15
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
nextAlphabetic
|
public String nextAlphabetic(final int minLengthInclusive, final int maxLengthExclusive) {
return nextAlphabetic(randomUtils().randomInt(minLengthInclusive, maxLengthExclusive));
}
|
Creates a random string whose length is between the inclusive minimum and the exclusive maximum.
<p>
Characters will be chosen from the set of Latin alphabetic characters (a-z, A-Z).
</p>
@param minLengthInclusive the inclusive minimum length of the string to generate.
@param maxLengthExclusive the exclusive maximum length of the string to generate.
@return the random string.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 825
|
[
"minLengthInclusive",
"maxLengthExclusive"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getStartClass
|
private @Nullable Class<?> getStartClass(Enumeration<URL> manifestResources) {
while (manifestResources.hasMoreElements()) {
try (InputStream inputStream = manifestResources.nextElement().openStream()) {
Manifest manifest = new Manifest(inputStream);
String startClass = manifest.getMainAttributes().getValue("Start-Class");
if (startClass != null) {
return ClassUtils.forName(startClass, getClass().getClassLoader());
}
}
catch (Exception ex) {
// Ignore
}
}
return null;
}
|
Create a new {@link ApplicationHome} instance for the specified source class.
@param sourceClass the source class or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationHome.java
| 77
|
[
"manifestResources"
] | true
| 4
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
topics
|
public List<String> topics() {
return data.topics()
.stream()
.map(MetadataRequestTopic::name)
.collect(Collectors.toList());
}
|
@return Builder for metadata request using topic IDs.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
| 118
|
[] | true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
keys
|
function keys(object) {
return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object);
}
|
Creates an array of the own enumerable property names of `object`.
**Note:** Non-object values are coerced to objects. See the
[ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
for more details.
@static
@since 0.1.0
@memberOf _
@category Object
@param {Object} object The object to query.
@returns {Array} Returns the array of property names.
@example
function Foo() {
this.a = 1;
this.b = 2;
}
Foo.prototype.c = 3;
_.keys(new Foo);
// => ['a', 'b'] (iteration order is not guaranteed)
_.keys('hi');
// => ['0', '1']
|
javascript
|
lodash.js
| 13,413
|
[
"object"
] | false
| 2
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
records
|
public Map<TopicPartition, List<ConsumerRecord<K, V>>> records() {
final LinkedHashMap<TopicPartition, List<ConsumerRecord<K, V>>> result = new LinkedHashMap<>();
batches.forEach((tip, batch) -> result.put(tip.topicPartition(), batch.getInFlightRecords()));
return Collections.unmodifiableMap(result);
}
|
@return all the non-control messages for this fetch, grouped by partition
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java
| 83
|
[] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getObjectTransformationCost
|
private static float getObjectTransformationCost(Class<?> srcClass, final Class<?> destClass) {
if (destClass.isPrimitive()) {
return getPrimitivePromotionCost(srcClass, destClass);
}
float cost = 0.0f;
while (srcClass != null && !destClass.equals(srcClass)) {
if (destClass.isInterface() && ClassUtils.isAssignable(srcClass, destClass)) {
// slight penalty for interface match.
// we still want an exact match to override an interface match,
// but
// an interface match should override anything where we have to
// get a superclass.
cost += 0.25f;
break;
}
cost++;
srcClass = srcClass.getSuperclass();
}
/*
* If the destination class is null, we've traveled all the way up to an Object match. We'll penalize this by adding 1.5 to the cost.
*/
if (srcClass == null) {
cost += 1.5f;
}
return cost;
}
|
Gets the number of steps needed to turn the source class into the destination class. This represents the number of steps in the object hierarchy graph.
@param srcClass The source class.
@param destClass The destination class.
@return The cost of transforming an object.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MemberUtils.java
| 135
|
[
"srcClass",
"destClass"
] | true
| 7
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
containsAny
|
private static boolean containsAny(final ToBooleanBiFunction<CharSequence, CharSequence> test, final CharSequence cs,
final CharSequence... searchCharSequences) {
if (StringUtils.isEmpty(cs) || ArrayUtils.isEmpty(searchCharSequences)) {
return false;
}
for (final CharSequence searchCharSequence : searchCharSequences) {
if (test.applyAsBoolean(cs, searchCharSequence)) {
return true;
}
}
return false;
}
|
Tests if the CharSequence contains any of the CharSequences in the given array.
<p>
A {@code null} {@code cs} CharSequence will return {@code false}. A {@code null} or zero length search array will return {@code false}.
</p>
@param cs The CharSequence to check, may be null
@param searchCharSequences The array of CharSequences to search for, may be null. Individual CharSequences may be null as well.
@return {@code true} if any of the search CharSequences are found, {@code false} otherwise
|
java
|
src/main/java/org/apache/commons/lang3/Strings.java
| 295
|
[
"test",
"cs"
] | true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_is_node_groupable_for_sink_waits
|
def _is_node_groupable_for_sink_waits(
candidate: BaseSchedulerNode,
) -> tuple[bool, Optional[str]]:
"""
Check if a candidate node can be grouped during sink_waits pass.
Sink Waits traverses waits right to left, so we don't group with
processed waits on the right or with async collectives.
Args:
candidate: Node to check for groupability
Returns:
Tuple of (is_groupable, reason_if_not_groupable)
"""
# Sink Waits traverse Waits right to left,
# => we do not group with processed Waits on the right.
if contains_wait(candidate):
return False, f"candidate contains wait {candidate.get_name()}"
if contains_async_collective(candidate):
return (
False,
f"candidate contains_async_collective {candidate.get_name()}",
)
# pyrefly: ignore[unbound-name]
if not config_comms.sink_iterative_use_runtime_estimations:
# Heuristics pre-use_runtime_estimations:
# TODO(ivankobzarev): Remove them after confirming,
# that using runtime estimations always give better results.
# We do not want to group with collectives to not reorder them forward.
if contains_collective(candidate):
return (
False,
f"candidate contains collective {candidate.get_name()}",
)
if contains_gemm_like(candidate):
return (
False,
f"candidate contains gemm_like {candidate.get_name()}",
)
return True, None
|
Check if a candidate node can be grouped during sink_waits pass.
Sink Waits traverses waits right to left, so we don't group with
processed waits on the right or with async collectives.
Args:
candidate: Node to check for groupability
Returns:
Tuple of (is_groupable, reason_if_not_groupable)
|
python
|
torch/_inductor/comms.py
| 1,398
|
[
"candidate"
] |
tuple[bool, Optional[str]]
| true
| 6
| 8.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
_where
|
def _where(self, mask: npt.NDArray[np.bool_], value) -> Self:
"""
Analogue to np.where(mask, self, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Returns
-------
same type as self
"""
result = self.copy()
if is_list_like(value):
val = value[~mask]
else:
val = value
result[~mask] = val
return result
|
Analogue to np.where(mask, self, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Returns
-------
same type as self
|
python
|
pandas/core/arrays/base.py
| 2,516
|
[
"self",
"mask",
"value"
] |
Self
| true
| 3
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
between_time
|
def between_time(
self,
start_time,
end_time,
inclusive: IntervalClosedType = "both",
axis: Axis | None = None,
) -> Self:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
inclusive : {"both", "neither", "left", "right"}, default "both"
Include boundaries; whether to set each bound as closed or open.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
For `Series` this parameter is unused and defaults to 0.
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range("2018-04-09", periods=4, freq="1D20min")
>>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time("0:15", "0:45")
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time("0:45", "0:15")
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
if not isinstance(index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=left_inclusive,
include_end=right_inclusive,
)
return self.take(indexer, axis=axis)
|
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
inclusive : {"both", "neither", "left", "right"}, default "both"
Include boundaries; whether to set each bound as closed or open.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
For `Series` this parameter is unused and defaults to 0.
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range("2018-04-09", periods=4, freq="1D20min")
>>> ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time("0:15", "0:45")
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time("0:45", "0:15")
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
|
python
|
pandas/core/generic.py
| 8,617
|
[
"self",
"start_time",
"end_time",
"inclusive",
"axis"
] |
Self
| true
| 3
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
head
|
function head(array) {
return (array && array.length) ? array[0] : undefined;
}
|
Gets the first element of `array`.
@static
@memberOf _
@since 0.1.0
@alias first
@category Array
@param {Array} array The array to query.
@returns {*} Returns the first element of `array`.
@example
_.head([1, 2, 3]);
// => 1
_.head([]);
// => undefined
|
javascript
|
lodash.js
| 7,526
|
[
"array"
] | false
| 3
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
fix_group_permissions
|
def fix_group_permissions():
"""Fixes permissions of all the files and directories that have group-write access."""
if get_verbose():
get_console().print("[info]Fixing group permissions[/]")
files_to_fix_result = run_command(["git", "ls-files", "./"], capture_output=True, check=False, text=True)
if files_to_fix_result.returncode == 0:
files_to_fix = files_to_fix_result.stdout.strip().splitlines()
for file_to_fix in files_to_fix:
change_file_permission(Path(file_to_fix))
directories_to_fix_result = run_command(
["git", "ls-tree", "-r", "-d", "--name-only", "HEAD"], capture_output=True, check=False, text=True
)
if directories_to_fix_result.returncode == 0:
directories_to_fix = directories_to_fix_result.stdout.strip().splitlines()
for directory_to_fix in directories_to_fix:
change_directory_permission(Path(directory_to_fix))
|
Fixes permissions of all the files and directories that have group-write access.
|
python
|
dev/breeze/src/airflow_breeze/utils/run_utils.py
| 359
|
[] | false
| 6
| 6.08
|
apache/airflow
| 43,597
|
unknown
| false
|
|
isDone
|
/**
 * Delegates to the chained (split-batch) metadata when one exists;
 * otherwise reports completion of this batch's own result.
 */
@Override
public boolean isDone() {
    return nextRecordMetadata == null ? this.result.completed() : nextRecordMetadata.isDone();
}
|
This method is used when we have to split a large batch in smaller ones. A chained metadata will allow the
future that has already returned to the users to wait on the newly created split batches even after the
old big batch has been deemed as done.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/FutureRecordMetadata.java
| 113
|
[] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
get_block_to_lifted_attrs
|
def get_block_to_lifted_attrs(
    graph: torch._C.Graph,
) -> tuple[dict[torch._C.Block, set[str]], dict[str, str]]:
    """
    Perform two passes to get a mapping of blocks to a set of FQNs of its lifted attributes.

    When a graph has control flow, the graph will be divided into multiple blocks. We want to convert
    each block to a graph which will be passed into torch.cond. A restriction for torch.cond is that model
    parameters/buffers are expected to be lifted as inputs to the subgraphs. Before converting the model,
    we will run this pass which will:
        1. Figure out which params/buffers are used within blocks through tracing the GetAttr calls.
        2. Process the graph bottom up to find the lifted attributes of each block by taking the union
        of the attributes used in the current block, and the lifted attributes of all its child blocks.

    Returns:
        A mapping of blocks to a set of FQNs of its lifted attributes, and a
        mapping of node names to the FQNs of its lifted attributes.
    """
    # A map from a block to its expected to be lifted arguments.
    blocks_to_lifted_attrs: dict[torch._C.Block, set[str]] = {}
    # Reference map stores the input (i.e., src) and output (i.e., dest) IR of a
    # GetAttr node. By traversing this reference map, we can figure out the
    # full IR aliasing pass and figure out the FQN of an attribute.
    # E.g., %2 = GetAttr(linear)[%1] --> node_to_parent_map["%2"] = "%1"
    node_to_parent_map: dict[str, str] = {}
    # Used for reconstructing the FQN of an attribute based on the reference map.
    # In nutshell, for each GetAttr call, GetAttr(input IR, attribute name) -> output IR
    # This name map stores which attribute name is called for a src IR --> dest IR action.
    # E.g., %2 = GetAttr(linear)[%1] --> node_to_attr_name["%2"] = "linear"
    node_to_attr_name: dict[str, str] = {}

    def _dfs_get_attr_dependency(entry):
        """
        First DFS path to construct reference map and name map.
        """
        for node in entry.nodes():
            if node.kind() == "prim::GetAttr":
                (
                    irv_name,
                    irv_parent_name,
                    attr_name,
                ) = get_ir_value_parent_name_and_attr_name(node)
                node_to_parent_map[irv_name] = irv_parent_name
                node_to_attr_name[irv_name] = attr_name
            for block in node.blocks():
                _dfs_get_attr_dependency(block)

    def _map_blocks_to_lifted_attrs(entry):
        """
        Walk the graph in a bottom-up fashion to build the expected to be
        lifted arguments for each block.
        """
        arguments: set[str] = set()
        for node in entry.nodes():
            for block in node.blocks():
                # Recursively build.
                arguments = arguments.union(_map_blocks_to_lifted_attrs(block))
            if node.kind() == "prim::GetAttr":
                irv_name = node.output().debugName()
                # Skip for intermediate GetAttr, which will anyway not result a FQN.
                # E.g., node_to_parent_name: {"%3": "%2", "%2": "%1"}
                #       node_to_attr_name: {"%3": "weight", "%2": "linear", "%1": "self"}
                # There is only one FQN %3-->%2-->%1: self.linear.weight
                # %2-->%1 is not a FQN: self.linear
                if irv_name not in set(node_to_parent_map.values()):
                    arguments.add(
                        construct_fqn(irv_name, node_to_parent_map, node_to_attr_name)
                    )
        if not isinstance(entry, torch._C.Graph):  # Skip the top level.
            blocks_to_lifted_attrs[entry] = arguments
        return arguments

    # Pass 1: record GetAttr src->dest edges and attribute names.
    _dfs_get_attr_dependency(graph)
    # Pass 2: bottom-up union of lifted attributes per block.
    _map_blocks_to_lifted_attrs(graph)
    return blocks_to_lifted_attrs, node_to_attr_name
|
Perform two passes to get a mapping of blocks to a set of FQNs of its lifted attributes.
When a graph has control flow, the graph will be divided into multiple blocks. We want to convert
each block to a graph which will be passed into torch.cond. A restriction for torch.cond is that model
parameters/buffers are expected to be lifted as inputs to the subgraphs. Before converting the model,
we will run this pass which will:
1. Figure out which params/buffers are used within blocks through tracing the GetAttr calls.
2. Process the graph bottom up to find the lifted attributes of each block by taking the union
of the attributes used in the current block, and the lifted attributes of all its child blocks.
Returns:
A mapping of blocks to a set of FQNs of its lifted attributes, and a
mapping of node names to the FQNs of its lifted attributes.
|
python
|
torch/_export/converter.py
| 265
|
[
"graph"
] |
tuple[dict[torch._C.Block, set[str]], dict[str, str]]
| true
| 9
| 8.16
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
type
|
public Optional<GroupType> type() {
return type;
}
|
The type of the consumer group.
@return An Optional containing the type, if available.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java
| 151
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
get_schema_defaults
|
def get_schema_defaults(cls, object_type: str) -> dict[str, Any]:
    """
    Extract default values from JSON schema for any object type.

    :param object_type: The object type to get defaults for (e.g., "operator", "dag")
    :return: Dictionary of field name -> default value
    """
    loader = cls._json_schema
    if loader is None:
        # No schema configured; nothing to extract.
        return {}
    # Accessing ``.schema`` triggers the loader's lazy loading.
    definitions = loader.schema.get("definitions", {})
    properties = definitions.get(object_type, {}).get("properties", {})
    return {
        field_name: field_def["default"]
        for field_name, field_def in properties.items()
        if isinstance(field_def, dict) and "default" in field_def
    }
|
Extract default values from JSON schema for any object type.
:param object_type: The object type to get defaults for (e.g., "operator", "dag")
:return: Dictionary of field name -> default value
|
python
|
airflow-core/src/airflow/serialization/serialized_objects.py
| 944
|
[
"cls",
"object_type"
] |
dict[str, Any]
| true
| 5
| 8.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
_iset_split_block
|
def _iset_split_block(
    self,
    blkno_l: int,
    blk_locs: np.ndarray | list[int],
    value: ArrayLike | None = None,
    refs: BlockValuesRefs | None = None,
) -> None:
    """Removes columns from a block by splitting the block.

    Avoids copying the whole block through slicing and updates the manager
    after determining the new block structure. Optionally adds a new block,
    otherwise has to be done by the caller.

    Parameters
    ----------
    blkno_l: The block number to operate on, relevant for updating the manager
    blk_locs: The locations of our block that should be deleted.
    value: The value to set as a replacement.
    refs: The reference tracking object of the value to set.
    """
    blk = self.blocks[blkno_l]
    if self._blklocs is None:
        self._rebuild_blknos_and_blklocs()
    # Split the block: blk.delete returns the pieces left after removing blk_locs.
    nbs_tup = tuple(blk.delete(blk_locs))
    if value is not None:
        # Replace the deleted columns with a new block holding `value`.
        locs = blk.mgr_locs.as_array[blk_locs]
        first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs)
    else:
        first_nb = nbs_tup[0]
        nbs_tup = tuple(nbs_tup[1:])
    nr_blocks = len(self.blocks)
    # The first piece takes the original block's slot; the rest go at the end.
    blocks_tup = (
        self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
    )
    self.blocks = blocks_tup
    if not nbs_tup and value is not None:
        # No need to update anything if split did not happen
        return
    # Re-point the column -> (block, position) bookkeeping at the new pieces.
    self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))
    for i, nb in enumerate(nbs_tup):
        self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
        self._blknos[nb.mgr_locs.indexer] = i + nr_blocks
|
Removes columns from a block by splitting the block.
Avoids copying the whole block through slicing and updates the manager
after determining the new block structure. Optionally adds a new block,
otherwise has to be done by the caller.
Parameters
----------
blkno_l: The block number to operate on, relevant for updating the manager
blk_locs: The locations of our block that should be deleted.
value: The value to set as a replacement.
refs: The reference tracking object of the value to set.
|
python
|
pandas/core/internals/managers.py
| 1,385
|
[
"self",
"blkno_l",
"blk_locs",
"value",
"refs"
] |
None
| true
| 7
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
startupDurations
|
/**
 * Returns the startup time of each service as a {@link Duration}.
 *
 * <p>Values are derived from {@code startupTimes()} (milliseconds), so only services whose
 * startup time has been recorded there are included.
 */
@J2ObjCIncompatible
// If users use this when they shouldn't, we hope that NewApi will catch subsequent Duration calls
@IgnoreJRERequirement
public ImmutableMap<Service, Duration> startupDurations() {
    return ImmutableMap.copyOf(
        Maps.<Service, Long, Duration>transformValues(startupTimes(), Duration::ofMillis));
}
|
Returns the service load times. This value will only return startup times for services that
have finished starting.
@return Map of services and their corresponding startup time, the map entries will be ordered
by startup time.
@since 33.4.0 (but since 31.0 in the JRE flavor)
|
java
|
android/guava/src/com/google/common/util/concurrent/ServiceManager.java
| 433
|
[] | true
| 1
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
setBucket
|
/**
 * Records the count for the bucket at {@code index} on the positive or negative side,
 * replacing any previously stored count for that bucket.
 *
 * <p>As long as buckets arrive in order (all negatives before positives, indices increasing),
 * they are appended directly to the result buffer; the first out-of-order bucket switches to
 * TreeMap-backed storage for the remainder.
 *
 * @param index the bucket index
 * @param count the bucket count, must be at least 1
 * @param isPositive whether the bucket belongs to the positive range
 * @throws IllegalArgumentException if {@code count} is less than 1
 */
private void setBucket(long index, long count, boolean isPositive) {
    if (count < 1) {
        throw new IllegalArgumentException("Bucket count must be at least 1");
    }
    if (negativeBuckets == null && positiveBuckets == null) {
        // so far, all received buckets were in order, try to directly build the result
        if (result == null) {
            // Initialize the result buffer if required
            reallocateResultWithCapacity(estimatedBucketCount, false);
        }
        if ((isPositive && result.wasLastAddedBucketPositive() == false)
            || (isPositive == result.wasLastAddedBucketPositive() && index > result.getLastAddedBucketIndex())) {
            // the new bucket is in order too, we can directly add the bucket
            addBucketToResult(index, count, isPositive);
            return;
        }
    }
    // fallback to TreeMap if a bucket is received out of order
    initializeBucketTreeMapsIfNeeded();
    if (isPositive) {
        positiveBuckets.put(index, count);
    } else {
        negativeBuckets.put(index, count);
    }
}
|
Sets the given bucket of the negative buckets. If the bucket already exists, it will be replaced.
Buckets may be set in arbitrary order. However, for best performance and minimal allocations,
buckets should be set in order of increasing index and all negative buckets should be set before positive buckets.
@param index the index of the bucket
@param count the count of the bucket, must be at least 1
@return the builder
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
| 160
|
[
"index",
"count",
"isPositive"
] |
void
| true
| 10
| 8.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getBeanTypeConverter
|
/**
 * Obtains a bean type converter from the BeanFactory that this bean runs in,
 * falling back to a fresh {@code SimpleTypeConverter} when not running in a
 * {@code ConfigurableBeanFactory}.
 */
protected TypeConverter getBeanTypeConverter() {
    return (getBeanFactory() instanceof ConfigurableBeanFactory cbf
            ? cbf.getTypeConverter()
            : new SimpleTypeConverter());
}
|
Obtain a bean type converter from the BeanFactory that this bean
runs in. This is typically a fresh instance for each call,
since TypeConverters are usually <i>not</i> thread-safe.
<p>Falls back to a SimpleTypeConverter when not running in a BeanFactory.
@see ConfigurableBeanFactory#getTypeConverter()
@see org.springframework.beans.SimpleTypeConverter
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/AbstractFactoryBean.java
| 121
|
[] |
TypeConverter
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
totalCount
|
/**
 * Returns the total number of events across all samples.
 *
 * <p>The count is accumulated as a {@code long} and implicitly widened to
 * {@code double} on return.
 */
double totalCount() {
    long count = 0;
    for (Sample sample : samples) {
        count += sample.eventCount;
    }
    return count;
}
|
Returns the total number of events across all recorded samples.
The per-sample event counts are summed as a long and returned widened to a double.
@return the total event count across all samples
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java
| 148
|
[] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_accumulate
|
def _accumulate(
    self, name: str, *, skipna: bool = True, **kwargs
) -> ExtensionArray:
    """
    Return an ExtensionArray performing an accumulation operation.

    The underlying data type might change.

    Parameters
    ----------
    name : str
        Name of the function, supported values are:
        - cummin
        - cummax
        - cumsum
        - cumprod
    skipna : bool, default True
        If True, skip NA values.
    **kwargs
        Additional keyword arguments passed to the accumulation function.
        Currently, there is no supported kwarg.

    Returns
    -------
    array
        An array performing the accumulation operation.

    Raises
    ------
    NotImplementedError : subclass does not define accumulations

    See Also
    --------
    api.extensions.ExtensionArray._concat_same_type : Concatenate multiple
        array of this dtype.
    api.extensions.ExtensionArray.view : Return a view on the array.
    api.extensions.ExtensionArray._explode : Transform each element of
        list-like to a row.

    Examples
    --------
    >>> arr = pd.array([1, 2, 3])
    >>> arr._accumulate(name="cumsum")
    <IntegerArray>
    [1, 3, 6]
    Length: 3, dtype: Int64
    """
    # Base class offers no accumulation support; subclasses opt in by overriding.
    raise NotImplementedError(f"cannot perform {name} with type {self.dtype}")
|
Return an ExtensionArray performing an accumulation operation.
The underlying data type might change.
Parameters
----------
name : str
Name of the function, supported values are:
- cummin
- cummax
- cumsum
- cumprod
skipna : bool, default True
If True, skip NA values.
**kwargs
Additional keyword arguments passed to the accumulation function.
Currently, there is no supported kwarg.
Returns
-------
array
An array performing the accumulation operation.
Raises
------
NotImplementedError : subclass does not define accumulations
See Also
--------
api.extensions.ExtensionArray._concat_same_type : Concatenate multiple
array of this dtype.
api.extensions.ExtensionArray.view : Return a view on the array.
api.extensions.ExtensionArray._explode : Transform each element of
list-like to a row.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr._accumulate(name="cumsum")
<IntegerArray>
[1, 3, 6]
Length: 3, dtype: Int64
|
python
|
pandas/core/arrays/base.py
| 2,188
|
[
"self",
"name",
"skipna"
] |
ExtensionArray
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_session
|
def get_session():
    """Get the configured Session, raising an error if not configured."""
    if Session is not None:
        return Session
    raise RuntimeError("Session not configured. Call configure_orm() first.")
|
Get the configured Session, raising an error if not configured.
|
python
|
airflow-core/src/airflow/settings.py
| 149
|
[] | false
| 2
| 6.08
|
apache/airflow
| 43,597
|
unknown
| false
|
|
hashCode
|
/**
 * Computes a hash code combining the topic ID and topic partition with the
 * standard 31-based scheme.
 */
@Override
public int hashCode() {
    int hash = 31 + topicId.hashCode();
    return 31 * hash + topicPartition.hashCode();
}
|
Computes a hash code from the topic ID and topic partition using the standard 31-based scheme.
|
java
|
clients/src/main/java/org/apache/kafka/common/TopicIdPartition.java
| 94
|
[] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
values
|
/**
 * Creates an array of the own enumerable string keyed property values of
 * `object`; a nullish input yields an empty array.
 *
 * @param {Object} object The object to query.
 * @returns {Array} Returns the array of property values.
 */
function values(object) {
  if (object == null) {
    return [];
  }
  return baseValues(object, keys(object));
}
|
Creates an array of the own enumerable string keyed property values of `object`.
**Note:** Non-object values are coerced to objects.
@static
@since 0.1.0
@memberOf _
@category Object
@param {Object} object The object to query.
@returns {Array} Returns the array of property values.
@example
function Foo() {
this.a = 1;
this.b = 2;
}
Foo.prototype.c = 3;
_.values(new Foo);
// => [1, 2] (iteration order is not guaranteed)
_.values('hi');
// => ['h', 'i']
|
javascript
|
lodash.js
| 14,035
|
[
"object"
] | false
| 2
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
findDimensionFields
|
/**
 * Collects patterns for dimension fields from the document mapper into {@code dimensions}.
 *
 * @param dimensions the list to which the found dimension field patterns are added
 * @param documentMapper the document mapper to extract dimension fields from
 * @return {@code true} if the collected patterns cover all potential dimension fields,
 *         {@code false} when a time-series dynamic template forces a fallback
 */
private static boolean findDimensionFields(List<String> dimensions, DocumentMapper documentMapper) {
    // Pass-through objects that contain dimensions contribute a wildcard pattern.
    for (var objectMapper : documentMapper.mappers().objectMappers().values()) {
        if (objectMapper instanceof PassThroughObjectMapper passThroughObjectMapper) {
            if (passThroughObjectMapper.containsDimensions()) {
                dimensions.add(passThroughObjectMapper.fullPath() + ".*");
            }
        }
    }
    boolean matchesAllDimensions = true;
    for (var template : documentMapper.mapping().getRoot().dynamicTemplates()) {
        if (template.isTimeSeriesDimension() == false) {
            continue;
        }
        // At this point, we don't support index.dimensions when dimensions are mapped via a dynamic template.
        // This is because more specific matches with a higher priority can exist that exclude certain fields from being mapped as a
        // dimension. For example:
        // - path_match: "labels.host_ip", time_series_dimension: false
        // - path_match: "labels.*", time_series_dimension: true
        // In this case, "labels.host_ip" is not a dimension,
        // and adding labels.* to index.dimensions would lead to non-dimension fields being included in the tsid.
        // Therefore, we fall back to using index.routing_path.
        // While this also may include non-dimension fields in the routing path,
        // it at least guarantees that the tsid only includes dimension fields and includes all dimension fields.
        matchesAllDimensions = false;
        if (template.pathMatch().isEmpty() == false) {
            dimensions.addAll(template.pathMatch());
        }
    }
    for (var fieldMapper : documentMapper.mappers().fieldMappers()) {
        extractPath(dimensions, fieldMapper);
    }
    return matchesAllDimensions;
}
|
Finds the dimension fields in the provided document mapper and adds them to the provided list.
@param dimensions the list to which the found dimension fields will be added
@param documentMapper the document mapper from which to extract the dimension fields
@return true if all potential dimension fields can be matched via the dimensions in the list, false otherwise
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java
| 247
|
[
"dimensions",
"documentMapper"
] | true
| 5
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
nameHash
|
/**
 * Computes the combined hash of an optional name prefix and a name.
 *
 * <p>The prefix, when present, is folded into the hash first; the name is folded in last.
 * NOTE(review): the boolean passed to {@code ZipString.hash} differs between prefix
 * ({@code false}) and name ({@code true}) — presumably an end-of-name marker; confirm
 * against {@code ZipString.hash}'s contract.
 */
private int nameHash(CharSequence namePrefix, CharSequence name) {
    int nameHash = 0;
    nameHash = (namePrefix != null) ? ZipString.hash(nameHash, namePrefix, false) : nameHash;
    nameHash = ZipString.hash(nameHash, name, true);
    return nameHash;
}
|
Computes the combined hash of an optional name prefix and an entry name.
@param namePrefix the optional prefix, may be {@code null}
@param name the entry name
@return the combined hash code
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 271
|
[
"namePrefix",
"name"
] | true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
nsecToMSec
|
/**
 * Converts nanoseconds to milliseconds using integer (truncating) division.
 *
 * @param ns duration in nanoseconds
 * @return the equivalent number of whole milliseconds
 */
public static long nsecToMSec(long ns) {
    return ns / NSEC_PER_MSEC;
}
|
Converts a duration in nanoseconds to milliseconds using truncating integer division.
@param ns the duration in nanoseconds
@return the equivalent number of whole milliseconds
|
java
|
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
| 448
|
[
"ns"
] | true
| 1
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
execute_query
|
def execute_query(
    self,
    sql: str | list[str],
    database: str | None = None,
    cluster_identifier: str | None = None,
    db_user: str | None = None,
    parameters: Iterable | None = None,
    secret_arn: str | None = None,
    statement_name: str | None = None,
    with_event: bool = False,
    wait_for_completion: bool = True,
    poll_interval: int = 10,
    workgroup_name: str | None = None,
    session_id: str | None = None,
    session_keep_alive_seconds: int | None = None,
) -> QueryExecutionOutput:
    """
    Execute a statement against Amazon Redshift.

    :param sql: the SQL statement or list of SQL statement to run
    :param database: the name of the database
    :param cluster_identifier: unique identifier of a cluster
    :param db_user: the database username
    :param parameters: the parameters for the SQL statement
    :param secret_arn: the name or ARN of the secret that enables db access
    :param statement_name: the name of the SQL statement
    :param with_event: whether to send an event to EventBridge
    :param wait_for_completion: whether to wait for a result
    :param poll_interval: how often in seconds to check the query status
    :param workgroup_name: name of the Redshift Serverless workgroup. Mutually exclusive with
        `cluster_identifier`. Specify this parameter to query Redshift Serverless. More info
        https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-serverless.html
    :param session_id: the session identifier of the query
    :param session_keep_alive_seconds: duration in seconds to keep the session alive after the query
        finishes. The maximum time a session can keep alive is 24 hours
    :returns statement_id: str, the UUID of the statement
    """
    kwargs: dict[str, Any] = {
        "ClusterIdentifier": cluster_identifier,
        "Database": database,
        "DbUser": db_user,
        "Parameters": parameters,
        "WithEvent": with_event,
        "SecretArn": secret_arn,
        "StatementName": statement_name,
        "WorkgroupName": workgroup_name,
        "SessionId": session_id,
        "SessionKeepAliveSeconds": session_keep_alive_seconds,
    }
    # Exactly one execution target may be given: a provisioned cluster, a
    # serverless workgroup, or an existing session to reuse.
    if sum(x is not None for x in (cluster_identifier, workgroup_name, session_id)) != 1:
        raise ValueError(
            "Exactly one of cluster_identifier, workgroup_name, or session_id must be provided"
        )
    if session_id is not None:
        msg = "session_id must be a valid UUID4"
        try:
            if UUID(session_id).version != 4:
                raise ValueError(msg)
        except ValueError:
            # UUID() itself raises ValueError on malformed input; normalize
            # both failure modes to the same message.
            raise ValueError(msg)
    if session_keep_alive_seconds is not None and (
        session_keep_alive_seconds < 0 or duration(seconds=session_keep_alive_seconds).hours > 24
    ):
        raise ValueError("Session keep alive duration must be between 0 and 86400 seconds.")
    # A list of statements goes through the batch API; a single string through
    # the plain execute API.
    if isinstance(sql, list):
        kwargs["Sqls"] = sql
        resp = self.conn.batch_execute_statement(**trim_none_values(kwargs))
    else:
        kwargs["Sql"] = sql
        resp = self.conn.execute_statement(**trim_none_values(kwargs))
    statement_id = resp["Id"]
    if wait_for_completion:
        self.wait_for_results(statement_id, poll_interval=poll_interval)
    return QueryExecutionOutput(statement_id=statement_id, session_id=resp.get("SessionId"))
|
Execute a statement against Amazon Redshift.
:param sql: the SQL statement or list of SQL statement to run
:param database: the name of the database
:param cluster_identifier: unique identifier of a cluster
:param db_user: the database username
:param parameters: the parameters for the SQL statement
:param secret_arn: the name or ARN of the secret that enables db access
:param statement_name: the name of the SQL statement
:param with_event: whether to send an event to EventBridge
:param wait_for_completion: whether to wait for a result
:param poll_interval: how often in seconds to check the query status
:param workgroup_name: name of the Redshift Serverless workgroup. Mutually exclusive with
`cluster_identifier`. Specify this parameter to query Redshift Serverless. More info
https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-serverless.html
:param session_id: the session identifier of the query
:param session_keep_alive_seconds: duration in seconds to keep the session alive after the query
finishes. The maximum time a session can keep alive is 24 hours
:returns statement_id: str, the UUID of the statement
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_data.py
| 79
|
[
"self",
"sql",
"database",
"cluster_identifier",
"db_user",
"parameters",
"secret_arn",
"statement_name",
"with_event",
"wait_for_completion",
"poll_interval",
"workgroup_name",
"session_id",
"session_keep_alive_seconds"
] |
QueryExecutionOutput
| true
| 10
| 6.16
|
apache/airflow
| 43,597
|
sphinx
| false
|
enableSubstitutionForNamespaceExports
|
function enableSubstitutionForNamespaceExports() {
    if (enabledSubstitutions & TypeScriptSubstitutionFlags.NamespaceExports) {
        // Already enabled; nothing to do.
        return;
    }
    enabledSubstitutions |= TypeScriptSubstitutionFlags.NamespaceExports;
    // Substituting identifiers and shorthand property assignments lets us
    // rewrite references to exported members of a namespace.
    context.enableSubstitution(SyntaxKind.Identifier);
    context.enableSubstitution(SyntaxKind.ShorthandPropertyAssignment);
    // We need to be notified when entering and exiting namespaces.
    context.enableEmitNotification(SyntaxKind.ModuleDeclaration);
}
|
Gets the expression used to refer to a namespace or enum within the body
of its declaration.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,580
|
[] | false
| 2
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getLayout
|
/**
 * Lazily detects and caches the {@code Layout} for the source archive.
 */
protected final Layout getLayout() {
    Layout current = this.layout;
    if (current == null) {
        current = getLayoutFactory().getLayout(this.source);
        Assert.state(current != null, "Unable to detect layout");
        this.layout = current;
    }
    return current;
}
|
Return the {@link File} to use to back up the original source.
@return the file to use to back up the original source
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 375
|
[] |
Layout
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
describe_jobs
|
def describe_jobs(self, jobs: list[str]) -> dict:
    """
    Get job descriptions from AWS Batch.

    :param jobs: a list of JobId to describe
    :return: an API response to describe jobs
    """
    # Signature stub only (body is ``...``) — presumably satisfied by the
    # underlying boto3 Batch client at runtime; confirm against the caller.
    ...
|
Get job descriptions from AWS Batch.
:param jobs: a list of JobId to describe
:return: an API response to describe jobs
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/batch_client.py
| 61
|
[
"self",
"jobs"
] |
dict
| true
| 1
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
appendln
|
/**
 * Appends a long value followed by a new line to the string builder.
 *
 * @param value the value to append
 * @return {@code this} instance.
 */
public StrBuilder appendln(final long value) {
    return append(value).appendNewLine();
}
|
Appends a long value followed by a new line to the string builder using {@code String.valueOf}.
@param value the value to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,015
|
[
"value"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getCSVInstance
|
/**
 * Gets a new tokenizer instance which parses Comma Separated Value strings,
 * initialized with the given input.
 *
 * @param input the text to parse
 * @return a new CSV tokenizer
 */
public static StrTokenizer getCSVInstance(final String input) {
    // Clone the shared CSV prototype, then point it at the supplied input.
    final StrTokenizer tokenizer = getCSVClone();
    tokenizer.reset(input);
    return tokenizer;
}
|
Gets a new tokenizer instance which parses Comma Separated Value strings
initializing it with the given input. The default for CSV processing
will be trim whitespace from both ends (which can be overridden with
the setTrimmer method).
@param input the text to parse.
@return a new tokenizer instance which parses Comma Separated Value strings.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 164
|
[
"input"
] |
StrTokenizer
| true
| 1
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
hermadd
|
def hermadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`. The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermsub, hermmulx, hermmul, hermdiv, hermpow

    Notes
    -----
    Unlike multiplication, division, etc., the sum of two Hermite series
    is a Hermite series (without having to "reproject" the result onto
    the basis set) so addition, just like that of "standard" polynomials,
    is simply "component-wise."

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermadd
    >>> hermadd([1, 2, 3], [1, 2, 3, 4])
    array([2., 4., 6., 4.])
    """
    # Delegates to the shared polynomial-utilities component-wise addition.
    return pu._add(c1, c2)
|
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermsub, hermmulx, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermadd
>>> hermadd([1, 2, 3], [1, 2, 3, 4])
array([2., 4., 6., 4.])
|
python
|
numpy/polynomial/hermite.py
| 312
|
[
"c1",
"c2"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
asSslStoreBundle
|
/**
 * Builds an {@code SslStoreBundle} from JKS keystore and truststore properties.
 *
 * @param properties the source properties
 * @param resourceLoader the resource loader used to load store content
 * @return a new {@code JksSslStoreBundle}
 */
private static SslStoreBundle asSslStoreBundle(JksSslBundleProperties properties, ResourceLoader resourceLoader) {
    JksSslStoreDetails keyStoreDetails = asStoreDetails(properties.getKeystore());
    JksSslStoreDetails trustStoreDetails = asStoreDetails(properties.getTruststore());
    return new JksSslStoreBundle(keyStoreDetails, trustStoreDetails, resourceLoader);
}
|
Get an {@link SslBundle} for the given {@link JksSslBundleProperties}.
@param properties the source properties
@param resourceLoader the resource loader used to load content
@return an {@link SslBundle} instance
@since 3.3.5
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/PropertiesSslBundle.java
| 172
|
[
"properties",
"resourceLoader"
] |
SslStoreBundle
| true
| 1
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
emptyArray
|
@SuppressWarnings("unchecked")
public static <L, R> ImmutablePair<L, R>[] emptyArray() {
return (ImmutablePair<L, R>[]) EMPTY_ARRAY;
}
|
Returns the empty array singleton that can be assigned without compiler warning.
@param <L> the left element type
@param <R> the right element type
@return the empty array singleton that can be assigned without compiler warning.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/tuple/ImmutablePair.java
| 65
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
optInt
|
/**
 * Returns the value mapped by {@code name} if it exists and can be coerced
 * to an int; returns 0 otherwise.
 *
 * @param name the name of the property
 * @return the mapped int value, or 0
 */
public int optInt(String name) {
    return optInt(name, 0);
}
|
Returns the value mapped by {@code name} if it exists and is an int or can be
coerced to an int. Returns 0 otherwise.
@param name the name of the property
@return the value of {@code 0}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 489
|
[
"name"
] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
arrayToObject
|
function arrayToObject(arr) {
const obj = {};
const keys = Object.keys(arr);
let i;
const len = keys.length;
let key;
for (i = 0; i < len; i++) {
key = keys[i];
obj[key] = arr[key];
}
return obj;
}
|
Convert an array to an object.
@param {Array<any>} arr - The array to convert to an object.
@returns An object with the same keys and values as the array.
|
javascript
|
lib/helpers/formDataToJSON.js
| 29
|
[
"arr"
] | false
| 2
| 6.24
|
axios/axios
| 108,381
|
jsdoc
| false
|
|
format
|
/**
 * Formats a {@code Calendar} by delegating to the underlying printer.
 *
 * @param calendar the calendar to format.
 * @return the formatted string.
 */
@Override
public String format(final Calendar calendar) {
    return printer.format(calendar);
}
|
Formats a {@link Calendar} object.
@param calendar the calendar to format.
@return the formatted string.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateFormat.java
| 406
|
[
"calendar"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
additive_chi2_kernel
|
def additive_chi2_kernel(X, Y=None):
"""Compute the additive chi-squared kernel between observations in X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by:
.. code-block:: text
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
A feature array.
Y : array-like of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
Returns
-------
kernel : array-like of shape (n_samples_X, n_samples_Y)
The kernel matrix.
See Also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
Examples
--------
>>> from sklearn.metrics.pairwise import additive_chi2_kernel
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> additive_chi2_kernel(X, Y)
array([[-1., -2.],
[-2., -1.]])
"""
xp, _, device_ = get_namespace_and_device(X, Y)
X, Y = check_pairwise_arrays(X, Y, accept_sparse=False)
if xp.any(X < 0):
raise ValueError("X contains negative values.")
if Y is not X and xp.any(Y < 0):
raise ValueError("Y contains negative values.")
if _is_numpy_namespace(xp):
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
else:
dtype = _find_matching_floating_dtype(X, Y, xp=xp)
xb = X[:, None, :]
yb = Y[None, :, :]
nom = -((xb - yb) ** 2)
denom = xb + yb
nom = xp.where(denom == 0, xp.asarray(0, dtype=dtype, device=device_), nom)
denom = xp.where(denom == 0, xp.asarray(1, dtype=dtype, device=device_), denom)
return xp.sum(nom / denom, axis=2)
|
Compute the additive chi-squared kernel between observations in X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by:
.. code-block:: text
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
A feature array.
Y : array-like of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
Returns
-------
kernel : array-like of shape (n_samples_X, n_samples_Y)
The kernel matrix.
See Also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
Examples
--------
>>> from sklearn.metrics.pairwise import additive_chi2_kernel
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> additive_chi2_kernel(X, Y)
array([[-1., -2.],
[-2., -1.]])
|
python
|
sklearn/metrics/pairwise.py
| 1,753
|
[
"X",
"Y"
] | false
| 6
| 7.44
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
createImportCallExpressionAMD
|
function createImportCallExpressionAMD(arg: Expression | undefined, containsLexicalThis: boolean): Expression {
// improt("./blah")
// emit as
// define(["require", "exports", "blah"], function (require, exports) {
// ...
// new Promise(function (_a, _b) { require([x], _a, _b); }); /*Amd Require*/
// });
const resolve = factory.createUniqueName("resolve");
const reject = factory.createUniqueName("reject");
const parameters = [
factory.createParameterDeclaration(/*modifiers*/ undefined, /*dotDotDotToken*/ undefined, /*name*/ resolve),
factory.createParameterDeclaration(/*modifiers*/ undefined, /*dotDotDotToken*/ undefined, /*name*/ reject),
];
const body = factory.createBlock([
factory.createExpressionStatement(
factory.createCallExpression(
factory.createIdentifier("require"),
/*typeArguments*/ undefined,
[factory.createArrayLiteralExpression([arg || factory.createOmittedExpression()]), resolve, reject],
),
),
]);
let func: FunctionExpression | ArrowFunction;
if (languageVersion >= ScriptTarget.ES2015) {
func = factory.createArrowFunction(
/*modifiers*/ undefined,
/*typeParameters*/ undefined,
parameters,
/*type*/ undefined,
/*equalsGreaterThanToken*/ undefined,
body,
);
}
else {
func = factory.createFunctionExpression(
/*modifiers*/ undefined,
/*asteriskToken*/ undefined,
/*name*/ undefined,
/*typeParameters*/ undefined,
parameters,
/*type*/ undefined,
body,
);
// if there is a lexical 'this' in the import call arguments, ensure we indicate
// that this new function expression indicates it captures 'this' so that the
// es2015 transformer will properly substitute 'this' with '_this'.
if (containsLexicalThis) {
setEmitFlags(func, EmitFlags.CapturesThis);
}
}
const promise = factory.createNewExpression(factory.createIdentifier("Promise"), /*typeArguments*/ undefined, [func]);
if (getESModuleInterop(compilerOptions)) {
return factory.createCallExpression(factory.createPropertyAccessExpression(promise, factory.createIdentifier("then")), /*typeArguments*/ undefined, [emitHelpers().createImportStarCallbackHelper()]);
}
return promise;
}
|
Visits the body of a Block to hoist declarations.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 1,272
|
[
"arg",
"containsLexicalThis"
] | true
| 6
| 6.64
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
trimcoef
|
def trimcoef(c, tol=0):
"""
Remove "small" "trailing" coefficients from a polynomial.
"Small" means "small in absolute value" and is controlled by the
parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
both the 3-rd and 4-th order coefficients would be "trimmed."
Parameters
----------
c : array_like
1-d array of coefficients, ordered from lowest order to highest.
tol : number, optional
Trailing (i.e., highest order) elements with absolute value less
than or equal to `tol` (default value is zero) are removed.
Returns
-------
trimmed : ndarray
1-d array with trailing zeros removed. If the resulting series
would be empty, a series containing a single zero is returned.
Raises
------
ValueError
If `tol` < 0
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> pu.trimcoef((0,0,3,0,5,0,0))
array([0., 0., 3., 0., 5.])
>>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
array([0.])
>>> i = complex(0,1) # works for complex
>>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
array([0.0003+0.j , 0.001 -0.001j])
"""
if tol < 0:
raise ValueError("tol must be non-negative")
[c] = as_series([c])
[ind] = np.nonzero(np.abs(c) > tol)
if len(ind) == 0:
return c[:1] * 0
else:
return c[:ind[-1] + 1].copy()
|
Remove "small" "trailing" coefficients from a polynomial.
"Small" means "small in absolute value" and is controlled by the
parameter `tol`; "trailing" means highest order coefficient(s), e.g., in
``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)
both the 3-rd and 4-th order coefficients would be "trimmed."
Parameters
----------
c : array_like
1-d array of coefficients, ordered from lowest order to highest.
tol : number, optional
Trailing (i.e., highest order) elements with absolute value less
than or equal to `tol` (default value is zero) are removed.
Returns
-------
trimmed : ndarray
1-d array with trailing zeros removed. If the resulting series
would be empty, a series containing a single zero is returned.
Raises
------
ValueError
If `tol` < 0
Examples
--------
>>> from numpy.polynomial import polyutils as pu
>>> pu.trimcoef((0,0,3,0,5,0,0))
array([0., 0., 3., 0., 5.])
>>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
array([0.])
>>> i = complex(0,1) # works for complex
>>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
array([0.0003+0.j , 0.001 -0.001j])
|
python
|
numpy/polynomial/polyutils.py
| 144
|
[
"c",
"tol"
] | false
| 4
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
partitionLag
|
public synchronized Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel) {
TopicPartitionState topicPartitionState = assignedState(tp);
if (topicPartitionState.position == null) {
return null;
} else if (isolationLevel == IsolationLevel.READ_COMMITTED) {
return topicPartitionState.lastStableOffset == null ? null : topicPartitionState.lastStableOffset - topicPartitionState.position.offset;
} else {
return topicPartitionState.highWatermark == null ? null : topicPartitionState.highWatermark - topicPartitionState.position.offset;
}
}
|
Attempt to complete validation with the end offset returned from the OffsetForLeaderEpoch request.
@return Log truncation details if detected and no reset policy is defined.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 640
|
[
"tp",
"isolationLevel"
] |
Long
| true
| 5
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
col
|
def col(col_name: Hashable) -> Expression:
"""
Generate deferred object representing a column of a DataFrame.
Any place which accepts ``lambda df: df[col_name]``, such as
:meth:`DataFrame.assign` or :meth:`DataFrame.loc`, can also accept
``pd.col(col_name)``.
.. versionadded:: 3.0.0
Parameters
----------
col_name : Hashable
Column name.
Returns
-------
`pandas.api.typing.Expression`
A deferred object representing a column of a DataFrame.
See Also
--------
DataFrame.query : Query columns of a dataframe using string expressions.
Examples
--------
You can use `col` in `assign`.
>>> df = pd.DataFrame({"name": ["beluga", "narwhal"], "speed": [100, 110]})
>>> df.assign(name_titlecase=pd.col("name").str.title())
name speed name_titlecase
0 beluga 100 Beluga
1 narwhal 110 Narwhal
You can also use it for filtering.
>>> df.loc[pd.col("speed") > 105]
name speed
1 narwhal 110
"""
if not isinstance(col_name, Hashable):
msg = f"Expected Hashable, got: {type(col_name)}"
raise TypeError(msg)
def func(df: DataFrame) -> Series:
if col_name not in df.columns:
columns_str = str(df.columns.tolist())
max_len = 90
if len(columns_str) > max_len:
columns_str = columns_str[:max_len] + "...]"
msg = (
f"Column '{col_name}' not found in given DataFrame.\n\n"
f"Hint: did you mean one of {columns_str} instead?"
)
raise ValueError(msg)
return df[col_name]
return Expression(func, f"col({col_name!r})")
|
Generate deferred object representing a column of a DataFrame.
Any place which accepts ``lambda df: df[col_name]``, such as
:meth:`DataFrame.assign` or :meth:`DataFrame.loc`, can also accept
``pd.col(col_name)``.
.. versionadded:: 3.0.0
Parameters
----------
col_name : Hashable
Column name.
Returns
-------
`pandas.api.typing.Expression`
A deferred object representing a column of a DataFrame.
See Also
--------
DataFrame.query : Query columns of a dataframe using string expressions.
Examples
--------
You can use `col` in `assign`.
>>> df = pd.DataFrame({"name": ["beluga", "narwhal"], "speed": [100, 110]})
>>> df.assign(name_titlecase=pd.col("name").str.title())
name speed name_titlecase
0 beluga 100 Beluga
1 narwhal 110 Narwhal
You can also use it for filtering.
>>> df.loc[pd.col("speed") > 105]
name speed
1 narwhal 110
|
python
|
pandas/core/col.py
| 255
|
[
"col_name"
] |
Expression
| true
| 4
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_optimize
|
def _optimize(
rebuild_ctx: Callable[[], Union[OptimizeContext, _NullDecorator]],
backend: Union[str, Callable[..., Any]] = "inductor",
*,
nopython: bool = False,
error_on_graph_break: Optional[bool] = None,
guard_export_fn: Optional[Callable[[_guards.GuardsSet], None]] = None,
guard_fail_fn: Optional[Callable[[GuardFail], None]] = None,
guard_filter_fn: Callable[[Sequence[GuardFilterEntry]], Sequence[bool]]
| None = None,
disable: bool = False,
dynamic: Optional[bool] = None,
package: Optional[CompilePackage] = None,
) -> Union[OptimizeContext, _NullDecorator]:
"""
The main entrypoint of TorchDynamo. Do graph capture and call
backend() to optimize extracted graphs.
Args:
backend: One of the two things:
- Either, a function/callable taking a torch.fx.GraphModule and
example_inputs and returning a python callable that runs the
graph faster.
One can also provide additional context for the backend, like
torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
See AOTAutogradMemoryEfficientFusionWithContext for the usage.
- Or, a string backend name in `torch._dynamo.list_backends()`
nopython: If True, graph breaks will be errors and there will
be a single whole-program graph.
error_on_graph_break: If not None, the current `error_on_graph_break` setting is set to the given value.
See `torch._dynamo.error_on_graph_break()` for more details on what `error_on_graph_break` means.
Unlike `nopython=True` (i.e. `fullgraph=True`), there is no guarantee of a single whole-program graph.
If `nopython` is True, `error_on_graph_break` does nothing.
disable: If True, turn this decorator into a no-op
dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
disable all dynamic shapes support (always specialize). If None, automatically
detect when sizes vary and generate dynamic kernels upon recompile.
Example Usage::
@torch._dynamo.optimize()
def toy_example(a, b): ...
"""
check_if_dynamo_supported()
check_for_incompatible_configs()
# Note: The hooks object could be global instead of passed around, *however* that would make
# for a confusing API usage and plumbing story wherein we nest multiple .optimize calls.
# There is some prior art around this, w/r/t nesting backend calls are enforced to be the same
# compiler, however, this feels onerous for callback and hooks, and it feels better to give our users an
# easier to understand UX at the cost of a little more plumbing on our end.
hooks = Hooks(
guard_export_fn=guard_export_fn,
guard_fail_fn=guard_fail_fn,
guard_filter_fn=guard_filter_fn,
)
torch._C._log_api_usage_once("torch._dynamo.optimize")
if (
disable
or os.environ.get("TORCHDYNAMO_DISABLE", "") == "1"
or (not justknobs_check("pytorch/compiler:enable_dynamo"))
):
return _NullDecorator()
if nopython and not config.debug_force_graph_break_on_leaf_return:
return optimize_assert(
backend,
dynamic=dynamic,
hooks=hooks,
rebuild_ctx=rebuild_ctx,
package=package,
)
backend = get_compiler_fn(backend)
# Find if backend has any extra context manager
backend_ctx_ctor = getattr(backend, "backend_ctx_ctor", null_context)
# The backend function is stashed in the callable returned by
# _optimize_catch_errors in the field _torchdynamo_orig_backend. This can
# be used by eval_frame.c to insert a guard on the backend.
# With CachingPrecompile, instantiate an uninitialized CompilePackage
# which gets initialized by _optimize_catch_errors.__call__ once we have a function
if config.caching_precompile and package is None:
from .package import CompilePackage
package = CompilePackage(fn=None, dynamo=None, ignore_inlined_sources=False)
return _optimize_catch_errors(
convert_frame.convert_frame(
# pyrefly: ignore [bad-argument-type]
backend,
hooks,
package=package,
),
hooks,
backend_ctx_ctor,
fullgraph=False,
error_on_graph_break=error_on_graph_break
and not config.debug_force_graph_break_on_leaf_return,
dynamic=dynamic,
compiler_config=(
backend.get_compiler_config()
if hasattr(backend, "get_compiler_config")
else None
),
rebuild_ctx=rebuild_ctx,
package=package,
)
|
The main entrypoint of TorchDynamo. Do graph capture and call
backend() to optimize extracted graphs.
Args:
backend: One of the two things:
- Either, a function/callable taking a torch.fx.GraphModule and
example_inputs and returning a python callable that runs the
graph faster.
One can also provide additional context for the backend, like
torch.jit.fuser("fuser2"), by setting the backend_ctx_ctor attribute.
See AOTAutogradMemoryEfficientFusionWithContext for the usage.
- Or, a string backend name in `torch._dynamo.list_backends()`
nopython: If True, graph breaks will be errors and there will
be a single whole-program graph.
error_on_graph_break: If not None, the current `error_on_graph_break` setting is set to the given value.
See `torch._dynamo.error_on_graph_break()` for more details on what `error_on_graph_break` means.
Unlike `nopython=True` (i.e. `fullgraph=True`), there is no guarantee of a single whole-program graph.
If `nopython` is True, `error_on_graph_break` does nothing.
disable: If True, turn this decorator into a no-op
dynamic: If True, upfront compile as dynamic a kernel as possible. If False,
disable all dynamic shapes support (always specialize). If None, automatically
detect when sizes vary and generate dynamic kernels upon recompile.
Example Usage::
@torch._dynamo.optimize()
def toy_example(a, b): ...
|
python
|
torch/_dynamo/eval_frame.py
| 1,428
|
[
"rebuild_ctx",
"backend",
"nopython",
"error_on_graph_break",
"guard_export_fn",
"guard_fail_fn",
"guard_filter_fn",
"disable",
"dynamic",
"package"
] |
Union[OptimizeContext, _NullDecorator]
| true
| 10
| 6.48
|
pytorch/pytorch
| 96,034
|
google
| false
|
dt64arr_to_periodarr
|
def dt64arr_to_periodarr(
data, freq, tz=None
) -> tuple[npt.NDArray[np.int64], BaseOffset]:
"""
Convert a datetime-like array to values Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int64]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if not isinstance(data.dtype, np.dtype) or data.dtype.kind != "M":
raise ValueError(f"Wrong dtype: {data.dtype}")
if freq is None:
if isinstance(data, ABCIndex):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
elif isinstance(data, (ABCIndex, ABCSeries)):
data = data._values
reso = get_unit_from_dtype(data.dtype)
freq = Period._maybe_convert_freq(freq)
base = freq._period_dtype_code
return c_dt64arr_to_periodarr(data.view("i8"), base, tz, reso=reso), freq
|
Convert a datetime-like array to values Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int64]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
|
python
|
pandas/core/arrays/period.py
| 1,334
|
[
"data",
"freq",
"tz"
] |
tuple[npt.NDArray[np.int64], BaseOffset]
| true
| 7
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
alterShareGroupOffsets
|
AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map<TopicPartition, Long> offsets, AlterShareGroupOffsetsOptions options);
|
Alters offsets for the specified group. In order to succeed, the group must be empty.
<p>This operation is not transactional, so it may succeed for some partitions while fail for others.
@param groupId The group for which to alter offsets.
@param offsets A map of offsets by partition. Partitions not specified in the map are ignored.
@param options The options to use when altering the offsets.
@return The AlterShareGroupOffsetsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,955
|
[
"groupId",
"offsets",
"options"
] |
AlterShareGroupOffsetsResult
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
beginWaitingFor
|
@GuardedBy("lock")
private void beginWaitingFor(Guard guard) {
int waiters = guard.waiterCount++;
if (waiters == 0) {
// push guard onto activeGuards
guard.next = activeGuards;
activeGuards = guard;
}
}
|
Records that the current thread is about to wait on the specified guard.
|
java
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
| 1,152
|
[
"guard"
] |
void
| true
| 2
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
default_dtypes
|
def default_dtypes(self, /, *, device: _Device | None = None) -> DefaultDTypes:
"""
The default data types used for new Dask arrays.
For Dask, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new Dask
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': dask.float64,
'complex floating': dask.complex128,
'integral': dask.int64,
'indexing': dask.int64}
"""
if device not in ["cpu", _DASK_DEVICE, None]:
raise ValueError(
f'Device not understood. Only "cpu" or _DASK_DEVICE is allowed, '
f"but received: {device!r}"
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
|
The default data types used for new Dask arrays.
For Dask, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new Dask
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': dask.float64,
'complex floating': dask.complex128,
'integral': dask.int64,
'indexing': dask.int64}
|
python
|
sklearn/externals/array_api_compat/dask/array/_info.py
| 172
|
[
"self",
"device"
] |
DefaultDTypes
| true
| 2
| 7.84
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
get_job_state
|
def get_job_state(self, job_name: str, run_id: str) -> str:
"""
Get the status of a job run.
:param job_name: The name of the job being processed during this run.
:param run_id: The unique identifier of the job run.
:return: State of the job run.
'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
"""
response = self.conn.describe_job_run(Name=job_name, RunId=run_id)
return response["State"]
|
Get the status of a job run.
:param job_name: The name of the job being processed during this run.
:param run_id: The unique identifier of the job run.
:return: State of the job run.
'STARTING'|'RUNNING'|'STOPPING'|'STOPPED'|'SUCCEEDED'|'FAILED'|'TIMEOUT'
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_databrew.py
| 58
|
[
"self",
"job_name",
"run_id"
] |
str
| true
| 1
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
isJulUsingASingleConsoleHandlerAtMost
|
private boolean isJulUsingASingleConsoleHandlerAtMost() {
java.util.logging.Logger rootLogger = java.util.logging.LogManager.getLogManager().getLogger("");
Handler[] handlers = rootLogger.getHandlers();
return handlers.length == 0 || (handlers.length == 1 && handlers[0] instanceof ConsoleHandler);
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 198
|
[] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
find_guarded_entry
|
def find_guarded_entry(
cls: type[GuardedCache[T]],
key: str,
local: bool,
remote_cache: RemoteCache[JsonDataTy] | None,
evaluate_guards: Callable[[str, list[int] | list[torch.SymInt]], bool],
hints: list[int],
) -> tuple[T | None, bytes | None, dict[str, str]]:
"""
Find the first cache entry in iterate_over_candidates that passes `evaluate_guards`.
Args:
key: The cache key to look up
local: Whether to check the local cache
remote_cache: The remote cache to check, if any
evaluate_guards: Function that evaluates whether a guard passes the check,
given a list of hint values and the guard expression.
hints: List of symint hints paired with evaluate_guards
Returns:
A tuple of (graph, pickled_content) if found, or (None, None) if not found
"""
graph = None
pickled_content = None
result_status = "full_miss"
sample_guards_expr = None
# Iterate over any entries in the subdir for this key and evaluate
# guards to determine whether there's a hit.
for candidate, content in cls.iterate_over_candidates(local, remote_cache, key):
assert hasattr(candidate, "guards_expr")
if not candidate.guards_expr: # type: ignore[attr-defined]
# No guards to evaluate, so this is a hit.
graph = candidate
pickled_content = content
result_status = "hit"
break
# Evaluate the guard expression in the current context.
# If there's not a cache hit, we don't want the evaluation to
# affect the current env, e.g., cause the creation of new guards,
# so we evaluate with the hints instead of the symbols.
hit = bool(evaluate_guards(candidate.guards_expr, hints)) # type: ignore[attr-defined]
if hit:
graph = candidate
pickled_content = content
result_status = "hit"
sample_guards_expr = candidate.guards_expr
break
else:
# At least one guard missed, log this
result_status = "guard_miss"
sample_guards_expr = candidate.guards_expr
info = {"cache_status_detailed": result_status}
if sample_guards_expr is not None:
info["cache_status_guard_expr"] = sample_guards_expr
return graph, pickled_content, info
|
Find the first cache entry in iterate_over_candidates that passes `evaluate_guards`.
Args:
key: The cache key to look up
local: Whether to check the local cache
remote_cache: The remote cache to check, if any
evaluate_guards: Function that evaluates whether a guard passes the check,
given a list of hint values and the guard expression.
hints: List of symint hints paired with evaluate_guards
Returns:
A tuple of (graph, pickled_content) if found, or (None, None) if not found
|
python
|
torch/_inductor/codecache.py
| 1,059
|
[
"cls",
"key",
"local",
"remote_cache",
"evaluate_guards",
"hints"
] |
tuple[T | None, bytes | None, dict[str, str]]
| true
| 6
| 7.84
|
pytorch/pytorch
| 96,034
|
google
| false
|
dotProduct
|
private static double dotProduct(double x1, double y1, double z1, double x2, double y2, double z2) {
return x1 * x2 + y1 * y2 + z1 * z2;
}
|
Calculate the dot product between two 3D coordinates.
@param x1 The first 3D coordinate from the first set of coordinates.
@param y1 The second 3D coordinate from the first set of coordinates.
@param z1 The third 3D coordinate from the first set of coordinates.
@param x2 The first 3D coordinate from the second set of coordinates.
@param y2 The second 3D coordinate from the second set of coordinates.
@param z2 The third 3D coordinate from the second set of coordinates.
@return The dot product.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java
| 218
|
[
"x1",
"y1",
"z1",
"x2",
"y2",
"z2"
] | true
| 1
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
first
|
def first(
self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True
) -> NDFrameT:
"""
Compute the first entry of each column within each group.
Defaults to skipping NA elements.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
min_count : int, default -1
The required number of valid values to perform the operation. If fewer
than ``min_count`` valid values are present the result will be NA.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 2.2.1
Returns
-------
Series or DataFrame
First values within each group.
See Also
--------
DataFrame.groupby : Apply a function groupby to each row or column of a
DataFrame.
core.groupby.DataFrameGroupBy.last : Compute the last non-null entry
of each column.
core.groupby.DataFrameGroupBy.nth : Take the nth row from each group.
Examples
--------
>>> df = pd.DataFrame(
... dict(
... A=[1, 1, 3],
... B=[None, 5, 6],
... C=[1, 2, 3],
... D=["3/11/2000", "3/12/2000", "3/13/2000"],
... )
... )
>>> df["D"] = pd.to_datetime(df["D"])
>>> df.groupby("A").first()
B C D
A
1 5.0 1 2000-03-11
3 6.0 3 2000-03-13
>>> df.groupby("A").first(min_count=2)
B C D
A
1 NaN 1.0 2000-03-11
3 NaN NaN NaT
>>> df.groupby("A").first(numeric_only=True)
B C
A
1 5.0 1
3 6.0 3
"""
def first_compat(obj: NDFrameT):
def first(x: Series):
"""Helper function for first item that isn't NA."""
arr = x.array[notna(x.array)]
if not len(arr):
return x.array.dtype.na_value
return arr[0]
if isinstance(obj, DataFrame):
return obj.apply(first)
elif isinstance(obj, Series):
return first(obj)
else: # pragma: no cover
raise TypeError(type(obj))
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
alias="first",
npfunc=first_compat,
skipna=skipna,
)
|
Compute the first entry of each column within each group.
Defaults to skipping NA elements.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
min_count : int, default -1
The required number of valid values to perform the operation. If fewer
than ``min_count`` valid values are present the result will be NA.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 2.2.1
Returns
-------
Series or DataFrame
First values within each group.
See Also
--------
DataFrame.groupby : Apply a function groupby to each row or column of a
DataFrame.
core.groupby.DataFrameGroupBy.last : Compute the last non-null entry
of each column.
core.groupby.DataFrameGroupBy.nth : Take the nth row from each group.
Examples
--------
>>> df = pd.DataFrame(
... dict(
... A=[1, 1, 3],
... B=[None, 5, 6],
... C=[1, 2, 3],
... D=["3/11/2000", "3/12/2000", "3/13/2000"],
... )
... )
>>> df["D"] = pd.to_datetime(df["D"])
>>> df.groupby("A").first()
B C D
A
1 5.0 1 2000-03-11
3 6.0 3 2000-03-13
>>> df.groupby("A").first(min_count=2)
B C D
A
1 NaN 1.0 2000-03-11
3 NaN NaN NaT
>>> df.groupby("A").first(numeric_only=True)
B C
A
1 5.0 1
3 6.0 3
|
python
|
pandas/core/groupby/groupby.py
| 3,255
|
[
"self",
"numeric_only",
"min_count",
"skipna"
] |
NDFrameT
| true
| 5
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_get_dag_with_task
|
def _get_dag_with_task(
dagbag: DagBag, dag_id: str, task_id: str | None = None
) -> tuple[DAG, BaseOperator | MappedOperator | None]:
"""
Retrieve a DAG and optionally a task from the DagBag.
:param dagbag: DagBag to retrieve from
:param dag_id: DAG ID to retrieve
:param task_id: Optional task ID to retrieve from the DAG
:return: tuple of (dag, task) where task is None if not requested
:raises ValueError: If DAG or task is not found
"""
if dag_id not in dagbag.dags:
raise ValueError(
f"DAG '{dag_id}' not found in DagBag. "
f"This typically indicates a race condition where the DAG was removed or failed to parse."
)
dag = dagbag.dags[dag_id]
if task_id is not None:
try:
task = dag.get_task(task_id)
return dag, task
except TaskNotFound:
raise ValueError(
f"Task '{task_id}' not found in DAG '{dag_id}'. "
f"This typically indicates a race condition where the task was removed or the DAG structure changed."
) from None
return dag, None
|
Retrieve a DAG and optionally a task from the DagBag.
:param dagbag: DagBag to retrieve from
:param dag_id: DAG ID to retrieve
:param task_id: Optional task ID to retrieve from the DAG
:return: tuple of (dag, task) where task is None if not requested
:raises ValueError: If DAG or task is not found
|
python
|
airflow-core/src/airflow/dag_processing/processor.py
| 255
|
[
"dagbag",
"dag_id",
"task_id"
] |
tuple[DAG, BaseOperator | MappedOperator | None]
| true
| 3
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
_execute_work
|
def _execute_work(log: Logger, workload: workloads.ExecuteTask, team_conf) -> None:
"""
Execute command received and stores result state in queue.
:param log: Logger instance
:param workload: The workload to execute
:param team_conf: Team-specific executor configuration
"""
from airflow.sdk.execution_time.supervisor import supervise
# Create team suffix for process title
team_suffix = f" [{team_conf.team_name}]" if team_conf.team_name else ""
setproctitle(f"airflow worker -- LocalExecutor{team_suffix}: {workload.ti.id}", log)
base_url = team_conf.get("api", "base_url", fallback="/")
# If it's a relative URL, use localhost:8080 as the default
if base_url.startswith("/"):
base_url = f"http://localhost:8080{base_url}"
default_execution_api_server = f"{base_url.rstrip('/')}/execution/"
# This will return the exit code of the task process, but we don't care about that, just if the
# _supervisor_ had an error reporting the state back (which will result in an exception.)
supervise(
# This is the "wrong" ti type, but it duck types the same. TODO: Create a protocol for this.
ti=workload.ti, # type: ignore[arg-type]
dag_rel_path=workload.dag_rel_path,
bundle_info=workload.bundle_info,
token=workload.token,
server=team_conf.get("core", "execution_api_server_url", fallback=default_execution_api_server),
log_path=workload.log_path,
)
|
Execute command received and stores result state in queue.
:param log: Logger instance
:param workload: The workload to execute
:param team_conf: Team-specific executor configuration
|
python
|
airflow-core/src/airflow/executors/local_executor.py
| 111
|
[
"log",
"workload",
"team_conf"
] |
None
| true
| 3
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
toIntValue
|
public static int toIntValue(final Character ch) {
return toIntValue(toChar(ch));
}
|
Converts the character to the Integer it represents, throwing an
exception if the character is not numeric.
<p>This method converts the char '1' to the int 1 and so on.</p>
<pre>
CharUtils.toIntValue('3') = 3
CharUtils.toIntValue(null) throws IllegalArgumentException
CharUtils.toIntValue('A') throws IllegalArgumentException
</pre>
@param ch the character to convert, not null
@return the int value of the character
@throws NullPointerException if the Character is null
@throws IllegalArgumentException if the Character is not ASCII numeric
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 431
|
[
"ch"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkNoOverflow
|
private static int checkNoOverflow(long result) {
checkArgument(
result == (int) result,
"the total number of elements (%s) in the arrays must fit in an int",
result);
return (int) result;
}
|
Returns the values from each provided array combined into a single array. For example, {@code
concat(new boolean[] {a, b}, new boolean[] {}, new boolean[] {c}} returns the array {@code {a,
b, c}}.
@param arrays zero or more {@code boolean} arrays
@return a single array containing all the values from the source arrays, in order
@throws IllegalArgumentException if the total number of elements in {@code arrays} does not fit
in an {@code int}
|
java
|
android/guava/src/com/google/common/primitives/Booleans.java
| 250
|
[
"result"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
prod
|
def prod(
self, numeric_only: bool = False, min_count: int = 0, skipna: bool = True
) -> NDFrameT:
"""
Compute prod of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Computed prod of values within each group.
See Also
--------
Series.prod : Return the product of the values over the requested axis.
DataFrame.prod : Return the product of the values over the requested axis.
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 4], index=lst)
>>> ser
a 1
a 2
b 3
b 4
dtype: int64
>>> ser.groupby(level=0).prod()
a 2
b 12
dtype: int64
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]
>>> df = pd.DataFrame(
... data,
... columns=["a", "b", "c"],
... index=["tiger", "leopard", "cheetah", "lion"],
... )
>>> df
a b c
tiger 1 8 2
leopard 1 2 5
cheetah 2 5 8
lion 2 6 9
>>> df.groupby("a").prod()
b c
a
1 16 10
2 30 72
"""
return self._agg_general(
numeric_only=numeric_only,
min_count=min_count,
skipna=skipna,
alias="prod",
npfunc=np.prod,
)
|
Compute prod of group values.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Computed prod of values within each group.
See Also
--------
Series.prod : Return the product of the values over the requested axis.
DataFrame.prod : Return the product of the values over the requested axis.
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 4], index=lst)
>>> ser
a 1
a 2
b 3
b 4
dtype: int64
>>> ser.groupby(level=0).prod()
a 2
b 12
dtype: int64
For DataFrameGroupBy:
>>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]
>>> df = pd.DataFrame(
... data,
... columns=["a", "b", "c"],
... index=["tiger", "leopard", "cheetah", "lion"],
... )
>>> df
a b c
tiger 1 8 2
leopard 1 2 5
cheetah 2 5 8
lion 2 6 9
>>> df.groupby("a").prod()
b c
a
1 16 10
2 30 72
|
python
|
pandas/core/groupby/groupby.py
| 3,031
|
[
"self",
"numeric_only",
"min_count",
"skipna"
] |
NDFrameT
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
ffill
|
def ffill(self, limit: int | None = None):
"""
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.ffill: Returns Series with minimum number of char in object.
DataFrame.ffill: Object with missing values filled or None if inplace=True.
Series.fillna: Fill NaN values of a Series.
DataFrame.fillna: Fill NaN values of a DataFrame.
Examples
--------
For SeriesGroupBy:
>>> key = [0, 0, 1, 1]
>>> ser = pd.Series([np.nan, 2, 3, np.nan], index=key)
>>> ser
0 NaN
0 2.0
1 3.0
1 NaN
dtype: float64
>>> ser.groupby(level=0).ffill()
0 NaN
0 2.0
1 3.0
1 3.0
dtype: float64
For DataFrameGroupBy:
>>> df = pd.DataFrame(
... {
... "key": [0, 0, 1, 1, 1],
... "A": [np.nan, 2, np.nan, 3, np.nan],
... "B": [2, 3, np.nan, np.nan, np.nan],
... "C": [np.nan, np.nan, 2, np.nan, np.nan],
... }
... )
>>> df
key A B C
0 0 NaN 2.0 NaN
1 0 2.0 3.0 NaN
2 1 NaN NaN 2.0
3 1 3.0 NaN NaN
4 1 NaN NaN NaN
Propagate non-null values forward or backward within each group along columns.
>>> df.groupby("key").ffill()
A B C
0 NaN 2.0 NaN
1 2.0 3.0 NaN
2 NaN NaN 2.0
3 3.0 NaN 2.0
4 3.0 NaN 2.0
Propagate non-null values forward or backward within each group along rows.
>>> df.T.groupby(np.array([0, 0, 1, 1])).ffill().T
key A B C
0 0.0 0.0 2.0 2.0
1 0.0 2.0 3.0 3.0
2 1.0 1.0 NaN 2.0
3 1.0 3.0 NaN NaN
4 1.0 1.0 NaN NaN
Only replace the first NaN element within a group along columns.
>>> df.groupby("key").ffill(limit=1)
A B C
0 NaN 2.0 NaN
1 2.0 3.0 NaN
2 NaN NaN 2.0
3 3.0 NaN 2.0
4 3.0 NaN NaN
"""
return self._fill("ffill", limit=limit)
|
Forward fill the values.
Parameters
----------
limit : int, optional
Limit of how many values to fill.
Returns
-------
Series or DataFrame
Object with missing values filled.
See Also
--------
Series.ffill: Returns Series with minimum number of char in object.
DataFrame.ffill: Object with missing values filled or None if inplace=True.
Series.fillna: Fill NaN values of a Series.
DataFrame.fillna: Fill NaN values of a DataFrame.
Examples
--------
For SeriesGroupBy:
>>> key = [0, 0, 1, 1]
>>> ser = pd.Series([np.nan, 2, 3, np.nan], index=key)
>>> ser
0 NaN
0 2.0
1 3.0
1 NaN
dtype: float64
>>> ser.groupby(level=0).ffill()
0 NaN
0 2.0
1 3.0
1 3.0
dtype: float64
For DataFrameGroupBy:
>>> df = pd.DataFrame(
... {
... "key": [0, 0, 1, 1, 1],
... "A": [np.nan, 2, np.nan, 3, np.nan],
... "B": [2, 3, np.nan, np.nan, np.nan],
... "C": [np.nan, np.nan, 2, np.nan, np.nan],
... }
... )
>>> df
key A B C
0 0 NaN 2.0 NaN
1 0 2.0 3.0 NaN
2 1 NaN NaN 2.0
3 1 3.0 NaN NaN
4 1 NaN NaN NaN
Propagate non-null values forward or backward within each group along columns.
>>> df.groupby("key").ffill()
A B C
0 NaN 2.0 NaN
1 2.0 3.0 NaN
2 NaN NaN 2.0
3 3.0 NaN 2.0
4 3.0 NaN 2.0
Propagate non-null values forward or backward within each group along rows.
>>> df.T.groupby(np.array([0, 0, 1, 1])).ffill().T
key A B C
0 0.0 0.0 2.0 2.0
1 0.0 2.0 3.0 3.0
2 1.0 1.0 NaN 2.0
3 1.0 3.0 NaN NaN
4 1.0 1.0 NaN NaN
Only replace the first NaN element within a group along columns.
>>> df.groupby("key").ffill(limit=1)
A B C
0 NaN 2.0 NaN
1 2.0 3.0 NaN
2 NaN NaN 2.0
3 3.0 NaN 2.0
4 3.0 NaN NaN
|
python
|
pandas/core/groupby/groupby.py
| 4,090
|
[
"self",
"limit"
] | true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.