function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
get_airflow_health
|
def get_airflow_health() -> dict[str, Any]:
    """Return health information for the Airflow metadatabase, scheduler, triggerer and DAG processor."""
    metadatabase_status = HEALTHY
    scheduler_status = UNHEALTHY
    triggerer_status: str | None = UNHEALTHY
    dag_processor_status: str | None = UNHEALTHY
    scheduler_heartbeat = None
    triggerer_heartbeat = None
    dag_processor_heartbeat = None

    # Each component is probed independently; a failure while querying the DB
    # for any of them marks the metadatabase itself as unhealthy.
    try:
        job = SchedulerJobRunner.most_recent_job()
        if job:
            if job.latest_heartbeat:
                scheduler_heartbeat = job.latest_heartbeat.isoformat()
            if job.is_alive():
                scheduler_status = HEALTHY
    except Exception:
        metadatabase_status = UNHEALTHY

    try:
        job = TriggererJobRunner.most_recent_job()
        if job:
            if job.latest_heartbeat:
                triggerer_heartbeat = job.latest_heartbeat.isoformat()
            if job.is_alive():
                triggerer_status = HEALTHY
        else:
            # No triggerer has ever run: report "absent" (None) rather than unhealthy.
            triggerer_status = None
    except Exception:
        metadatabase_status = UNHEALTHY

    try:
        job = DagProcessorJobRunner.most_recent_job()
        if job:
            if job.latest_heartbeat:
                dag_processor_heartbeat = job.latest_heartbeat.isoformat()
            if job.is_alive():
                dag_processor_status = HEALTHY
        else:
            # Same convention as the triggerer: absent, not unhealthy.
            dag_processor_status = None
    except Exception:
        metadatabase_status = UNHEALTHY

    return {
        "metadatabase": {"status": metadatabase_status},
        "scheduler": {
            "status": scheduler_status,
            "latest_scheduler_heartbeat": scheduler_heartbeat,
        },
        "triggerer": {
            "status": triggerer_status,
            "latest_triggerer_heartbeat": triggerer_heartbeat,
        },
        "dag_processor": {
            "status": dag_processor_status,
            "latest_dag_processor_heartbeat": dag_processor_heartbeat,
        },
    }
|
Get the health for Airflow metadatabase, scheduler and triggerer.
|
python
|
airflow-core/src/airflow/api/common/airflow_health.py
| 29
|
[] |
dict[str, Any]
| true
| 12
| 6.48
|
apache/airflow
| 43,597
|
unknown
| false
|
get_executor_names
|
def get_executor_names(cls, validate_teams: bool = True) -> list[ExecutorName]:
    """
    Return the executor names from Airflow configuration.

    Thin public wrapper that delegates to the private implementation.

    :param validate_teams: Whether to validate that team names exist in database
    :return: List of executor names from Airflow configuration
    """
    return cls._get_executor_names(validate_teams=validate_teams)
|
Return the executor names from Airflow configuration.
:param validate_teams: Whether to validate that team names exist in database
:return: List of executor names from Airflow configuration
|
python
|
airflow-core/src/airflow/executors/executor_loader.py
| 260
|
[
"cls",
"validate_teams"
] |
list[ExecutorName]
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
createDate
|
/**
 * Builds a Date for the given year/month/day whose time is local midnight.
 * Avoids `new Date(year, month, date)`, which remaps years 0-99 to 1900-1999.
 */
function createDate(year: number, month: number, date: number): Date {
  // Start from the Unix epoch (midnight UTC, Jan 1st 1970). Depending on the
  // local timezone this may render as a different local day/month/year.
  const result = new Date(0);
  // `setFullYear` accepts small years (e.g. 0001) verbatim and does not touch
  // the time-of-day component.
  result.setFullYear(year, month, date);
  // Zero out hours/minutes/seconds so the value is local midnight regardless
  // of the timezone offset carried over from `new Date(0)`.
  result.setHours(0, 0, 0);
  return result;
}
|
Create a new Date object with the given date value, and the time set to midnight.
We cannot use `new Date(year, month, date)` because it maps years between 0 and 99 to 1900-1999.
See: https://github.com/angular/angular/issues/40377
Note that this function returns a Date object whose time is midnight in the current locale's
timezone. In the future we might want to change this to be midnight in UTC, but this would be a
considerable breaking change.
|
typescript
|
packages/common/src/i18n/format_date.ts
| 168
|
[
"year",
"month",
"date"
] | true
| 1
| 6
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
onCancelled
|
@Override
protected void onCancelled() {
    // Cancel any pending periodic re-run. The cancel happens under `this`
    // lock; NOTE(review): presumably the scheduling path synchronizes on
    // `this` as well so no new run can be slipped in — confirm.
    synchronized (this) {
        if (scheduledPeriodicRun != null) {
            scheduledPeriodicRun.cancel();
        }
    }
    // Mark the task done outside the lock.
    markAsCompleted();
}
|
Download, update, and clean up GeoIP databases as required by the GeoIP processors in the cluster.
Guaranteed to not be called concurrently.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/AbstractGeoIpDownloader.java
| 158
|
[] |
void
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
resolve
|
/**
 * Resolves the given {@link JarEntry} to a file by delegating to
 * {@link #resolve(String, String)} with the entry's name.
 *
 * @param entry the jar entry
 * @param newName the new name of the file
 * @return file where the contents should be written or {@code null} if this
 * entry should be skipped
 * @throws IOException if resolution fails
 */
default @Nullable File resolve(JarEntry entry, String newName) throws IOException {
    return resolve(entry.getName(), newName);
}
|
Resolves the given {@link JarEntry} to a file.
@param entry the jar entry
@param newName the new name of the file
@return file where the contents should be written or {@code null} if this entry
should be skipped
@throws IOException if something went wrong
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/ExtractCommand.java
| 378
|
[
"entry",
"newName"
] |
File
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
throwIfNoTransactionManager
|
/**
 * Ensures a transaction manager is present before a transactional method proceeds.
 *
 * @throws IllegalStateException if no transaction manager was configured
 */
private void throwIfNoTransactionManager() {
    if (transactionManager != null) {
        return;
    }
    throw new IllegalStateException("Cannot use transactional methods without enabling transactions " +
            "by setting the " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " configuration property");
}
|
Throws an IllegalStateException when the producer has no transaction
manager configured, i.e. when a transactional method is invoked without
the transactional.id configuration property being set.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
| 1,619
|
[] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
invokeAwareMethods
|
/**
 * Invokes the {@code *Aware} callback interfaces implemented by the given
 * instance, supplying this object's configured collaborators.
 *
 * @param instance the freshly created object to configure; non-Aware
 * instances are left untouched
 */
private void invokeAwareMethods(Object instance) {
    if (!(instance instanceof Aware)) {
        return;
    }
    if (instance instanceof BeanClassLoaderAware beanClassLoaderAware) {
        beanClassLoaderAware.setBeanClassLoader(this.beanClassLoader);
    }
    if (instance instanceof BeanFactoryAware beanFactoryAware) {
        beanFactoryAware.setBeanFactory(this.beanFactory);
    }
    if (instance instanceof EnvironmentAware environmentAware) {
        environmentAware.setEnvironment(this.environment);
    }
    if (instance instanceof ResourceLoaderAware resourceLoaderAware) {
        resourceLoaderAware.setResourceLoader(this.resourceLoader);
    }
}
|
Invokes the Aware callback interfaces (BeanClassLoaderAware, BeanFactoryAware,
EnvironmentAware, ResourceLoaderAware) implemented by the given instance,
injecting the corresponding collaborators.
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
| 329
|
[
"instance"
] |
void
| true
| 6
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
unescapeXml
|
/**
 * Unescapes XML entity escapes in the given string by delegating to the
 * {@code UNESCAPE_XML} translator.
 *
 * @param input the {@link String} to unescape, may be null
 * @return a new unescaped {@link String}, {@code null} if null string input
 */
public static final String unescapeXml(final String input) {
    return UNESCAPE_XML.translate(input);
}
|
Unescapes a string containing XML entity escapes to a string
containing the actual Unicode characters corresponding to the
escapes.
<p>Supports only the five basic XML entities (gt, lt, quot, amp, apos).
Does not support DTDs or external entities.</p>
<p>Note that numerical \\u Unicode codes are unescaped to their respective
Unicode characters. This may change in future releases.</p>
@param input the {@link String} to unescape, may be null
@return a new unescaped {@link String}, {@code null} if null string input
@see #escapeXml(String)
@see #escapeXml10(String)
@see #escapeXml11(String)
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 779
|
[
"input"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
mnk_symbolic
|
def mnk_symbolic(
    self,
) -> tuple[sympy.Integer, sympy.Integer, sympy.Integer]:
    """
    Return the symbolic (M, N, K) dimensions of this matmul's operands.

    Works for both 2D (MM) and 3D batched (BMM) layouts because only the
    trailing two dimensions of each operand are inspected: M and K come from
    mat1's last two dims, N from mat2's last dim.

    Returns:
        A tuple of (M, N, K) dimensions
    """
    mat1_size = self.nodes()[self._mat1_idx].get_size()
    mat2_size = self.nodes()[self._mat2_idx].get_size()
    m, k = mat1_size[-2], mat1_size[-1]
    n = mat2_size[-1]
    # The inner dimensions must agree for the product to be defined.
    V.graph.sizevars.check_equals(k, mat2_size[-2])
    return (m, n, k)
|
Get the symbolic M, N, K dimensions for matrix multiplication.
Handles both 2D (MM) and 3D (BMM) tensors.
M is extracted from the second-to-last dimension of the first operand (mat1).
N is extracted from the last dimension of the second operand (mat2).
K is extracted from the last dimension of the first operand (mat1).
Returns:
A tuple of (M, N, K) dimensions
|
python
|
torch/_inductor/kernel_inputs.py
| 249
|
[
"self"
] |
tuple[sympy.Integer, sympy.Integer, sympy.Integer]
| true
| 1
| 7.04
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
indexOfIgnoreCase
|
/**
 * Case-insensitive find of the first index within a CharSequence, delegating
 * to {@code Strings.CI.indexOf(str, searchStr)}.
 *
 * @param str the CharSequence to check, may be null.
 * @param searchStr the CharSequence to find, may be null.
 * @return the first index of the search CharSequence, -1 if no match or {@code null} input.
 * @deprecated Use {@link Strings#indexOf(CharSequence, CharSequence) Strings.CI.indexOf(CharSequence, CharSequence)}.
 */
@Deprecated
public static int indexOfIgnoreCase(final CharSequence str, final CharSequence searchStr) {
    return Strings.CI.indexOf(str, searchStr);
}
|
Case in-sensitive find of the first index within a CharSequence.
<p>
A {@code null} CharSequence will return {@code -1}. A negative start position is treated as zero. An empty ("") search CharSequence always matches. A
start position greater than the string length only matches an empty search CharSequence.
</p>
<pre>
StringUtils.indexOfIgnoreCase(null, *) = -1
StringUtils.indexOfIgnoreCase(*, null) = -1
StringUtils.indexOfIgnoreCase("", "") = 0
StringUtils.indexOfIgnoreCase(" ", " ") = 0
StringUtils.indexOfIgnoreCase("aabaabaa", "a") = 0
StringUtils.indexOfIgnoreCase("aabaabaa", "b") = 2
StringUtils.indexOfIgnoreCase("aabaabaa", "ab") = 1
</pre>
@param str the CharSequence to check, may be null.
@param searchStr the CharSequence to find, may be null.
@return the first index of the search CharSequence, -1 if no match or {@code null} string input.
@since 2.5
@since 3.0 Changed signature from indexOfIgnoreCase(String, String) to indexOfIgnoreCase(CharSequence, CharSequence)
@deprecated Use {@link Strings#indexOf(CharSequence, CharSequence) Strings.CI.indexOf(CharSequence, CharSequence)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,066
|
[
"str",
"searchStr"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
visitExportDeclaration
|
/**
 * Visits an export declaration, eliding it when it contributes no runtime
 * value (type-only, or all of its named exports were elided).
 * @param node The export declaration node.
 */
function visitExportDeclaration(node: ExportDeclaration): VisitResult<Statement | undefined> {
    // `export type { ... }` never has a runtime form.
    if (node.isTypeOnly) {
        return undefined;
    }
    if (!node.exportClause || isNamespaceExport(node.exportClause)) {
        // never elide `export <whatever> from <whereever>` declarations -
        // they should be kept for sideffects/untyped exports, even when the
        // type checker doesn't know about any exports
        return factory.updateExportDeclaration(
            node,
            node.modifiers,
            node.isTypeOnly,
            node.exportClause,
            node.moduleSpecifier,
            node.attributes,
        );
    }
    // Elide the export declaration if all of its named exports are elided.
    // Under verbatimModuleSyntax an emptied clause is still emitted.
    const allowEmpty = !!compilerOptions.verbatimModuleSyntax;
    const exportClause = visitNode(
        node.exportClause,
        (bindings: NamedExportBindings) => visitNamedExportBindings(bindings, allowEmpty),
        isNamedExportBindings,
    );
    return exportClause
        ? factory.updateExportDeclaration(
            node,
            /*modifiers*/ undefined,
            node.isTypeOnly,
            exportClause,
            node.moduleSpecifier,
            node.attributes,
        )
        : undefined;
}
|
Visits an export declaration, eliding it if it does not contain a clause that resolves to a value.
@param node The export declaration node.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,337
|
[
"node"
] | true
| 5
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
max
|
/**
 * Returns the greatest value in {@code array}, treating values as unsigned.
 *
 * @param array a <i>nonempty</i> array of unsigned {@code long} values
 * @return the element that is unsigned-greater-than-or-equal to every other value
 * @throws IllegalArgumentException if {@code array} is empty
 */
public static long max(long... array) {
    checkArgument(array.length > 0);
    // flip() maps unsigned ordering onto signed ordering, so a plain signed
    // comparison on flipped values selects the unsigned maximum.
    long best = flip(array[0]);
    for (long value : array) {
        long candidate = flip(value);
        if (candidate > best) {
            best = candidate;
        }
    }
    return flip(best);
}
|
Returns the greatest value present in {@code array}, treating values as unsigned.
@param array a <i>nonempty</i> array of unsigned {@code long} values
@return the value present in {@code array} that is greater than or equal to every other value
in the array according to {@link #compare}
@throws IllegalArgumentException if {@code array} is empty
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLongs.java
| 110
|
[] | true
| 3
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
|
initialize_config
|
def initialize_config() -> AirflowConfigParser:
    """
    Load the Airflow config files and return the resulting parser.

    Called for you automatically as part of the Airflow boot process.
    """
    parser = AirflowConfigParser()
    if parser.getboolean("core", "unit_test_mode"):
        parser.load_test_config()
        return parser
    load_standard_airflow_configuration(parser)
    # unit_test_mode may also be switched on inside the user's airflow.cfg;
    # honour that by layering the default unit-test configuration on top.
    if parser.getboolean("core", "unit_test_mode"):
        parser.load_test_config()
    return parser
|
Load the Airflow config files.
Called for you automatically as part of the Airflow boot process.
|
python
|
airflow-core/src/airflow/configuration.py
| 789
|
[] |
AirflowConfigParser
| true
| 4
| 7.2
|
apache/airflow
| 43,597
|
unknown
| false
|
record
|
/**
 * Record a value at a known time, reusing the caller-supplied timestamp
 * instead of reading the clock again.
 *
 * @param value the value being recorded
 * @param timeMs the current POSIX time in milliseconds
 */
public void record(double value, long timeMs) {
    // shouldRecord() gates whether this sensor is active; skipped otherwise.
    if (shouldRecord()) {
        recordInternal(value, timeMs, true);
    }
}
|
Record a value at a known time. This method is slightly faster than {@link #record(double)} since it will reuse
the time stamp.
@param value The value we are recording
@param timeMs The current POSIX time in milliseconds
@throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum
bound
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java
| 209
|
[
"value",
"timeMs"
] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
getAccessibleMethodFromInterfaceNest
|
/**
 * Gets a matching method declared on one of the public interfaces implemented
 * by {@code cls} or any of its superclasses, searching subinterfaces
 * recursively. Returns {@code null} when no such interface method exists.
 *
 * @param cls parent class whose interface hierarchy is searched.
 * @param methodName name of the method we wish to call.
 * @param parameterTypes the parameter type signatures.
 * @return the accessible interface method, or {@code null} if not found.
 */
private static Method getAccessibleMethodFromInterfaceNest(Class<?> cls, final String methodName, final Class<?>... parameterTypes) {
    // Search up the superclass chain
    for (; cls != null; cls = cls.getSuperclass()) {
        // Check the implemented interfaces of the parent class
        final Class<?>[] interfaces = cls.getInterfaces();
        for (final Class<?> anInterface : interfaces) {
            // Is this interface public? Non-public ones are skipped.
            if (!ClassUtils.isPublic(anInterface)) {
                continue;
            }
            // Does the method exist on this interface?
            try {
                return anInterface.getDeclaredMethod(methodName, parameterTypes);
            } catch (final NoSuchMethodException ignored) {
                /*
                 * Swallow, if no method is found after the loop then this method returns null.
                 */
            }
            // Recursively check our parent interfaces
            final Method method = getAccessibleMethodFromInterfaceNest(anInterface, methodName, parameterTypes);
            if (method != null) {
                return method;
            }
        }
    }
    return null;
}
|
Gets an accessible method (that is, one that can be invoked via
reflection) that implements the specified method, by scanning through
all implemented interfaces and subinterfaces. If no such method
can be found, return {@code null}.
<p>
There isn't any good reason why this method must be {@code private}.
It is because there doesn't seem any reason why other classes should
call this rather than the higher level methods.
</p>
@param cls Parent class for the interfaces to be checked.
@param methodName Method name of the method we wish to call.
@param parameterTypes The parameter type signatures.
@return the accessible method or {@code null} if not found.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 167
|
[
"cls",
"methodName"
] |
Method
| true
| 5
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
bindEach
|
/**
 * Applies `bindFunction` (defaulting to `bind`) to every node in the array.
 * A missing array is a no-op.
 */
function bindEach(nodes: NodeArray<Node> | undefined, bindFunction: (node: Node) => void = bind): void {
    if (nodes !== undefined) {
        forEach(nodes, bindFunction);
    }
}
|
Applies the given bind function (defaulting to `bind`) to each node in the
array; does nothing when the array is undefined.
|
typescript
|
src/compiler/binder.ts
| 1,083
|
[
"nodes",
"bindFunction"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
compareIgnoreCase
|
/**
 * Null-safe, case-insensitive lexicographic comparison of two Strings, as per
 * {@link String#compareToIgnoreCase(String)}. Two {@code null} references are
 * equal; a single {@code null} ranks according to {@code nullIsLess}.
 *
 * @param str1 the String to compare from.
 * @param str2 the String to compare to.
 * @param nullIsLess whether a {@code null} value sorts before non-{@code null}.
 * @return negative, zero or positive as {@code str1} is less than, equal to,
 * or greater than {@code str2}, ignoring case.
 */
public static int compareIgnoreCase(final String str1, final String str2, final boolean nullIsLess) {
    if (str1 == str2) { // identity check intentionally covers the both-null case
        return 0;
    }
    if (str1 == null || str2 == null) {
        // Exactly one side is null here; its rank depends on nullIsLess.
        final int nullRank = nullIsLess ? -1 : 1;
        return str1 == null ? nullRank : -nullRank;
    }
    return str1.compareToIgnoreCase(str2);
}
|
Compares two Strings lexicographically, ignoring case differences, as per {@link String#compareToIgnoreCase(String)}, returning :
<ul>
<li>{@code int = 0}, if {@code str1} is equal to {@code str2} (or both {@code null})</li>
<li>{@code int < 0}, if {@code str1} is less than {@code str2}</li>
<li>{@code int > 0}, if {@code str1} is greater than {@code str2}</li>
</ul>
<p>
This is a {@code null} safe version of :
</p>
<pre>
str1.compareToIgnoreCase(str2)
</pre>
<p>
{@code null} inputs are handled according to the {@code nullIsLess} parameter. Two {@code null} references are considered equal. Comparison is case
insensitive.
</p>
<pre>{@code
StringUtils.compareIgnoreCase(null, null, *) = 0
StringUtils.compareIgnoreCase(null , "a", true) < 0
StringUtils.compareIgnoreCase(null , "a", false) > 0
StringUtils.compareIgnoreCase("a", null, true) > 0
StringUtils.compareIgnoreCase("a", null, false) < 0
StringUtils.compareIgnoreCase("abc", "abc", *) = 0
StringUtils.compareIgnoreCase("abc", "ABC", *) = 0
StringUtils.compareIgnoreCase("a", "b", *) < 0
StringUtils.compareIgnoreCase("b", "a", *) > 0
StringUtils.compareIgnoreCase("a", "B", *) < 0
StringUtils.compareIgnoreCase("A", "b", *) < 0
StringUtils.compareIgnoreCase("ab", "abc", *) < 0
}</pre>
@param str1 the String to compare from.
@param str2 the String to compare to.
@param nullIsLess whether consider {@code null} value less than non-{@code null} value.
@return < 0, 0, > 0, if {@code str1} is respectively less, equal ou greater than {@code str2}, ignoring case differences.
@see String#compareToIgnoreCase(String)
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 954
|
[
"str1",
"str2",
"nullIsLess"
] | true
| 6
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
describeStreamsGroups
|
/**
 * Describe streams groups in the cluster, with the default options.
 * Convenience overload of
 * {@link #describeStreamsGroups(Collection, DescribeStreamsGroupsOptions)}.
 *
 * @param groupIds The IDs of the groups to describe.
 * @return The DescribeStreamsGroupsResult.
 */
default DescribeStreamsGroupsResult describeStreamsGroups(Collection<String> groupIds) {
    return describeStreamsGroups(groupIds, new DescribeStreamsGroupsOptions());
}
|
Describe streams groups in the cluster, with the default options.
<p>
This is a convenience method for {@link #describeStreamsGroups(Collection, DescribeStreamsGroupsOptions)}
with default options. See the overload for more details.
@param groupIds The IDs of the groups to describe.
@return The DescribeStreamsGroupsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 2,056
|
[
"groupIds"
] |
DescribeStreamsGroupsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
get_used_airflow_sources
|
def get_used_airflow_sources() -> Path:
    """
    Return the root of the Airflow sources we operate on.

    Prefers sources found by walking upwards from the current directory, then
    falls back to the sources Breeze was installed from. Exits the process
    when neither can be located.

    :return: the Path for Airflow sources we use.
    """
    sources = search_upwards_for_airflow_root_path(Path.cwd())
    if sources is not None:
        return sources
    sources = get_installation_airflow_sources()
    if sources is not None:
        return sources
    warn_non_editable()
    sys.exit(1)
|
Retrieves the Root of used Airflow Sources which we operate on. Those are either Airflow sources found
upwards in directory tree or sources where Breeze was installed from.
:return: the Path for Airflow sources we use.
|
python
|
dev/breeze/src/airflow_breeze/utils/path_utils.py
| 169
|
[] |
Path
| true
| 3
| 8.24
|
apache/airflow
| 43,597
|
unknown
| false
|
_precompute_node_output_sets
|
def _precompute_node_output_sets(
    snodes: list[BaseSchedulerNode],
) -> dict[BaseSchedulerNode, OrderedSet[str]]:
    """
    Map each scheduler node to the set of its output buffer names.

    Computing these sets once up front avoids rebuilding OrderedSet objects
    repeatedly during exposed-time calculations.

    Returns:
        dict mapping each node to a set of its output names
    """
    output_names: dict[BaseSchedulerNode, OrderedSet[str]] = {}
    for snode in snodes:
        output_names[snode] = OrderedSet(out.get_name() for out in snode.get_outputs())
    return output_names
|
Pre-compute output name sets for all nodes.
This optimization avoids creating OrderedSet objects repeatedly during
exposed time calculations.
Returns:
dict mapping each node to a set of its output names
|
python
|
torch/_inductor/comms.py
| 412
|
[
"snodes"
] |
dict[BaseSchedulerNode, OrderedSet[str]]
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
format_array
|
def format_array(
    values: ArrayLike,
    formatter: Callable | None,
    float_format: FloatFormatType | None = None,
    na_rep: str = "NaN",
    digits: int | None = None,
    space: str | int | None = None,
    justify: str = "right",
    decimal: str = ".",
    leading_space: bool | None = True,
    quoting: int | None = None,
    fallback_formatter: Callable | None = None,
) -> list[str]:
    """
    Format an array for printing.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        The array whose elements are rendered to strings.
    formatter : callable, optional
        Per-element formatting function, passed through to the formatter class.
    float_format : optional
        Float format; defaults to the ``display.float_format`` option.
    na_rep : str, default "NaN"
        String representation of missing values.
    digits : int, optional
        Precision; defaults to the ``display.precision`` option.
    space : str or int, optional
        Padding width for each value; defaults to 12.
    justify : str, default "right"
        Justification of the formatted strings.
    decimal : str, default "."
        Decimal separator.
    leading_space : bool, optional, default True
        Whether the array should be formatted with a leading space.
        When an array as a column of a Series or DataFrame, we do want
        the leading space to pad between columns.
        When formatting an Index subclass
        (e.g. IntervalIndex._get_values_for_csv), we don't want the
        leading space since it should be left-aligned.
    quoting : int, optional
        csv quoting constant, passed through to the formatter class.
    fallback_formatter : callable, optional
        Formatter used when ``formatter`` is not supplied.

    Returns
    -------
    List[str]
    """
    # Select the formatter class by dtype. The order of these checks matters:
    # e.g. tz-aware datetimes are ExtensionDtype too, so DatetimeTZDtype must
    # be tested before the generic ExtensionDtype branch.
    fmt_klass: type[_GenericArrayFormatter]
    if lib.is_np_dtype(values.dtype, "M"):
        # timezone-naive datetime64
        fmt_klass = _Datetime64Formatter
        values = cast(DatetimeArray, values)
    elif isinstance(values.dtype, DatetimeTZDtype):
        # timezone-aware datetimes
        fmt_klass = _Datetime64TZFormatter
        values = cast(DatetimeArray, values)
    elif lib.is_np_dtype(values.dtype, "m"):
        # timedelta64
        fmt_klass = _Timedelta64Formatter
        values = cast(TimedeltaArray, values)
    elif isinstance(values.dtype, ExtensionDtype):
        fmt_klass = _ExtensionArrayFormatter
    elif lib.is_np_dtype(values.dtype, "fc"):
        # float or complex
        fmt_klass = FloatArrayFormatter
    elif lib.is_np_dtype(values.dtype, "iu"):
        # signed/unsigned integer
        fmt_klass = _IntArrayFormatter
    else:
        fmt_klass = _GenericArrayFormatter
    # Fill in display-option defaults for anything the caller left unset.
    if space is None:
        space = 12
    if float_format is None:
        float_format = get_option("display.float_format")
    if digits is None:
        digits = get_option("display.precision")
    fmt_obj = fmt_klass(
        values,
        digits=digits,
        na_rep=na_rep,
        float_format=float_format,
        formatter=formatter,
        space=space,
        justify=justify,
        decimal=decimal,
        leading_space=leading_space,
        quoting=quoting,
        fallback_formatter=fallback_formatter,
    )
    return fmt_obj.get_result()
|
Format an array for printing.
Parameters
----------
values : np.ndarray or ExtensionArray
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional, default True
Whether the array should be formatted with a leading space.
When an array as a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._get_values_for_csv), we don't want the
leading space since it should be left-aligned.
fallback_formatter
Returns
-------
List[str]
|
python
|
pandas/io/formats/format.py
| 1,090
|
[
"values",
"formatter",
"float_format",
"na_rep",
"digits",
"space",
"justify",
"decimal",
"leading_space",
"quoting",
"fallback_formatter"
] |
list[str]
| true
| 11
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
callAs
|
/**
 * Executes a {@code Callable} with {@code subject} as the current subject,
 * returning the value of {@code action.call()}. Exceptions thrown by the
 * action surface as a {@link CompletionException} with the original as cause.
 */
<T> T callAs(Subject subject, Callable<T> action) throws CompletionException;
|
Executes a {@code Callable} with {@code subject} as the
current subject.
@param subject the {@code Subject} that the specified {@code action}
will run as. This parameter may be {@code null}.
@param action the code to be run with {@code subject} as its current
subject. Must not be {@code null}.
@param <T> the type of value returned by the {@code call} method
of {@code action}
@return the value returned by the {@code call} method of {@code action}
@throws NullPointerException if {@code action} is {@code null}
@throws CompletionException if {@code action.call()} throws an exception.
The cause of the {@code CompletionException} is set to the exception
thrown by {@code action.call()}.
@see #current()
@see Subject#current()
@see Subject#callAs(Subject, Callable)
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/SecurityManagerCompatibility.java
| 101
|
[
"subject",
"action"
] |
T
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
take_nd
|
def take_nd(
    self,
    indexer: npt.NDArray[np.intp],
    axis: AxisInt,
    new_mgr_locs: BlockPlacement | None = None,
    fill_value=lib.no_default,
) -> Block:
    """
    Take values according to indexer and return them as a block.

    Parameters
    ----------
    indexer : np.ndarray[np.intp]
        Positions to take along ``axis``.
    axis : int
        Axis to take along (ignored for ExtensionBlock, see below).
    new_mgr_locs : BlockPlacement, optional
        Manager locations for the resulting block; defaults to this
        block's own locations.
    fill_value : optional
        Fill value for missing positions; when omitted, the block's own
        fill value is used and filling is disabled.
    """
    values = self.values

    # lib.no_default is a sentinel: fall back to the block's fill value
    # and disallow fills; any explicit value turns allow_fill on.
    if fill_value is lib.no_default:
        fill_value = self.fill_value
        allow_fill = False
    else:
        allow_fill = True

    # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype
    new_values = algos.take_nd(
        values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value
    )

    # Called from three places in managers, all of which satisfy
    # these assertions
    if isinstance(self, ExtensionBlock):
        # NB: in this case, the 'axis' kwarg will be ignored in the
        # algos.take_nd call above.
        assert not (self.ndim == 1 and new_mgr_locs is None)
        assert not (axis == 0 and new_mgr_locs is None)

    if new_mgr_locs is None:
        new_mgr_locs = self._mgr_locs

    # Keep the concrete Block subclass only when the dtype is unchanged;
    # a dtype change requires re-dispatching through make_block.
    if new_values.dtype != self.dtype:
        return self.make_block(new_values, new_mgr_locs)
    else:
        return self.make_block_same_class(new_values, new_mgr_locs)
|
Take values according to indexer and return them as a block.
|
python
|
pandas/core/internals/blocks.py
| 996
|
[
"self",
"indexer",
"axis",
"new_mgr_locs",
"fill_value"
] |
Block
| true
| 9
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
fuzz_scalar
|
def fuzz_scalar(spec, seed: int | None = None) -> float | int | bool | complex:
"""
Create a Python scalar value from a ScalarSpec.
Args:
spec: ScalarSpec containing the desired dtype and optionally a constant value
seed: Random seed for reproducibility. If None, uses current random state.
Returns:
Python scalar (float, int, bool, complex) matching the dtype
"""
# If a constant value is specified, use it directly
if spec.constant is not None:
return spec.constant
# Create a local random instance to avoid interfering with global state
if seed is not None:
local_random = random.Random(seed)
# Save and restore global random state
old_random_state = random.getstate()
try:
random.setstate(local_random.getstate())
# Create a scalar value based on dtype
if spec.dtype.is_floating_point:
return random.uniform(-10.0, 10.0)
elif spec.dtype in [torch.complex64, torch.complex128]:
# Only generate complex values if not avoiding complex dtypes
if FuzzerConfig.avoid_complex:
raise ValueError(
"Cannot generate complex values with avoid_complex=True"
)
return complex(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0))
else: # integer or bool
if spec.dtype == torch.bool:
return random.choice([True, False])
else:
return random.randint(-10, 10)
finally:
# Restore original random state
random.setstate(old_random_state)
else:
# Use current random state when no seed provided
# Create a scalar value based on dtype
if spec.dtype.is_floating_point:
return random.uniform(-10.0, 10.0)
elif spec.dtype in [torch.complex64, torch.complex128]:
# Only generate complex values if not avoiding complex dtypes
if FuzzerConfig.avoid_complex:
raise ValueError(
"Cannot generate complex values with avoid_complex=True"
)
return complex(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0))
else: # integer or bool
if spec.dtype == torch.bool:
return random.choice([True, False])
else:
return random.randint(-10, 10)
|
Create a Python scalar value from a ScalarSpec.
Args:
spec: ScalarSpec containing the desired dtype and optionally a constant value
seed: Random seed for reproducibility. If None, uses current random state.
Returns:
Python scalar (float, int, bool, complex) matching the dtype
|
python
|
tools/experimental/torchfuzz/tensor_fuzzer.py
| 495
|
[
"spec",
"seed"
] |
float | int | bool | complex
| true
| 16
| 6.8
|
pytorch/pytorch
| 96,034
|
google
| false
|
shouldBlock
|
/**
 * Return whether the caller is still awaiting an IO event.
 *
 * @return true if so, false otherwise.
 */
boolean shouldBlock();
|
Return whether the caller is still awaiting an IO event.
@return true if so, false otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 646
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getAcknowledgementBatches
|
/**
 * Converts the acknowledgements map into a list of {@link AcknowledgementBatch}
 * which can easily be converted into the form required for the RPC requests.
 * A {@code null} acknowledge type for an offset is encoded as a gap marker.
 */
public List<AcknowledgementBatch> getAcknowledgementBatches() {
    List<AcknowledgementBatch> batches = new ArrayList<>();
    if (acknowledgements.isEmpty())
        return batches;
    AcknowledgementBatch currentBatch = null;
    for (Map.Entry<Long, AcknowledgeType> entry : acknowledgements.entrySet()) {
        if (currentBatch == null) {
            // The first offset seeds the first batch.
            currentBatch = new AcknowledgementBatch();
            currentBatch.setFirstOffset(entry.getKey());
        } else {
            // maybeCreateNewBatch decides whether this offset extends the
            // current batch or starts a fresh one (presumably moving the
            // completed batch into `batches` — defined elsewhere).
            currentBatch = maybeCreateNewBatch(currentBatch, entry.getKey(), batches);
        }
        currentBatch.setLastOffset(entry.getKey());
        if (entry.getValue() != null) {
            currentBatch.acknowledgeTypes().add(entry.getValue().id);
        } else {
            // A null type marks an offset gap within the batch.
            currentBatch.acknowledgeTypes().add(ACKNOWLEDGE_TYPE_GAP);
        }
    }
    List<AcknowledgementBatch> optimalBatches = maybeOptimiseAcknowledgeTypes(currentBatch);
    optimalBatches.forEach(batch -> {
        if (canOptimiseForSingleAcknowledgeType(batch)) {
            // If the batch had a single acknowledgement type, we optimise the array independent
            // of the number of records.
            batch.acknowledgeTypes().subList(1, batch.acknowledgeTypes().size()).clear();
        }
        batches.add(batch);
    });
    return batches;
}
|
Converts the acknowledgements into a list of {@link AcknowledgementBatch} which can easily
be converted into the form required for the RPC requests.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java
| 177
|
[] | true
| 5
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
hoursFrac
|
/**
 * @return this time value as a fractional number of hours — computed as
 * nanos divided by {@code C5}. NOTE(review): presumably {@code C5} is the
 * nanos-per-hour constant; confirm against its declaration.
 */
public double hoursFrac() {
    return ((double) nanos()) / C5;
}
|
@return the number of {@link #timeUnit()} units this value contains
|
java
|
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
| 202
|
[] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
join
|
public static String join(final boolean[] array, final char delimiter) {
if (array == null) {
return null;
}
return join(array, delimiter, 0, array.length);
}
|
Joins the elements of the provided array into a single String containing the provided list of elements.
<p>
No delimiter is added before or after the list. Null objects or empty strings within the array are represented by empty strings.
</p>
<pre>
StringUtils.join(null, *) = null
StringUtils.join([], *) = ""
StringUtils.join([null], *) = ""
StringUtils.join([false, false], ';') = "false;false"
</pre>
@param array the array of values to join together, may be null.
@param delimiter the separator character to use.
@return the joined String, {@code null} if null array input.
@since 3.12.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,821
|
[
"array",
"delimiter"
] |
String
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_load_into_existing_table
|
def _load_into_existing_table(self) -> str:
"""
Import S3 key or keys in an existing DynamoDB table.
:return:The Amazon resource number (ARN)
"""
if not self.wait_for_completion:
raise ValueError("wait_for_completion must be set to True when loading into an existing table")
table_keys = [key["AttributeName"] for key in self.dynamodb_key_schema]
dynamodb_hook = DynamoDBHook(
aws_conn_id=self.aws_conn_id, table_name=self.dynamodb_table_name, table_keys=table_keys
)
client = dynamodb_hook.client
self.log.info("Loading from S3 into a tmp DynamoDB table %s", self.tmp_table_name)
self._load_into_new_table(table_name=self.tmp_table_name, delete_on_error=self.delete_on_error)
total_items = 0
try:
paginator = client.get_paginator("scan")
paginate = paginator.paginate(
TableName=self.tmp_table_name,
Select="ALL_ATTRIBUTES",
ReturnConsumedCapacity="NONE",
ConsistentRead=True,
)
self.log.info(
"Loading data from %s to %s DynamoDB table", self.tmp_table_name, self.dynamodb_table_name
)
for page in paginate:
total_items += page.get("Count", 0)
dynamodb_hook.write_batch_data(items=page["Items"])
self.log.info("Number of items loaded: %s", total_items)
finally:
self.log.info("Delete tmp DynamoDB table %s", self.tmp_table_name)
client.delete_table(TableName=self.tmp_table_name)
return dynamodb_hook.get_conn().Table(self.dynamodb_table_name).table_arn
|
Import S3 key or keys in an existing DynamoDB table.
:return:The Amazon resource number (ARN)
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/transfers/s3_to_dynamodb.py
| 207
|
[
"self"
] |
str
| true
| 3
| 6.56
|
apache/airflow
| 43,597
|
unknown
| false
|
compare
|
@InlineMe(replacement = "Character.compare(a, b)")
public static int compare(char a, char b) {
return Character.compare(a, b);
}
|
Compares the two specified {@code char} values. The sign of the value returned is the same as
that of {@code ((Character) a).compareTo(b)}.
<p><b>Note:</b> this method is now unnecessary and should be treated as deprecated; use the
equivalent {@link Character#compare} method instead.
@param a the first {@code char} to compare
@param b the second {@code char} to compare
@return a negative value if {@code a} is less than {@code b}; a positive value if {@code a} is
greater than {@code b}; or zero if they are equal
|
java
|
android/guava/src/com/google/common/primitives/Chars.java
| 121
|
[
"a",
"b"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
_lock_backfills
|
def _lock_backfills(self, dag_runs: Collection[DagRun], session: Session) -> dict[int, Backfill]:
"""
Lock Backfill rows to prevent race conditions when multiple schedulers run concurrently.
:param dag_runs: Collection of Dag runs to process
:param session: DB session
:return: Dict mapping backfill_id to locked Backfill objects
"""
if not (backfill_ids := {dr.backfill_id for dr in dag_runs if dr.backfill_id is not None}):
return {}
locked_backfills = {
b.id: b
for b in session.scalars(
select(Backfill).where(Backfill.id.in_(backfill_ids)).with_for_update(skip_locked=True)
)
}
if skipped_backfills := backfill_ids - locked_backfills.keys():
self.log.debug(
"Skipping backfill runs for backfill_ids=%s - locked by another scheduler",
skipped_backfills,
)
return locked_backfills
|
Lock Backfill rows to prevent race conditions when multiple schedulers run concurrently.
:param dag_runs: Collection of Dag runs to process
:param session: DB session
:return: Dict mapping backfill_id to locked Backfill objects
|
python
|
airflow-core/src/airflow/jobs/scheduler_job_runner.py
| 1,965
|
[
"self",
"dag_runs",
"session"
] |
dict[int, Backfill]
| true
| 3
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
resolvePathsConfig
|
function resolvePathsConfig(options: TsConfig, cwd: string) {
if (options?.compilerOptions?.paths) {
const paths = Object.entries(options.compilerOptions.paths)
const resolvedPaths = paths.map(([key, paths]) => {
return [key, paths.map((v) => path.resolve(cwd, v))] as const
})
return Object.fromEntries(resolvedPaths)
}
if (options.extends) {
const extendsPath = path.resolve(cwd, options.extends)
const extendsDir = path.dirname(extendsPath)
const extendsConfig = require(extendsPath)
return resolvePathsConfig(extendsConfig, extendsDir)
}
return []
}
|
Recursive function to resolve the paths config from a tsconfig.json, whether
it is in the config directly or via an inherited config (via "extends").
@param options
@param cwd
@returns
|
typescript
|
helpers/compile/plugins/resolvePathsPlugin.ts
| 18
|
[
"options",
"cwd"
] | false
| 3
| 7.28
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
is_non_overlapping_monotonic
|
def is_non_overlapping_monotonic(self) -> bool:
"""
Return a boolean whether the IntervalArray/IntervalIndex\
is non-overlapping and monotonic.
Non-overlapping means (no Intervals share points), and monotonic means
either monotonic increasing or monotonic decreasing.
See Also
--------
overlaps : Check if two IntervalIndex objects overlap.
Examples
--------
For arrays:
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.is_non_overlapping_monotonic
True
>>> interv_arr = pd.arrays.IntervalArray(
... [pd.Interval(0, 1), pd.Interval(-1, 0.1)]
... )
>>> interv_arr
<IntervalArray>
[(0.0, 1.0], (-1.0, 0.1]]
Length: 2, dtype: interval[float64, right]
>>> interv_arr.is_non_overlapping_monotonic
False
For Interval Index:
>>> interv_idx = pd.interval_range(start=0, end=2)
>>> interv_idx
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
>>> interv_idx.is_non_overlapping_monotonic
True
>>> interv_idx = pd.interval_range(start=0, end=2, closed="both")
>>> interv_idx
IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')
>>> interv_idx.is_non_overlapping_monotonic
False
"""
# must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... )
# or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...)
# we already require left <= right
# strict inequality for closed == 'both'; equality implies overlapping
# at a point when both sides of intervals are included
if self.closed == "both":
return bool(
(self._right[:-1] < self._left[1:]).all()
or (self._left[:-1] > self._right[1:]).all()
)
# non-strict inequality when closed != 'both'; at least one side is
# not included in the intervals, so equality does not imply overlapping
return bool(
(self._right[:-1] <= self._left[1:]).all()
or (self._left[:-1] >= self._right[1:]).all()
)
|
Return a boolean whether the IntervalArray/IntervalIndex\
is non-overlapping and monotonic.
Non-overlapping means (no Intervals share points), and monotonic means
either monotonic increasing or monotonic decreasing.
See Also
--------
overlaps : Check if two IntervalIndex objects overlap.
Examples
--------
For arrays:
>>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
>>> interv_arr
<IntervalArray>
[(0, 1], (1, 5]]
Length: 2, dtype: interval[int64, right]
>>> interv_arr.is_non_overlapping_monotonic
True
>>> interv_arr = pd.arrays.IntervalArray(
... [pd.Interval(0, 1), pd.Interval(-1, 0.1)]
... )
>>> interv_arr
<IntervalArray>
[(0.0, 1.0], (-1.0, 0.1]]
Length: 2, dtype: interval[float64, right]
>>> interv_arr.is_non_overlapping_monotonic
False
For Interval Index:
>>> interv_idx = pd.interval_range(start=0, end=2)
>>> interv_idx
IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')
>>> interv_idx.is_non_overlapping_monotonic
True
>>> interv_idx = pd.interval_range(start=0, end=2, closed="both")
>>> interv_idx
IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')
>>> interv_idx.is_non_overlapping_monotonic
False
|
python
|
pandas/core/arrays/interval.py
| 1,709
|
[
"self"
] |
bool
| true
| 4
| 7.76
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
make_mask_none
|
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a complex dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False])
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
|
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a complex dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False])
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
|
python
|
numpy/ma/core.py
| 1,687
|
[
"newshape",
"dtype"
] | false
| 3
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
shouldBeMerged
|
private boolean shouldBeMerged(ItemMetadata itemMetadata) {
String sourceType = itemMetadata.getType();
return (sourceType != null && !deletedInCurrentBuild(sourceType) &&
!processedInCurrentBuild(sourceType));
}
|
Create a new {@code MetadataProcessor} instance.
@param processingEnvironment the processing environment of the build
@param previousMetadata any previous metadata or {@code null}
|
java
|
spring-context-indexer/src/main/java/org/springframework/context/index/processor/MetadataCollector.java
| 94
|
[
"itemMetadata"
] | true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
visitExpressionOfSpread
|
function visitExpressionOfSpread(node: Expression): SpreadSegment {
Debug.assertNode(node, isSpreadElement);
let expression = visitNode(node.expression, visitor, isExpression);
Debug.assert(expression);
// We don't need to pack already packed array literals, or existing calls to the `__read` helper.
const isCallToReadHelper = isCallToHelper(expression, "___read" as __String);
let kind = isCallToReadHelper || isPackedArrayLiteral(expression) ? SpreadSegmentKind.PackedSpread : SpreadSegmentKind.UnpackedSpread;
// We don't need the `__read` helper for array literals. Array packing will be performed by `__spreadArray`.
if (compilerOptions.downlevelIteration && kind === SpreadSegmentKind.UnpackedSpread && !isArrayLiteralExpression(expression) && !isCallToReadHelper) {
expression = emitHelpers().createReadHelper(expression, /*count*/ undefined);
// the `__read` helper returns a packed array, so we don't need to ensure a packed array
kind = SpreadSegmentKind.PackedSpread;
}
return createSpreadSegment(kind, expression);
}
|
Transforms an array of Expression nodes that contains a SpreadExpression.
@param elements The array of Expression nodes.
@param isArgumentList A value indicating whether to ensure that the result is a fresh array.
This should be `false` when spreading into an `ArrayLiteral`, and `true` when spreading into an
argument list.
@param multiLine A value indicating whether the result should be emitted on multiple lines.
|
typescript
|
src/compiler/transformers/es2015.ts
| 4,716
|
[
"node"
] | true
| 7
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
tags
|
private static <T> Map<String, String> tags(String key, T instance) {
Map<String, String> tags = new LinkedHashMap<>();
tags.put("config", key);
tags.put("class", instance.getClass().getSimpleName());
return tags;
}
|
Wrap an instance into a Plugin.
@param instance the instance to wrap
@param metrics the metrics
@param name extra tag name to add
@param value extra tag value to add
@param key the value for the <code>config</code> tag
@return the plugin
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/Plugin.java
| 95
|
[
"key",
"instance"
] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
appendFullDigits
|
private static void appendFullDigits(final Appendable buffer, int value, int minFieldWidth) throws IOException {
// specialized paths for 1 to 4 digits -> avoid the memory allocation from the temporary work array
// see LANG-1248
if (value < 10000) {
// less memory allocation path works for four digits or less
int nDigits = 4;
if (value < 1000) {
--nDigits;
if (value < 100) {
--nDigits;
if (value < 10) {
--nDigits;
}
}
}
// left zero pad
for (int i = minFieldWidth - nDigits; i > 0; --i) {
buffer.append('0');
}
switch (nDigits) {
case 4:
buffer.append((char) (value / 1000 + '0'));
value %= 1000;
// falls-through
case 3:
if (value >= 100) {
buffer.append((char) (value / 100 + '0'));
value %= 100;
} else {
buffer.append('0');
}
// falls-through
case 2:
if (value >= 10) {
buffer.append((char) (value / 10 + '0'));
value %= 10;
} else {
buffer.append('0');
}
// falls-through
case 1:
buffer.append((char) (value + '0'));
}
} else {
// more memory allocation path works for any digits
// build up decimal representation in reverse
final char[] work = new char[MAX_DIGITS];
int digit = 0;
while (value != 0) {
work[digit++] = (char) (value % 10 + '0');
value /= 10;
}
// pad with zeros
while (digit < minFieldWidth) {
buffer.append('0');
--minFieldWidth;
}
// reverse
while (--digit >= 0) {
buffer.append(work[digit]);
}
}
}
|
Appends all digits to the given buffer.
@param buffer the buffer to append to.
@param value the value to append digits from.
@param minFieldWidth Minimum field width.
@throws IOException If an I/O error occurs.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 927
|
[
"buffer",
"value",
"minFieldWidth"
] |
void
| true
| 11
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
declareObjectArrayOrNull
|
@Override
public <T> void declareObjectArrayOrNull(
BiConsumer<Value, List<T>> consumer,
ContextParser<Context, T> objectParser,
ParseField field
) {
declareField(
consumer,
(p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL ? null : parseArray(p, c, objectParser),
field,
ValueType.OBJECT_ARRAY_OR_NULL
);
}
|
Declare a field that is an array of objects or null. Used to avoid calling the consumer when used with
{@link #optionalConstructorArg()} or {@link #constructorArg()}.
@param consumer Consumer that will be passed as is to the {@link #declareField(BiConsumer, ContextParser, ParseField, ValueType)}.
@param objectParser Parser that will parse the objects in the array, checking for nulls.
@param field Field to declare.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java
| 230
|
[
"consumer",
"objectParser",
"field"
] |
void
| true
| 2
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
max_error
|
def max_error(y_true, y_pred):
"""
The max_error metric calculates the maximum residual error.
Read more in the :ref:`User Guide <max_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
Returns
-------
max_error : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import max_error
>>> y_true = [3, 2, 7, 1]
>>> y_pred = [4, 2, 7, 1]
>>> max_error(y_true, y_pred)
1.0
"""
xp, _ = get_namespace(y_true, y_pred)
y_type, y_true, y_pred, _, _ = _check_reg_targets(
y_true, y_pred, sample_weight=None, multioutput=None, xp=xp
)
if y_type == "continuous-multioutput":
raise ValueError("Multioutput not supported in max_error")
return float(xp.max(xp.abs(y_true - y_pred)))
|
The max_error metric calculates the maximum residual error.
Read more in the :ref:`User Guide <max_error>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
Returns
-------
max_error : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import max_error
>>> y_true = [3, 2, 7, 1]
>>> y_pred = [4, 2, 7, 1]
>>> max_error(y_true, y_pred)
1.0
|
python
|
sklearn/metrics/_regression.py
| 1,321
|
[
"y_true",
"y_pred"
] | false
| 2
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parseContextualModifier
|
function parseContextualModifier(t: SyntaxKind): boolean {
return token() === t && tryParse(nextTokenCanFollowModifier);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 2,754
|
[
"t"
] | true
| 2
| 6.64
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getFirstLookupIndex
|
private int getFirstLookupIndex(int nameHash) {
int lookupIndex = Arrays.binarySearch(this.nameHashLookups, 0, this.nameHashLookups.length, nameHash);
if (lookupIndex < 0) {
return -1;
}
while (lookupIndex > 0 && this.nameHashLookups[lookupIndex - 1] == nameHash) {
lookupIndex--;
}
return lookupIndex;
}
|
Return the entry at the specified index.
@param index the entry index
@return the entry
@throws IndexOutOfBoundsException if the index is out of bounds
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 278
|
[
"nameHash"
] | true
| 4
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
chebgauss
|
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
.. math:: w_i = \\pi / n
"""
ideg = pu._as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg))
w = np.ones(ideg) * (np.pi / ideg)
return x, w
|
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
.. math:: w_i = \\pi / n
|
python
|
numpy/polynomial/chebyshev.py
| 1,791
|
[
"deg"
] | false
| 2
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_discover_hooks_from_hook_class_names
|
def _discover_hooks_from_hook_class_names(
self,
hook_class_names_registered: set[str],
already_registered_warning_connection_types: set[str],
package_name: str,
provider: ProviderInfo,
provider_uses_connection_types: bool,
):
"""
Discover hooks from "hook-class-names' property.
This property is deprecated but we should support it in Airflow 2.
The hook-class-names array contained just Hook names without connection type,
therefore we need to import all those classes immediately to know which connection types
are supported. This makes it impossible to selectively only import those hooks that are used.
:param already_registered_warning_connection_types: list of connection hooks that we should warn
about when finished discovery
:param package_name: name of the provider package
:param provider: class that keeps information about version and details of the provider
:param provider_uses_connection_types: determines whether the provider uses "connection-types" new
form of passing connection types
:return:
"""
hook_class_names = provider.data.get("hook-class-names")
if hook_class_names:
for hook_class_name in hook_class_names:
if hook_class_name in hook_class_names_registered:
# Silently ignore the hook class - it's already marked for lazy-import by
# connection-types discovery
continue
hook_info = self._import_hook(
connection_type=None,
provider_info=provider,
hook_class_name=hook_class_name,
package_name=package_name,
)
if not hook_info:
# Problem why importing class - we ignore it. Log is written at import time
continue
already_registered = self._hook_provider_dict.get(hook_info.connection_type)
if already_registered:
if already_registered.package_name != package_name:
already_registered_warning_connection_types.add(hook_info.connection_type)
else:
if already_registered.hook_class_name != hook_class_name:
log.warning(
"The hook connection type '%s' is registered twice in the"
" package '%s' with different class names: '%s' and '%s'. "
" Please fix it!",
hook_info.connection_type,
package_name,
already_registered.hook_class_name,
hook_class_name,
)
else:
self._hook_provider_dict[hook_info.connection_type] = HookClassProvider(
hook_class_name=hook_class_name, package_name=package_name
)
self._hooks_lazy_dict[hook_info.connection_type] = hook_info
if not provider_uses_connection_types:
warnings.warn(
f"The provider {package_name} uses `hook-class-names` "
"property in provider-info and has no `connection-types` one. "
"The 'hook-class-names' property has been deprecated in favour "
"of 'connection-types' in Airflow 2.2. Use **both** in case you want to "
"have backwards compatibility with Airflow < 2.2",
DeprecationWarning,
stacklevel=1,
)
for already_registered_connection_type in already_registered_warning_connection_types:
log.warning(
"The connection_type '%s' has been already registered by provider '%s.'",
already_registered_connection_type,
self._hook_provider_dict[already_registered_connection_type].package_name,
)
|
Discover hooks from "hook-class-names' property.
This property is deprecated but we should support it in Airflow 2.
The hook-class-names array contained just Hook names without connection type,
therefore we need to import all those classes immediately to know which connection types
are supported. This makes it impossible to selectively only import those hooks that are used.
:param already_registered_warning_connection_types: list of connection hooks that we should warn
about when finished discovery
:param package_name: name of the provider package
:param provider: class that keeps information about version and details of the provider
:param provider_uses_connection_types: determines whether the provider uses "connection-types" new
form of passing connection types
:return:
|
python
|
airflow-core/src/airflow/providers_manager.py
| 667
|
[
"self",
"hook_class_names_registered",
"already_registered_warning_connection_types",
"package_name",
"provider",
"provider_uses_connection_types"
] | true
| 12
| 7.52
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
copyto
|
def copyto(dst, src, casting="same_kind", where=True):
"""
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
Examples
--------
>>> import numpy as np
>>> A = np.array([4, 5, 6])
>>> B = [1, 2, 3]
>>> np.copyto(A, B)
>>> A
array([1, 2, 3])
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> B = [[4, 5, 6], [7, 8, 9]]
>>> np.copyto(A, B)
>>> A
array([[4, 5, 6],
[7, 8, 9]])
"""
return (dst, src, where)
|
copyto(dst, src, casting='same_kind', where=True)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
Examples
--------
>>> import numpy as np
>>> A = np.array([4, 5, 6])
>>> B = [1, 2, 3]
>>> np.copyto(A, B)
>>> A
array([1, 2, 3])
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> B = [[4, 5, 6], [7, 8, 9]]
>>> np.copyto(A, B)
>>> A
array([[4, 5, 6],
[7, 8, 9]])
|
python
|
numpy/_core/multiarray.py
| 1,086
|
[
"dst",
"src",
"casting",
"where"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
withOrigin
|
ConfigDataLocation withOrigin(@Nullable Origin origin) {
return new ConfigDataLocation(this.optional, this.value, origin);
}
|
Create a new {@link ConfigDataLocation} with a specific {@link Origin}.
@param origin the origin to set
@return a new {@link ConfigDataLocation} instance.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataLocation.java
| 136
|
[
"origin"
] |
ConfigDataLocation
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
broadcast_to
|
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple or int
The shape of the desired array. A single integer ``i`` is interpreted
as ``(i,)``.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
See Also
--------
broadcast
broadcast_arrays
broadcast_shapes
Examples
--------
>>> import numpy as np
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
return _broadcast_to(array, shape, subok=subok, readonly=True)
|
Broadcast an array to a new shape.
Parameters
----------
array : array_like
The array to broadcast.
shape : tuple or int
The shape of the desired array. A single integer ``i`` is interpreted
as ``(i,)``.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
ValueError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
See Also
--------
broadcast
broadcast_arrays
broadcast_shapes
Examples
--------
>>> import numpy as np
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
|
python
|
numpy/lib/_stride_tricks_impl.py
| 401
|
[
"array",
"shape",
"subok"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
forBooleanValues
|
public static Frequencies forBooleanValues(MetricName falseMetricName, MetricName trueMetricName) {
List<Frequency> frequencies = new ArrayList<>();
if (falseMetricName != null) {
frequencies.add(new Frequency(falseMetricName, 0.0));
}
if (trueMetricName != null) {
frequencies.add(new Frequency(trueMetricName, 1.0));
}
if (frequencies.isEmpty()) {
throw new IllegalArgumentException("Must specify at least one metric name");
}
Frequency[] frequencyArray = frequencies.toArray(new Frequency[0]);
return new Frequencies(2, 0.0, 1.0, frequencyArray);
}
|
Create a Frequencies instance with metrics for the frequency of a boolean sensor that records 0.0 for
false and 1.0 for true.
@param falseMetricName the name of the metric capturing the frequency of failures; may be null if not needed
@param trueMetricName the name of the metric capturing the frequency of successes; may be null if not needed
@return the Frequencies instance; never null
@throws IllegalArgumentException if both {@code falseMetricName} and {@code trueMetricName} are null
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java
| 54
|
[
"falseMetricName",
"trueMetricName"
] |
Frequencies
| true
| 4
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
create_diagonal
|
def create_diagonal(
x: Array, /, *, offset: int = 0, xp: ModuleType | None = None
) -> Array:
"""
Construct a diagonal array.
Parameters
----------
x : array
An array having shape ``(*batch_dims, k)``.
offset : int, optional
Offset from the leading diagonal (default is ``0``).
Use positive ints for diagonals above the leading diagonal,
and negative ints for diagonals below the leading diagonal.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having shape ``(*batch_dims, k+abs(offset), k+abs(offset))`` with `x`
on the diagonal (offset by `offset`).
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([2, 4, 8])
>>> xpx.create_diagonal(x, xp=xp)
Array([[2, 0, 0],
[0, 4, 0],
[0, 0, 8]], dtype=array_api_strict.int64)
>>> xpx.create_diagonal(x, offset=-2, xp=xp)
Array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[2, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 8, 0, 0]], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(x)
if x.ndim == 0:
err_msg = "`x` must be at least 1-dimensional."
raise ValueError(err_msg)
x_shape = eager_shape(x)
batch_dims = x_shape[:-1]
n = x_shape[-1] + abs(offset)
diag = xp.zeros((*batch_dims, n**2), dtype=x.dtype, device=_compat.device(x))
target_slice = slice(
offset if offset >= 0 else abs(offset) * n,
min(n * (n - offset), diag.shape[-1]),
n + 1,
)
for index in ndindex(*batch_dims):
diag = at(diag)[(*index, target_slice)].set(x[(*index, slice(None))])
return xp.reshape(diag, (*batch_dims, n, n))
|
Construct a diagonal array.
Parameters
----------
x : array
An array having shape ``(*batch_dims, k)``.
offset : int, optional
Offset from the leading diagonal (default is ``0``).
Use positive ints for diagonals above the leading diagonal,
and negative ints for diagonals below the leading diagonal.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having shape ``(*batch_dims, k+abs(offset), k+abs(offset))`` with `x`
on the diagonal (offset by `offset`).
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([2, 4, 8])
>>> xpx.create_diagonal(x, xp=xp)
Array([[2, 0, 0],
[0, 4, 0],
[0, 0, 8]], dtype=array_api_strict.int64)
>>> xpx.create_diagonal(x, offset=-2, xp=xp)
Array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[2, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 8, 0, 0]], dtype=array_api_strict.int64)
|
python
|
sklearn/externals/array_api_extra/_lib/_funcs.py
| 395
|
[
"x",
"offset",
"xp"
] |
Array
| true
| 5
| 8.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
sqrt
|
def sqrt(x):
"""
Compute the square root of x.
For negative input elements, a complex value is returned
(unlike `numpy.sqrt` which returns NaN).
Parameters
----------
x : array_like
The input value(s).
Returns
-------
out : ndarray or scalar
The square root of `x`. If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.sqrt
Examples
--------
For real, non-negative inputs this works just like `numpy.sqrt`:
>>> import numpy as np
>>> np.emath.sqrt(1)
1.0
>>> np.emath.sqrt([1, 4])
array([1., 2.])
But it automatically handles negative inputs:
>>> np.emath.sqrt(-1)
1j
>>> np.emath.sqrt([-1,4])
array([0.+1.j, 2.+0.j])
Different results are expected because:
floating point 0.0 and -0.0 are distinct.
For more control, explicitly use complex() as follows:
>>> np.emath.sqrt(complex(-4.0, 0.0))
2j
>>> np.emath.sqrt(complex(-4.0, -0.0))
-2j
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
|
Compute the square root of x.
For negative input elements, a complex value is returned
(unlike `numpy.sqrt` which returns NaN).
Parameters
----------
x : array_like
The input value(s).
Returns
-------
out : ndarray or scalar
The square root of `x`. If `x` was a scalar, so is `out`,
otherwise an array is returned.
See Also
--------
numpy.sqrt
Examples
--------
For real, non-negative inputs this works just like `numpy.sqrt`:
>>> import numpy as np
>>> np.emath.sqrt(1)
1.0
>>> np.emath.sqrt([1, 4])
array([1., 2.])
But it automatically handles negative inputs:
>>> np.emath.sqrt(-1)
1j
>>> np.emath.sqrt([-1,4])
array([0.+1.j, 2.+0.j])
Different results are expected because:
floating point 0.0 and -0.0 are distinct.
For more control, explicitly use complex() as follows:
>>> np.emath.sqrt(complex(-4.0, 0.0))
2j
>>> np.emath.sqrt(complex(-4.0, -0.0))
-2j
|
python
|
numpy/lib/_scimath_impl.py
| 187
|
[
"x"
] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
init_X86_64Bit
|
private static void init_X86_64Bit() {
addProcessors(new Processor(Processor.Arch.BIT_64, Processor.Type.X86), "x86_64", "amd64", "em64t", "universal");
}
|
Gets a {@link Processor} object the given value {@link String}. The {@link String} must be like a value returned by the {@code "os.arch"} system
property.
@param value A {@link String} like a value returned by the {@code os.arch} System Property.
@return A {@link Processor} when it exists, else {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArchUtils.java
| 135
|
[] |
void
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
collect
|
def collect(self, intermediate=False, **kwargs):
"""Collect results as they return.
Iterator, like :meth:`get` will wait for the task to complete,
but will also follow :class:`AsyncResult` and :class:`ResultSet`
returned by the task, yielding ``(result, value)`` tuples for each
result in the tree.
An example would be having the following tasks:
.. code-block:: python
from celery import group
from proj.celery import app
@app.task(trail=True)
def A(how_many):
return group(B.s(i) for i in range(how_many))()
@app.task(trail=True)
def B(i):
return pow2.delay(i)
@app.task(trail=True)
def pow2(i):
return i ** 2
.. code-block:: pycon
>>> from celery.result import ResultBase
>>> from proj.tasks import A
>>> result = A.delay(10)
>>> [v for v in result.collect()
... if not isinstance(v, (ResultBase, tuple))]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
Note:
The ``Task.trail`` option must be enabled
so that the list of children is stored in ``result.children``.
This is the default but enabled explicitly for illustration.
Yields:
Tuple[AsyncResult, Any]: tuples containing the result instance
of the child task, and the return value of that task.
"""
for _, R in self.iterdeps(intermediate=intermediate):
yield R, R.get(**kwargs)
|
Collect results as they return.
Iterator, like :meth:`get` will wait for the task to complete,
but will also follow :class:`AsyncResult` and :class:`ResultSet`
returned by the task, yielding ``(result, value)`` tuples for each
result in the tree.
An example would be having the following tasks:
.. code-block:: python
from celery import group
from proj.celery import app
@app.task(trail=True)
def A(how_many):
return group(B.s(i) for i in range(how_many))()
@app.task(trail=True)
def B(i):
return pow2.delay(i)
@app.task(trail=True)
def pow2(i):
return i ** 2
.. code-block:: pycon
>>> from celery.result import ResultBase
>>> from proj.tasks import A
>>> result = A.delay(10)
>>> [v for v in result.collect()
... if not isinstance(v, (ResultBase, tuple))]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
Note:
The ``Task.trail`` option must be enabled
so that the list of children is stored in ``result.children``.
This is the default but enabled explicitly for illustration.
Yields:
Tuple[AsyncResult, Any]: tuples containing the result instance
of the child task, and the return value of that task.
|
python
|
celery/result.py
| 274
|
[
"self",
"intermediate"
] | false
| 2
| 7.52
|
celery/celery
| 27,741
|
unknown
| false
|
|
toHumanReadableString
|
public String toHumanReadableString(int fractionPieces) {
if (duration < 0) {
return Long.toString(duration);
}
long nanos = nanos();
if (nanos == 0) {
return "0s";
}
double value = nanos;
String suffix = "nanos";
if (nanos >= C6) {
value = daysFrac();
suffix = "d";
} else if (nanos >= C5) {
value = hoursFrac();
suffix = "h";
} else if (nanos >= C4) {
value = minutesFrac();
suffix = "m";
} else if (nanos >= C3) {
value = secondsFrac();
suffix = "s";
} else if (nanos >= C2) {
value = millisFrac();
suffix = "ms";
} else if (nanos >= C1) {
value = microsFrac();
suffix = "micros";
}
// Limit fraction pieces to a min of 0 and maximum of 10
return formatDecimal(value, Math.min(10, Math.max(0, fractionPieces))) + suffix;
}
|
Returns a {@link String} representation of the current {@link TimeValue}.
Note that this method might produce fractional time values (ex 1.6m) which cannot be
parsed by method like {@link TimeValue#parse(String, String, String, String)}. The number of
fractional decimals (up to 10 maximum) are truncated to the number of fraction pieces
specified.
Also note that the maximum string value that will be generated is
{@code 106751.9d} due to the way that values are internally converted
to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds)
@param fractionPieces the number of decimal places to include
|
java
|
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
| 247
|
[
"fractionPieces"
] |
String
| true
| 9
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
addAdvisors
|
public void addAdvisors(Collection<Advisor> advisors) {
if (isFrozen()) {
throw new AopConfigException("Cannot add advisor: Configuration is frozen.");
}
if (!CollectionUtils.isEmpty(advisors)) {
for (Advisor advisor : advisors) {
if (advisor instanceof IntroductionAdvisor introductionAdvisor) {
validateIntroductionAdvisor(introductionAdvisor);
}
Assert.notNull(advisor, "Advisor must not be null");
this.advisors.add(advisor);
}
adviceChanged();
}
}
|
Add all the given advisors to this proxy configuration.
@param advisors the advisors to register
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 379
|
[
"advisors"
] |
void
| true
| 4
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
slice_indexer
|
def slice_indexer(
self,
start: Hashable | None = None,
end: Hashable | None = None,
step: int | None = None,
) -> slice:
"""
Compute the slice indexer for input labels and step.
Index needs to be ordered and unique.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, default None
If None, defaults to 1.
Returns
-------
slice
A slice object.
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
See Also
--------
Index.slice_locs : Computes slice locations for input labels.
Index.get_slice_bound : Retrieves slice bound that corresponds to given label.
Notes
-----
This function assumes that the data is sorted, so use at your own peril.
Examples
--------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list("abcd"))
>>> idx.slice_indexer(start="b", end="c")
slice(1, 3, None)
>>> idx = pd.MultiIndex.from_arrays([list("abcd"), list("efgh")])
>>> idx.slice_indexer(start="b", end=("c", "g"))
slice(1, 3, None)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
|
Compute the slice indexer for input labels and step.
Index needs to be ordered and unique.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, default None
If None, defaults to 1.
Returns
-------
slice
A slice object.
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
See Also
--------
Index.slice_locs : Computes slice locations for input labels.
Index.get_slice_bound : Retrieves slice bound that corresponds to given label.
Notes
-----
This function assumes that the data is sorted, so use at your own peril.
Examples
--------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list("abcd"))
>>> idx.slice_indexer(start="b", end="c")
slice(1, 3, None)
>>> idx = pd.MultiIndex.from_arrays([list("abcd"), list("efgh")])
>>> idx.slice_indexer(start="b", end=("c", "g"))
slice(1, 3, None)
|
python
|
pandas/core/indexes/base.py
| 6,662
|
[
"self",
"start",
"end",
"step"
] |
slice
| true
| 3
| 8.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
buildRoundings
|
static RoundingInfo[] buildRoundings(ZoneId timeZone, String minimumInterval) {
int indexToSliceFrom = 0;
RoundingInfo[] roundings = new RoundingInfo[6];
roundings[0] = new RoundingInfo(Rounding.DateTimeUnit.SECOND_OF_MINUTE, timeZone, 1000L, "s", 1, 5, 10, 30);
roundings[1] = new RoundingInfo(Rounding.DateTimeUnit.MINUTE_OF_HOUR, timeZone, 60 * 1000L, "m", 1, 5, 10, 30);
roundings[2] = new RoundingInfo(Rounding.DateTimeUnit.HOUR_OF_DAY, timeZone, 60 * 60 * 1000L, "h", 1, 3, 12);
roundings[3] = new RoundingInfo(Rounding.DateTimeUnit.DAY_OF_MONTH, timeZone, 24 * 60 * 60 * 1000L, "d", 1, 7);
roundings[4] = new RoundingInfo(Rounding.DateTimeUnit.MONTH_OF_YEAR, timeZone, 30 * 24 * 60 * 60 * 1000L, "M", 1, 3);
roundings[5] = new RoundingInfo(
Rounding.DateTimeUnit.YEAR_OF_CENTURY,
timeZone,
365 * 24 * 60 * 60 * 1000L,
"y",
1,
5,
10,
20,
50,
100
);
for (int i = 0; i < roundings.length; i++) {
RoundingInfo roundingInfo = roundings[i];
if (roundingInfo.getDateTimeUnit().equals(minimumInterval)) {
indexToSliceFrom = i;
break;
}
}
return Arrays.copyOfRange(roundings, indexToSliceFrom, roundings.length);
}
|
Build roundings, computed dynamically as roundings are time zone dependent.
The current implementation probably should not be invoked in a tight loop.
@return Array of RoundingInfo
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java
| 80
|
[
"timeZone",
"minimumInterval"
] | true
| 3
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
destroyScopedBean
|
@Override
public void destroyScopedBean(String beanName) {
RootBeanDefinition mbd = getMergedLocalBeanDefinition(beanName);
if (mbd.isSingleton() || mbd.isPrototype()) {
throw new IllegalArgumentException(
"Bean name '" + beanName + "' does not correspond to an object in a mutable scope");
}
String scopeName = mbd.getScope();
Scope scope = this.scopes.get(scopeName);
if (scope == null) {
throw new IllegalStateException("No Scope SPI registered for scope name '" + scopeName + "'");
}
Object bean = scope.remove(beanName);
if (bean != null) {
destroyBean(beanName, bean, mbd);
}
}
|
Destroy the given bean instance (usually a prototype instance
obtained from this factory) according to the given bean definition.
@param beanName the name of the bean definition
@param bean the bean instance to destroy
@param mbd the merged bean definition
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,242
|
[
"beanName"
] |
void
| true
| 5
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
convertNamedExport
|
function convertNamedExport(
sourceFile: SourceFile,
assignment: BinaryExpression & { left: PropertyAccessExpression; },
changes: textChanges.ChangeTracker,
exports: ExportRenames,
): void {
// If "originalKeywordKind" was set, this is e.g. `exports.
const { text } = assignment.left.name;
const rename = exports.get(text);
if (rename !== undefined) {
/*
const _class = 0;
export { _class as class };
*/
const newNodes = [
makeConst(/*modifiers*/ undefined, rename, assignment.right),
makeExportDeclaration([factory.createExportSpecifier(/*isTypeOnly*/ false, rename, text)]),
];
changes.replaceNodeWithNodes(sourceFile, assignment.parent, newNodes);
}
else {
convertExportsPropertyAssignment(assignment, sourceFile, changes);
}
}
|
Convert `module.exports = { ... }` to individual exports..
We can't always do this if the module has interesting members -- then it will be a default export instead.
|
typescript
|
src/services/codefixes/convertToEsModule.ts
| 370
|
[
"sourceFile",
"assignment",
"changes",
"exports"
] | true
| 3
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
defaultTo
|
function defaultTo(value, defaultValue) {
return (value == null || value !== value) ? defaultValue : value;
}
|
Checks `value` to determine whether a default value should be returned in
its place. The `defaultValue` is returned if `value` is `NaN`, `null`,
or `undefined`.
@static
@memberOf _
@since 4.14.0
@category Util
@param {*} value The value to check.
@param {*} defaultValue The default value.
@returns {*} Returns the resolved value.
@example
_.defaultTo(1, 10);
// => 1
_.defaultTo(undefined, 10);
// => 10
|
javascript
|
lodash.js
| 15,529
|
[
"value",
"defaultValue"
] | false
| 3
| 7.44
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
toArray
|
private static String @Nullable [] toArray(@Nullable Collection<String> collection) {
return (collection != null) ? collection.toArray(String[]::new) : null;
}
|
Helper method that provides a null-safe way to convert a {@code String[]} to a
{@link Collection} for client libraries to use.
@param array the array to convert
@return a collection or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslOptions.java
| 119
|
[
"collection"
] | true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return CodeWarnings.class.getSimpleName() + this.warnings;
}
|
Return the currently registered warnings.
@return the warnings
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/CodeWarnings.java
| 172
|
[] |
String
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getAttribute
|
@Override
public @Nullable Object getAttribute(String name) {
BeanMetadataAttribute attribute = (BeanMetadataAttribute) super.getAttribute(name);
return (attribute != null ? attribute.getValue() : null);
}
|
Look up the given BeanMetadataAttribute in this accessor's set of attributes.
@param name the name of the attribute
@return the corresponding BeanMetadataAttribute object,
or {@code null} if no such attribute defined
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanMetadataAttributeAccessor.java
| 74
|
[
"name"
] |
Object
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
split
|
public void split() {
if (runningState != State.RUNNING) {
throw new IllegalStateException("Stopwatch is not running.");
}
stopTimeNanos = System.nanoTime();
splitState = SplitState.SPLIT;
splits.add(new Split(String.valueOf(splits.size()), Duration.ofNanos(stopTimeNanos - startTimeNanos)));
}
|
Splits the time.
<p>
This method sets the stop time of the watch to allow a time to be extracted. The start time is unaffected, enabling {@link #unsplit()} to continue the
timing from the original start point.
</p>
@throws IllegalStateException if this StopWatch is not running.
|
java
|
src/main/java/org/apache/commons/lang3/time/StopWatch.java
| 687
|
[] |
void
| true
| 2
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createConfiguration
|
public Configuration createConfiguration() throws IOException, TemplateException {
Configuration config = newConfiguration();
Properties props = new Properties();
// Load config file if specified.
if (this.configLocation != null) {
if (logger.isDebugEnabled()) {
logger.debug("Loading FreeMarker configuration from " + this.configLocation);
}
PropertiesLoaderUtils.fillProperties(props, this.configLocation);
}
// Merge local properties if specified.
if (this.freemarkerSettings != null) {
props.putAll(this.freemarkerSettings);
}
// FreeMarker will only accept known keys in its setSettings and
// setAllSharedVariables methods.
if (!props.isEmpty()) {
config.setSettings(props);
}
if (!CollectionUtils.isEmpty(this.freemarkerVariables)) {
config.setAllSharedVariables(new SimpleHash(this.freemarkerVariables, config.getObjectWrapper()));
}
if (this.defaultEncoding != null) {
config.setDefaultEncoding(this.defaultEncoding);
}
List<TemplateLoader> templateLoaders = new ArrayList<>(this.templateLoaders);
// Register template loaders that are supposed to kick in early.
if (this.preTemplateLoaders != null) {
templateLoaders.addAll(this.preTemplateLoaders);
}
// Register default template loaders.
if (this.templateLoaderPaths != null) {
for (String path : this.templateLoaderPaths) {
templateLoaders.add(getTemplateLoaderForPath(path));
}
}
postProcessTemplateLoaders(templateLoaders);
// Register template loaders that are supposed to kick in late.
if (this.postTemplateLoaders != null) {
templateLoaders.addAll(this.postTemplateLoaders);
}
TemplateLoader loader = getAggregateTemplateLoader(templateLoaders);
if (loader != null) {
config.setTemplateLoader(loader);
}
postProcessConfiguration(config);
return config;
}
|
Prepare the FreeMarker {@link Configuration} and return it.
@return the FreeMarker {@code Configuration} object
@throws IOException if the config file wasn't found
@throws TemplateException on FreeMarker initialization failure
|
java
|
spring-context-support/src/main/java/org/springframework/ui/freemarker/FreeMarkerConfigurationFactory.java
| 279
|
[] |
Configuration
| true
| 11
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
refreshBeanFactory
|
@Override
protected final void refreshBeanFactory() throws BeansException {
if (hasBeanFactory()) {
destroyBeans();
closeBeanFactory();
}
try {
DefaultListableBeanFactory beanFactory = createBeanFactory();
beanFactory.setSerializationId(getId());
beanFactory.setApplicationStartup(getApplicationStartup());
customizeBeanFactory(beanFactory);
loadBeanDefinitions(beanFactory);
this.beanFactory = beanFactory;
}
catch (IOException ex) {
throw new ApplicationContextException("I/O error parsing bean definition source for " + getDisplayName(), ex);
}
}
|
This implementation performs an actual refresh of this context's underlying
bean factory, shutting down the previous bean factory (if any) and
initializing a fresh bean factory for the next phase of the context's lifecycle.
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractRefreshableApplicationContext.java
| 118
|
[] |
void
| true
| 3
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
startupTimes
|
public ImmutableMap<Service, Long> startupTimes() {
return state.startupTimes();
}
|
Returns the service load times. This value will only return startup times for services that
have finished starting.
@return Map of services and their corresponding startup time in millis, the map entries will be
ordered by startup time.
|
java
|
android/guava/src/com/google/common/util/concurrent/ServiceManager.java
| 421
|
[] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
parseList
|
function parseList<T extends Node>(kind: ParsingContext, parseElement: () => T): NodeArray<T> {
const saveParsingContext = parsingContext;
parsingContext |= 1 << kind;
const list = [];
const listPos = getNodePos();
while (!isListTerminator(kind)) {
if (isListElement(kind, /*inErrorRecovery*/ false)) {
list.push(parseListElement(kind, parseElement));
continue;
}
if (abortParsingListOrMoveToNextToken(kind)) {
break;
}
}
parsingContext = saveParsingContext;
return createNodeArray(list, listPos);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,094
|
[
"kind",
"parseElement"
] | true
| 4
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
check_grad_usage
|
def check_grad_usage(defn_name: str, derivatives: Sequence[Derivative]) -> None:
"""
Check for some subtle mistakes one might make when writing derivatives.
These mistakes will compile, but will be latent until a function is
used with double backwards.
"""
uses_grad = False # true if any derivative uses "grad"
num_grads_uses = 0 # count of uses of "grads" or "grads[INDEX]"
uses_named_grads = False # true if any derivative uses "grad_{name}"
used_grads_indices: list[int] = [] # which indices of grads are used
for d in derivatives:
formula = d.formula
uses_grad = uses_grad or bool(
re.findall(IDENT_REGEX.format("grad"), formula)
)
num_grads_uses += len(re.findall(IDENT_REGEX.format("grads"), formula))
uses_named_grads = uses_named_grads or bool(d.named_gradients)
used_grads_indices.extend(used_gradient_indices(formula))
# This is a basic sanity check: the number of places we see
# "grads" should be no fewer than the number of indices we see
# inside "grads". They may not be equal because we may use
# "grads" without an index.
assert num_grads_uses >= len(used_grads_indices)
# Thus if the number is equal, every use of grads is also
# indexed.
only_used_grads_indices = num_grads_uses == len(used_grads_indices)
if uses_grad and num_grads_uses > 0:
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml illegally "
"mixes use of 'grad' and 'grads'. Consider replacing "
"occurrences of 'grad' with 'grads[0]'"
)
if only_used_grads_indices and set(used_grads_indices) == {0}:
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml solely "
"refers to 'grads[0]'. If the first output is indeed the "
"only differentiable output, replace 'grads[0]' with 'grad'; "
"otherwise, there is a likely error in your derivatives "
"declaration."
)
if uses_named_grads and (uses_grad or num_grads_uses > 0):
raise RuntimeError(
f"Derivative definition of {defn_name} in derivatives.yaml illegally "
'mixes use of "grad_RETURN_NAME" and "grad" or "grads[x]". Use '
"only one method for identifying gradients."
)
|
Check for some subtle mistakes one might make when writing derivatives.
These mistakes will compile, but will be latent until a function is
used with double backwards.
|
python
|
tools/autograd/load_derivatives.py
| 478
|
[
"defn_name",
"derivatives"
] |
None
| true
| 11
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
union
|
public ComposablePointcut union(MethodMatcher other) {
this.methodMatcher = MethodMatchers.union(this.methodMatcher, other);
return this;
}
|
Apply a union with the given MethodMatcher.
@param other the MethodMatcher to apply a union with
@return this composable pointcut (for call chaining)
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ComposablePointcut.java
| 136
|
[
"other"
] |
ComposablePointcut
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
build
|
public <T extends SimpleAsyncTaskExecutor> T build(Class<T> taskExecutorClass) {
return configure(BeanUtils.instantiateClass(taskExecutorClass));
}
|
Build a new {@link SimpleAsyncTaskExecutor} instance of the specified type and
configure it using this builder.
@param <T> the type of task executor
@param taskExecutorClass the template type to create
@return a configured {@link SimpleAsyncTaskExecutor} instance.
@see #build()
@see #configure(SimpleAsyncTaskExecutor)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskExecutorBuilder.java
| 254
|
[
"taskExecutorClass"
] |
T
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
append
|
private void append(char ch) {
try {
this.out.append(ch);
}
catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
|
Write the specified pairs to an already started {@link Series#OBJECT object
series}.
@param <N> the name type in the pair
@param <V> the value type in the pair
@param pairs a callback that will be used to provide each pair. Typically a
{@code forEach} method reference.
@see #writePairs(Consumer)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonValueWriter.java
| 305
|
[
"ch"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_fit
|
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
self._precomputed = True
else:
self._precomputed = False
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
else:
# only create `n_features_in_` in the precomputed case
_check_n_features(self, X, reset=True)
self._n_features = X.shape[1]
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
return missing_features_info[0]
|
Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
n_features)
The imputer mask of the original data.
|
python
|
sklearn/impute/_base.py
| 976
|
[
"self",
"X",
"y",
"precomputed"
] | false
| 7
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
getIndentationStringAtPosition
|
function getIndentationStringAtPosition(sourceFile: SourceFile, position: number): string {
const { text } = sourceFile;
const lineStart = getLineStartPositionForPosition(position, sourceFile);
let pos = lineStart;
for (; pos <= position && isWhiteSpaceSingleLine(text.charCodeAt(pos)); pos++);
return text.slice(lineStart, pos);
}
|
Checks if position points to a valid position to add JSDoc comments, and if so,
returns the appropriate template. Otherwise returns an empty string.
Valid positions are
- outside of comments, statements, and expressions, and
- preceding a:
- function/constructor/method declaration
- class declarations
- variable statements
- namespace declarations
- interface declarations
- method signatures
- type alias declarations
Hosts should ideally check that:
- The line is all whitespace up to 'position' before performing the insertion.
- If the keystroke sequence "/\*\*" induced the call, we also check that the next
non-whitespace character is '*', which (approximately) indicates whether we added
the second '*' to complete an existing (JSDoc) comment.
@param fileName The file in which to perform the check.
@param position The (character-indexed) position in the file where the check should
be performed.
@internal
|
typescript
|
src/services/jsDoc.ts
| 533
|
[
"sourceFile",
"position"
] | true
| 3
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
bind
|
public <T> BindResult<T> bind(String name, Bindable<T> target) {
return bind(ConfigurationPropertyName.of(name), target, null);
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources}.
@param name the configuration property name to bind
@param target the target bindable
@param <T> the bound type
@return the binding result (never {@code null})
@see #bind(ConfigurationPropertyName, Bindable, BindHandler)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 247
|
[
"name",
"target"
] | true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
inferred_type
|
def inferred_type(self) -> str_t:
"""
Return a string of the type inferred from the values.
See Also
--------
Index.dtype : Return the dtype object of the underlying data.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.inferred_type
'integer'
"""
return lib.infer_dtype(self._values, skipna=False)
|
Return a string of the type inferred from the values.
See Also
--------
Index.dtype : Return the dtype object of the underlying data.
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Index([1, 2, 3], dtype='int64')
>>> idx.inferred_type
'integer'
|
python
|
pandas/core/indexes/base.py
| 2,531
|
[
"self"
] |
str_t
| true
| 1
| 6.24
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
negate
|
public static ClassFilter negate(ClassFilter classFilter) {
Assert.notNull(classFilter, "ClassFilter must not be null");
return new NegateClassFilter(classFilter);
}
|
Return a class filter that represents the logical negation of the specified
filter instance.
@param classFilter the {@link ClassFilter} to negate
@return a filter that represents the logical negation of the specified filter
@since 6.1
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ClassFilters.java
| 97
|
[
"classFilter"
] |
ClassFilter
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parseOpenSslEC
|
private static PrivateKey parseOpenSslEC(BufferedReader bReader, Supplier<char[]> passwordSupplier) throws IOException,
GeneralSecurityException {
StringBuilder sb = new StringBuilder();
String line = bReader.readLine();
Map<String, String> pemHeaders = new HashMap<>();
while (line != null) {
if (OPENSSL_EC_FOOTER.equals(line.trim())) {
break;
}
// Parse PEM headers according to https://www.ietf.org/rfc/rfc1421.txt
if (line.contains(":")) {
String[] header = line.split(":");
pemHeaders.put(header[0].trim(), header[1].trim());
} else {
sb.append(line.trim());
}
line = bReader.readLine();
}
if (null == line || OPENSSL_EC_FOOTER.equals(line.trim()) == false) {
throw new IOException("Malformed PEM file, PEM footer is invalid or missing");
}
byte[] keyBytes = possiblyDecryptPKCS1Key(pemHeaders, sb.toString(), passwordSupplier);
KeyFactory keyFactory = KeyFactory.getInstance("EC");
ECPrivateKeySpec ecSpec = parseEcDer(keyBytes);
return keyFactory.generatePrivate(ecSpec);
}
|
Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an EC private key encoded in
OpenSSL traditional format.
@param bReader the {@link BufferedReader} containing the key file contents
@param passwordSupplier A password supplier for the potentially encrypted (password protected) key
@return {@link PrivateKey}
@throws IOException if the file can't be read
@throws GeneralSecurityException if the private key can't be generated from the {@link ECPrivateKeySpec}
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
| 263
|
[
"bReader",
"passwordSupplier"
] |
PrivateKey
| true
| 6
| 7.44
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getResult
|
public static <T> T getResult(Future<T> future, long timeoutMs) {
try {
return future.get(timeoutMs, TimeUnit.MILLISECONDS);
} catch (ExecutionException e) {
if (e.getCause() instanceof IllegalStateException)
throw (IllegalStateException) e.getCause();
throw maybeWrapAsKafkaException(e.getCause());
} catch (InterruptedException e) {
throw new InterruptException(e);
} catch (java.util.concurrent.TimeoutException e) {
throw new TimeoutException(e);
}
}
|
Update subscription state and metadata using the provided committed offsets:
<li>Update partition offsets with the committed offsets</li>
<li>Update the metadata with any newer leader epoch discovered in the committed offsets
metadata</li>
</p>
This will ignore any partition included in the <code>offsetsAndMetadata</code> parameter that
may no longer be assigned.
@param offsetsAndMetadata Committed offsets and metadata to be used for updating the
subscription state and metadata object.
@param metadata Metadata object to update with a new leader epoch if discovered in the
committed offsets' metadata.
@param subscriptions Subscription state to update, setting partitions' offsets to the
committed offsets.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java
| 219
|
[
"future",
"timeoutMs"
] |
T
| true
| 5
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
size
|
public int size() {
return tracked.size();
}
|
It is possible for the {@link AsyncKafkaConsumer#close() consumer to close} before completing the processing of
all the events in the queue. In this case, we need to
{@link CompletableFuture#completeExceptionally(Throwable) expire} any remaining events.
<p/>
Check each of the {@link #add(CompletableEvent) previously-added} {@link CompletableEvent completable events},
and for any that are incomplete, expire them. Also check the core event queue for any incomplete events and
likewise expire them.
<p/>
<em>Note</em>: because this is called in the context of {@link AsyncKafkaConsumer#close() closing consumer},
don't take the deadline into consideration, just close it regardless.
@param events Events from a queue that have not yet been tracked that also need to be reviewed
@return The number of events that were expired
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java
| 155
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
merge
|
protected abstract Configurations merge(Set<Class<?>> mergedClasses);
|
Merge configurations.
@param mergedClasses the merged classes
@return a new configurations instance (must be of the same type as this instance)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/annotation/Configurations.java
| 123
|
[
"mergedClasses"
] |
Configurations
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
nop
|
@SuppressWarnings("unchecked")
static <R, E extends Throwable> FailableIntFunction<R, E> nop() {
return NOP;
}
|
Gets the NOP singleton.
@param <R> Return type.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableIntFunction.java
| 43
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
appendExportStatement
|
function appendExportStatement(statements: Statement[] | undefined, seen: IdentifierNameMap<boolean>, exportName: ModuleExportName, expression: Expression, location?: TextRange, allowComments?: boolean, liveBinding?: boolean): Statement[] | undefined {
if (exportName.kind !== SyntaxKind.StringLiteral) {
if (seen.has(exportName)) {
return statements;
}
seen.set(exportName, true);
}
statements = append(statements, createExportStatement(exportName, expression, location, allowComments, liveBinding));
return statements;
}
|
Appends the down-level representation of an export to a statement list, returning the
statement list.
@param statements A statement list to which the down-level export statements are to be
appended. If `statements` is `undefined`, a new array is allocated if statements are
appended.
@param exportName The name of the export.
@param expression The expression to export.
@param location The location to use for source maps and comments for the export.
@param allowComments Whether to allow comments on the export.
|
typescript
|
src/compiler/transformers/module/module.ts
| 2,131
|
[
"statements",
"seen",
"exportName",
"expression",
"location?",
"allowComments?",
"liveBinding?"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
parseObjectsInArray
|
private static <Value, Context, T> void parseObjectsInArray(
Consumer<Value> orderedModeCallback,
ParseField field,
BiFunction<XContentParser, Context, T> objectParser,
XContentParser p,
Value v,
Context c,
List<T> fields
) throws IOException {
orderedModeCallback.accept(v);
XContentParser.Token token;
while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.START_OBJECT) {
throw wrapCanBeObjectOrArrayOfObjects(field, p);
}
p.nextToken(); // Move to the first field in the object
fields.add(objectParser.apply(p, c));
p.nextToken(); // Move past the object, should be back to into the array
if (p.currentToken() != XContentParser.Token.END_OBJECT) {
throw wrapCanBeObjectOrArrayOfObjects(field, p);
}
}
}
|
Parses a Value from the given {@link XContentParser}
@param parser the parser to build a value from
@param value the value to fill from the parser
@param context a context that is passed along to all declared field parsers
@return the parsed value
@throws IOException if an IOException occurs.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 519
|
[
"orderedModeCallback",
"field",
"objectParser",
"p",
"v",
"c",
"fields"
] |
void
| true
| 4
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getFirstExpression
|
function getFirstExpression(code, startColumn) {
// Lazy load acorn.
if (tokenizer === undefined) {
const Parser = require('internal/deps/acorn/acorn/dist/acorn').Parser;
tokenizer = FunctionPrototypeBind(Parser.tokenizer, Parser);
}
let lastToken;
let firstMemberAccessNameToken;
let terminatingCol;
let parenLvl = 0;
// Tokenize the line to locate the expression at the startColumn.
// The source line may be an incomplete JavaScript source, so do not parse the source line.
for (const token of tokenizer(code, { ecmaVersion: 'latest' })) {
// Peek before the startColumn.
if (token.start < startColumn) {
// There is a semicolon. This is a statement before the startColumn, so reset the memo.
if (token.type.label === ';') {
firstMemberAccessNameToken = null;
continue;
}
// Try to memo the member access expressions before the startColumn, so that the
// returned source code contains more info:
// assert.ok(value)
// ^ startColumn
// The member expression can also be like
// assert['ok'](value) or assert?.ok(value)
// ^ startColumn ^ startColumn
if (memberAccessTokens.includes(token.type.label) && lastToken?.type.label === 'name') {
// First member access name token must be a 'name'.
firstMemberAccessNameToken ??= lastToken;
} else if (!memberAccessTokens.includes(token.type.label) &&
!memberNameTokens.includes(token.type.label)) {
// Reset the memo if it is not a simple member access.
// For example: assert[(() => 'ok')()](value)
// ^ startColumn
firstMemberAccessNameToken = null;
}
lastToken = token;
continue;
}
// Now after the startColumn, this must be an expression.
if (token.type.label === '(') {
parenLvl++;
continue;
}
if (token.type.label === ')') {
parenLvl--;
if (parenLvl === 0) {
// A matched closing parenthesis found after the startColumn,
// terminate here. Include the token.
// (assert.ok(false), assert.ok(true))
// ^ startColumn
terminatingCol = token.start + 1;
break;
}
continue;
}
if (token.type.label === ';') {
// A semicolon found after the startColumn, terminate here.
// assert.ok(false); assert.ok(true));
// ^ startColumn
terminatingCol = token;
break;
}
// If no semicolon found after the startColumn. The string after the
// startColumn must be the expression.
// assert.ok(false)
// ^ startColumn
}
const start = firstMemberAccessNameToken?.start ?? startColumn;
return StringPrototypeSlice(code, start, terminatingCol);
}
|
Get the first expression in a code string at the startColumn.
@param {string} code source code line
@param {number} startColumn which column the error is constructed
@returns {string}
|
javascript
|
lib/internal/errors/error_source.js
| 70
|
[
"code",
"startColumn"
] | false
| 13
| 6
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
get_bundle
|
def get_bundle(self, name: str, version: str | None = None) -> BaseDagBundle:
"""
Get a DAG bundle by name.
:param name: The name of the DAG bundle.
:param version: The version of the DAG bundle you need (optional). If not provided, ``tracking_ref`` will be used instead.
:return: The DAG bundle.
"""
cfg_bundle = self._bundle_config.get(name)
if not cfg_bundle:
raise ValueError(f"Requested bundle '{name}' is not configured.")
return cfg_bundle.bundle_class(name=name, version=version, **cfg_bundle.kwargs)
|
Get a DAG bundle by name.
:param name: The name of the DAG bundle.
:param version: The version of the DAG bundle you need (optional). If not provided, ``tracking_ref`` will be used instead.
:return: The DAG bundle.
|
python
|
airflow-core/src/airflow/dag_processing/bundles/manager.py
| 325
|
[
"self",
"name",
"version"
] |
BaseDagBundle
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
close
|
private void close() {
logger.get().log(FINER, "closing {0}", this);
closeables.close();
}
|
Attempts to cancel execution of this step. This attempt will fail if the step has already
completed, has already been cancelled, or could not be cancelled for some other reason. If
successful, and this step has not started when {@code cancel} is called, this step should never
run.
<p>If successful, causes the objects captured by this step (if already started) and its input
step(s) for later closing to be closed on their respective {@link Executor}s. If any such calls
specified {@link MoreExecutors#directExecutor()}, those objects will be closed synchronously.
@param mayInterruptIfRunning {@code true} if the thread executing this task should be
interrupted; otherwise, in-progress tasks are allowed to complete, but the step will be
cancelled regardless
@return {@code false} if the step could not be cancelled, typically because it has already
completed normally; {@code true} otherwise
|
java
|
android/guava/src/com/google/common/util/concurrent/ClosingFuture.java
| 1,097
|
[] |
void
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
_quantile
|
def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:
"""
Compute the quantiles of self for each quantile in `qs`.
Parameters
----------
qs : np.ndarray[float64]
interpolation: str
Returns
-------
same type as self
"""
pa_dtype = self._pa_array.type
data = self._pa_array
if pa.types.is_temporal(pa_dtype):
# https://github.com/apache/arrow/issues/33769 in these cases
# we can cast to ints and back
nbits = pa_dtype.bit_width
if nbits == 32:
data = data.cast(pa.int32())
else:
data = data.cast(pa.int64())
result = pc.quantile(data, q=qs, interpolation=interpolation)
if pa.types.is_temporal(pa_dtype):
if pa.types.is_floating(result.type):
result = pc.floor(result)
nbits = pa_dtype.bit_width
if nbits == 32:
result = result.cast(pa.int32())
else:
result = result.cast(pa.int64())
result = result.cast(pa_dtype)
return self._from_pyarrow_array(result)
|
Compute the quantiles of self for each quantile in `qs`.
Parameters
----------
qs : np.ndarray[float64]
interpolation: str
Returns
-------
same type as self
|
python
|
pandas/core/arrays/arrow/array.py
| 2,355
|
[
"self",
"qs",
"interpolation"
] |
Self
| true
| 8
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
assertOpen
|
public void assertOpen(Supplier<String> message) {
if (isClosed.get())
throw new IllegalStateException(message.get());
}
|
This method serves as an assert that the {@link IdempotentCloser} is still open. If it is open, this method
simply returns. If it is closed, a new {@link IllegalStateException} will be thrown using the supplied message.
@param message {@link Supplier} that supplies the message for the exception
|
java
|
clients/src/main/java/org/apache/kafka/common/internals/IdempotentCloser.java
| 91
|
[
"message"
] |
void
| true
| 2
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
loadNestedDirectory
|
private static ZipContent loadNestedDirectory(Source source, ZipContent zip, Entry directoryEntry)
throws IOException {
debug.log("Loading nested directory entry '%s' from '%s'", source.nestedEntryName(), source.path());
if (!source.nestedEntryName().endsWith("/")) {
throw new IllegalArgumentException("Nested entry name must end with '/'");
}
String directoryName = directoryEntry.getName();
zip.data.open();
try {
Loader loader = new Loader(source, directoryEntry, zip.data, zip.centralDirectoryPos, zip.size());
for (int cursor = 0; cursor < zip.size(); cursor++) {
int index = zip.lookupIndexes[cursor];
if (index != directoryEntry.getLookupIndex()) {
long pos = zip.getCentralDirectoryFileHeaderRecordPos(index);
ZipCentralDirectoryFileHeaderRecord centralRecord = ZipCentralDirectoryFileHeaderRecord
.load(zip.data, pos);
long namePos = pos + ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET;
short nameLen = centralRecord.fileNameLength();
if (ZipString.startsWith(loader.buffer, zip.data, namePos, nameLen, directoryName) != -1) {
loader.add(centralRecord, pos, true);
}
}
}
return loader.finish(Kind.NESTED_DIRECTORY, zip.commentPos, zip.commentLength, zip.hasJarSignatureFile);
}
catch (IOException | RuntimeException ex) {
zip.data.close();
throw ex;
}
}
|
Returns the location in the data that the archive actually starts. For most
files the archive data will start at 0, however, it is possible to have
prefixed bytes (often used for startup scripts) at the beginning of the data.
@param data the source data
@param eocd the end of central directory record
@param zip64Eocd the zip64 end of central directory record or {@code null}
@return the offset within the data where the archive begins
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 659
|
[
"source",
"zip",
"directoryEntry"
] |
ZipContent
| true
| 6
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
from
|
public <T> Source<T> from(Supplier<? extends @Nullable T> supplier) {
Assert.notNull(supplier, "'supplier' must not be null");
Source<T> source = getSource(supplier);
if (this.sourceOperator != null) {
source = this.sourceOperator.apply(source);
}
return source;
}
|
Return a new {@link Source} from the specified value supplier that can be used to
perform the mapping.
@param <T> the source type
@param supplier the value supplier
@return a {@link Source} that can be used to complete the mapping
@see #from(Object)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 108
|
[
"supplier"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
fit
|
def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
"""
X = validate_data(self, X)
bandwidth = self.bandwidth
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
seeds = self.seeds
if seeds is None:
if self.bin_seeding:
seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
# We use n_jobs=1 because this will be used in nested calls under
# parallel calls to _mean_shift_single_seed so there is no need for
# for further parallelism.
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=self.n_jobs)(
delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
for seed in seeds
)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i][1]: # i.e. len(points_within) > 0
center_intensity_dict[all_res[i][0]] = all_res[i][1]
self.n_iter_ = max([x[2] for x in all_res])
if not center_intensity_dict:
# nothing near seeds
raise ValueError(
"No point was within bandwidth=%f of any seed. Try a different seeding"
" strategy or increase the bandwidth."
% bandwidth
)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(
center_intensity_dict.items(),
key=lambda tup: (tup[1], tup[0]),
reverse=True,
)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
sorted_centers
)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
0
]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if self.cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
self.cluster_centers_, self.labels_ = cluster_centers, labels
return self
|
Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
|
python
|
sklearn/cluster/_mean_shift.py
| 470
|
[
"self",
"X",
"y"
] | false
| 12
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
getMessage
|
@Override
public String getMessage() {
// requireNonNull is safe because ExampleStackTrace sets a non-null message.
StringBuilder message = new StringBuilder(requireNonNull(super.getMessage()));
for (Throwable t = conflictingStackTrace; t != null; t = t.getCause()) {
message.append(", ").append(t.getMessage());
}
return message.toString();
}
|
Appends the chain of messages from the {@code conflictingStackTrace} to the original {@code
message}.
|
java
|
android/guava/src/com/google/common/util/concurrent/CycleDetectingLockFactory.java
| 550
|
[] |
String
| true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
max
|
@ParametricNullness
public <E extends T> E max(Iterator<E> iterator) {
// let this throw NoSuchElementException as necessary
E maxSoFar = iterator.next();
while (iterator.hasNext()) {
maxSoFar = this.<E>max(maxSoFar, iterator.next());
}
return maxSoFar;
}
|
Returns the greatest of the specified values according to this ordering. If there are multiple
greatest values, the first of those is returned. The iterator will be left exhausted: its
{@code hasNext()} method will return {@code false}.
<p><b>Java 8+ users:</b> Use {@code Streams.stream(iterator).max(thisComparator).get()} instead
(but note that it does not guarantee which tied maximum element is returned).
@param iterator the iterator whose maximum element is to be determined
@throws NoSuchElementException if {@code iterator} is empty
@throws ClassCastException if the parameters are not <i>mutually comparable</i> under this
ordering.
@since 11.0
|
java
|
android/guava/src/com/google/common/collect/Ordering.java
| 652
|
[
"iterator"
] |
E
| true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
toStringYesNo
|
public static String toStringYesNo(final boolean bool) {
return toString(bool, YES, NO);
}
|
Converts a boolean to a String returning {@code 'yes'}
or {@code 'no'}.
<pre>
BooleanUtils.toStringYesNo(true) = "yes"
BooleanUtils.toStringYesNo(false) = "no"
</pre>
@param bool the Boolean to check
@return {@code 'yes'}, {@code 'no'}, or {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 1,122
|
[
"bool"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
format
|
@Override
public String format(final Date date) {
final Calendar c = newCalendar();
c.setTime(date);
return applyRulesToString(c);
}
|
Compares two objects for equality.
@param obj the object to compare to.
@return {@code true} if equal.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 1,152
|
[
"date"
] |
String
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
put
|
public JSONArray put(double value) throws JSONException {
this.values.add(JSON.checkDouble(value));
return this;
}
|
Appends {@code value} to the end of this array.
@param value a finite value. May not be {@link Double#isNaN() NaNs} or
{@link Double#isInfinite() infinities}.
@return this array.
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 145
|
[
"value"
] |
JSONArray
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
containsPreprocessorDirectives
|
static bool containsPreprocessorDirectives(const RecordDecl *Decl,
const SourceManager &SrcMgr,
const LangOptions &LangOpts) {
std::pair<FileID, unsigned> FileAndOffset =
SrcMgr.getDecomposedLoc(Decl->field_begin()->getBeginLoc());
assert(!Decl->field_empty());
auto LastField = Decl->field_begin();
while (std::next(LastField) != Decl->field_end())
++LastField;
unsigned EndOffset = SrcMgr.getFileOffset(LastField->getEndLoc());
StringRef SrcBuffer = SrcMgr.getBufferData(FileAndOffset.first);
Lexer L(SrcMgr.getLocForStartOfFile(FileAndOffset.first), LangOpts,
SrcBuffer.data(), SrcBuffer.data() + FileAndOffset.second,
SrcBuffer.data() + SrcBuffer.size());
IdentifierTable Identifiers(LangOpts);
clang::Token T;
while (!L.LexFromRawLexer(T) && L.getCurrentBufferOffset() < EndOffset) {
if (T.getKind() == tok::hash) {
L.LexFromRawLexer(T);
if (T.getKind() == tok::raw_identifier) {
clang::IdentifierInfo &II = Identifiers.get(T.getRawIdentifier());
if (II.getPPKeywordID() != clang::tok::pp_not_keyword)
return true;
}
}
}
return false;
}
|
\returns nullptr if the name is ambiguous or not found.
|
cpp
|
clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp
| 83
|
[] | true
| 7
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
of
|
public static <E extends Throwable> Duration of(final FailableConsumer<Instant, E> consumer) throws E {
return since(now(consumer::accept));
}
|
Runs the lambda and returns the duration of its execution.
@param <E> The type of exception throw by the lambda.
@param consumer What to execute.
@return The Duration of execution.
@throws E thrown by the lambda.
@see StopWatch
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationUtils.java
| 169
|
[
"consumer"
] |
Duration
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
parseOptional
|
function parseOptional(t: SyntaxKind): boolean {
if (token() === t) {
nextToken();
return true;
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 2,515
|
[
"t"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
generate_private_key
|
def generate_private_key(key_type: str = "RSA", key_size: int = 2048):
"""
Generate a valid private key for testing.
Args:
key_type (str): Type of key to generate. Can be "RSA" or "Ed25516". Defaults to "RSA".
key_size (int): Size of the key in bits. Only applicable for RSA keys. Defaults to 2048.
Returns:
tuple: A tuple containing the private key in PEM format and the corresponding public key in PEM format.
"""
from cryptography.hazmat.primitives.asymmetric import ed25519, rsa
if key_type == "RSA":
# Generate an RSA private key
return rsa.generate_private_key(public_exponent=65537, key_size=key_size, backend=default_backend())
if key_type == "Ed25519":
return ed25519.Ed25519PrivateKey.generate()
raise ValueError(f"unsupported key type: {key_type}")
|
Generate a valid private key for testing.
Args:
key_type (str): Type of key to generate. Can be "RSA" or "Ed25516". Defaults to "RSA".
key_size (int): Size of the key in bits. Only applicable for RSA keys. Defaults to 2048.
Returns:
tuple: A tuple containing the private key in PEM format and the corresponding public key in PEM format.
|
python
|
airflow-core/src/airflow/api_fastapi/auth/tokens.py
| 462
|
[
"key_type",
"key_size"
] | true
| 3
| 8.24
|
apache/airflow
| 43,597
|
google
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.