function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
defaultResourceNameForMethod
|
/**
 * Derive the default resource name from a method name: a setter such as
 * {@code setFoo} maps to the property name {@code foo}; any other method
 * name is returned unchanged.
 * @param methodName the method name to derive the resource name from
 * @return the derived resource name
 */
private static String defaultResourceNameForMethod(String methodName) {
	boolean isSetter = methodName.startsWith("set") && methodName.length() > 3;
	if (!isSetter) {
		return methodName;
	}
	// Strip the "set" prefix and lower-case the leading property character.
	String property = methodName.substring(3);
	return StringUtils.uncapitalizeAsProperty(property);
}
|
Create a new {@link ResourceMethodResolver} for the specified method
and resource name.
@param methodName the method name
@param parameterType the parameter type
@param resourceName the resource name
@return a new {@link ResourceMethodResolver} instance
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ResourceElementResolver.java
| 106
|
[
"methodName"
] |
String
| true
| 3
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parenthesizeConditionOfConditionalExpression
|
/**
 * Wraps the condition of a ConditionalExpression in parentheses when its
 * precedence is not strictly greater than that of the conditional operator,
 * preserving the intended order of operations.
 */
function parenthesizeConditionOfConditionalExpression(condition: Expression): Expression {
    const conditionalPrecedence = getOperatorPrecedence(SyntaxKind.ConditionalExpression, SyntaxKind.QuestionToken);
    const emittedCondition = skipPartiallyEmittedExpressions(condition);
    const conditionPrecedence = getExpressionPrecedence(emittedCondition);
    // Only a condition that binds tighter than `?:` itself may stay bare.
    const bindsTighter = compareValues(conditionPrecedence, conditionalPrecedence) === Comparison.GreaterThan;
    return bindsTighter ? condition : factory.createParenthesizedExpression(condition);
}
|
Wraps the operand to a BinaryExpression in parentheses if they are needed to preserve the intended
order of operations.
@param binaryOperator The operator for the BinaryExpression.
@param operand The operand for the BinaryExpression.
@param isLeftSideOfBinary A value indicating whether the operand is the left side of the
BinaryExpression.
|
typescript
|
src/compiler/factory/parenthesizerRules.ts
| 324
|
[
"condition"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getAdditionalModulePaths
|
/**
 * Get additional module paths based on the baseUrl of a compilerOptions object.
 *
 * @param {Object} options compilerOptions object (only `baseUrl` is consulted)
 * @returns {string|null|Array<string>} '' when no baseUrl is configured, null
 *   when no additional paths are needed, or an array of module paths.
 */
function getAdditionalModulePaths(options = {}) {
  const { baseUrl } = options;
  if (!baseUrl) {
    return '';
  }

  const baseUrlResolved = path.resolve(paths.appPath, baseUrl);

  // True when `baseUrlResolved` points at exactly `target`.
  const pointsAt = target => path.relative(target, baseUrlResolved) === '';

  // `node_modules` is already the default resolution root, so nothing to add.
  if (pointsAt(paths.appNodeModules)) {
    return null;
  }

  // Allow the user to set the `baseUrl` to `appSrc`.
  if (pointsAt(paths.appSrc)) {
    return [paths.appSrc];
  }

  // If the path is equal to the root directory we ignore it here.
  // We don't want to allow importing from the root directly as source files
  // are not transpiled outside of `src`. We do allow importing them with the
  // absolute path (e.g. `src/Components/Button.js`) but we set that up with
  // an alias.
  if (pointsAt(paths.appPath)) {
    return null;
  }

  // Any other value is unsupported.
  throw new Error(
    chalk.red.bold(
      "Your project's `baseUrl` can only be set to `src` or `node_modules`." +
        ' Create React App does not support other values at this time.'
    )
  );
}
|
Get additional module paths based on the baseUrl of a compilerOptions object.
@param {Object} options
|
javascript
|
fixtures/flight/config/modules.js
| 14
|
[] | false
| 5
| 6.08
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
maybe_prepare_scalar_for_op
|
def maybe_prepare_scalar_for_op(obj, shape: Shape):
    """
    Cast non-pandas objects to pandas types to unify behavior of arithmetic
    and comparison operations.

    Parameters
    ----------
    obj : object
        Scalar operand to normalize.
    shape : tuple[int]
        Shape used when a NaT scalar must be broadcast to an array.

    Returns
    -------
    out : object

    Notes
    -----
    Be careful to call this *after* determining the `name` attribute to be
    attached to the result of the arithmetic operation.
    """
    obj_type = type(obj)

    if obj_type is datetime.timedelta:
        # GH#22390 cast up to Timedelta to rely on Timedelta
        # implementation; otherwise operation against numeric-dtype
        # raises TypeError
        return Timedelta(obj)

    if obj_type is datetime.datetime:
        # cast up to Timestamp to rely on Timestamp implementation,
        # see Timedelta above
        return Timestamp(obj)

    if isinstance(obj, np.datetime64):
        # GH#28080 numpy casts integer-dtype to datetime64 when doing
        # array[int] + datetime64, which we do not allow
        if not isna(obj):
            return Timestamp(obj)
        from pandas.core.arrays import DatetimeArray

        # Avoid possible ambiguities with pd.NaT
        # GH 52295
        if is_unitless(obj.dtype):
            # Use second resolution so that e.g. `left - np.datetime64("NaT")`
            # retains the unit of left.unit
            obj = obj.astype("datetime64[s]")
        elif not is_supported_dtype(obj.dtype):
            obj = obj.astype(get_supported_dtype(obj.dtype))
        broadcast = np.broadcast_to(obj, shape)
        return DatetimeArray._simple_new(broadcast, dtype=broadcast.dtype)

    if isinstance(obj, np.timedelta64):
        if not isna(obj):
            # In particular non-nanosecond timedelta64 needs to be cast to
            # nanoseconds, or else we get undesired behavior like
            # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
            return Timedelta(obj)
        from pandas.core.arrays import TimedeltaArray

        # wrapping timedelta64("NaT") in Timedelta returns NaT,
        # which would incorrectly be treated as a datetime-NaT, so
        # we broadcast and wrap in a TimedeltaArray
        # GH 52295
        if is_unitless(obj.dtype):
            # Use second resolution so that e.g. `left + np.timedelta64("NaT")`
            # retains the unit of left.unit
            obj = obj.astype("timedelta64[s]")
        elif not is_supported_dtype(obj.dtype):
            obj = obj.astype(get_supported_dtype(obj.dtype))
        broadcast = np.broadcast_to(obj, shape)
        return TimedeltaArray._simple_new(broadcast, dtype=broadcast.dtype)

    # We want NumPy numeric scalars to behave like Python scalars post NEP 50.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)

    return obj
|
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
Parameters
----------
obj: object
shape : tuple[int]
Returns
-------
out : object
Notes
-----
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
|
python
|
pandas/core/ops/array_ops.py
| 512
|
[
"obj",
"shape"
] | true
| 13
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
_block_info_recursion
|
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.
    depth : int, optional
        Current recursion depth; external callers should leave this at 0.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices: list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays: list of ndarray
        The data to assign to each slice of the full array
    """
    if depth < max_depth:
        # Recurse into each nested list, then combine the children's
        # shapes/slices/arrays along the axis this depth corresponds to.
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1)
              for arr in arrays])
        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)
        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]
        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)
        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
|
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
max_depth : list of int
The number of nested lists
result_ndim : int
The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
required to be prepended with ``(Ellipsis, )`` to obtain the correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
|
python
|
numpy/_core/shape_base.py
| 695
|
[
"arrays",
"max_depth",
"result_ndim",
"depth"
] | false
| 3
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
addIfHasValue
|
/**
 * Copy the given value into {@code properties} under {@code name}, skipping
 * values that are null, empty, or whitespace-only.
 * @param properties the properties to add to
 * @param name the property name
 * @param value the candidate value (may be {@code null})
 */
private void addIfHasValue(Properties properties, String name, @Nullable String value) {
	if (!StringUtils.hasText(value)) {
		return;
	}
	properties.put(name, value);
}
|
Creates a new {@code BuildPropertiesWriter} that will write to the given
{@code outputFile}.
@param outputFile the output file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/BuildPropertiesWriter.java
| 91
|
[
"properties",
"name",
"value"
] |
void
| true
| 2
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_check_skiprows_func
|
def _check_skiprows_func(
self,
skiprows: Callable,
rows_to_use: int,
) -> int:
"""
Determine how many file rows are required to obtain `nrows` data
rows when `skiprows` is a function.
Parameters
----------
skiprows : function
The function passed to read_excel by the user.
rows_to_use : int
The number of rows that will be needed for the header and
the data.
Returns
-------
int
"""
i = 0
rows_used_so_far = 0
while rows_used_so_far < rows_to_use:
if not skiprows(i):
rows_used_so_far += 1
i += 1
return i
|
Determine how many file rows are required to obtain `nrows` data
rows when `skiprows` is a function.
Parameters
----------
skiprows : function
The function passed to read_excel by the user.
rows_to_use : int
The number of rows that will be needed for the header and
the data.
Returns
-------
int
|
python
|
pandas/io/excel/_base.py
| 607
|
[
"self",
"skiprows",
"rows_to_use"
] |
int
| true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_exchange
|
def get_exchange(conn, name=EVENT_EXCHANGE_NAME):
    """Get exchange used for sending events.

    Arguments:
        conn (kombu.Connection): Connection used for sending/receiving events.
        name (str): Name of the exchange.  Default is ``celeryev``.

    Note:
        The event type changes if Redis is used as the transport
        (from topic -> fanout).
    """
    exchange = copy(event_exchange)
    # Quick hack for Issue #436: these transports only support fanout exchanges.
    if conn.transport.driver_type in {'redis', 'gcpubsub'}:
        exchange.type = 'fanout'
    if exchange.name != name:
        exchange.name = name
    return exchange
|
Get exchange used for sending events.
Arguments:
conn (kombu.Connection): Connection used for sending/receiving events.
name (str): Name of the exchange. Default is ``celeryev``.
Note:
The event type changes if Redis is used as the transport
(from topic -> fanout).
|
python
|
celery/events/event.py
| 46
|
[
"conn",
"name"
] | false
| 3
| 6.24
|
celery/celery
| 27,741
|
google
| false
|
|
shouldEmitAliasDeclaration
|
/**
 * Determines whether an alias declaration should be emitted: always under
 * `verbatimModuleSyntax` or inside a JS file; otherwise only when the
 * resolver reports the alias as actually referenced.
 */
function shouldEmitAliasDeclaration(node: Node): boolean {
    if (compilerOptions.verbatimModuleSyntax) {
        return true;
    }
    return isInJSFile(node) || resolver.isReferencedAliasDeclaration(node);
}
|
Hooks node substitutions.
@param hint A hint as to the intended usage of the node.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,743
|
[
"node"
] | true
| 3
| 6.64
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
fuzz_spec_custom
|
def fuzz_spec_custom(self):
    """
    Generate a random Spec based on this template's distribution preferences.

    Returns:
        Spec: Either a TensorSpec or ScalarSpec according to template's distribution
    """
    import random

    from torchfuzz.tensor_fuzzer import fuzz_torch_tensor_type

    # The template's configuration decides what kind of spec may be produced.
    distribution = self.spec_distribution()
    dtype = fuzz_torch_tensor_type("default")

    tensors_ok = distribution.get("allow_tensors", True)
    scalars_ok = distribution.get("allow_scalars", True)
    if not (tensors_ok or scalars_ok):
        raise ValueError("Template must allow at least one of tensors or scalars")

    if not scalars_ok:
        # Tensors are the only permitted option.
        return self._generate_tensor_spec(dtype)
    if not tensors_ok:
        # Scalars are the only permitted option.
        return self._generate_scalar_spec(dtype)

    # Both kinds allowed: sample according to the configured probability.
    tensor_prob = distribution.get("tensor_prob", 0.8)
    if random.random() < tensor_prob:
        return self._generate_tensor_spec(dtype)
    return self._generate_scalar_spec(dtype)
|
Generate a random Spec based on this template's distribution preferences.
Returns:
Spec: Either a TensorSpec or ScalarSpec according to template's distribution
|
python
|
tools/experimental/torchfuzz/codegen.py
| 49
|
[
"self"
] | false
| 8
| 6.64
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
resolveConstructorArguments
|
/**
 * Resolve the constructor arguments in the {@code [start, end)} range of the
 * given argument array, converting Groovy-specific values ({@link GString},
 * nested lists and maps) into their Spring-friendly equivalents.
 * @param args the full argument array
 * @param start index of the first constructor argument (inclusive)
 * @param end index past the last constructor argument (exclusive)
 * @return an immutable list of the resolved constructor arguments
 */
protected List<Object> resolveConstructorArguments(Object[] args, int start, int end) {
	Object[] resolved = Arrays.copyOfRange(args, start, end);
	for (int index = 0; index < resolved.length; index++) {
		Object candidate = resolved[index];
		if (candidate instanceof GString) {
			resolved[index] = candidate.toString();
		}
		else if (candidate instanceof List<?> list) {
			resolved[index] = manageListIfNecessary(list);
		}
		else if (candidate instanceof Map<?, ?> map) {
			resolved[index] = manageMapIfNecessary(map);
		}
	}
	return List.of(resolved);
}
|
This method is called when a bean definition node is called.
@param beanName the name of the bean to define
@param args the arguments to the bean. The first argument is the class name, the last
argument is sometimes a closure. All the arguments in between are constructor arguments.
@return the bean definition wrapper
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
| 545
|
[
"args",
"start",
"end"
] | true
| 5
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
cancel_task_execution
|
def cancel_task_execution(self, task_execution_arn: str) -> None:
    """
    Cancel a TaskExecution for the specified ``task_execution_arn``.

    .. seealso::
        - :external+boto3:py:meth:`DataSync.Client.cancel_task_execution`

    :param task_execution_arn: TaskExecutionArn.
    :raises AirflowBadRequest: If ``task_execution_arn`` is empty.
    """
    # Reject empty ARNs up front rather than letting the AWS call fail later.
    if not task_execution_arn:
        raise AirflowBadRequest("task_execution_arn not specified")
    client = self.get_conn()
    client.cancel_task_execution(TaskExecutionArn=task_execution_arn)
|
Cancel a TaskExecution for the specified ``task_execution_arn``.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.cancel_task_execution`
:param task_execution_arn: TaskExecutionArn.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
| 238
|
[
"self",
"task_execution_arn"
] |
None
| true
| 2
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
asSupplier
|
/**
 * Produces a {@link Supplier} for a given <em>supplier</em> Method. The Supplier
 * return type must match the method's return type.
 * <p>
 * Only works with static methods.
 * </p>
 *
 * @param <R> The Method return type.
 * @param method the method to invoke.
 * @return a correctly-typed wrapper for the given target.
 */
@SuppressWarnings("unchecked")
public static <R> Supplier<R> asSupplier(final Method method) {
    return asInterfaceInstance(Supplier.class, method);
}
|
Produces a {@link Supplier} for a given <em>supplier</em> Method. The Supplier return type must match the method's
return type.
<p>
Only works with static methods.
</p>
@param <R> The Method return type.
@param method the method to invoke.
@return a correctly-typed wrapper for the given target.
|
java
|
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
| 224
|
[
"method"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
iterator
|
/**
 * Constructs an {@link Iterator} over each day in a date range defined by a
 * focus date and range style.
 * <p>For instance, passing Thursday, July 4, 2002 and a
 * {@code RANGE_MONTH_SUNDAY} will return an {@link Iterator} that starts with
 * Sunday, June 30, 2002 and ends with Saturday, August 3, 2002, returning a
 * Calendar instance for each intermediate day.</p>
 *
 * @param calendar the date to work with, not null.
 * @param rangeStyle the style constant to use; must be one of the
 *        RANGE_MONTH_* or RANGE_WEEK_* constants.
 * @return the date iterator, not null.
 * @throws NullPointerException if calendar is {@code null}.
 * @throws IllegalArgumentException if the rangeStyle is invalid.
 */
public static Iterator<Calendar> iterator(final Calendar calendar, final int rangeStyle) {
    Objects.requireNonNull(calendar, "calendar");
    final Calendar start;
    final Calendar end;
    int startCutoff = Calendar.SUNDAY;
    int endCutoff = Calendar.SATURDAY;
    switch (rangeStyle) {
    case RANGE_MONTH_SUNDAY:
    case RANGE_MONTH_MONDAY:
        //Set start to the first of the month
        start = truncate(calendar, Calendar.MONTH);
        //Set end to the last of the month
        end = (Calendar) start.clone();
        end.add(Calendar.MONTH, 1);
        end.add(Calendar.DATE, -1);
        //Loop start back to the previous sunday or monday
        if (rangeStyle == RANGE_MONTH_MONDAY) {
            startCutoff = Calendar.MONDAY;
            endCutoff = Calendar.SUNDAY;
        }
        break;
    case RANGE_WEEK_SUNDAY:
    case RANGE_WEEK_MONDAY:
    case RANGE_WEEK_RELATIVE:
    case RANGE_WEEK_CENTER:
        //Set start and end to the current date
        start = truncate(calendar, Calendar.DATE);
        end = truncate(calendar, Calendar.DATE);
        // Pick the week boundary days for the chosen week style.
        switch (rangeStyle) {
        case RANGE_WEEK_SUNDAY:
            //already set by default
            break;
        case RANGE_WEEK_MONDAY:
            startCutoff = Calendar.MONDAY;
            endCutoff = Calendar.SUNDAY;
            break;
        case RANGE_WEEK_RELATIVE:
            // Week starts on the focus date's own day of week.
            startCutoff = calendar.get(Calendar.DAY_OF_WEEK);
            endCutoff = startCutoff - 1;
            break;
        case RANGE_WEEK_CENTER:
            // Week is centered on the focus date (3 days either side).
            startCutoff = calendar.get(Calendar.DAY_OF_WEEK) - 3;
            endCutoff = calendar.get(Calendar.DAY_OF_WEEK) + 3;
            break;
        default:
            break;
        }
        break;
    default:
        throw new IllegalArgumentException("The range style " + rangeStyle + " is not valid.");
    }
    // Normalize cutoffs into Calendar's valid day-of-week range (SUNDAY..SATURDAY).
    if (startCutoff < Calendar.SUNDAY) {
        startCutoff += 7;
    }
    if (startCutoff > Calendar.SATURDAY) {
        startCutoff -= 7;
    }
    if (endCutoff < Calendar.SUNDAY) {
        endCutoff += 7;
    }
    if (endCutoff > Calendar.SATURDAY) {
        endCutoff -= 7;
    }
    // Walk start backward and end forward to the computed cutoff days.
    while (start.get(Calendar.DAY_OF_WEEK) != startCutoff) {
        start.add(Calendar.DATE, -1);
    }
    while (end.get(Calendar.DAY_OF_WEEK) != endCutoff) {
        end.add(Calendar.DATE, 1);
    }
    return new DateIterator(start, end);
}
|
Constructs an {@link Iterator} over each day in a date
range defined by a focus date and range style.
<p>For instance, passing Thursday, July 4, 2002 and a
{@code RANGE_MONTH_SUNDAY} will return an {@link Iterator}
that starts with Sunday, June 30, 2002 and ends with Saturday, August 3,
2002, returning a Calendar instance for each intermediate day.</p>
<p>This method provides an iterator that returns Calendar objects.
The days are progressed using {@link Calendar#add(int, int)}.</p>
@param calendar the date to work with, not null.
@param rangeStyle the style constant to use. Must be one of
{@link DateUtils#RANGE_MONTH_SUNDAY},
{@link DateUtils#RANGE_MONTH_MONDAY},
{@link DateUtils#RANGE_WEEK_SUNDAY},
{@link DateUtils#RANGE_WEEK_MONDAY},
{@link DateUtils#RANGE_WEEK_RELATIVE},
{@link DateUtils#RANGE_WEEK_CENTER}.
@return the date iterator, not null.
@throws NullPointerException if calendar is {@code null}.
@throws IllegalArgumentException if the rangeStyle is invalid.
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 971
|
[
"calendar",
"rangeStyle"
] | true
| 8
| 7.52
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
value
|
/**
 * Returns the real password string.
 * @return the real password string
 */
public String value() {
    return this.value;
}
|
Returns real password string
@return real password string
|
java
|
clients/src/main/java/org/apache/kafka/common/config/types/Password.java
| 64
|
[] |
String
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
leave
|
/**
 * Leaves this monitor. May be called only by a thread currently occupying this monitor.
 */
public void leave() {
    ReentrantLock lock = this.lock;
    try {
        // No need to signal if we will still be holding the lock when we return
        if (lock.getHoldCount() == 1) {
            signalNextWaiter();
        }
    } finally {
        lock.unlock(); // Will throw IllegalMonitorStateException if not held
    }
}
|
Leaves this monitor. May be called only by a thread currently occupying this monitor.
|
java
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
| 939
|
[] |
void
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
getSystemThreadGroup
|
/**
 * Gets the system thread group (sometimes also referred to as the "root thread
 * group") by walking the parent chain from the current thread's group.
 * <p>
 * This method returns null if this thread has died (been stopped).
 * </p>
 * @return the system thread group.
 */
public static ThreadGroup getSystemThreadGroup() {
    ThreadGroup candidate = Thread.currentThread().getThreadGroup();
    // The system group is the only group whose parent is null.
    for (ThreadGroup parent = candidate; parent != null; parent = parent.getParent()) {
        candidate = parent;
    }
    return candidate;
}
|
Gets the system thread group (sometimes also referred to as the "root thread group").
<p>
This method returns null if this thread has died (been stopped).
</p>
@return the system thread group.
@throws SecurityException if the current thread cannot modify thread groups from this thread's thread group up to the system thread group.
|
java
|
src/main/java/org/apache/commons/lang3/ThreadUtils.java
| 462
|
[] |
ThreadGroup
| true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
setupQuic
|
/**
 * Expose the `quic` builtin module to user code, but only when the
 * `--experimental-quic` CLI option was passed.
 */
function setupQuic() {
  const quicEnabled = getOptionValue('--experimental-quic');
  if (!quicEnabled) {
    return;
  }
  const { BuiltinModule } = require('internal/bootstrap/realm');
  BuiltinModule.allowRequireByUsers('quic');
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 390
|
[] | false
| 2
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
putIfHasLength
|
/**
 * Store the given value in {@code attributes} under {@code name}, ignoring
 * values that are {@code null} or empty.
 * @param attributes the attributes to update
 * @param name the attribute name
 * @param value the candidate value (may be {@code null})
 */
private void putIfHasLength(Attributes attributes, String name, @Nullable String value) {
	if (!StringUtils.hasLength(value)) {
		return;
	}
	attributes.putValue(name, value);
}
|
Return the {@link File} to use to back up the original source.
@return the file to use to back up the original source
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 431
|
[
"attributes",
"name",
"value"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getIdentifierToken
|
/**
 * Classifies the current `tokenValue` as either a keyword token or a plain
 * identifier, caching the result in `token`.
 */
function getIdentifierToken(): SyntaxKind.Identifier | KeywordSyntaxKind {
    // Reserved words are between 2 and 12 characters long and start with a lowercase letter
    const length = tokenValue.length;
    const couldBeKeyword = length >= 2 && length <= 12 &&
        tokenValue.charCodeAt(0) >= CharacterCodes.a &&
        tokenValue.charCodeAt(0) <= CharacterCodes.z;
    if (couldBeKeyword) {
        const keyword = textToKeyword.get(tokenValue);
        if (keyword !== undefined) {
            return token = keyword;
        }
    }
    return token = SyntaxKind.Identifier;
}
|
Sets the current 'tokenValue' and returns a NoSubstitutionTemplateLiteral or
a literal component of a TemplateExpression.
|
typescript
|
src/compiler/scanner.ts
| 1,815
|
[] | true
| 6
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
title
|
def title(a):
    """
    Return element-wise title cased version of string or unicode.

    Title case words start with uppercase characters, all remaining cased
    characters are lowercase.

    Calls :meth:`str.title` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
        Input array.

    Returns
    -------
    out : ndarray
        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
        depending on input types

    See Also
    --------
    str.title

    Examples
    --------
    >>> import numpy as np
    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
        dtype='|S5')
    >>> np.strings.title(c)
    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
        dtype='|S5')
    """
    arr = np.asarray(a)
    # Delegate to the per-element string loop, preserving the input dtype.
    return _vec_string(arr, arr.dtype, 'title')
|
Return element-wise title cased version of string or unicode.
Title case words start with uppercase characters, all remaining cased
characters are lowercase.
Calls :meth:`str.title` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.title
Examples
--------
>>> import numpy as np
>>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
dtype='|S5')
>>> np.strings.title(c)
array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
dtype='|S5')
|
python
|
numpy/_core/strings.py
| 1,244
|
[
"a"
] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
match
|
/**
 * Matches and collects any named captures.
 * @param utf8Bytes array containing the text to match against encoded in utf-8
 * @param offset offset into {@code utf8Bytes} of the start of the text
 * @param length length of the text to match
 * @param extracter collector for captures. {@link GrokCaptureConfig#nativeExtracter} can build these.
 * @return true if there was a match, false otherwise
 * @throws RuntimeException if the match timed out and was interrupted by the watchdog
 */
public boolean match(byte[] utf8Bytes, int offset, int length, GrokCaptureExtracter extracter) {
    int end = offset + length;
    Matcher matcher = compiledExpression.matcher(utf8Bytes, offset, end);
    int searchResult;
    try {
        // The watchdog interrupts long-running matches while the search executes.
        matcherWatchdog.register(matcher);
        searchResult = matcher.search(offset, end, Option.DEFAULT);
    } finally {
        matcherWatchdog.unregister(matcher);
    }
    if (searchResult == Matcher.INTERRUPTED) {
        throw new RuntimeException(
            "grok pattern matching was interrupted after [" + matcherWatchdog.maxExecutionTimeInMillis() + "] ms"
        );
    }
    if (searchResult == Matcher.FAILED) {
        return false;
    }
    extracter.extract(utf8Bytes, offset, matcher.getEagerRegion());
    return true;
}
|
Matches and collects any named captures.
@param utf8Bytes array containing the text to match against encoded in utf-8
@param offset offset {@code utf8Bytes} of the start of the text
@param length length of the text to match
@param extracter collector for captures. {@link GrokCaptureConfig#nativeExtracter} can build these.
@return true if there was a match, false otherwise
@throws RuntimeException if there was a timeout
|
java
|
libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
| 233
|
[
"utf8Bytes",
"offset",
"length",
"extracter"
] | true
| 3
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
repeat
|
/**
 * Returns padding using the specified delimiter repeated to a given length.
 * <pre>
 * StringUtils.repeat('e', 0)  = ""
 * StringUtils.repeat('e', 3)  = "eee"
 * StringUtils.repeat('e', -2) = ""
 * </pre>
 * <p>
 * Note: this method does not support padding with Unicode Supplementary
 * Characters as they require a pair of {@code char}s to be represented. If you
 * need full I18N support consider using {@link #repeat(String, int)} instead.
 * </p>
 * @param repeat character to repeat.
 * @param count number of times to repeat char, negative treated as zero.
 * @return String with repeated character.
 * @see #repeat(String, int)
 */
public static String repeat(final char repeat, final int count) {
    if (count <= 0) {
        return EMPTY;
    }
    final char[] buffer = new char[count];
    java.util.Arrays.fill(buffer, repeat);
    return new String(buffer);
}
|
Returns padding using the specified delimiter repeated to a given length.
<pre>
StringUtils.repeat('e', 0) = ""
StringUtils.repeat('e', 3) = "eee"
StringUtils.repeat('e', -2) = ""
</pre>
<p>
Note: this method does not support padding with <a href="https://www.unicode.org/glossary/#supplementary_character">Unicode Supplementary Characters</a>
as they require a pair of {@code char}s to be represented. If you are needing to support full I18N of your applications consider using
{@link #repeat(String, int)} instead.
</p>
@param repeat character to repeat.
@param count number of times to repeat char, negative treated as zero.
@return String with repeated character.
@see #repeat(String, int)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,041
|
[
"repeat",
"count"
] |
String
| true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getImplicitLowerBounds
|
/**
 * Gets the lower bounds of the given wildcard type, substituting a single
 * {@code null} element when the wildcard declares no explicit lower bound.
 * @param wildcardType the subject wildcard type, not {@code null}.
 * @return a non-empty array containing the lower bounds of the wildcard type,
 *         whose single element could be {@code null}.
 * @throws NullPointerException if {@code wildcardType} is {@code null}.
 */
public static Type[] getImplicitLowerBounds(final WildcardType wildcardType) {
    Objects.requireNonNull(wildcardType, "wildcardType");
    final Type[] declaredBounds = wildcardType.getLowerBounds();
    if (declaredBounds.length > 0) {
        return declaredBounds;
    }
    // No explicit lower bound: represent the implicit bound as {null}.
    return new Type[] { null };
}
|
Gets an array containing a single value of {@code null} if {@link WildcardType#getLowerBounds()} returns an empty array. Otherwise, it returns the result
of {@link WildcardType#getLowerBounds()}.
@param wildcardType the subject wildcard type, not {@code null}.
@return a non-empty array containing the lower bounds of the wildcard type, which could be null.
@throws NullPointerException if {@code wildcardType} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 664
|
[
"wildcardType"
] | true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
CONST
|
/**
 * Returns the provided value unchanged. This can prevent javac from inlining a constant field, e.g.,
 * <pre>
 *     public final static long MAGIC_LONG = ObjectUtils.CONST(123L);
 * </pre>
 * This way any jars that refer to this field do not have to recompile themselves if the field's
 * value changes at some future date.
 * @param v the long value to return.
 * @return the long v, unchanged.
 */
public static long CONST(final long v) {
    return v;
}
|
Returns the provided value unchanged. This can prevent javac from inlining a constant field, e.g.,
<pre>
public final static long MAGIC_LONG = ObjectUtils.CONST(123L);
</pre>
This way any jars that refer to this field do not have to recompile themselves if the field's value changes at some future date.
@param v the long value to return.
@return the long v, unchanged.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 437
|
[
"v"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toPromLikeExpr
|
/**
 * Render a label-based AbstractQuery as a PromQL-like selector string,
 * e.g. `{job="foo", instance="bar"}`. Matchers whose operator has no
 * PromQL equivalent are dropped; an empty matcher set yields ''.
 */
function toPromLikeExpr(labelBasedQuery: AbstractQuery): string {
  const parts: string[] = [];
  for (const selector of labelBasedQuery.labelMatchers) {
    const operator = ToPromLikeMap[selector.operator];
    if (operator) {
      parts.push(`${selector.name}${operator}"${selector.value}"`);
    }
  }
  const expr = parts.join(', ');
  return expr ? `{${expr}}` : '';
}
|
Adds metadata for synthetic metrics for which the API does not provide metadata.
See https://github.com/grafana/grafana/issues/22337 for details.
@param metadata HELP and TYPE metadata from /api/v1/metadata
|
typescript
|
packages/grafana-prometheus/src/language_utils.ts
| 259
|
[
"labelBasedQuery"
] | true
| 4
| 6.72
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
_padding_can_be_fused
|
def _padding_can_be_fused():
    """
    Conservatively check if padding can be fused with downstream op.

    1. if the downstream op is a sum, then there is little benefit to
       do inplace padding
    2. if the downstream op is a matmul, doing inplace padding can
       save membw.
    """
    current_node = V.graph.current_node
    if current_node is None:
        # No node context available; be conservative and assume fusion works.
        return True
    users = tuple(current_node.users)
    matmul_targets = (
        aten.mm.default,
        aten.addmm.default,
    )
    # A single matmul consumer means inplace padding saves memory bandwidth,
    # so report that fusion is NOT beneficial; otherwise stay conservative.
    sole_matmul_user = len(users) == 1 and users[0].target in matmul_targets
    return not sole_matmul_user
|
Conservatively check if padding can be fused with downstream op.
1. if the downstream op is a sum, then there is little benefit to
do inplace padding
2. if the downstream op is a matmul, doing inplace padding can
save membw.
|
python
|
torch/_inductor/lowering.py
| 4,467
|
[] | false
| 4
| 6.08
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
didNotFind
|
/**
 * Indicate that one or more results were not found. For example
 * {@code didNotFind("bean").items("x")} results in the message "did not find bean
 * x".
 * @param article the article that was not found; used as both the singular and
 *        the plural form by delegating to the two-argument variant
 * @return an {@link ItemsBuilder}
 */
public ItemsBuilder didNotFind(String article) {
    return didNotFind(article, article);
}
|
Indicate that one or more results were not found. For example
{@code didNotFind("bean").items("x")} results in the message "did not find bean
x".
@param article the article found
@return an {@link ItemsBuilder}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 249
|
[
"article"
] |
ItemsBuilder
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
offer
|
/**
 * Adds {@code elem} as a candidate for the top {@code k} elements. This operation takes amortized
 * O(1) time.
 */
public void offer(@ParametricNullness T elem) {
    if (k == 0) {
        // Selecting zero elements: nothing is ever retained.
        return;
    } else if (bufferSize == 0) {
        // First element seen becomes both the sole buffered element and the threshold.
        buffer[0] = elem;
        threshold = elem;
        bufferSize = 1;
    } else if (bufferSize < k) {
        // Fewer than k elements buffered: always keep, updating the threshold
        // when the new element compares greater.
        buffer[bufferSize++] = elem;
        // uncheckedCastNullableTToT is safe because bufferSize > 0.
        if (comparator.compare(elem, uncheckedCastNullableTToT(threshold)) > 0) {
            threshold = elem;
        }
        // uncheckedCastNullableTToT is safe because bufferSize > 0.
    } else if (comparator.compare(elem, uncheckedCastNullableTToT(threshold)) < 0) {
        // Otherwise, we can ignore elem; we've seen k better elements.
        buffer[bufferSize++] = elem;
        if (bufferSize == 2 * k) {
            // Buffer reached capacity 2k: compact back down to the best k.
            trim();
        }
    }
}
|
Adds {@code elem} as a candidate for the top {@code k} elements. This operation takes amortized
O(1) time.
|
java
|
android/guava/src/com/google/common/collect/TopKSelector.java
| 137
|
[
"elem"
] |
void
| true
| 7
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
compute_dict_like
|
def compute_dict_like(
    self,
    op_name: Literal["agg", "apply"],
    selected_obj: Series | DataFrame,
    selection: Hashable | Sequence[Hashable],
    kwargs: dict[str, Any],
) -> tuple[list[Hashable], list[Any]]:
    """
    Compute agg/apply results for dict-like input.

    Parameters
    ----------
    op_name : {"agg", "apply"}
        Operation being performed.
    selected_obj : Series or DataFrame
        Data to perform operation on.
    selection : hashable or sequence of hashables
        Used by GroupBy, Window, and Resample if selection is applied to the object.
    kwargs : dict
        Keyword arguments to pass to the functions.

    Returns
    -------
    keys : list[hashable]
        Index labels for result.
    results : list
        Data for result. When aggregating with a Series, this can contain any
        Python object.
    """
    from pandas.core.groupby.generic import (
        DataFrameGroupBy,
        SeriesGroupBy,
    )

    obj = self.obj
    is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy))
    func = cast(AggFuncTypeDict, self.func)
    # Canonicalize the user-supplied dict of functions before dispatching.
    func = self.normalize_dictlike_arg(op_name, selected_obj, func)

    # Duplicate column labels require positional (not label-based) handling.
    is_non_unique_col = (
        selected_obj.ndim == 2
        and selected_obj.columns.nunique() < len(selected_obj.columns)
    )

    if selected_obj.ndim == 1:
        # key only used for output
        colg = obj._gotitem(selection, ndim=1)
        results = [getattr(colg, op_name)(how, **kwargs) for _, how in func.items()]
        keys = list(func.keys())
    elif not is_groupby and is_non_unique_col:
        # key used for column selection and output
        # GH#51099
        results = []
        keys = []
        for key, how in func.items():
            indices = selected_obj.columns.get_indexer_for([key])
            labels = selected_obj.columns.take(indices)
            label_to_indices = defaultdict(list)
            for index, label in zip(indices, labels, strict=True):
                label_to_indices[label].append(index)

            # Apply the function positionally to every column carrying `key`.
            key_data = [
                getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs)
                for label, indices in label_to_indices.items()
                for indice in indices
            ]
            keys += [key] * len(key_data)
            results += key_data
    elif is_groupby:
        # key used for column selection and output
        df = selected_obj
        results, keys = [], []
        for key, how in func.items():
            cols = df[key]
            if cols.ndim == 1:
                series = obj._gotitem(key, ndim=1, subset=cols)
                results.append(getattr(series, op_name)(how, **kwargs))
                keys.append(key)
            else:
                # `key` selected multiple columns; apply per column.
                for _, col in cols.items():
                    series = obj._gotitem(key, ndim=1, subset=col)
                    results.append(getattr(series, op_name)(how, **kwargs))
                    keys.append(key)
    else:
        results = [
            getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs)
            for key, how in func.items()
        ]
        keys = list(func.keys())

    return keys, results
|
Compute agg/apply results for dict-like input.
Parameters
----------
op_name : {"agg", "apply"}
Operation being performed.
selected_obj : Series or DataFrame
Data to perform operation on.
selection : hashable or sequence of hashables
Used by GroupBy, Window, and Resample if selection is applied to the object.
kwargs : dict
Keyword arguments to pass to the functions.
Returns
-------
keys : list[hashable]
Index labels for result.
results : list
Data for result. When aggregating with a Series, this can contain any
Python object.
|
python
|
pandas/core/apply.py
| 513
|
[
"self",
"op_name",
"selected_obj",
"selection",
"kwargs"
] |
tuple[list[Hashable], list[Any]]
| true
| 13
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
createBindTarget
|
private static @Nullable Bindable<Object> createBindTarget(@Nullable Object bean, Class<?> beanType,
@Nullable Method factoryMethod) {
ResolvableType type = (factoryMethod != null) ? ResolvableType.forMethodReturnType(factoryMethod)
: ResolvableType.forClass(beanType);
Annotation[] annotations = findAnnotations(bean, beanType, factoryMethod);
return (annotations != null) ? Bindable.of(type).withAnnotations(annotations) : null;
}
|
Return a {@link ConfigurationPropertiesBean @ConfigurationPropertiesBean} instance
for the given bean details or {@code null} if the bean is not a
{@link ConfigurationProperties @ConfigurationProperties} object. Annotations are
considered both on the bean itself, as well as any factory method (for example a
{@link Bean @Bean} method).
@param applicationContext the source application context
@param bean the bean to consider
@param beanName the bean name
@return a configuration properties bean or {@code null} if the neither the bean nor
factory method are annotated with
{@link ConfigurationProperties @ConfigurationProperties}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 249
|
[
"bean",
"beanType",
"factoryMethod"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
flipNode
|
function flipNode(node: Node, size: number, orthogonalSize: number): Node {
if (node instanceof BranchNode) {
const result = new BranchNode(orthogonal(node.orientation), node.layoutController, node.styles, node.splitviewProportionalLayout, size, orthogonalSize, node.edgeSnapping);
let totalSize = 0;
for (let i = node.children.length - 1; i >= 0; i--) {
const child = node.children[i];
const childSize = child instanceof BranchNode ? child.orthogonalSize : child.size;
let newSize = node.size === 0 ? 0 : Math.round((size * childSize) / node.size);
totalSize += newSize;
// The last view to add should adjust to rounding errors
if (i === 0) {
newSize += size - totalSize;
}
result.addChild(flipNode(child, orthogonalSize, newSize), newSize, 0, true);
}
node.dispose();
return result;
} else {
const result = new LeafNode(node.view, orthogonal(node.orientation), node.layoutController, orthogonalSize);
node.dispose();
return result;
}
}
|
Creates a latched event that avoids being fired when the view
constraints do not change at all.
|
typescript
|
src/vs/base/browser/ui/grid/gridview.ts
| 950
|
[
"node",
"size",
"orthogonalSize"
] | true
| 7
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
_fpath_from_key
|
def _fpath_from_key(self, key: str) -> Path:
"""Generate a file path from a cache key.
Args:
key: The cache key to convert to a file path (must be str).
Returns:
A Path object representing the file location for this key.
"""
return self._cache_dir / key
|
Generate a file path from a cache key.
Args:
key: The cache key to convert to a file path (must be str).
Returns:
A Path object representing the file location for this key.
|
python
|
torch/_inductor/runtime/caching/implementations.py
| 211
|
[
"self",
"key"
] |
Path
| true
| 1
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
forTopicPartition
|
public static Optional<FetchSnapshotRequestData.PartitionSnapshot> forTopicPartition(
FetchSnapshotRequestData data,
TopicPartition topicPartition
) {
return data
.topics()
.stream()
.filter(topic -> topic.name().equals(topicPartition.topic()))
.flatMap(topic -> topic.partitions().stream())
.filter(partition -> partition.partition() == topicPartition.partition())
.findAny();
}
|
Finds the PartitionSnapshot for a given topic partition.
@param data the fetch snapshot request data
@param topicPartition the topic partition to find
@return the request partition snapshot if found, otherwise an empty Optional
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotRequest.java
| 57
|
[
"data",
"topicPartition"
] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
equals
|
public boolean equals(final StrBuilder other) {
if (this == other) {
return true;
}
if (other == null) {
return false;
}
if (this.size != other.size) {
return false;
}
final char[] thisBuf = this.buffer;
final char[] otherBuf = other.buffer;
for (int i = size - 1; i >= 0; i--) {
if (thisBuf[i] != otherBuf[i]) {
return false;
}
}
return true;
}
|
Checks the contents of this builder against another to see if they
contain the same character content.
@param other the object to check, null returns false
@return true if the builders contain the same characters in the same order
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,867
|
[
"other"
] | true
| 6
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
entrySet
|
@Override
public Set<Entry<K, V>> entrySet() {
return (entrySetView == null) ? entrySetView = createEntrySet() : entrySetView;
}
|
Updates the index an iterator is pointing to after a call to remove: returns the index of the
entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
index that *was* the next entry that would be looked at.
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 726
|
[] | true
| 2
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
create
|
static Archive create(ProtectionDomain protectionDomain) throws Exception {
CodeSource codeSource = protectionDomain.getCodeSource();
URI location = (codeSource != null) ? codeSource.getLocation().toURI() : null;
if (location == null) {
throw new IllegalStateException("Unable to determine code source archive");
}
return create(Path.of(location).toFile());
}
|
Factory method to create an appropriate {@link Archive} from the given
{@link Class} target.
@param target a target class that will be used to find the archive code source
@return an new {@link Archive} instance
@throws Exception if the archive cannot be created
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Archive.java
| 108
|
[
"protectionDomain"
] |
Archive
| true
| 3
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
format
|
@Deprecated
StringBuffer format(Calendar calendar, StringBuffer buf);
|
Formats a {@link Calendar} object into the supplied {@link StringBuffer}.
The TimeZone set on the Calendar is only used to adjust the time offset.
The TimeZone specified during the construction of the Parser will determine the TimeZone
used in the formatted string.
@param calendar the calendar to format.
@param buf the buffer to format into.
@return the specified string buffer.
@deprecated Use {{@link #format(Calendar, Appendable)}.
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 74
|
[
"calendar",
"buf"
] |
StringBuffer
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
info
|
public RecordsInfo info() {
if (timestampType == TimestampType.LOG_APPEND_TIME) {
if (compression.type() != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2)
// maxTimestamp => case 2
// shallowOffsetOfMaxTimestamp => case 2
return new RecordsInfo(logAppendTime, lastOffset);
else
// maxTimestamp => case 2
// shallowOffsetOfMaxTimestamp => case 3
return new RecordsInfo(logAppendTime, baseOffset);
} else if (maxTimestamp == RecordBatch.NO_TIMESTAMP) {
// maxTimestamp => case 1
// shallowOffsetOfMaxTimestamp => case 1
return new RecordsInfo(RecordBatch.NO_TIMESTAMP, -1);
} else {
if (compression.type() != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2)
// maxTimestamp => case 3
// shallowOffsetOfMaxTimestamp => case 4
return new RecordsInfo(maxTimestamp, lastOffset);
else
// maxTimestamp => case 3
// shallowOffsetOfMaxTimestamp => case 5
return new RecordsInfo(maxTimestamp, offsetOfMaxTimestamp);
}
}
|
There are three cases of finding max timestamp to return:
1) version 0: The max timestamp is NO_TIMESTAMP (-1)
2) LogAppendTime: All records have same timestamp, and so the max timestamp is equal to logAppendTime
3) CreateTime: The max timestamp of record
<p>
Let's talk about OffsetOfMaxTimestamp. There are some paths that we don't try to find the OffsetOfMaxTimestamp
to avoid expensive records iteration. Those paths include follower append and index recovery. In order to
avoid inconsistent time index, we let all paths find shallowOffsetOfMaxTimestamp instead of OffsetOfMaxTimestamp.
<p>
Let's define the shallowOffsetOfMaxTimestamp: It is last offset of the batch having max timestamp. If there are
many batches having same max timestamp, we pick up the earliest batch.
<p>
There are five cases of finding shallowOffsetOfMaxTimestamp to return:
1) version 0: It is always the -1
2) LogAppendTime with single batch: It is the offset of last record
3) LogAppendTime with many single-record batches: Those single-record batches have same max timestamp, so we return
the base offset, which is equal to the last offset of earliest batch
4) CreateTime with single batch: We return offset of last record to follow the spec we mentioned above. Of course,
we do have the OffsetOfMaxTimestamp for this case, but we want to make all paths
find the shallowOffsetOfMaxTimestamp rather than offsetOfMaxTimestamp
5) CreateTime with many single-record batches: Each batch is composed of single record, and hence offsetOfMaxTimestamp
is equal to the last offset of earliest batch with max timestamp
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 271
|
[] |
RecordsInfo
| true
| 7
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
getByteArrayBaseOffset
|
private static int getByteArrayBaseOffset() {
if (theUnsafe == null) {
return OFFSET_UNSAFE_APPROACH_IS_UNAVAILABLE;
}
try {
int offset = theUnsafe.arrayBaseOffset(byte[].class);
int scale = theUnsafe.arrayIndexScale(byte[].class);
// Use Unsafe only if we're in a 64-bit JVM with an 8-byte aligned field offset.
if (Objects.equals(System.getProperty("sun.arch.data.model"), "64")
&& (offset % 8) == 0
// sanity check - this should never fail
&& scale == 1) {
return offset;
}
return OFFSET_UNSAFE_APPROACH_IS_UNAVAILABLE;
} catch (UnsupportedOperationException e) {
return OFFSET_UNSAFE_APPROACH_IS_UNAVAILABLE;
}
}
|
The offset to the first element in a byte array, or {@link
#OFFSET_UNSAFE_APPROACH_IS_UNAVAILABLE}.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
| 349
|
[] | true
| 6
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
build_mime_message
|
def build_mime_message(
mail_from: str | None,
to: str | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
custom_headers: dict[str, Any] | None = None,
) -> tuple[MIMEMultipart, list[str]]:
"""
Build a MIME message that can be used to send an email and returns a full list of recipients.
:param mail_from: Email address to set as the email's "From" field.
:param to: A string or iterable of strings containing email addresses to set as the email's "To" field.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to be attached to the email.
:param cc: A string or iterable of strings containing email addresses to set as the email's "CC" field.
:param bcc: A string or iterable of strings containing email addresses to set as the email's "BCC" field.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param custom_headers: Additional headers to add to the MIME message. No validations are run on these
values, and they should be able to be encoded.
:return: A tuple containing the email as a MIMEMultipart object and a list of recipient email addresses.
"""
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg["Subject"] = subject
if mail_from:
msg["From"] = mail_from
msg["To"] = ", ".join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg["CC"] = ", ".join(cc)
recipients += cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients += bcc
msg["Date"] = formatdate(localtime=True)
mime_text = MIMEText(html_content, "html", mime_charset)
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as file:
part = MIMEApplication(file.read(), Name=basename)
part["Content-Disposition"] = f'attachment; filename="{basename}"'
part["Content-ID"] = f"<{basename}>"
msg.attach(part)
if custom_headers:
for header_key, header_value in custom_headers.items():
msg[header_key] = header_value
return msg, recipients
|
Build a MIME message that can be used to send an email and returns a full list of recipients.
:param mail_from: Email address to set as the email's "From" field.
:param to: A string or iterable of strings containing email addresses to set as the email's "To" field.
:param subject: The subject of the email.
:param html_content: The content of the email in HTML format.
:param files: A list of paths to files to be attached to the email.
:param cc: A string or iterable of strings containing email addresses to set as the email's "CC" field.
:param bcc: A string or iterable of strings containing email addresses to set as the email's "BCC" field.
:param mime_subtype: The subtype of the MIME message. Default: "mixed".
:param mime_charset: The charset of the email. Default: "utf-8".
:param custom_headers: Additional headers to add to the MIME message. No validations are run on these
values, and they should be able to be encoded.
:return: A tuple containing the email as a MIMEMultipart object and a list of recipient email addresses.
|
python
|
airflow-core/src/airflow/utils/email.py
| 157
|
[
"mail_from",
"to",
"subject",
"html_content",
"files",
"cc",
"bcc",
"mime_subtype",
"mime_charset",
"custom_headers"
] |
tuple[MIMEMultipart, list[str]]
| true
| 8
| 8.16
|
apache/airflow
| 43,597
|
sphinx
| false
|
handleListOffsetResponse
|
private void handleListOffsetResponse(ListOffsetsResponse listOffsetsResponse,
RequestFuture<ListOffsetResult> future) {
try {
ListOffsetResult result = offsetFetcherUtils.handleListOffsetResponse(listOffsetsResponse);
future.complete(result);
} catch (RuntimeException e) {
future.raise(e);
}
}
|
Callback for the response of the list offset call above.
@param listOffsetsResponse The response from the server.
@param future The future to be completed when the response returns. Note that any partition-level errors will
generally fail the entire future result. The one exception is UNSUPPORTED_FOR_MESSAGE_FORMAT,
which indicates that the broker does not support the v1 message format. Partitions with this
particular error are simply left out of the future map. Note that the corresponding timestamp
value of each partition may be null only for v0. In v1 and later the ListOffset API would not
return a null timestamp (-1 is returned instead when necessary).
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcher.java
| 424
|
[
"listOffsetsResponse",
"future"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
create
|
def create(value: Any, **kwargs: Any) -> VariableTracker:
"""
Create a `ConstantVariable` based on the given value, and supports
automatic routing for collection types like `tuple` (in which case we'd
create `ConstantVariable` for the leaf items).
NOTE: the caller must install the proper guards if needed; most often
the guard will be `CONSTANT_MATCH`.
"""
source = kwargs.get("source")
# Routing for supported collection literals.
if isinstance(value, set):
items = [ConstantVariable.create(x) for x in value]
return variables.SetVariable(items, **kwargs) # type: ignore[arg-type]
elif isinstance(value, frozenset):
items = [ConstantVariable.create(x) for x in value]
return variables.FrozensetVariable(items, **kwargs) # type: ignore[arg-type]
elif isinstance(value, slice):
slice_args = (value.start, value.stop, value.step)
slice_args_vars = tuple(ConstantVariable.create(arg) for arg in slice_args)
return variables.SliceVariable(slice_args_vars, **kwargs)
elif isinstance(value, (list, tuple)):
items = []
for i, x in enumerate(value):
item_source = GetItemSource(source, i) if source else None
items.append(
ConstantVariable.create(
x,
source=item_source,
)
)
return variables.BaseListVariable.cls_for(type(value))(items, **kwargs)
return ConstantVariable(value, **kwargs)
|
Create a `ConstantVariable` based on the given value, and supports
automatic routing for collection types like `tuple` (in which case we'd
create `ConstantVariable` for the leaf items).
NOTE: the caller must install the proper guards if needed; most often
the guard will be `CONSTANT_MATCH`.
|
python
|
torch/_dynamo/variables/constant.py
| 56
|
[
"value"
] |
VariableTracker
| true
| 7
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
update_min_airflow_version_and_build_files
|
def update_min_airflow_version_and_build_files(
provider_id: str, with_breaking_changes: bool, maybe_with_new_features: bool, skip_readme: bool
):
"""Updates min airflow version in provider yaml and __init__.py
:param provider_id: provider package id
:param with_breaking_changes: whether there are any breaking changes
:param maybe_with_new_features: whether there are any new features
:param skip_readme: skip updating readme: skip_readme
:return:
"""
provider_details = get_provider_details(provider_id)
if provider_details.removed:
return
jinja_context = get_provider_documentation_jinja_context(
provider_id=provider_id,
with_breaking_changes=with_breaking_changes,
maybe_with_new_features=maybe_with_new_features,
)
_generate_build_files_for_provider(
context=jinja_context,
provider_details=provider_details,
skip_readme=skip_readme,
)
_replace_min_airflow_version_in_provider_yaml(
context=jinja_context, provider_yaml_path=provider_details.provider_yaml_path
)
|
Updates min airflow version in provider yaml and __init__.py
:param provider_id: provider package id
:param with_breaking_changes: whether there are any breaking changes
:param maybe_with_new_features: whether there are any new features
:param skip_readme: skip updating readme: skip_readme
:return:
|
python
|
dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py
| 1,303
|
[
"provider_id",
"with_breaking_changes",
"maybe_with_new_features",
"skip_readme"
] | true
| 2
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
getResolvableType
|
public ResolvableType getResolvableType() {
ResolvableType resolvableType = this.resolvableType;
if (resolvableType == null) {
resolvableType = (this.field != null ?
ResolvableType.forField(this.field, this.nestingLevel, this.containingClass) :
ResolvableType.forMethodParameter(obtainMethodParameter()));
this.resolvableType = resolvableType;
}
return resolvableType;
}
|
Build a {@link ResolvableType} object for the wrapped parameter/field.
@since 4.0
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/DependencyDescriptor.java
| 267
|
[] |
ResolvableType
| true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
skipToListStart
|
private static void skipToListStart(XContentParser parser) throws IOException {
Token token = parser.currentToken();
if (token == null) {
token = parser.nextToken();
}
if (token == XContentParser.Token.FIELD_NAME) {
token = parser.nextToken();
}
if (token != XContentParser.Token.START_ARRAY) {
throw new XContentParseException(
parser.getTokenLocation(),
"Failed to parse list: expecting " + XContentParser.Token.START_ARRAY + " but got " + token
);
}
}
|
Checks if the next current token in the supplied parser is a map start for a non-empty map.
Skips to the next token if the parser does not yet have a current token (i.e. {@link #currentToken()} returns {@code null}) and then
checks it.
@return the first key in the map if a non-empty map start is found
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
| 382
|
[
"parser"
] |
void
| true
| 4
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
afterPropertiesSet
|
@Override
public void afterPropertiesSet() {
if (isSingleton()) {
this.properties = createProperties();
}
}
|
Set if a singleton should be created, or a new object on each request
otherwise. Default is {@code true} (a singleton).
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/YamlPropertiesFactoryBean.java
| 105
|
[] |
void
| true
| 2
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
afterPropertiesSet
|
@Override
@SuppressWarnings("NullAway") // Dataflow analysis limitation
public void afterPropertiesSet() throws ClassNotFoundException, NoSuchFieldException {
if (this.targetClass != null && this.targetObject != null) {
throw new IllegalArgumentException("Specify either targetClass or targetObject, not both");
}
if (this.targetClass == null && this.targetObject == null) {
if (this.targetField != null) {
throw new IllegalArgumentException(
"Specify targetClass or targetObject in combination with targetField");
}
// If no other property specified, consider bean name as static field expression.
if (this.staticField == null) {
this.staticField = this.beanName;
Assert.state(this.staticField != null, "No target field specified");
}
// Try to parse static field into class and field.
int lastDotIndex = this.staticField.lastIndexOf('.');
if (lastDotIndex == -1 || lastDotIndex == this.staticField.length()) {
throw new IllegalArgumentException(
"staticField must be a fully qualified class plus static field name: " +
"for example, 'example.MyExampleClass.MY_EXAMPLE_FIELD'");
}
String className = this.staticField.substring(0, lastDotIndex);
String fieldName = this.staticField.substring(lastDotIndex + 1);
this.targetClass = ClassUtils.forName(className, this.beanClassLoader);
this.targetField = fieldName;
}
else if (this.targetField == null) {
// Either targetClass or targetObject specified.
throw new IllegalArgumentException("targetField is required");
}
// Try to get the exact method first.
Class<?> targetClass = (this.targetObject != null ? this.targetObject.getClass() : this.targetClass);
this.fieldObject = targetClass.getField(this.targetField);
}
|
The bean name of this FieldRetrievingFactoryBean will be interpreted
as "staticField" pattern, if neither "targetClass" nor "targetObject"
nor "targetField" have been specified.
This allows for concise bean definitions with just an id/name.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/FieldRetrievingFactoryBean.java
| 160
|
[] |
void
| true
| 11
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fromHost
|
public static HostAndPort fromHost(String host) {
HostAndPort parsedHost = fromString(host);
checkArgument(!parsedHost.hasPort(), "Host has a port: %s", host);
return parsedHost;
}
|
Build a HostAndPort instance from a host only.
<p>Note: Non-bracketed IPv6 literals are allowed. Use {@link #requireBracketsForIPv6()} to
prohibit these.
@param host the host-only string to parse. Must not contain a port number.
@return if parsing was successful, a populated HostAndPort object.
@throws IllegalArgumentException if {@code host} contains a port number.
@since 17.0
|
java
|
android/guava/src/com/google/common/net/HostAndPort.java
| 151
|
[
"host"
] |
HostAndPort
| true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
normalize
|
def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : {0, 1}, default=1
Define axis used to normalize the data along. If 1, independently
normalize each sample, otherwise (if 0) normalize each feature.
copy : bool, default=True
If False, try to avoid a copy and normalize in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an int dtype, a copy will be returned even with
copy=False.
return_norm : bool, default=False
Whether to return the computed norms.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Normalized input X.
norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See Also
--------
Normalizer : Performs normalization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.
Examples
--------
>>> from sklearn.preprocessing import normalize
>>> X = [[-2, 1, 2], [-1, 0, 1]]
>>> normalize(X, norm="l1") # L1 normalization each row independently
array([[-0.4, 0.2, 0.4],
[-0.5, 0. , 0.5]])
>>> normalize(X, norm="l2") # L2 normalization each row independently
array([[-0.67, 0.33, 0.67],
[-0.71, 0. , 0.71]])
"""
if axis == 0:
sparse_format = "csc"
else: # axis == 1:
sparse_format = "csr"
xp, _ = get_namespace(X)
X = check_array(
X,
accept_sparse=sparse_format,
copy=copy,
estimator="the normalize function",
dtype=_array_api.supported_float_dtypes(xp),
force_writeable=True,
)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
inplace_csr_row_normalize_l1(X)
elif norm == "l2":
inplace_csr_row_normalize_l2(X)
elif norm == "max":
mins, maxes = min_max_axis(X, 1)
norms = np.maximum(abs(mins), maxes)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == "l1":
norms = xp.sum(xp.abs(X), axis=1)
elif norm == "l2":
norms = row_norms(X)
elif norm == "max":
norms = xp.max(xp.abs(X), axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, None]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
|
Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : {'l1', 'l2', 'max'}, default='l2'
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : {0, 1}, default=1
Define axis used to normalize the data along. If 1, independently
normalize each sample, otherwise (if 0) normalize each feature.
copy : bool, default=True
If False, try to avoid a copy and normalize in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an int dtype, a copy will be returned even with
copy=False.
return_norm : bool, default=False
Whether to return the computed norms.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Normalized input X.
norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See Also
--------
Normalizer : Performs normalization using the Transformer API
(e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.
Examples
--------
>>> from sklearn.preprocessing import normalize
>>> X = [[-2, 1, 2], [-1, 0, 1]]
>>> normalize(X, norm="l1") # L1 normalization each row independently
array([[-0.4, 0.2, 0.4],
[-0.5, 0. , 0.5]])
>>> normalize(X, norm="l2") # L2 normalization each row independently
array([[-0.67, 0.33, 0.67],
[-0.71, 0. , 0.71]])
|
python
|
sklearn/preprocessing/_data.py
| 1,961
|
[
"X",
"norm",
"axis",
"copy",
"return_norm"
] | false
| 17
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
random
|
private static ThreadLocalRandom random() {
return ThreadLocalRandom.current();
}
|
Gets the {@link ThreadLocalRandom} for {@code shuffle} methods that don't take a {@link Random} argument.
@return the current ThreadLocalRandom.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,655
|
[] |
ThreadLocalRandom
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getObjectForBeanInstance
|
protected Object getObjectForBeanInstance(Object beanInstance, @Nullable Class<?> requiredType,
String name, String beanName, @Nullable RootBeanDefinition mbd) {
// Don't let calling code try to dereference the factory if the bean isn't a factory.
if (BeanFactoryUtils.isFactoryDereference(name)) {
if (beanInstance instanceof NullBean) {
return beanInstance;
}
if (!(beanInstance instanceof FactoryBean)) {
throw new BeanIsNotAFactoryException(beanName, beanInstance.getClass());
}
if (mbd != null) {
mbd.isFactoryBean = true;
}
return beanInstance;
}
// Now we have the bean instance, which may be a normal bean or a FactoryBean.
// If it's a FactoryBean, we use it to create a bean instance, unless the
// caller actually wants a reference to the factory.
if (!(beanInstance instanceof FactoryBean<?> factoryBean)) {
return beanInstance;
}
Object object = null;
if (mbd != null) {
mbd.isFactoryBean = true;
}
else {
object = getCachedObjectForFactoryBean(beanName);
}
if (object == null) {
// Return bean instance from factory.
// Caches object obtained from FactoryBean if it is a singleton.
if (mbd == null && containsBeanDefinition(beanName)) {
mbd = getMergedLocalBeanDefinition(beanName);
}
boolean synthetic = (mbd != null && mbd.isSynthetic());
object = getObjectFromFactoryBean(factoryBean, requiredType, beanName, !synthetic);
}
return object;
}
|
Get the object for the given bean instance, either the bean
instance itself or its created object in case of a FactoryBean.
@param beanInstance the shared bean instance
@param name the name that may include factory dereference prefix
@param beanName the canonical bean name
@param mbd the merged bean definition
@return the object to expose for the bean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,841
|
[
"beanInstance",
"requiredType",
"name",
"beanName",
"mbd"
] |
Object
| true
| 11
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
repeat
|
function repeat(string, n, guard) {
if ((guard ? isIterateeCall(string, n, guard) : n === undefined)) {
n = 1;
} else {
n = toInteger(n);
}
return baseRepeat(toString(string), n);
}
|
Repeats the given string `n` times.
@static
@memberOf _
@since 3.0.0
@category String
@param {string} [string=''] The string to repeat.
@param {number} [n=1] The number of times to repeat the string.
@param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
@returns {string} Returns the repeated string.
@example
_.repeat('*', 3);
// => '***'
_.repeat('abc', 2);
// => 'abcabc'
_.repeat('abc', 0);
// => ''
|
javascript
|
lodash.js
| 14,615
|
[
"string",
"n",
"guard"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("Request{");
b.append("method='").append(method).append('\'');
b.append(", endpoint='").append(endpoint).append('\'');
if (false == parameters.isEmpty()) {
b.append(", params=").append(parameters);
}
if (entity != null) {
b.append(", entity=").append(entity);
}
b.append(", options=").append(options);
return b.append('}').toString();
}
|
Get the portion of an HTTP request to Elasticsearch that can be
manipulated without changing Elasticsearch's behavior.
|
java
|
client/rest/src/main/java/org/elasticsearch/client/Request.java
| 150
|
[] |
String
| true
| 3
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
getFunctionDeclarationAtPosition
|
function getFunctionDeclarationAtPosition(file: SourceFile, startPosition: number, checker: TypeChecker): ValidFunctionDeclaration | undefined {
const node = getTouchingToken(file, startPosition);
const functionDeclaration = getContainingFunctionDeclaration(node);
// don't offer refactor on top-level JSDoc
if (isTopLevelJSDoc(node)) return undefined;
if (
functionDeclaration
&& isValidFunctionDeclaration(functionDeclaration, checker)
&& rangeContainsRange(functionDeclaration, node)
&& !(functionDeclaration.body && rangeContainsRange(functionDeclaration.body, node))
) return functionDeclaration;
return undefined;
}
|
Gets the symbol for the contextual type of the node if it is not a union or intersection.
|
typescript
|
src/services/refactors/convertParamsToDestructuredObject.ts
| 435
|
[
"file",
"startPosition",
"checker"
] | true
| 7
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
with
|
public Options with(Option option) {
return copy((options) -> options.add(option));
}
|
Create a new {@link Options} instance that contains the options in this set
including the given option.
@param option the option to include
@return a new {@link Options} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigData.java
| 239
|
[
"option"
] |
Options
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
clusterResource
|
ClusterResource clusterResource() {
return new ClusterResource(clusterId);
}
|
Get leader-epoch for partition.
@param tp partition
@return leader-epoch if known, else return OptionalInt.empty()
|
java
|
clients/src/main/java/org/apache/kafka/clients/MetadataSnapshot.java
| 143
|
[] |
ClusterResource
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
_mini_batch_step
|
def _mini_batch_step(
X,
sample_weight,
centers,
centers_new,
weight_sums,
random_state,
random_reassign=False,
reassignment_ratio=0.01,
verbose=False,
n_threads=1,
):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
"""
# Perform label assignment to nearest centers
# For better efficiency, it's better to run _mini_batch_step in a
# threadpool_limit context than using _labels_inertia_threadpool_limit here
labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads)
# Update centers according to the labels
if sp.issparse(X):
_minibatch_update_sparse(
X, sample_weight, centers, centers_new, weight_sums, labels, n_threads
)
else:
_minibatch_update_dense(
X,
sample_weight,
centers,
centers_new,
weight_sums,
labels,
n_threads,
)
# Reassign clusters that have very low weight
if random_reassign and reassignment_ratio > 0:
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > 0.5 * X.shape[0]:
indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(
X.shape[0], replace=False, size=n_reassigns
)
if verbose:
print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.")
if sp.issparse(X):
assign_rows_csr(
X,
new_centers.astype(np.intp, copy=False),
np.where(to_reassign)[0].astype(np.intp, copy=False),
centers_new,
)
else:
centers_new[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
return inertia
|
Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
|
python
|
sklearn/cluster/_kmeans.py
| 1,563
|
[
"X",
"sample_weight",
"centers",
"centers_new",
"weight_sums",
"random_state",
"random_reassign",
"reassignment_ratio",
"verbose",
"n_threads"
] | false
| 10
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
from_arrays
|
def from_arrays(
cls,
left,
right,
closed: IntervalClosedType | None = "right",
copy: bool = False,
dtype: Dtype | None = None,
) -> Self:
"""
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
Returns
-------
IntervalArray
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
IntervalArray.from_breaks : Construct an IntervalArray from an array of
splits.
IntervalArray.from_tuples : Construct an IntervalArray from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
Examples
--------
>>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
"""
left = _maybe_convert_platform_interval(left)
right = _maybe_convert_platform_interval(right)
left, right, dtype = cls._ensure_simple_new_inputs(
left,
right,
closed=closed,
copy=copy,
dtype=dtype,
)
cls._validate(left, right, dtype=dtype)
return cls._simple_new(left, right, dtype=dtype)
|
Construct from two arrays defining the left and right bounds.
Parameters
----------
left : array-like (1-dimensional)
Left bounds for each interval.
right : array-like (1-dimensional)
Right bounds for each interval.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
copy : bool, default False
Copy the data.
dtype : dtype, optional
If None, dtype will be inferred.
Returns
-------
IntervalArray
Raises
------
ValueError
When a value is missing in only one of `left` or `right`.
When a value in `left` is greater than the corresponding value
in `right`.
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
IntervalArray.from_breaks : Construct an IntervalArray from an array of
splits.
IntervalArray.from_tuples : Construct an IntervalArray from an
array-like of tuples.
Notes
-----
Each element of `left` must be less than or equal to the `right`
element at the same position. If an element is missing, it must be
missing in both `left` and `right`. A TypeError is raised when
using an unsupported type for `left` or `right`. At the moment,
'category', 'object', and 'string' subtypes are not supported.
Examples
--------
>>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
|
python
|
pandas/core/arrays/interval.py
| 581
|
[
"cls",
"left",
"right",
"closed",
"copy",
"dtype"
] |
Self
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
writeBuckets
|
private static void writeBuckets(XContentBuilder b, String fieldName, ExponentialHistogram.Buckets buckets) throws IOException {
if (buckets.iterator().hasNext() == false) {
return;
}
b.startObject(fieldName);
BucketIterator it = buckets.iterator();
b.startArray(BUCKET_INDICES_FIELD);
while (it.hasNext()) {
b.value(it.peekIndex());
it.advance();
}
b.endArray();
it = buckets.iterator();
b.startArray(BUCKET_COUNTS_FIELD);
while (it.hasNext()) {
b.value(it.peekCount());
it.advance();
}
b.endArray();
b.endObject();
}
|
Serializes an {@link ExponentialHistogram} to the provided {@link XContentBuilder}.
@param builder the XContentBuilder to write to
@param histogram the ExponentialHistogram to serialize
@throws IOException if the XContentBuilder throws an IOException
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramXContent.java
| 96
|
[
"b",
"fieldName",
"buckets"
] |
void
| true
| 4
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
equals
|
@Override
public boolean equals(final Object obj) {
if (obj instanceof MutableInt) {
return value == ((MutableInt) obj).intValue();
}
return false;
}
|
Compares this object to the specified object. The result is {@code true} if and only if the argument is
not {@code null} and is a {@link MutableInt} object that contains the same {@code int} value
as this object.
@param obj the object to compare with, null returns false.
@return {@code true} if the objects are the same; {@code false} otherwise.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableInt.java
| 180
|
[
"obj"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
of
|
static PemSslStore of(@Nullable String type, List<X509Certificate> certificates, @Nullable PrivateKey privateKey) {
return of(type, null, null, certificates, privateKey);
}
|
Factory method that can be used to create a new {@link PemSslStore} with the given
values.
@param type the key store type
@param certificates the certificates for this store
@param privateKey the private key
@return a new {@link PemSslStore} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
| 130
|
[
"type",
"certificates",
"privateKey"
] |
PemSslStore
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
isDotOfNumericLiteral
|
function isDotOfNumericLiteral(contextToken: Node): boolean {
if (contextToken.kind === SyntaxKind.NumericLiteral) {
const text = contextToken.getFullText();
return text.charAt(text.length - 1) === ".";
}
return false;
}
|
@returns true if we are certain that the currently edited location must define a new location; false otherwise.
|
typescript
|
src/services/completions.ts
| 5,105
|
[
"contextToken"
] | true
| 2
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
fit
|
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
if self.fit_inverse_transform and self.kernel == "precomputed":
raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
X = validate_data(self, X, accept_sparse="csr", copy=self.copy_X)
self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
self._centerer = KernelCenterer().set_output(transform="default")
K = self._get_kernel(X)
# When kernel="precomputed", K is X but it's safe to perform in place operations
# on K because a copy was made before if requested by copy_X.
self._fit_transform_in_place(K)
if self.fit_inverse_transform:
# no need to use the kernel to transform X, use shortcut expression
X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
self._fit_inverse_transform(X_transformed, X)
self.X_fit_ = X
return self
|
Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
python
|
sklearn/decomposition/_kernel_pca.py
| 419
|
[
"self",
"X",
"y"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
validate_ordered
|
def validate_ordered(ordered: Ordered) -> None:
"""
Validates that we have a valid ordered parameter. If
it is not a boolean, a TypeError will be raised.
Parameters
----------
ordered : object
The parameter to be verified.
Raises
------
TypeError
If 'ordered' is not a boolean.
"""
if not is_bool(ordered):
raise TypeError("'ordered' must either be 'True' or 'False'")
|
Validates that we have a valid ordered parameter. If
it is not a boolean, a TypeError will be raised.
Parameters
----------
ordered : object
The parameter to be verified.
Raises
------
TypeError
If 'ordered' is not a boolean.
|
python
|
pandas/core/dtypes/dtypes.py
| 544
|
[
"ordered"
] |
None
| true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_variable
|
def get_variable(self, key: str, team_name: str | None = None) -> str | None:
"""
Get Airflow Variable from Environment Variable.
:param key: Variable Key
:param team_name: Team name associated to the task trying to access the variable (if any)
:return: Variable Value
"""
if team_name and (
team_var := os.environ.get(f"{VAR_ENV_PREFIX}_{team_name.upper()}___" + key.upper())
):
# Format to set a team specific variable: AIRFLOW_VAR__<TEAM_ID>___<VAR_KEY>
return team_var
return os.environ.get(VAR_ENV_PREFIX + key.upper())
|
Get Airflow Variable from Environment Variable.
:param key: Variable Key
:param team_name: Team name associated to the task trying to access the variable (if any)
:return: Variable Value
|
python
|
airflow-core/src/airflow/secrets/environment_variables.py
| 36
|
[
"self",
"key",
"team_name"
] |
str | None
| true
| 3
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
falsePredicate
|
@SuppressWarnings("unchecked")
static <T, U, E extends Throwable> FailableBiPredicate<T, U, E> falsePredicate() {
return FALSE;
}
|
Gets the FALSE singleton.
@param <T> Consumed type 1.
@param <U> Consumed type 2.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableBiPredicate.java
| 50
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
instance
|
public Struct instance(String field) {
return instance(schema.get(field));
}
|
Create a struct instance for the given field which must be a container type (struct or array)
@param field The name of the field to create (field must be a schema type)
@return The struct
@throws SchemaException If the given field is not a container type
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/Struct.java
| 182
|
[
"field"
] |
Struct
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
maybeBindExpressionFlowIfCall
|
function maybeBindExpressionFlowIfCall(node: Expression) {
// A top level or comma expression call expression with a dotted function name and at least one argument
// is potentially an assertion and is therefore included in the control flow.
if (node.kind === SyntaxKind.CallExpression) {
const call = node as CallExpression;
if (call.expression.kind !== SyntaxKind.SuperKeyword && isDottedName(call.expression)) {
currentFlow = createFlowCall(currentFlow, call);
}
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 1,778
|
[
"node"
] | false
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
visitFunctionExpression
|
function visitFunctionExpression(node: FunctionExpression): Expression {
let parameters: NodeArray<ParameterDeclaration>;
const savedLexicalArgumentsBinding = lexicalArgumentsBinding;
lexicalArgumentsBinding = undefined;
const functionFlags = getFunctionFlags(node);
const updated = factory.updateFunctionExpression(
node,
visitNodes(node.modifiers, visitor, isModifier),
node.asteriskToken,
node.name,
/*typeParameters*/ undefined,
parameters = functionFlags & FunctionFlags.Async ?
transformAsyncFunctionParameterList(node) :
visitParameterList(node.parameters, visitor, context),
/*type*/ undefined,
functionFlags & FunctionFlags.Async ?
transformAsyncFunctionBody(node, parameters) :
visitFunctionBody(node.body, visitor, context),
);
lexicalArgumentsBinding = savedLexicalArgumentsBinding;
return updated;
}
|
Visits a FunctionExpression node.
This function will be called when one of the following conditions are met:
- The node is marked async
@param node The node to visit.
|
typescript
|
src/compiler/transformers/es2017.ts
| 524
|
[
"node"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
describeFeatures
|
DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options);
|
Describes finalized as well as supported features. The request is issued to any random
broker.
<p>
The following exceptions can be anticipated when calling {@code get()} on the future from the
returned {@link DescribeFeaturesResult}:
<ul>
<li>{@link org.apache.kafka.common.errors.TimeoutException}
If the request timed out before the describe operation could finish.</li>
</ul>
<p>
@param options the options to use
@return the {@link DescribeFeaturesResult} containing the result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,542
|
[
"options"
] |
DescribeFeaturesResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
setNullText
|
public StrBuilder setNullText(String nullText) {
if (StringUtils.isEmpty(nullText)) {
nullText = null;
}
this.nullText = nullText;
return this;
}
|
Sets the text to be appended when null is added.
@param nullText the null text, null means no append
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,834
|
[
"nullText"
] |
StrBuilder
| true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
falsePredicate
|
@SuppressWarnings("unchecked")
static <T, E extends Throwable> FailablePredicate<T, E> falsePredicate() {
return FALSE;
}
|
Gets the FALSE singleton.
@param <T> Predicate type.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailablePredicate.java
| 48
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
hash
|
@Deprecated
@InlineMe(
replacement = "Files.asByteSource(file).hash(hashFunction)",
imports = "com.google.common.io.Files")
public
static HashCode hash(File file, HashFunction hashFunction) throws IOException {
return asByteSource(file).hash(hashFunction);
}
|
Computes the hash code of the {@code file} using {@code hashFunction}.
@param file the file to read
@param hashFunction the hash function to use to hash the data
@return the {@link HashCode} of all of the bytes in the file
@throws IOException if an I/O error occurs
@since 12.0
@deprecated Prefer {@code asByteSource(file).hash(hashFunction)}.
|
java
|
android/guava/src/com/google/common/io/Files.java
| 621
|
[
"file",
"hashFunction"
] |
HashCode
| true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
write
|
def write(self, message):
"""
Do whatever it takes to actually log the specified logging record.
:param message: message to log
"""
if message.endswith("\n"):
message = message.rstrip()
self._buffer += message
self.flush()
else:
self._buffer += message
return len(message)
|
Do whatever it takes to actually log the specified logging record.
:param message: message to log
|
python
|
airflow-core/src/airflow/utils/log/logging_mixin.py
| 213
|
[
"self",
"message"
] | false
| 3
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
createErrorMessage
|
private static String createErrorMessage(RegisteredBean registeredBean, String msg) {
StringBuilder sb = new StringBuilder("Error processing bean with name '");
sb.append(registeredBean.getBeanName()).append("'");
String resourceDescription = registeredBean.getMergedBeanDefinition().getResourceDescription();
if (resourceDescription != null) {
sb.append(" defined in ").append(resourceDescription);
}
sb.append(": ").append(msg);
return sb.toString();
}
|
Shortcut to create an instance with the {@link RegisteredBean} that fails
to be processed with only a detail message.
@param registeredBean the registered bean that fails to be processed
@param msg the detail message
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotBeanProcessingException.java
| 57
|
[
"registeredBean",
"msg"
] |
String
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
union
|
def union(self, other) -> FrozenList:
"""
Returns a FrozenList with other concatenated to the end of self.
Parameters
----------
other : array-like
The array-like whose elements we are concatenating.
Returns
-------
FrozenList
The collection difference between self and other.
"""
if isinstance(other, tuple):
other = list(other)
return type(self)(super().__add__(other))
|
Returns a FrozenList with other concatenated to the end of self.
Parameters
----------
other : array-like
The array-like whose elements we are concatenating.
Returns
-------
FrozenList
The collection difference between self and other.
|
python
|
pandas/core/indexes/frozen.py
| 35
|
[
"self",
"other"
] |
FrozenList
| true
| 2
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
leaveGroup
|
protected CompletableFuture<Void> leaveGroup(boolean runCallbacks) {
if (isNotInGroup()) {
if (state == MemberState.FENCED) {
clearAssignment();
transitionTo(MemberState.UNSUBSCRIBED);
}
subscriptions.unsubscribe();
notifyAssignmentChange(Collections.emptySet());
return CompletableFuture.completedFuture(null);
}
if (state == MemberState.PREPARE_LEAVING || state == MemberState.LEAVING) {
// Member already leaving. No-op and return existing leave group future that will
// complete when the ongoing leave operation completes.
log.debug("Leave group operation already in progress for member {}", memberId);
return leaveGroupInProgress.get();
}
transitionTo(MemberState.PREPARE_LEAVING);
CompletableFuture<Void> leaveResult = new CompletableFuture<>();
leaveGroupInProgress = Optional.of(leaveResult);
if (runCallbacks) {
CompletableFuture<Void> callbackResult = signalMemberLeavingGroup();
callbackResult.whenComplete((result, error) -> {
if (error != null) {
log.error("Member {} callback to release assignment failed. It will proceed " +
"to clear its assignment and send a leave group heartbeat", memberId, error);
} else {
log.info("Member {} completed callback to release assignment. It will proceed " +
"to clear its assignment and send a leave group heartbeat", memberId);
}
// Clear the assignment, no matter if the callback execution failed or succeeded.
clearAssignmentAndLeaveGroup();
});
} else {
log.debug("Member {} attempting to leave has no rebalance callbacks, " +
"so it will clear assignments and transition to send heartbeat to leave group.", memberId);
clearAssignmentAndLeaveGroup();
}
// Return future to indicate that the leave group is done when the callbacks
// complete, and the transition to send the heartbeat has been made.
return leaveResult;
}
|
Transition to {@link MemberState#PREPARE_LEAVING} to release the assignment. Once completed,
transition to {@link MemberState#LEAVING} to send the heartbeat request and leave the group.
This is expected to be invoked when the user calls the unsubscribe API or is closing the consumer.
@param runCallbacks {@code true} to insert the step to execute the {@link ConsumerRebalanceListener} callback,
{@code false} to skip
@return Future that will complete when the callback execution completes and the heartbeat
to leave the group has been sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 580
|
[
"runCallbacks"
] | true
| 7
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isClientAbortException
|
private boolean isClientAbortException(@Nullable Throwable ex) {
if (ex == null) {
return false;
}
for (Class<?> candidate : CLIENT_ABORT_EXCEPTIONS) {
if (candidate.isInstance(ex)) {
return true;
}
}
return isClientAbortException(ex.getCause());
}
|
Return the description for the given request. By default this method will return a
description based on the request {@code servletPath} and {@code pathInfo}.
@param request the source request
@return the description
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/support/ErrorPageFilter.java
| 226
|
[
"ex"
] | true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
is_dict_like
|
def is_dict_like(obj: object) -> bool:
"""
Check if the object is dict-like.
Parameters
----------
obj : object
The object to check. This can be any Python object,
and the function will determine whether it
behaves like a dictionary.
Returns
-------
bool
Whether `obj` has dict-like properties.
See Also
--------
api.types.is_list_like : Check if the object is list-like.
api.types.is_file_like : Check if the object is a file-like.
api.types.is_named_tuple : Check if the object is a named tuple.
Examples
--------
>>> from pandas.api.types import is_dict_like
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (
all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type)
)
|
Check if the object is dict-like.
Parameters
----------
obj : object
The object to check. This can be any Python object,
and the function will determine whether it
behaves like a dictionary.
Returns
-------
bool
Whether `obj` has dict-like properties.
See Also
--------
api.types.is_list_like : Check if the object is list-like.
api.types.is_file_like : Check if the object is a file-like.
api.types.is_named_tuple : Check if the object is a named tuple.
Examples
--------
>>> from pandas.api.types import is_dict_like
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
|
python
|
pandas/core/dtypes/inference.py
| 307
|
[
"obj"
] |
bool
| true
| 2
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
tryAddBucket
|
boolean tryAddBucket(long index, long count) {
int slot = startSlot() + numBuckets;
assert numBuckets == 0 || bucketIndices[slot - 1] < index
: "Histogram buckets must be added with their indices in ascending order";
if (slot >= bucketCounts.length) {
return false; // no more space
}
bucketIndices[slot] = index;
bucketCounts[slot] = count;
numBuckets++;
return true;
}
|
@return the position of the first bucket of this set of buckets within {@link #bucketCounts} and {@link #bucketIndices}.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 275
|
[
"index",
"count"
] | true
| 3
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
copyFile
|
function copyFile(src, dest, mode, callback) {
if (typeof mode === 'function') {
callback = mode;
mode = 0;
}
src = getValidatedPath(src, 'src');
dest = getValidatedPath(dest, 'dest');
callback = makeCallback(callback);
const req = new FSReqCallback();
req.oncomplete = callback;
binding.copyFile(src, dest, mode, req);
}
|
Asynchronously copies `src` to `dest`. By
default, `dest` is overwritten if it already exists.
@param {string | Buffer | URL} src
@param {string | Buffer | URL} dest
@param {number} [mode]
@param {(err?: Error) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 3,058
|
[
"src",
"dest",
"mode",
"callback"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
getInet4Address
|
private static Inet4Address getInet4Address(byte[] bytes) {
checkArgument(
bytes.length == 4,
"Byte array has invalid length for an IPv4 address: %s != 4.",
bytes.length);
// Given a 4-byte array, this cast should always succeed.
return (Inet4Address) bytesToInetAddress(bytes, null);
}
|
Returns an {@link Inet4Address}, given a byte array representation of the IPv4 address.
@param bytes byte array representing an IPv4 address (should be of length 4)
@return {@link Inet4Address} corresponding to the supplied byte array
@throws IllegalArgumentException if a valid {@link Inet4Address} can not be created
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 124
|
[
"bytes"
] |
Inet4Address
| true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
toBooleanObject
|
public static Boolean toBooleanObject(final int value) {
return value == 0 ? Boolean.FALSE : Boolean.TRUE;
}
|
Converts an int to a Boolean using the convention that {@code zero}
is {@code false}, everything else is {@code true}.
<pre>
BooleanUtils.toBoolean(0) = Boolean.FALSE
BooleanUtils.toBoolean(1) = Boolean.TRUE
BooleanUtils.toBoolean(2) = Boolean.TRUE
</pre>
@param value the int to convert
@return Boolean.TRUE if non-zero, Boolean.FALSE if zero,
{@code null} if {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 583
|
[
"value"
] |
Boolean
| true
| 2
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
fromrecords
|
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None,
fill_value=None, mask=ma.nomask):
"""
Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
# Grab the initial _fieldmask, if needed:
_mask = getattr(reclist, '_mask', None)
# Get the list of records.
if isinstance(reclist, np.ndarray):
# Make sure we don't have some hidden mask
if isinstance(reclist, ma.MaskedArray):
reclist = reclist.filled().view(np.ndarray)
# Grab the initial dtype, just in case
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
mrec.fill_value = fill_value
# Now, let's deal w/ the mask
if mask is not ma.nomask:
mask = np.asarray(mask)
maskrecordlength = len(mask.dtype)
if maskrecordlength:
mrec._mask.flat = mask
elif mask.ndim == 2:
mrec._mask.flat = [tuple(m) for m in mask]
else:
mrec.__setmask__(mask)
if _mask is not None:
mrec._mask[:] = _mask
return mrec
|
Creates a MaskedRecords from a list of records.
Parameters
----------
reclist : sequence
A list of records. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None,int}, optional
Number of records. If None, ``shape`` is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
mask : {nomask, sequence}, optional.
External mask to apply on the data.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
|
python
|
numpy/ma/mrecords.py
| 537
|
[
"reclist",
"dtype",
"shape",
"formats",
"names",
"titles",
"aligned",
"byteorder",
"fill_value",
"mask"
] | false
| 10
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_set_node_metadata_hook
|
def _set_node_metadata_hook(gm: torch.fx.GraphModule, f):
"""
Takes a callable which will be called after we create a new node. The
callable takes the newly created node as input and returns None.
"""
assert callable(f), "node_metadata_hook must be a callable."
# Add the hook to all submodules
for m in gm.modules():
if isinstance(m, GraphModule):
m._register_create_node_hook(f)
try:
yield
finally:
# Restore hook for all submodules
for m in gm.modules():
if isinstance(m, GraphModule):
m._unregister_create_node_hook(f)
|
Takes a callable which will be called after we create a new node. The
callable takes the newly created node as input and returns None.
|
python
|
torch/_export/passes/_node_metadata_hook.py
| 94
|
[
"gm",
"f"
] | true
| 5
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
forward
|
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
|
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
|
python
|
benchmarks/functional_autograd_benchmark/torchaudio_models.py
| 413
|
[
"self",
"x"
] | false
| 1
| 6.16
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
sensor
|
public synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents) {
return sensor(name, config, Long.MAX_VALUE, recordingLevel, parents);
}
|
Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
receive every value recorded with this sensor.
@param name The name of the sensor
@param config A default configuration to use for this sensor for metrics that don't have their own config
@param recordingLevel The recording level.
@param parents The parent sensors
@return The sensor that is created
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
| 386
|
[
"name",
"config",
"recordingLevel"
] |
Sensor
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
@SuppressWarnings("GuardedBy")
@Override
public String toString() {
Runnable currentlyRunning = task;
if (currentlyRunning != null) {
return "SequentialExecutorWorker{running=" + currentlyRunning + "}";
}
return "SequentialExecutorWorker{state=" + workerRunningState + "}";
}
|
Continues executing tasks from {@link #queue} until it is empty.
<p>The thread's interrupt bit is cleared before execution of each task.
<p>If the Thread in use is interrupted before or during execution of the tasks in {@link
#queue}, the Executor will complete its tasks, and then restore the interruption. This means
that once the Thread returns to the Executor that this Executor composes, the interruption
will still be present. If the composed Executor is an ExecutorService, it can respond to
shutdown() by returning tasks queued on that Thread after {@link #worker} drains the queue.
|
java
|
android/guava/src/com/google/common/util/concurrent/SequentialExecutor.java
| 256
|
[] |
String
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
appendln
|
public StrBuilder appendln(final char[] chars) {
return append(chars).appendNewLine();
}
|
Appends a char array followed by a new line to the string builder.
Appending null will call {@link #appendNull()}.
@param chars the char array to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 957
|
[
"chars"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isFull
|
public boolean isFull() {
// note that the write limit is respected only after the first record is added which ensures we can always
// create non-empty batches (this is used to disable batching when the producer's batch size is set to 0).
return appendStream == CLOSED_STREAM || (this.numRecords > 0 && this.writeLimit <= estimatedBytesWritten());
}
|
Check if we have room for a given number of bytes.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 889
|
[] | true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
bean
|
public GenericBeanDefinition bean(Class<?> type) {
GenericBeanDefinition beanDefinition = new GenericBeanDefinition();
beanDefinition.setBeanClass(type);
return beanDefinition;
}
|
Define an inner bean definition.
@param type the bean type
@return the bean definition
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
| 303
|
[
"type"
] |
GenericBeanDefinition
| true
| 1
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
chomp
|
public static String chomp(final String str) {
if (isEmpty(str)) {
return str;
}
if (str.length() == 1) {
final char ch = str.charAt(0);
if (ch == CharUtils.CR || ch == CharUtils.LF) {
return EMPTY;
}
return str;
}
int lastIdx = str.length() - 1;
final char last = str.charAt(lastIdx);
if (last == CharUtils.LF) {
if (str.charAt(lastIdx - 1) == CharUtils.CR) {
lastIdx--;
}
} else if (last != CharUtils.CR) {
lastIdx++;
}
return str.substring(0, lastIdx);
}
|
Removes one newline from end of a String if it's there, otherwise leave it alone. A newline is "{@code \n}", "{@code \r}", or
"{@code \r\n}".
<p>
NOTE: This method changed in 2.0. It now more closely matches Perl chomp.
</p>
<pre>
StringUtils.chomp(null) = null
StringUtils.chomp("") = ""
StringUtils.chomp("abc \r") = "abc "
StringUtils.chomp("abc\n") = "abc"
StringUtils.chomp("abc\r\n") = "abc"
StringUtils.chomp("abc\r\n\r\n") = "abc\r\n"
StringUtils.chomp("abc\n\r") = "abc\n"
StringUtils.chomp("abc\n\rabc") = "abc\n\rabc"
StringUtils.chomp("\r") = ""
StringUtils.chomp("\n") = ""
StringUtils.chomp("\r\n") = ""
</pre>
@param str the String to chomp a newline from, may be null.
@return String without newline, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 674
|
[
"str"
] |
String
| true
| 8
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
missingFieldNames
|
public Set<String> missingFieldNames(final RunningStats other) {
if (other == null || this.docCount == 0 || other.docCount == 0) {
return Collections.emptySet();
}
return symmetricDifference(this.getAllFieldNames(), other.getAllFieldNames());
}
|
Get the set of fields required by the aggregation which are missing in at least one document.
@param other the other {@link RunningStats} to check
@return a set of field names
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java
| 208
|
[
"other"
] | true
| 4
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
isEventSupported
|
function isEventSupported(eventNameSuffix: string): boolean {
if (!canUseDOM) {
return false;
}
const eventName = 'on' + eventNameSuffix;
let isSupported = eventName in document;
if (!isSupported) {
const element = document.createElement('div');
element.setAttribute(eventName, 'return;');
isSupported = typeof (element: any)[eventName] === 'function';
}
return isSupported;
}
|
Checks if an event is supported in the current execution environment.
NOTE: This will not work correctly for non-generic events such as `change`,
`reset`, `load`, `error`, and `select`.
Borrows from Modernizr.
@param {string} eventNameSuffix Event name, e.g. "click".
@return {boolean} True if the event is supported.
@internal
@license Modernizr 3.0.0pre (Custom Build) | MIT
|
javascript
|
packages/react-dom-bindings/src/events/isEventSupported.js
| 25
|
[
"eventNameSuffix"
] | false
| 3
| 7.12
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
parseBracketedList
|
function parseBracketedList<T extends Node>(kind: ParsingContext, parseElement: () => T, open: PunctuationSyntaxKind, close: PunctuationSyntaxKind): NodeArray<T> {
if (parseExpected(open)) {
const result = parseDelimitedList(kind, parseElement);
parseExpected(close);
return result;
}
return createMissingList<T>();
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,579
|
[
"kind",
"parseElement",
"open",
"close"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isBraceWrappedContext
|
function isBraceWrappedContext(context: FormattingContext): boolean {
return context.contextNode.kind === SyntaxKind.ObjectBindingPattern ||
context.contextNode.kind === SyntaxKind.MappedType ||
isSingleLineBlockContext(context);
}
|
A rule takes a two tokens (left/right) and a particular context
for which you're meant to look at them. You then declare what should the
whitespace annotation be between these tokens via the action param.
@param debugName Name to print
@param left The left side of the comparison
@param right The right side of the comparison
@param context A set of filters to narrow down the space in which this formatter rule applies
@param action a declaration of the expected whitespace
@param flags whether the rule deletes a line or not, defaults to no-op
|
typescript
|
src/services/formatting/rules.ts
| 584
|
[
"context"
] | true
| 3
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
replaceChars
|
public static String replaceChars(final String str, final char searchChar, final char replaceChar) {
if (str == null) {
return null;
}
return str.replace(searchChar, replaceChar);
}
|
Replaces all occurrences of a character in a String with another. This is a null-safe version of {@link String#replace(char, char)}.
<p>
A {@code null} string input returns {@code null}. An empty ("") string input returns an empty string.
</p>
<pre>
StringUtils.replaceChars(null, *, *) = null
StringUtils.replaceChars("", *, *) = ""
StringUtils.replaceChars("abcba", 'b', 'y') = "aycya"
StringUtils.replaceChars("abcba", 'z', 'y') = "abcba"
</pre>
@param str String to replace characters in, may be null.
@param searchChar the character to search for, may be null.
@param replaceChar the character to replace, may be null.
@return modified String, {@code null} if null string input.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,263
|
[
"str",
"searchChar",
"replaceChar"
] |
String
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createModuleLoader
|
function createModuleLoader(asyncLoaderHooks) {
// Don't spawn a new loader hook worker if we are already in a loader hook worker to avoid infinite recursion.
if (shouldSpawnLoaderHookWorker()) {
assert(asyncLoaderHooks === undefined, 'asyncLoaderHooks should only be provided on the loader hook thread itself');
const userLoaderPaths = getOptionValue('--experimental-loader');
if (userLoaderPaths.length > 0) {
if (!emittedLoaderFlagWarning) {
const readableURIEncode = (string) => ArrayPrototypeReduce(
[
[/'/g, '%27'], // We need to URL-encode the single quote as it's the delimiter for the --import flag.
[/%22/g, '"'], // We can decode the double quotes to improve readability.
[/%2F/ig, '/'], // We can decode the slashes to improve readability.
],
(str, { 0: regex, 1: replacement }) => RegExpPrototypeSymbolReplace(hardenRegExp(regex), str, replacement),
encodeURIComponent(string));
process.emitWarning(
'`--experimental-loader` may be removed in the future; instead use `register()`:\n' +
`--import 'data:text/javascript,import { register } from "node:module"; import { pathToFileURL } from "node:url"; ${ArrayPrototypeJoin(
ArrayPrototypeMap(userLoaderPaths, (loader) => `register(${readableURIEncode(JSONStringify(loader))}, pathToFileURL("./"))`),
'; ',
)};'`,
'ExperimentalWarning',
);
emittedLoaderFlagWarning = true;
}
const { AsyncLoaderHooksProxiedToLoaderHookWorker } = require('internal/modules/esm/hooks');
asyncLoaderHooks = new AsyncLoaderHooksProxiedToLoaderHookWorker();
}
}
return new ModuleLoader(asyncLoaderHooks);
}
|
A loader instance is used as the main entry point for loading ES modules. Currently, this is a singleton; there is
only one used for loading the main module and everything in its dependency graph, though separate instances of this
class might be instantiated as part of bootstrap for other purposes.
@param {AsyncLoaderHooksOnLoaderHookWorker|undefined} [asyncLoaderHooks]
Only provided when run on the loader hook thread.
@returns {ModuleLoader}
|
javascript
|
lib/internal/modules/esm/loader.js
| 838
|
[
"asyncLoaderHooks"
] | false
| 4
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
inverse
|
@Override
public ImmutableSetMultimap<V, K> inverse() {
ImmutableSetMultimap<V, K> result = inverse;
return (result == null) ? (inverse = invert()) : result;
}
|
{@inheritDoc}
<p>Because an inverse of a set multimap cannot contain multiple pairs with the same key and
value, this method returns an {@code ImmutableSetMultimap} rather than the {@code
ImmutableMultimap} specified in the {@code ImmutableMultimap} class.
|
java
|
android/guava/src/com/google/common/collect/ImmutableSetMultimap.java
| 554
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.