function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
variable_labels
|
def variable_labels(self) -> dict[str, str]:
    """
    Return a dict mapping each variable name to its variable label.

    Variable labels are the descriptive strings a Stata dataset associates
    with its variable (column) names.

    Returns
    -------
    dict
        Mapping of variable name to label; unlabeled variables map to an
        empty string.

    See Also
    --------
    read_stata : Read Stata file into DataFrame.
    DataFrame.to_stata : Export DataFrame object to Stata dta format.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"])
    >>> variable_labels = {"col_1": "This is an example"}
    >>> df.to_stata("filename.dta", variable_labels=variable_labels)  # doctest: +SKIP
    >>> with pd.io.stata.StataReader("filename.dta") as reader:  # doctest: +SKIP
    ...     print(reader.variable_labels())  # doctest: +SKIP
    {'index': '', 'col_1': 'This is an example', 'col_2': ''}
    """
    self._ensure_open()
    # _varlist and _variable_labels are parallel sequences read from the
    # file; strict=True guards against a length mismatch between them.
    pairs = zip(self._varlist, self._variable_labels, strict=True)
    return {name: label for name, label in pairs}
|
Return a dict associating each variable name with corresponding label.
This method retrieves variable labels from a Stata file. Variable labels are
mappings between variable names and their corresponding descriptive labels
in a Stata dataset.
Returns
-------
dict
A python dictionary.
See Also
--------
read_stata : Read Stata file into DataFrame.
DataFrame.to_stata : Export DataFrame object to Stata dta format.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["col_1", "col_2"])
>>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21)
>>> path = "/My_path/filename.dta"
>>> variable_labels = {"col_1": "This is an example"}
>>> df.to_stata(
... path,
... time_stamp=time_stamp, # doctest: +SKIP
... variable_labels=variable_labels,
... version=None,
... ) # doctest: +SKIP
>>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP
... print(reader.variable_labels()) # doctest: +SKIP
{'index': '', 'col_1': 'This is an example', 'col_2': ''}
>>> pd.read_stata(path) # doctest: +SKIP
index col_1 col_2
0 0 1 2
1 1 3 4
|
python
|
pandas/io/stata.py
| 2,004
|
[
"self"
] |
dict[str, str]
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
auc
|
def auc(x, y):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This is a general function that works on arbitrary points of a curve.
    For the area under a ROC curve, see :func:`roc_auc_score`; for an
    alternative summary of a precision-recall curve, see
    :func:`average_precision_score`.

    Parameters
    ----------
    x : array-like of shape (n,)
        X coordinates. Must be monotonic increasing or monotonic decreasing.
    y : array-like of shape (n,)
        Y coordinates.

    Returns
    -------
    auc : float
        Area Under the Curve.

    See Also
    --------
    roc_auc_score : Compute the area under the ROC curve.
    average_precision_score : Compute average precision from prediction scores.
    precision_recall_curve : Compute precision-recall pairs for different
        probability thresholds.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y_true = np.array([1, 1, 2, 2])
    >>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError(
            "At least 2 points are needed to compute area under curve, but x.shape = %s"
            % x.shape
        )

    # A monotonically decreasing x only flips the sign of the area;
    # a non-monotonic x is an error.
    direction = 1
    dx = np.diff(x)
    if np.any(dx < 0):
        if not np.all(dx <= 0):
            raise ValueError("x is neither increasing nor decreasing : {}.".format(x))
        direction = -1

    area = direction * trapezoid(y, x)
    if isinstance(area, np.memmap):
        # Reductions such as .sum used internally in trapezoid do not return
        # a scalar by default for numpy.memmap instances, contrary to regular
        # numpy.ndarray instances; force a scalar of the same dtype.
        area = area.dtype.type(area)
    return float(area)
|
Compute Area Under the Curve (AUC) using the trapezoidal rule.
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
way to summarize a precision-recall curve, see
:func:`average_precision_score`.
Parameters
----------
x : array-like of shape (n,)
X coordinates. These must be either monotonic increasing or monotonic
decreasing.
y : array-like of shape (n,)
Y coordinates.
Returns
-------
auc : float
Area Under the Curve.
See Also
--------
roc_auc_score : Compute the area under the ROC curve.
average_precision_score : Compute average precision from prediction scores.
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y_true = np.array([1, 1, 2, 2])
>>> y_score = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y_true, y_score, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
|
python
|
sklearn/metrics/_ranking.py
| 47
|
[
"x",
"y"
] | false
| 6
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
fillna
|
def fillna(
    self,
    value: object | ArrayLike,
    limit: int | None = None,
    copy: bool = True,
) -> Self:
    """
    Fill NA/NaN values with ``value``.

    Parameters
    ----------
    value : scalar, array-like
        If a scalar is passed it is used to fill all missing values.
        Alternatively, an array-like of the same length as ``self`` may be
        given, supplying a fill value per position.
    limit : int, default None
        The maximum number of NA entries to fill (front-to-back).
    copy : bool, default True
        Whether to make a copy of the data before filling. If False the
        original should be modified in place and no new memory allocated;
        subclasses that cannot do this may ignore ``copy=False`` or raise.

    Returns
    -------
    ExtensionArray
        With NA/NaN filled.

    See Also
    --------
    api.extensions.ExtensionArray.dropna : Return ExtensionArray without
        NA values.
    api.extensions.ExtensionArray.isna : A 1-D array indicating if
        each value is missing.

    Examples
    --------
    >>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
    >>> arr.fillna(0)
    <IntegerArray>
    [0, 0, 2, 3, 0, 0]
    Length: 6, dtype: Int64
    """
    mask = self.isna()
    if limit is not None and limit < len(self):
        # Un-mark every NA beyond the first `limit` missing entries.
        # isna can return an ExtensionArray; we assume comparisons work.
        # mypy doesn't like that mask can be an EA which need not have `cumsum`
        beyond_limit = mask.cumsum() > limit  # type: ignore[union-attr]
        if beyond_limit.any():
            # Only copy the mask when it actually changes.
            mask = mask.copy()
            mask[beyond_limit] = False

    # error: Argument 2 to "check_value_size" has incompatible type
    # "ExtensionArray"; expected "ndarray"
    value = missing.check_value_size(
        value,
        mask,  # type: ignore[arg-type]
        len(self),
    )

    # `self[:]` honors copy=False by (potentially) sharing memory.
    new_values = self.copy() if copy else self[:]
    if mask.any():
        new_values[mask] = value
    return new_values
|
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, array-like
If a scalar value is passed it is used to fill all missing values.
Alternatively, an array-like "value" can be given. It's expected
that the array-like have the same length as 'self'.
limit : int, default None
The maximum number of entries where NA values will be filled.
copy : bool, default True
Whether to make a copy of the data before filling. If False, then
the original should be modified and no new memory should be allocated.
For ExtensionArray subclasses that cannot do this, it is at the
author's discretion whether to ignore "copy=False" or to raise.
Returns
-------
ExtensionArray
With NA/NaN filled.
See Also
--------
api.extensions.ExtensionArray.dropna : Return ExtensionArray without
NA values.
api.extensions.ExtensionArray.isna : A 1-D array indicating if
each value is missing.
Examples
--------
>>> arr = pd.array([np.nan, np.nan, 2, 3, np.nan, np.nan])
>>> arr.fillna(0)
<IntegerArray>
[0, 0, 2, 3, 0, 0]
Length: 6, dtype: Int64
|
python
|
pandas/core/arrays/base.py
| 1,232
|
[
"self",
"value",
"limit",
"copy"
] |
Self
| true
| 10
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
createResponse
|
/**
 * Build a {@link ProjectGenerationResponse} from the HTTP response returned by the
 * project generation service: the response carries the entity's content type, its
 * full body, and (when advertised) the suggested file name.
 * @param httpResponse the raw HTTP response (used for the Content-Disposition header)
 * @param httpEntity the response entity whose content and content type are consumed
 * @return the populated response
 * @throws IOException if the entity content cannot be read
 */
private ProjectGenerationResponse createResponse(ClassicHttpResponse httpResponse, HttpEntity httpEntity)
		throws IOException {
	ProjectGenerationResponse response = new ProjectGenerationResponse(
			ContentType.create(httpEntity.getContentType()));
	// Buffer the whole entity body into memory.
	response.setContent(FileCopyUtils.copyToByteArray(httpEntity.getContent()));
	// The service may suggest a file name via the Content-Disposition header.
	String fileName = extractFileName(httpResponse.getFirstHeader("Content-Disposition"));
	if (fileName != null) {
		response.setFileName(fileName);
	}
	return response;
}
|
Loads the service capabilities of the service at the specified URL. If the service
supports generating a textual representation of the capabilities, it is returned,
otherwise {@link InitializrServiceMetadata} is returned.
@param serviceUrl to url of the initializer service
@return the service capabilities (as a String) or the
{@link InitializrServiceMetadata} describing the service
@throws IOException if the service capabilities cannot be loaded
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrService.java
| 152
|
[
"httpResponse",
"httpEntity"
] |
ProjectGenerationResponse
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
averageLoadPenalty
|
/**
 * Returns the average time spent loading new values, defined as
 * {@code totalLoadTime / (loadSuccessCount + loadExceptionCount)}, or {@code 0.0} when no
 * loads have completed.
 *
 * <p><b>Note:</b> the value is undefined in case of counter overflow (a saturated addition
 * is used, so no exception is thrown).
 */
public double averageLoadPenalty() {
	long loads = saturatedAdd(loadSuccessCount, loadExceptionCount);
	if (loads == 0) {
		return 0.0;
	}
	return (double) totalLoadTime / loads;
}
|
Returns the average time spent loading new values. This is defined as {@code totalLoadTime /
(loadSuccessCount + loadExceptionCount)}.
<p><b>Note:</b> the values of the metrics are undefined in case of overflow (though it is
guaranteed not to throw an exception). If you require specific handling, we recommend
implementing your own stats collector.
|
java
|
android/guava/src/com/google/common/cache/CacheStats.java
| 225
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
countriesByLanguage
|
/**
 * Obtains the list of countries supported for a given language.
 * <p>
 * This method takes a language code and searches to find the countries available for that
 * language. Variant locales are removed.
 * </p>
 *
 * @param languageCode the 2 letter language code, null returns empty.
 * @return an unmodifiable List of Locale objects, not null.
 */
public static List<Locale> countriesByLanguage(final String languageCode) {
    if (languageCode == null) {
        return Collections.emptyList();
    }
    // Results are cached per language code. FIX: the previous predicate selected locales
    // WITHOUT a country and WITH a variant — the inverse of the documented contract. A
    // "country for this language" locale must have a country component and no variant.
    return cCountriesByLanguage.computeIfAbsent(languageCode, lc -> Collections
        .unmodifiableList(availableLocaleList(locale -> languageCode.equals(locale.getLanguage())
            && hasCountry(locale) && !hasVariant(locale))));
}
|
Obtains the list of countries supported for a given language.
<p>
This method takes a language code and searches to find the countries available for that language. Variant locales are removed.
</p>
@param languageCode the 2 letter language code, null returns empty.
@return an unmodifiable List of Locale objects, not null.
|
java
|
src/main/java/org/apache/commons/lang3/LocaleUtils.java
| 135
|
[
"languageCode"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
wrapIfNecessary
|
/**
 * Wrap the given errors, if necessary, such that they are suitable for serialization to
 * JSON. {@link MessageSourceResolvable} implementations that are known to be suitable are
 * not wrapped.
 * @param errors the errors to wrap
 * @return a new Error list
 * @since 3.5.4
 */
public static List<MessageSourceResolvable> wrapIfNecessary(List<? extends MessageSourceResolvable> errors) {
	if (CollectionUtils.isEmpty(errors)) {
		return Collections.emptyList();
	}
	List<MessageSourceResolvable> wrapped = new ArrayList<>(errors.size());
	errors.forEach((candidate) -> wrapped.add(requiresWrapping(candidate) ? new Error(candidate) : candidate));
	// Return an immutable snapshot of the (possibly wrapped) errors.
	return List.copyOf(wrapped);
}
|
Wrap the given errors, if necessary, such that they are suitable for serialization
to JSON. {@link MessageSourceResolvable} implementations that are known to be
suitable are not wrapped.
@param errors the errors to wrap
@return a new Error list
@since 3.5.4
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/error/Error.java
| 106
|
[
"errors"
] | true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getAccessibleMethod
|
/**
 * Gets an accessible method (that is, one that can be invoked via reflection) that
 * implements the specified Method. If no such method can be found, returns {@code null}.
 *
 * @param cls The implementing class, may be null.
 * @param method The method that we wish to call, may be null.
 * @return The accessible method or null.
 * @since 3.19.0
 */
public static Method getAccessibleMethod(final Class<?> cls, final Method method) {
    // Non-public methods can never be accessible.
    if (!MemberUtils.isPublic(method)) {
        return null;
    }
    // A public method on a public declaring class is already accessible.
    if (ClassUtils.isPublic(cls)) {
        return method;
    }
    final String name = method.getName();
    final Class<?>[] parameterTypes = method.getParameterTypes();
    // Prefer a match from the implemented interfaces (and subinterfaces)...
    final Method interfaceMethod = getAccessibleMethodFromInterfaceNest(cls, name, parameterTypes);
    if (interfaceMethod != null) {
        return interfaceMethod;
    }
    // ...falling back to the superclass chain.
    return getAccessibleMethodFromSuperclass(cls, name, parameterTypes);
}
|
Gets an accessible method (that is, one that can be invoked via reflection) that implements the specified Method. If no such method can be found, return
{@code null}.
@param cls The implementing class, may be null.
@param method The method that we wish to call, may be null.
@return The accessible method or null.
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 110
|
[
"cls",
"method"
] |
Method
| true
| 4
| 8.4
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
convertStreamOutput
|
/**
 * Convert a notebook cell output holding stdout/stderr items into a Jupyter
 * `stream` output, re-splitting the decoded text into one array entry per line
 * (each entry except the last ends with `\n`).
 */
function convertStreamOutput(output: NotebookCellOutput): JupyterOutput {
    const outputs: string[] = [];
    output.items
        // Only stdout/stderr items belong to a stream output.
        .filter((opit) => opit.mime === CellOutputMimeTypes.stderr || opit.mime === CellOutputMimeTypes.stdout)
        .map((opit) => textDecoder.decode(opit.data))
        .forEach(value => {
            // Ensure each line is a separate entry in an array (ending with \n).
            const lines = value.split('\n');
            // If the last item in `outputs` is not empty and the first item in `lines` is not empty, then concate them.
            // As they are part of the same line.
            if (outputs.length && lines.length && lines[0].length > 0) {
                outputs[outputs.length - 1] = `${outputs[outputs.length - 1]}${lines.shift()!}`;
            }
            for (const line of lines) {
                outputs.push(line);
            }
        });
    // Re-attach the newline separators: every entry except the last gets a trailing \n.
    for (let index = 0; index < (outputs.length - 1); index++) {
        outputs[index] = `${outputs[index]}\n`;
    }
    // Skip last one if empty (it's the only one that could be length 0)
    if (outputs.length && outputs[outputs.length - 1].length === 0) {
        outputs.pop();
    }
    const streamType = getOutputStreamType(output) || 'stdout';
    return {
        output_type: 'stream',
        name: streamType,
        text: outputs
    };
}
|
Splits the source of a cell into an array of strings, each representing a line.
Also normalizes line endings to use LF (`\n`) instead of CRLF (`\r\n`).
Same is done in deserializer as well.
|
typescript
|
extensions/ipynb/src/serializers.ts
| 308
|
[
"output"
] | true
| 9
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
maybe_convert_usecols
|
def maybe_convert_usecols(
    usecols: str | list[int] | list[str] | usecols_func | None,
) -> None | list[int] | list[str] | usecols_func:
    """
    Normalize `usecols` into a format the parsers in `parsers.py` accept.

    Parameters
    ----------
    usecols : object
        The use-columns object to potentially convert.

    Returns
    -------
    converted : object
        The compatible format of `usecols`.

    Raises
    ------
    ValueError
        If a bare integer is passed (no longer supported).
    """
    if usecols is None:
        return None
    if is_integer(usecols):
        raise ValueError(
            "Passing an integer for `usecols` is no longer supported. "
            "Please pass in a list of int from 0 to `usecols` inclusive instead."
        )
    # An Excel-style column range such as "A:C" is expanded to index lists.
    return _range2cols(usecols) if isinstance(usecols, str) else usecols
|
Convert `usecols` into a compatible format for parsing in `parsers.py`.
Parameters
----------
usecols : object
The use-columns object to potentially convert.
Returns
-------
converted : object
The compatible format of `usecols`.
|
python
|
pandas/io/excel/_util.py
| 179
|
[
"usecols"
] |
None | list[int] | list[str] | usecols_func
| true
| 4
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
addLogger
|
/**
 * Add the {@link LoggerConfig} for the given logger name, and for each of its ancestor
 * names, to the given map. Names already present in the map are left untouched.
 * @param loggers the map to populate, keyed by logger name
 * @param name the logger name to start from
 */
private void addLogger(Map<String, LoggerConfig> loggers, String name) {
	Configuration configuration = getLoggerContext().getConfiguration();
	while (name != null) {
		// computeIfAbsent preserves any config already collected for this name.
		loggers.computeIfAbsent(name, configuration::getLoggerConfig);
		// NOTE(review): getSubName presumably yields the parent logger name
		// (dropping the last dotted segment) until null — confirm.
		name = getSubName(name);
	}
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 413
|
[
"loggers",
"name"
] |
void
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
safeGet
|
/**
 * Gets the value at `key`, unless `key` is "__proto__" or "constructor"
 * (when `constructor` holds a function), guarding against prototype pollution.
 *
 * @private
 * @param {Object} object The object to query.
 * @param {string} key The key of the property to get.
 * @returns {*} Returns the property value, or `undefined` for blocked keys.
 */
function safeGet(object, key) {
  // Loose equality intentionally matches the original behavior.
  if (key == '__proto__') {
    return;
  }
  if (key === 'constructor' && typeof object[key] === 'function') {
    return;
  }
  return object[key];
}
|
Gets the value at `key`, unless `key` is "__proto__" or "constructor".
@private
@param {Object} object The object to query.
@param {string} key The key of the property to get.
@returns {*} Returns the property value.
|
javascript
|
lodash.js
| 6,712
|
[
"object",
"key"
] | false
| 4
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
resolveNonPatternEmptyDirectories
|
/**
 * Resolve the directory of the given (non-pattern) reference as a single config data
 * resource. Class-path locations and directories that do not exist resolve to an empty
 * set; otherwise the existing directory is returned as a single resource.
 * @param reference the reference to resolve; its directory must not be {@code null}
 * @return the resolved resources (empty or a singleton)
 */
private Set<StandardConfigDataResource> resolveNonPatternEmptyDirectories(StandardConfigDataReference reference) {
	String directory = reference.getDirectory();
	Assert.state(directory != null, "'directory' must not be null");
	Resource resource = this.resourceLoader.getResource(directory);
	// NOTE(review): the trailing 'true' presumably flags this as an
	// empty-directory resource — confirm against the constructor.
	return (resource instanceof ClassPathResource || !resource.exists()) ? Collections.emptySet()
			: Collections.singleton(new StandardConfigDataResource(reference, resource, true));
}
|
Create a new {@link StandardConfigDataLocationResolver} instance.
@param logFactory the factory for loggers to use
@param binder a binder backed by the initial {@link Environment}
@param resourceLoader a {@link ResourceLoader} used to load resources
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataLocationResolver.java
| 288
|
[
"reference"
] | true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
processCodePathToExit
|
/**
 * Updates the code path due to the type of a given node in leaving:
 * pops the context that the node opened on entry and, for abrupt-completion
 * statements, records the jump and suppresses the normal forward step.
 * @param {CodePathAnalyzer} analyzer The instance.
 * @param {ASTNode} node The current AST node.
 * @returns {void}
 */
function processCodePathToExit(analyzer, node) {
    const codePath = analyzer.codePath;
    const state = CodePath.getState(codePath);
    // When true, skip the final forwardCurrentToHead (the node ends abruptly).
    let dontForward = false;

    switch (node.type) {
        case 'ChainExpression':
            state.popChainContext();
            break;
        case 'IfStatement':
        case 'ConditionalExpression':
            state.popChoiceContext();
            break;
        case 'LogicalExpression':
            if (isHandledLogicalOperator(node.operator)) {
                state.popChoiceContext();
            }
            break;
        case 'AssignmentExpression':
            // Only logical assignments (&&=, ||=, ??=) open a choice context.
            if (isLogicalAssignmentOperator(node.operator)) {
                state.popChoiceContext();
            }
            break;
        case 'SwitchStatement':
            state.popSwitchContext();
            break;
        case 'SwitchCase':
            /*
             * This is the same as the process at the 1st `consequent` node in
             * `preprocess` function.
             * Must do if this `consequent` is empty.
             */
            if (node.consequent.length === 0) {
                state.makeSwitchCaseBody(true, !node.test);
            }
            if (state.forkContext.reachable) {
                dontForward = true;
            }
            break;
        case 'TryStatement':
            state.popTryContext();
            break;
        case 'BreakStatement':
            forwardCurrentToHead(analyzer, node);
            state.makeBreak(node.label && node.label.name);
            dontForward = true;
            break;
        case 'ContinueStatement':
            forwardCurrentToHead(analyzer, node);
            state.makeContinue(node.label && node.label.name);
            dontForward = true;
            break;
        case 'ReturnStatement':
            forwardCurrentToHead(analyzer, node);
            state.makeReturn();
            dontForward = true;
            break;
        case 'ThrowStatement':
            forwardCurrentToHead(analyzer, node);
            state.makeThrow();
            dontForward = true;
            break;
        case 'Identifier':
            // An identifier *reference* may throw inside a try block.
            if (isIdentifierReference(node)) {
                state.makeFirstThrowablePathInTryBlock();
                dontForward = true;
            }
            break;
        case 'CallExpression':
        case 'ImportExpression':
        case 'MemberExpression':
        case 'NewExpression':
        case 'YieldExpression':
            // These expressions can throw, so mark the throwable path.
            state.makeFirstThrowablePathInTryBlock();
            break;
        case 'WhileStatement':
        case 'DoWhileStatement':
        case 'ForStatement':
        case 'ForInStatement':
        case 'ForOfStatement':
            state.popLoopContext();
            break;
        case 'AssignmentPattern':
            state.popForkContext();
            break;
        case 'LabeledStatement':
            // Breakable bodies (loops/switch) pop their own break context.
            if (!breakableTypePattern.test(node.body.type)) {
                state.popBreakContext();
            }
            break;
        default:
            break;
    }

    // Emits onCodePathSegmentStart events if updated.
    if (!dontForward) {
        forwardCurrentToHead(analyzer, node);
    }
}
|
Updates the code path due to the type of a given node in leaving.
@param {CodePathAnalyzer} analyzer The instance.
@param {ASTNode} node The current AST node.
@returns {void}
|
javascript
|
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-analyzer.js
| 528
|
[
"analyzer",
"node"
] | false
| 10
| 6.16
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
sem
|
def sem(
    self,
    axis: Axis | None = 0,
    skipna: bool = True,
    ddof: int = 1,
    numeric_only: bool = False,
    **kwargs,
) -> Series | Any:
    """
    Return unbiased standard error of the mean over requested axis.

    Normalized by N-1 by default; this can be changed with the ``ddof``
    argument.

    Parameters
    ----------
    axis : {index (0), columns (1)}
        For `Series` this parameter is unused and defaults to 0.

        .. warning::
            The behavior of DataFrame.sem with ``axis=None`` is deprecated,
            in a future version this will reduce over both axes and return a
            scalar. To retain the old behavior, pass axis=0 (or do not pass
            axis).
    skipna : bool, default True
        Exclude NA/null values. If an entire row/column is NA, the result
        will be NA.
    ddof : int, default 1
        Delta Degrees of Freedom. The divisor used in calculations is
        N - ddof, where N represents the number of elements.
    numeric_only : bool, default False
        Include only float, int, boolean columns. Not implemented for Series.
    **kwargs :
        Additional keywords passed.

    Returns
    -------
    Series or DataFrame (if level specified)
        Unbiased standard error of the mean over requested axis.

    See Also
    --------
    DataFrame.var : Return unbiased variance over requested axis.
    DataFrame.std : Returns sample standard deviation over requested axis.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3])
    >>> s.sem().round(6)
    0.57735

    >>> df = pd.DataFrame({"a": [1, 2], "b": [2, 3]}, index=["tiger", "zebra"])
    >>> df.sem()
    a    0.5
    b    0.5
    dtype: float64
    """
    # Delegate the computation to the shared NDFrame implementation.
    result = super().sem(
        axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs
    )
    if not isinstance(result, Series):
        # Scalar result (e.g. axis=None): nothing to propagate metadata to.
        return result
    # Propagate metadata from this frame onto the reduced Series.
    return result.__finalize__(self, method="sem")
|
Return unbiased standard error of the mean over requested axis.
Normalized by N-1 by default. This can be changed using the ddof argument
Parameters
----------
axis : {index (0), columns (1)}
For `Series` this parameter is unused and defaults to 0.
.. warning::
The behavior of DataFrame.sem with ``axis=None`` is deprecated,
in a future version this will reduce over both axes and return a scalar
To retain the old behavior, pass axis=0 (or do not pass axis).
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns. Not implemented for Series.
**kwargs :
Additional keywords passed.
Returns
-------
Series or DataFrame (if level specified)
Unbiased standard error of the mean over requested axis.
See Also
--------
DataFrame.var : Return unbiased variance over requested axis.
DataFrame.std : Returns sample standard deviation over requested axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.sem().round(6)
0.57735
With a DataFrame
>>> df = pd.DataFrame({"a": [1, 2], "b": [2, 3]}, index=["tiger", "zebra"])
>>> df
a b
tiger 1 2
zebra 2 3
>>> df.sem()
a 0.5
b 0.5
dtype: float64
Using axis=1
>>> df.sem(axis=1)
tiger 0.5
zebra 0.5
dtype: float64
In this case, `numeric_only` should be set to `True`
to avoid getting an error.
>>> df = pd.DataFrame({"a": [1, 2], "b": ["T", "Z"]}, index=["tiger", "zebra"])
>>> df.sem(numeric_only=True)
a 0.5
dtype: float64
|
python
|
pandas/core/frame.py
| 13,428
|
[
"self",
"axis",
"skipna",
"ddof",
"numeric_only"
] |
Series | Any
| true
| 2
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
abspath
|
def abspath(self, path):
    """
    Return absolute path of file in the DataSource directory.

    If `path` is an URL, then `abspath` will return either the location
    the file exists locally or the location it would exist when opened
    using the `open` method.

    Parameters
    ----------
    path : str or pathlib.Path
        Can be a local file or a remote URL.

    Returns
    -------
    out : str
        Complete path, including the `DataSource` destination directory.

    Notes
    -----
    The functionality is based on `os.path.abspath`.
    """
    # Imported lazily to reduce the 'import numpy' initial import time.
    from urllib.parse import urlparse

    # TODO: This should be more robust. It only handles the case where path
    # embeds self._destpath, not other sub-paths. Failing case:
    #     path = /home/guido/datafile.txt
    #     destpath = /home/alex/
    #     upath = self.abspath(path)
    #     upath == '/home/alex/home/guido/datafile.txt'
    # Strip a leading destination prefix if present.
    parts = path.split(self._destpath, 2)
    if len(parts) > 1:
        path = parts[1]

    parsed = urlparse(path)
    netloc = self._sanitize_relative_path(parsed.netloc)
    upath = self._sanitize_relative_path(parsed.path)
    return os.path.join(self._destpath, netloc, upath)
|
Return absolute path of file in the DataSource directory.
If `path` is an URL, then `abspath` will return either the location
the file exists locally or the location it would exist when opened
using the `open` method.
Parameters
----------
path : str or pathlib.Path
Can be a local file or a remote URL.
Returns
-------
out : str
Complete path, including the `DataSource` destination directory.
Notes
-----
The functionality is based on `os.path.abspath`.
|
python
|
numpy/lib/_datasource.py
| 371
|
[
"self",
"path"
] | false
| 2
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
finishEventHandler
|
/**
 * Runs after an event handler completes. Waits until all updates have
 * propagated (important for controlled components within layers, see
 * https://github.com/facebook/react/issues/1698) and then restores the
 * state of any controlled component.
 */
function finishEventHandler() {
  if (!needsStateRestore()) {
    return;
  }
  // A controlled event was fired: React may have bailed out of the update
  // without touching the DOM, so the DOM node must be pushed back to the
  // controlled value.
  // TODO: Restore state in the microtask, after the discrete updates flush,
  // instead of early flushing them here.
  // @TODO Should move to flushSyncWork once legacy mode is removed but since
  // this flushSync flushes passive effects we can't do this yet.
  flushSyncWork();
  restoreStateIfNeeded();
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
|
javascript
|
packages/react-dom-bindings/src/events/ReactDOMUpdateBatching.js
| 27
|
[] | false
| 2
| 6.4
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
cellSet
|
/**
 * Returns a set of all row key / column key / value triplets. Changes to the returned set
 * will update the underlying table, and vice versa. The cell set does not support the
 * {@code add} or {@code addAll} methods.
 *
 * @return set of table cells consisting of row key / column key / value triplets
 */
Set<Cell<R, C, V>> cellSet();
|
Returns a set of all row key / column key / value triplets. Changes to the returned set will
update the underlying table, and vice versa. The cell set does not support the {@code add} or
{@code addAll} methods.
@return set of table cells consisting of row key / column key / value triplets
|
java
|
android/guava/src/com/google/common/collect/Table.java
| 208
|
[] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
_format_duplicate_message
|
def _format_duplicate_message(self) -> DataFrame:
    """
    Construct the DataFrame for a DuplicateLabelError.

    The result lists each duplicated label together with the positions at
    which it occurs. Only call this when duplicates are known to exist.

    Examples
    --------
    >>> idx = pd.Index(["a", "b", "a"])
    >>> idx._format_duplicate_message()
          positions
    label
    a        [0, 2]
    """
    from pandas import Series

    duplicates = self[self.duplicated(keep="first")].unique()
    assert len(duplicates)

    # Group positional indices by label, then keep only duplicated labels.
    positions = Series(np.arange(len(self)), copy=False)
    out = positions.groupby(self, observed=False).agg(list)[duplicates]
    if self._is_multi:
        # test_format_duplicate_labels_message_multi
        # error: "Type[Index]" has no attribute "from_tuples" [attr-defined]
        out.index = type(self).from_tuples(out.index)  # type: ignore[attr-defined]

    if self.nlevels == 1:
        out = out.rename_axis("label")
    return out.to_frame(name="positions")
|
Construct the DataFrame for a DuplicateLabelError.
This returns a DataFrame indicating the labels and positions
of duplicates in an index. This should only be called when it's
already known that duplicates are present.
Examples
--------
>>> idx = pd.Index(["a", "b", "a"])
>>> idx._format_duplicate_message()
positions
label
a [0, 2]
|
python
|
pandas/core/indexes/base.py
| 725
|
[
"self"
] |
DataFrame
| true
| 3
| 7.28
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
createServerData
|
/**
 * Creates a fake suspense-compatible data source: `read()` throws a shared
 * promise (resolving after API_DELAY) until the data is "ready", then returns.
 */
function createServerData() {
  let done = false;
  let promise = null;
  return {
    read() {
      if (done) {
        return;
      }
      if (promise === null) {
        // First read: start the simulated fetch.
        promise = new Promise(resolve => {
          setTimeout(() => {
            done = true;
            promise = null;
            resolve();
          }, API_DELAY);
        });
      }
      // Suspense protocol: throw the in-flight promise.
      throw promise;
    },
  };
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
|
javascript
|
fixtures/ssr2/server/render.js
| 75
|
[] | false
| 3
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
asin
|
/**
 * Computes the arcsine using precomputed polynomial tables for small inputs and an
 * fdlibm-derived formula near 1.
 * @param value Value in [-1,1].
 * @return Value arcsine, in radians, in [-PI/2,PI/2]; NaN for |value| > 1 or NaN input.
 */
public static double asin(double value) {
    // asin is odd: work on |value| and restore the sign at the end.
    boolean negateResult;
    if (value < 0.0) {
        value = -value;
        negateResult = true;
    } else {
        negateResult = false;
    }
    if (value <= ASIN_MAX_VALUE_FOR_TABS) {
        // Uniform-step table lookup plus 4th-order Taylor correction in delta.
        int index = (int) (value * ASIN_INDEXER + 0.5);
        double delta = value - index * ASIN_DELTA;
        double result = asinTab[index] + delta * (asinDer1DivF1Tab[index] + delta * (asinDer2DivF2Tab[index] + delta
                * (asinDer3DivF3Tab[index] + delta * asinDer4DivF4Tab[index])));
        return negateResult ? -result : result;
    } else if (value <= ASIN_MAX_VALUE_FOR_POWTABS) {
        // Power-spaced table (denser near 1 where asin steepens), same correction scheme.
        int index = (int) (FastMath.powFast(value * ASIN_POWTABS_ONE_DIV_MAX_VALUE, ASIN_POWTABS_POWER) * ASIN_POWTABS_SIZE_MINUS_ONE
                + 0.5);
        double delta = value - asinParamPowTab[index];
        double result = asinPowTab[index] + delta * (asinDer1DivF1PowTab[index] + delta * (asinDer2DivF2PowTab[index] + delta
                * (asinDer3DivF3PowTab[index] + delta * asinDer4DivF4PowTab[index])));
        return negateResult ? -result : result;
    } else { // value > ASIN_MAX_VALUE_FOR_TABS, or value is NaN
        // This part is derived from fdlibm.
        if (value < 1.0) {
            // asin(x) = pi/2 - 2*asin(sqrt((1-x)/2)), with a rational approximation p/q.
            double t = (1.0 - value) * 0.5;
            double p = t * (ASIN_PS0 + t * (ASIN_PS1 + t * (ASIN_PS2 + t * (ASIN_PS3 + t * (ASIN_PS4 + t * ASIN_PS5)))));
            double q = 1.0 + t * (ASIN_QS1 + t * (ASIN_QS2 + t * (ASIN_QS3 + t * ASIN_QS4)));
            double s = Math.sqrt(t);
            double z = s + s * (p / q);
            // Split pi/2 into hi+lo parts for extra precision.
            double result = ASIN_PIO2_HI - ((z + z) - ASIN_PIO2_LO);
            return negateResult ? -result : result;
        } else { // value >= 1.0, or value is NaN
            if (value == 1.0) {
                return negateResult ? -M_HALF_PI : M_HALF_PI;
            } else {
                // |value| > 1 or NaN: asin is undefined.
                return Double.NaN;
            }
        }
    }
}
|
@param value Value in [-1,1].
@return Value arcsine, in radians, in [-PI/2,PI/2].
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java
| 447
|
[
"value"
] | true
| 10
| 8.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
capitalize
|
def capitalize(a):
"""
Return a copy of ``a`` with only the first character of each element
capitalized.
Calls :meth:`str.capitalize` element-wise.
For byte strings, this method is locale-dependent.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array of strings to capitalize.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.capitalize
Examples
--------
>>> import numpy as np
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
array(['a1b2', '1b2a', 'b2a1', '2a1b'],
dtype='|S4')
>>> np.strings.capitalize(c)
array(['A1b2', '1b2a', 'B2a1', '2a1b'],
dtype='|S4')
"""
a_arr = np.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
|
Return a copy of ``a`` with only the first character of each element
capitalized.
Calls :meth:`str.capitalize` element-wise.
For byte strings, this method is locale-dependent.
Parameters
----------
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Input array of strings to capitalize.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.capitalize
Examples
--------
>>> import numpy as np
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
array(['a1b2', '1b2a', 'b2a1', '2a1b'],
dtype='|S4')
>>> np.strings.capitalize(c)
array(['A1b2', '1b2a', 'B2a1', '2a1b'],
dtype='|S4')
|
python
|
numpy/_core/strings.py
| 1,203
|
[
"a"
] | false
| 1
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
subarray
|
public static double[] subarray(final double[] array, int startIndexInclusive, int endIndexExclusive) {
if (array == null) {
return null;
}
startIndexInclusive = max0(startIndexInclusive);
endIndexExclusive = Math.min(endIndexExclusive, array.length);
final int newSize = endIndexExclusive - startIndexInclusive;
if (newSize <= 0) {
return EMPTY_DOUBLE_ARRAY;
}
return arraycopy(array, startIndexInclusive, 0, newSize, double[]::new);
}
|
Produces a new {@code double} array containing the elements between the start and end indices.
<p>
The start index is inclusive, the end index exclusive. Null array input produces null output.
</p>
@param array the input array.
@param startIndexInclusive the starting index. Undervalue (<0) is promoted to 0, overvalue (>array.length) results in an empty array.
@param endIndexExclusive elements up to endIndex-1 are present in the returned subarray. Undervalue (< startIndex) produces empty array, overvalue
(>array.length) is demoted to array length.
@return a new array containing the elements between the start and end indices.
@since 2.1
@see Arrays#copyOfRange(double[], int, int)
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 7,843
|
[
"array",
"startIndexInclusive",
"endIndexExclusive"
] | true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getJSONObject
|
public JSONObject getJSONObject(String name) throws JSONException {
Object object = get(name);
if (object instanceof JSONObject) {
return (JSONObject) object;
}
else {
throw JSON.typeMismatch(name, object, "JSONObject");
}
}
|
Returns the value mapped by {@code name} if it exists and is a {@code
JSONObject}.
@param name the name of the property
@return the value
@throws JSONException if the mapping doesn't exist or is not a {@code
JSONObject}.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 625
|
[
"name"
] |
JSONObject
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
keySetIterator
|
Iterator<K> keySetIterator() {
Map<K, V> delegate = delegateOrNull();
if (delegate != null) {
return delegate.keySet().iterator();
}
return new Itr<K>() {
@Override
@ParametricNullness
K getOutput(int entry) {
return key(entry);
}
};
}
|
Updates the index an iterator is pointing to after a call to remove: returns the index of the
entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the
index that *was* the next entry that would be looked at.
|
java
|
android/guava/src/com/google/common/collect/CompactHashMap.java
| 710
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
intersection
|
public static Pointcut intersection(Pointcut pc1, Pointcut pc2) {
return new ComposablePointcut(pc1).intersection(pc2);
}
|
Match all methods that <b>both</b> the given pointcuts match.
@param pc1 the first Pointcut
@param pc2 the second Pointcut
@return a distinct Pointcut that matches all methods that both
of the given Pointcuts match
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/Pointcuts.java
| 63
|
[
"pc1",
"pc2"
] |
Pointcut
| true
| 1
| 6.96
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
jsonLenient
|
public static Object jsonLenient(Object fieldValue) {
return JsonProcessor.apply(fieldValue, false, false);
}
|
Uses {@link JsonProcessor} to convert a JSON string to a structured JSON
object. This method is a more lenient version of {@link #json(Object)}. For example if given fieldValue "123 foo",
this method will return 123 rather than throwing an IllegalArgumentException.
@param fieldValue JSON string
@return structured JSON object
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java
| 74
|
[
"fieldValue"
] |
Object
| true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
randomNumeric
|
@Deprecated
public static String randomNumeric(final int count) {
return secure().nextNumeric(count);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of numeric characters.
</p>
@param count the length of random string to create.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
@deprecated Use {@link #nextNumeric(int)} from {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 568
|
[
"count"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toArray
|
@Override
@J2ktIncompatible // Incompatible return type change. Use inherited (unoptimized) implementation
public Object[] toArray() {
Object[] copyTo = new Object[size];
arraycopy(queue, 0, copyTo, 0, size);
return copyTo;
}
|
Returns an iterator over the elements contained in this collection, <i>in no particular
order</i>.
<p>The iterator is <i>fail-fast</i>: If the MinMaxPriorityQueue is modified at any time after
the iterator is created, in any way except through the iterator's own remove method, the
iterator will generally throw a {@link ConcurrentModificationException}. Thus, in the face of
concurrent modification, the iterator fails quickly and cleanly, rather than risking arbitrary,
non-deterministic behavior at an undetermined time in the future.
<p>Note that the fail-fast behavior of an iterator cannot be guaranteed as it is, generally
speaking, impossible to make any hard guarantees in the presence of unsynchronized concurrent
modification. Fail-fast iterators throw {@code ConcurrentModificationException} on a
best-effort basis. Therefore, it would be wrong to write a program that depended on this
exception for its correctness: <i>the fail-fast behavior of iterators should be used only to
detect bugs.</i>
@return an iterator over the elements contained in this collection
|
java
|
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
| 912
|
[] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
elementsOf
|
@Contract("_, false, _ -> !null")
private static @Nullable Elements elementsOf(@Nullable CharSequence name, boolean returnNullIfInvalid,
int parserCapacity) {
if (name == null) {
Assert.isTrue(returnNullIfInvalid, "'name' must not be null");
return null;
}
if (name.isEmpty()) {
return Elements.EMPTY;
}
if (name.charAt(0) == '.' || name.charAt(name.length() - 1) == '.') {
if (returnNullIfInvalid) {
return null;
}
throw new InvalidConfigurationPropertyNameException(name, Collections.singletonList('.'));
}
Elements elements = new ElementsParser(name, '.', parserCapacity).parse();
for (int i = 0; i < elements.getSize(); i++) {
if (elements.getType(i) == ElementType.NON_UNIFORM) {
if (returnNullIfInvalid) {
return null;
}
throw new InvalidConfigurationPropertyNameException(name, getInvalidChars(elements, i));
}
}
return elements;
}
|
Return a {@link ConfigurationPropertyName} for the specified string.
@param name the source name
@param returnNullIfInvalid if null should be returned if the name is not valid
@return a {@link ConfigurationPropertyName} instance
@throws InvalidConfigurationPropertyNameException if the name is not valid and
{@code returnNullIfInvalid} is {@code false}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 673
|
[
"name",
"returnNullIfInvalid",
"parserCapacity"
] |
Elements
| true
| 9
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_check_is_size
|
def _check_is_size(i, message=None, *, max=None):
"""Checks that a given integer is a valid size (i.e., is non-negative).
You should use this over ``_check(i >= 0)`` because it can prevent
``GuardOnDataDependentSymNode`` exceptions by opting yourself into alternate
semantics for ``guard_size_oblivious`` tests that treat values 0 and 1
equivalently to all other values.
When max is not None, this specifies an upper bound equivalent to
``_check(i <= max)``. This bound is also subject to alternate semantics:
in ``guard_size_oblivious`` tests, we assume that a constant max bound is
treated equivalently to all other values. Symbolic max bounds are not yet
supported.
NB: Do NOT use this in contexts where a -1 size would be valid (indicating
to infer the size from context, or if you should wrap-around or truncate).
Only use this if the only valid value is an honest to goodness size.
"""
# This is responsible for the expect_true
_check(i >= 0, message)
from torch.fx.experimental.symbolic_shapes import _advise_is_size
_advise_is_size(i)
if max is not None:
_check(i <= max, message)
from torch.fx.experimental.symbolic_shapes import _advise_is_bounded
_advise_is_bounded(i, max)
|
Checks that a given integer is a valid size (i.e., is non-negative).
You should use this over ``_check(i >= 0)`` because it can prevent
``GuardOnDataDependentSymNode`` exceptions by opting yourself into alternate
semantics for ``guard_size_oblivious`` tests that treat values 0 and 1
equivalently to all other values.
When max is not None, this specifies an upper bound equivalent to
``_check(i <= max)``. This bound is also subject to alternate semantics:
in ``guard_size_oblivious`` tests, we assume that a constant max bound is
treated equivalently to all other values. Symbolic max bounds are not yet
supported.
NB: Do NOT use this in contexts where a -1 size would be valid (indicating
to infer the size from context, or if you should wrap-around or truncate).
Only use this if the only valid value is an honest to goodness size.
|
python
|
torch/__init__.py
| 1,748
|
[
"i",
"message",
"max"
] | false
| 2
| 6.08
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
completeLastSent
|
public NetworkClient.InFlightRequest completeLastSent(String node) {
NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollFirst();
inFlightRequestCount.decrementAndGet();
return inFlightRequest;
}
|
Complete the last request that was sent to a particular node.
@param node The node the request was sent to
@return The request
|
java
|
clients/src/main/java/org/apache/kafka/clients/InFlightRequests.java
| 84
|
[
"node"
] | true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
sort
|
public static <T> T[] sort(final T[] array, final Comparator<? super T> comparator) {
if (array != null) {
Arrays.sort(array, comparator);
}
return array;
}
|
Sorts the given array into ascending order and returns it.
@param <T> the array type.
@param array the array to sort (may be null).
@param comparator the comparator to determine the order of the array. A {@code null} value uses the elements'
{@link Comparable natural ordering}.
@return the given array.
@see Arrays#sort(Object[])
|
java
|
src/main/java/org/apache/commons/lang3/ArraySorter.java
| 153
|
[
"array",
"comparator"
] | true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
charactersOf
|
public static List<Character> charactersOf(CharSequence sequence) {
return new CharSequenceAsList(checkNotNull(sequence));
}
|
Returns a view of the specified {@code CharSequence} as a {@code List<Character>}, viewing
{@code sequence} as a sequence of Unicode code units. The view does not support any
modification operations, but reflects any changes to the underlying character sequence.
@param sequence the character sequence to view as a {@code List} of characters
@return an {@code List<Character>} view of the character sequence
@since 7.0
|
java
|
android/guava/src/com/google/common/collect/Lists.java
| 746
|
[
"sequence"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
applyWriteLocked
|
public <T> T applyWriteLocked(final FailableFunction<O, T, ?> function) {
return lockApplyUnlock(writeLockSupplier, function);
}
|
Provides write (exclusive) access to The object to protect for the purpose of computing a result object.
More precisely, what the method will do (in the given order):
<ol>
<li>Obtain a read (shared) lock on The object to protect. The current thread may block, until such a
lock is granted.</li>
<li>Invokes the given {@link FailableFunction function}, passing the locked object as the parameter,
receiving the functions result.</li>
<li>Release the lock, as soon as the consumers invocation is done. If the invocation results in an error, the
lock will be released anyways.</li>
<li>Return the result object, that has been received from the functions invocation.</li>
</ol>
@param <T> The result type (both the functions, and this method's.)
@param function The function, which is being invoked to compute the result. The function will receive the
hidden object.
@return The result object, which has been returned by the functions invocation.
@throws IllegalStateException The result object would be, in fact, the hidden object. This would extend
access to the hidden object beyond this methods lifetime and will therefore be prevented.
@see #acceptReadLocked(FailableConsumer)
@see #applyWriteLocked(FailableFunction)
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/locks/LockingVisitors.java
| 397
|
[
"function"
] |
T
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createConverter
|
function createConverter(name, func) {
var realName = mapping.aliasToReal[name] || name,
methodName = mapping.remap[realName] || realName,
oldOptions = options;
return function(options) {
var newUtil = isLib ? pristine : helpers,
newFunc = isLib ? pristine[methodName] : func,
newOptions = assign(assign({}, oldOptions), options);
return baseConvert(newUtil, realName, newFunc, newOptions);
};
}
|
Create a converter function for `func` of `name`.
@param {string} name The name of the function to convert.
@param {Function} func The function to convert.
@returns {Function} Returns the new converter function.
|
javascript
|
fp/_baseConvert.js
| 388
|
[
"name",
"func"
] | false
| 5
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
previousPage
|
public void previousPage() {
if (!isFirstPage()) {
this.page--;
}
}
|
Switch to previous page.
Will stay on first page if already on first page.
|
java
|
spring-beans/src/main/java/org/springframework/beans/support/PagedListHolder.java
| 238
|
[] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
print_default_config
|
def print_default_config(output_format: str) -> None:
"""Print a default configuration template in JSON or YAML format.
Args:
output_format: Either "json" or "yaml"
"""
if output_format == "json":
print(json.dumps(default_config, indent=2))
else: # yaml
for key, value in default_config.items():
if value is None:
print(f"{key}: null")
elif isinstance(value, bool):
print(f"{key}: {str(value).lower()}")
elif isinstance(value, str):
print(f'{key}: "{value}"')
elif isinstance(value, list):
print(f"{key}: {json.dumps(value)}")
else:
print(f"{key}: {value}")
|
Print a default configuration template in JSON or YAML format.
Args:
output_format: Either "json" or "yaml"
|
python
|
benchmarks/transformer/config_utils.py
| 138
|
[
"output_format"
] |
None
| true
| 9
| 6.56
|
pytorch/pytorch
| 96,034
|
google
| false
|
isIncluded
|
private boolean isIncluded(EndpointId endpointId) {
if (this.include.isEmpty()) {
return this.defaultIncludes.matches(endpointId);
}
return this.include.matches(endpointId);
}
|
Return {@code true} if the filter matches.
@param endpointId the endpoint ID to check
@return {@code true} if the filter matches
@since 2.6.0
|
java
|
module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/endpoint/expose/IncludeExcludeEndpointFilter.java
| 129
|
[
"endpointId"
] | true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
sizeOf
|
public static int sizeOf(short version,
Iterator<Map.Entry<TopicIdPartition,
FetchResponseData.PartitionData>> partIterator) {
// Since the throttleTimeMs and metadata field sizes are constant and fixed, we can
// use arbitrary values here without affecting the result.
FetchResponseData data = toMessage(Errors.NONE, 0, INVALID_SESSION_ID, partIterator, Collections.emptyList());
ObjectSerializationCache cache = new ObjectSerializationCache();
return 4 + data.size(cache, version);
}
|
Convenience method to find the size of a response.
@param version The version of the response to use.
@param partIterator The partition iterator.
@return The response size in bytes.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java
| 164
|
[
"version",
"partIterator"
] | true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
l1_min_c
|
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
"""Return the lowest bound for `C`.
The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
:class:`sklearn.linear_model.LogisticRegression` with `l1_ratio=1`.
This value is valid if `class_weight` parameter in `fit()` is not set.
For an example of how to use this function, see
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
Examples
--------
>>> from sklearn.svm import l1_min_c
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
>>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
0.0044
"""
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
|
Return the lowest bound for `C`.
The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
:class:`sklearn.linear_model.LogisticRegression` with `l1_ratio=1`.
This value is valid if `class_weight` parameter in `fit()` is not set.
For an example of how to use this function, see
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
Examples
--------
>>> from sklearn.svm import l1_min_c
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
>>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
0.0044
|
python
|
sklearn/svm/_bounds.py
| 26
|
[
"X",
"y",
"loss",
"fit_intercept",
"intercept_scaling"
] | false
| 5
| 7.28
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
is_due
|
def is_due(self, last_run_at: datetime) -> tuple[bool, datetime]:
"""Return tuple of ``(is_due, next_time_to_run)``.
If :setting:`beat_cron_starting_deadline` has been specified, the
scheduler will make sure that the `last_run_at` time is within the
deadline. This prevents tasks that could have been run according to
the crontab, but didn't, from running again unexpectedly.
Note:
Next time to run is in seconds.
SeeAlso:
:meth:`celery.schedules.schedule.is_due` for more information.
"""
rem_delta = self.remaining_estimate(last_run_at)
rem_secs = rem_delta.total_seconds()
rem = max(rem_secs, 0)
due = rem == 0
deadline_secs = self.app.conf.beat_cron_starting_deadline
has_passed_deadline = False
if deadline_secs is not None:
# Make sure we're looking at the latest possible feasible run
# date when checking the deadline.
last_date_checked = last_run_at
last_feasible_rem_secs = rem_secs
while rem_secs < 0:
last_date_checked = last_date_checked + abs(rem_delta)
rem_delta = self.remaining_estimate(last_date_checked)
rem_secs = rem_delta.total_seconds()
if rem_secs < 0:
last_feasible_rem_secs = rem_secs
# if rem_secs becomes 0 or positive, second-to-last
# last_date_checked must be the last feasible run date.
# Check if the last feasible date is within the deadline
# for running
has_passed_deadline = -last_feasible_rem_secs > deadline_secs
if has_passed_deadline:
# Should not be due if we've passed the deadline for looking
# at past runs
due = False
if due or has_passed_deadline:
rem_delta = self.remaining_estimate(self.now())
rem = max(rem_delta.total_seconds(), 0)
return schedstate(due, rem)
|
Return tuple of ``(is_due, next_time_to_run)``.
If :setting:`beat_cron_starting_deadline` has been specified, the
scheduler will make sure that the `last_run_at` time is within the
deadline. This prevents tasks that could have been run according to
the crontab, but didn't, from running again unexpectedly.
Note:
Next time to run is in seconds.
SeeAlso:
:meth:`celery.schedules.schedule.is_due` for more information.
|
python
|
celery/schedules.py
| 641
|
[
"self",
"last_run_at"
] |
tuple[bool, datetime]
| true
| 7
| 6.56
|
celery/celery
| 27,741
|
unknown
| false
|
isAllUpperCase
|
public static boolean isAllUpperCase(final CharSequence cs) {
if (isEmpty(cs)) {
return false;
}
final int sz = cs.length();
for (int i = 0; i < sz; i++) {
if (!Character.isUpperCase(cs.charAt(i))) {
return false;
}
}
return true;
}
|
Tests if the CharSequence contains only uppercase characters.
<p>{@code null} will return {@code false}.
An empty String (length()=0) will return {@code false}.</p>
<pre>
StringUtils.isAllUpperCase(null) = false
StringUtils.isAllUpperCase("") = false
StringUtils.isAllUpperCase(" ") = false
StringUtils.isAllUpperCase("ABC") = true
StringUtils.isAllUpperCase("aBC") = false
StringUtils.isAllUpperCase("A C") = false
StringUtils.isAllUpperCase("A1C") = false
StringUtils.isAllUpperCase("A/C") = false
</pre>
@param cs the CharSequence to check, may be null.
@return {@code true} if only contains uppercase characters, and is non-null.
@since 2.5
@since 3.0 Changed signature from isAllUpperCase(String) to isAllUpperCase(CharSequence)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,230
|
[
"cs"
] | true
| 4
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
unmodifiableSortedSetMultimap
|
public static <K extends @Nullable Object, V extends @Nullable Object>
SortedSetMultimap<K, V> unmodifiableSortedSetMultimap(SortedSetMultimap<K, V> delegate) {
if (delegate instanceof UnmodifiableSortedSetMultimap) {
return delegate;
}
return new UnmodifiableSortedSetMultimap<>(delegate);
}
|
Returns an unmodifiable view of the specified {@code SortedSetMultimap}. Query operations on
the returned multimap "read through" to the specified multimap, and attempts to modify the
returned multimap, either directly or through the multimap's views, result in an {@code
UnsupportedOperationException}.
<p>The returned multimap will be serializable if the specified multimap is serializable.
@param delegate the multimap for which an unmodifiable view is to be returned
@return an unmodifiable view of the specified multimap
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 967
|
[
"delegate"
] | true
| 2
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
|
shouldGenerateId
|
protected boolean shouldGenerateId() {
return false;
}
|
Should an ID be generated instead of read from the passed in {@link Element}?
<p>Disabled by default; subclasses can override this to enable ID generation.
Note that this flag is about <i>always</i> generating an ID; the parser
won't even check for an "id" attribute in this case.
@return whether the parser should always generate an id
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/AbstractBeanDefinitionParser.java
| 162
|
[] | true
| 1
| 6.96
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
readResolve
|
Object readResolve() {
return isEmpty() ? EMPTY : this;
}
|
Returns an immutable array containing the same values as {@code this} array. This is logically
a no-op, and in some circumstances {@code this} itself is returned. However, if this instance
is a {@link #subArray} view of a larger array, this method will copy only the appropriate range
of values, resulting in an equivalent array with a smaller memory footprint.
|
java
|
android/guava/src/com/google/common/primitives/ImmutableDoubleArray.java
| 654
|
[] |
Object
| true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
linspace
|
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
|
Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at element of x.
|
python
|
numpy/polynomial/_polybase.py
| 915
|
[
"self",
"n",
"domain"
] | false
| 2
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
createCloudinaryUrl
|
function createCloudinaryUrl(path: string, config: ImageLoaderConfig) {
// Cloudinary image URLformat:
// https://cloudinary.com/documentation/image_transformations#transformation_url_structure
// Example of a Cloudinary image URL:
// https://res.cloudinary.com/mysite/image/upload/c_scale,f_auto,q_auto,w_600/marketing/tile-topics-m.png
// For a placeholder image, we use the lowest image setting available to reduce the load time
// else we use the auto size
const quality = config.isPlaceholder ? 'q_auto:low' : 'q_auto';
let params = `f_auto,${quality}`;
if (config.width) {
params += `,w_${config.width}`;
}
if (config.loaderParams?.['rounded']) {
params += `,r_max`;
}
return `${path}/image/upload/${params}/${config.src}`;
}
|
Function that generates an ImageLoader for Cloudinary and turns it into an Angular provider.
@param path Base URL of your Cloudinary images
This URL should match one of the following formats:
https://res.cloudinary.com/mysite
https://mysite.cloudinary.com
https://subdomain.mysite.com
@returns Set of providers to configure the Cloudinary loader.
@publicApi
|
typescript
|
packages/common/src/directives/ng_optimized_image/image_loaders/cloudinary_loader.ts
| 51
|
[
"path",
"config"
] | false
| 4
| 6.8
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
setForceMergeCompletedTimestamp
|
private void setForceMergeCompletedTimestamp(ProjectId projectId, String targetIndex, ActionListener<Void> listener) {
forceMergeClusterStateUpdateTaskQueue.submitTask(
Strings.format("Adding force merge complete marker to cluster state for [%s]", targetIndex),
new UpdateForceMergeCompleteTask(listener, projectId, targetIndex, threadPool),
null
);
}
|
This method sends requests to delete any indices in the datastream that exceed its retention policy. It returns the set of indices
it has sent delete requests for.
@param project The project metadata from which to get index metadata
@param dataStream The data stream
@param indicesToExcludeForRemainingRun Indices to exclude from retention even if it would be time for them to be deleted
@return The set of indices that delete requests have been sent for
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,376
|
[
"projectId",
"targetIndex",
"listener"
] |
void
| true
| 1
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
get_s3_bucket_key
|
def get_s3_bucket_key(
bucket: str | None, key: str, bucket_param_name: str, key_param_name: str
) -> tuple[str, str]:
"""
Get the S3 bucket name and key.
From either:
- bucket name and key. Return the info as it is after checking `key` is a relative path.
- key. Must be a full s3:// url.
:param bucket: The S3 bucket name
:param key: The S3 key
:param bucket_param_name: The parameter name containing the bucket name
:param key_param_name: The parameter name containing the key name
:return: the parsed bucket name and key
"""
if bucket is None:
return S3Hook.parse_s3_url(key)
parsed_url = urlsplit(key)
if parsed_url.scheme != "" or parsed_url.netloc != "":
raise TypeError(
f"If `{bucket_param_name}` is provided, {key_param_name} should be a relative path "
"from root level, rather than a full s3:// url"
)
return bucket, key
|
Get the S3 bucket name and key.
From either:
- bucket name and key. Return the info as it is after checking `key` is a relative path.
- key. Must be a full s3:// url.
:param bucket: The S3 bucket name
:param key: The S3 key
:param bucket_param_name: The parameter name containing the bucket name
:param key_param_name: The parameter name containing the key name
:return: the parsed bucket name and key
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 268
|
[
"bucket",
"key",
"bucket_param_name",
"key_param_name"
] |
tuple[str, str]
| true
| 4
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
getAllSuperclassesAndInterfaces
|
/**
 * Gets a combination of {@link ClassUtils#getAllSuperclasses(Class)} and
 * {@link ClassUtils#getAllInterfaces(Class)}, one from superclasses, one from
 * interfaces, and so on in a breadth first way.
 *
 * @param cls the class to look up, may be {@code null}
 * @return the combined {@code List} of superclasses and interfaces going up
 *         from this one, or {@code null} on null input
 */
private static List<Class<?>> getAllSuperclassesAndInterfaces(final Class<?> cls) {
    if (cls == null) {
        return null;
    }
    final List<Class<?>> allSuperClassesAndInterfaces = new ArrayList<>();
    final List<Class<?>> allSuperclasses = ClassUtils.getAllSuperclasses(cls);
    int superClassIndex = 0;
    final List<Class<?>> allInterfaces = ClassUtils.getAllInterfaces(cls);
    int interfaceIndex = 0;
    // Interleave the two lists: a superclass is taken when the interfaces are
    // exhausted, or while fewer superclasses than interfaces have been emitted
    // (superClassIndex < interfaceIndex); otherwise the next interface is taken.
    while (interfaceIndex < allInterfaces.size() || superClassIndex < allSuperclasses.size()) {
        final Class<?> acls;
        if (interfaceIndex >= allInterfaces.size() || superClassIndex < allSuperclasses.size() && superClassIndex < interfaceIndex) {
            acls = allSuperclasses.get(superClassIndex++);
        } else {
            acls = allInterfaces.get(interfaceIndex++);
        }
        allSuperClassesAndInterfaces.add(acls);
    }
    return allSuperClassesAndInterfaces;
}
|
Gets a combination of {@link ClassUtils#getAllSuperclasses(Class)} and {@link ClassUtils#getAllInterfaces(Class)}, one from superclasses, one from
interfaces, and so on in a breadth first way.
@param cls the class to look up, may be {@code null}.
@return the combined {@link List} of superclasses and interfaces in order going up from this one {@code null} if null input.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 223
|
[
"cls"
] | true
| 7
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
asSet
|
/**
 * Null-safe conversion of a {@code String[]} to an unmodifiable, order-preserving
 * {@link Set} for client libraries to use.
 *
 * @param array the array to convert
 * @return an unmodifiable set, or {@code null} when the array is {@code null}
 */
static @Nullable Set<String> asSet(String @Nullable [] array) {
    if (array == null) {
        return null;
    }
    // LinkedHashSet keeps the array's original order; wrap read-only.
    Set<String> copy = new LinkedHashSet<>(Arrays.asList(array));
    return Collections.unmodifiableSet(copy);
}
|
Helper method that provides a null-safe way to convert a {@code String[]} to a
{@link Collection} for client libraries to use.
@param array the array to convert
@return a collection or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslOptions.java
| 115
|
[
"array"
] | true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isAssignableFrom
|
/**
 * Validates that {@code type} can be assigned to {@code superType}, throwing an
 * {@link IllegalArgumentException} built from the formatted message otherwise.
 *
 * @param superType the class to validate against, not null
 * @param type the class to check, not null
 * @param message the {@link String#format(String, Object...)} exception message if invalid, not null
 * @param values the optional values for the formatted exception message
 * @throws IllegalArgumentException if {@code type} is not assignable to {@code superType}
 */
public static void isAssignableFrom(final Class<?> superType, final Class<?> type, final String message, final Object... values) {
    // TODO when breaking BC, consider returning type
    if (superType.isAssignableFrom(type)) {
        return;
    }
    throw new IllegalArgumentException(getMessage(message, values));
}
|
Validates that the argument can be converted to the specified class, if not throws an exception.
<p>This method is useful when validating if there will be no casting errors.</p>
<pre>Validate.isAssignableFrom(SuperClass.class, object.getClass());</pre>
<p>The message of the exception is "The validated object cannot be converted to the"
followed by the name of the class and "class"</p>
@param superType the class must be validated against, not null.
@param type the class to check, not null.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@throws IllegalArgumentException if argument cannot be converted to the specified class.
@see #isAssignableFrom(Class, Class)
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 424
|
[
"superType",
"type",
"message"
] |
void
| true
| 2
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
remove
|
/**
 * Remove the given jar file and, if present, its associated URL mapping from
 * both internal caches.
 *
 * @param jarFile the jar file to remove
 */
void remove(JarFile jarFile) {
    synchronized (this) {
        // Keep the two maps consistent: removing the jar->URL entry must also
        // remove the reverse URL->jar entry.
        URL removedUrl = this.jarFileToJarFileUrl.remove(jarFile);
        if (removedUrl != null) {
            this.jarFileUrlToJarFile.remove(new JarFileUrlKey(removedUrl));
        }
    }
}
|
Remove the given jar and any related URL file from the cache.
@param jarFile the jar file to remove
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 199
|
[
"jarFile"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
adaptArgumentsIfNecessary
|
/**
 * Adapt the given arguments to the target signature in the given method,
 * if necessary: in particular, if a given vararg argument array does not
 * match the array type of the declared vararg parameter in the method.
 *
 * @param method the target method
 * @param arguments the given arguments
 * @return a cloned argument array, or the original if no adaptation is needed
 */
static @Nullable Object[] adaptArgumentsIfNecessary(Method method, @Nullable Object[] arguments) {
    if (ObjectUtils.isEmpty(arguments)) {
        return new Object[0];
    }
    if (method.isVarArgs() && (method.getParameterCount() == arguments.length)) {
        Class<?>[] paramTypes = method.getParameterTypes();
        // The vararg parameter is always the last declared parameter.
        int varargIndex = paramTypes.length - 1;
        Class<?> varargType = paramTypes[varargIndex];
        if (varargType.isArray()) {
            Object varargArray = arguments[varargIndex];
            // Only adapt when the supplied trailing array is an Object[] that is
            // not already assignable to the declared vararg array type.
            if (varargArray instanceof Object[] && !varargType.isInstance(varargArray)) {
                Object[] newArguments = new Object[arguments.length];
                System.arraycopy(arguments, 0, newArguments, 0, varargIndex);
                Class<?> targetElementType = varargType.componentType();
                int varargLength = Array.getLength(varargArray);
                // Rebuild the trailing array with the declared component type and
                // copy the elements over; the leading arguments are shared.
                Object newVarargArray = Array.newInstance(targetElementType, varargLength);
                System.arraycopy(varargArray, 0, newVarargArray, 0, varargLength);
                newArguments[varargIndex] = newVarargArray;
                return newArguments;
            }
        }
    }
    return arguments;
}
|
Adapt the given arguments to the target signature in the given method,
if necessary: in particular, if a given vararg argument array does not
match the array type of the declared vararg parameter in the method.
@param method the target method
@param arguments the given arguments
@return a cloned argument array, or the original if no adaptation is needed
@since 4.2.3
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AopProxyUtils.java
| 256
|
[
"method",
"arguments"
] | true
| 7
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
translate
|
/**
 * Translate a set of code points, represented by an int index into a CharSequence,
 * into another set of code points written to {@code out}.
 *
 * @param input CharSequence that is being translated
 * @param index int representing the current point of translation
 * @param out Writer to translate the text to
 * @return int count of code points consumed
 * @throws IOException if and only if the Writer produces an IOException
 */
public abstract int translate(CharSequence input, int index, Writer out) throws IOException;
|
Translate a set of code points, represented by an int index into a CharSequence,
into another set of code points. The number of code points consumed must be returned,
and the only IOExceptions thrown must be from interacting with the Writer so that
the top level API may reliably ignore StringWriter IOExceptions.
@param input CharSequence that is being translated.
@param index int representing the current point of translation.
@param out Writer to translate the text to.
@return int count of code points consumed.
@throws IOException if and only if the Writer produces an IOException.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/CharSequenceTranslator.java
| 93
|
[
"input",
"index",
"out"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
min
|
def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue):
    """
    Return the minimum along a given axis.

    Parameters
    ----------
    axis : None or int or tuple of ints, optional
        Axis along which to operate. By default, ``axis`` is None and the
        flattened input is used.
        If this is a tuple of ints, the minimum is selected over multiple
        axes, instead of a single axis or all the axes as before.
    out : array_like, optional
        Alternative output array in which to place the result. Must be of
        the same shape and buffer length as the expected output.
    fill_value : scalar or None, optional
        Value used to fill in the masked values.
        If None, use the output of `minimum_fill_value`.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the array.

    Returns
    -------
    amin : array_like
        New array holding the result.
        If ``out`` was specified, ``out`` is returned.

    See Also
    --------
    ma.minimum_fill_value
        Returns the minimum filling value for a given datatype.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]]
    >>> mask = [[1, 1, 0], [0, 0, 1]]
    >>> masked_x = ma.masked_array(x, mask)
    >>> masked_x
    masked_array(
      data=[[--, --, 3.0],
            [0.2, -0.7, --]],
      mask=[[ True,  True, False],
            [False, False,  True]],
      fill_value=1e+20)
    >>> ma.min(masked_x)
    -0.7
    >>> ma.min(masked_x, axis=-1)
    masked_array(data=[3.0, -0.7],
                 mask=[False, False],
                 fill_value=1e+20)
    >>> ma.min(masked_x, axis=0, keepdims=True)
    masked_array(data=[[0.2, -0.7, 3.0]],
                 mask=[[False, False, False]],
                 fill_value=1e+20)
    >>> mask = [[1, 1, 1,], [1, 1, 1]]
    >>> masked_x = ma.masked_array(x, mask)
    >>> ma.min(masked_x, axis=0)
    masked_array(data=[--, --, --],
                 mask=[ True,  True,  True],
                 fill_value=1e+20,
                 dtype=float64)
    """
    # `keepdims` uses a sentinel so it is only forwarded when explicitly given.
    kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
    _mask = self._mask
    # Reduce the mask along `axis` -- presumably masking output positions whose
    # inputs were all masked; see _check_mask_axis (TODO confirm).
    newmask = _check_mask_axis(_mask, axis, **kwargs)
    if fill_value is None:
        fill_value = minimum_fill_value(self)
    # No explicit output
    if out is None:
        # Fill masked entries with a value that cannot win the minimum, reduce,
        # then re-view as this masked-array subclass.
        result = self.filled(fill_value).min(
            axis=axis, out=out, **kwargs).view(type(self))
        if result.ndim:
            # Set the mask
            result.__setmask__(newmask)
            # Get rid of Infs
            if newmask.ndim:
                np.copyto(result, result.fill_value, where=newmask)
        elif newmask:
            # A fully masked 0-d result collapses to the `masked` singleton.
            result = masked
        return result
    # Explicit output
    self.filled(fill_value).min(axis=axis, out=out, **kwargs)
    if isinstance(out, MaskedArray):
        outmask = getmask(out)
        if outmask is nomask:
            outmask = out._mask = make_mask_none(out.shape)
        outmask.flat = newmask
    else:
        # Plain ndarray output: masked positions can only be signalled via NaN,
        # which boolean/integer/unsigned dtypes cannot represent.
        if out.dtype.kind in 'biu':
            errmsg = "Masked data information would be lost in one or more"\
                     " location."
            raise MaskError(errmsg)
        np.copyto(out, np.nan, where=newmask)
    return out
|
Return the minimum along a given axis.
Parameters
----------
axis : None or int or tuple of ints, optional
Axis along which to operate. By default, ``axis`` is None and the
flattened input is used.
If this is a tuple of ints, the minimum is selected over multiple
axes, instead of a single axis or all the axes as before.
out : array_like, optional
Alternative output array in which to place the result. Must be of
the same shape and buffer length as the expected output.
fill_value : scalar or None, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the array.
Returns
-------
amin : array_like
New array holding the result.
If ``out`` was specified, ``out`` is returned.
See Also
--------
ma.minimum_fill_value
Returns the minimum filling value for a given datatype.
Examples
--------
>>> import numpy.ma as ma
>>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]]
>>> mask = [[1, 1, 0], [0, 0, 1]]
>>> masked_x = ma.masked_array(x, mask)
>>> masked_x
masked_array(
data=[[--, --, 3.0],
[0.2, -0.7, --]],
mask=[[ True, True, False],
[False, False, True]],
fill_value=1e+20)
>>> ma.min(masked_x)
-0.7
>>> ma.min(masked_x, axis=-1)
masked_array(data=[3.0, -0.7],
mask=[False, False],
fill_value=1e+20)
>>> ma.min(masked_x, axis=0, keepdims=True)
masked_array(data=[[0.2, -0.7, 3.0]],
mask=[[False, False, False]],
fill_value=1e+20)
>>> mask = [[1, 1, 1,], [1, 1, 1]]
>>> masked_x = ma.masked_array(x, mask)
>>> ma.min(masked_x, axis=0)
masked_array(data=[--, --, --],
mask=[ True, True, True],
fill_value=1e+20,
dtype=float64)
|
python
|
numpy/ma/core.py
| 5,859
|
[
"self",
"axis",
"out",
"fill_value",
"keepdims"
] | false
| 11
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getTags
|
/**
 * Create a set of tags using the supplied key and value pairs. Insertion order
 * is preserved.
 *
 * @param keyValue the key and value pairs for the tags; must be an even number
 * @return the map of tags; never null
 * @throws IllegalArgumentException if an odd number of arguments is supplied
 */
public static Map<String, String> getTags(String... keyValue) {
    if (keyValue.length % 2 != 0) {
        throw new IllegalArgumentException("keyValue needs to be specified in pairs");
    }
    final Map<String, String> tags = new LinkedHashMap<>(keyValue.length / 2);
    for (int i = 0; i + 1 < keyValue.length; i += 2) {
        tags.put(keyValue[i], keyValue[i + 1]);
    }
    return tags;
}
|
Create a set of tags using the supplied key and value pairs. The order of the tags will be kept.
@param keyValue the key and value pairs for the tags; must be an even number
@return the map of tags that can be supplied to the {@link Metrics} methods; never null
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/internals/MetricsUtils.java
| 57
|
[] | true
| 3
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
reportFailure
|
/**
 * Report a startup failure through the registered exception reporters, falling
 * back to direct logging when no reporter claims the exception.
 *
 * @param exceptionReporters the reporters to try, in order
 * @param failure the failure that aborted the application run
 */
private void reportFailure(Collection<SpringBootExceptionReporter> exceptionReporters, Throwable failure) {
    try {
        for (SpringBootExceptionReporter reporter : exceptionReporters) {
            if (reporter.reportException(failure)) {
                // First reporter to handle the exception wins; nothing more to do.
                registerLoggedException(failure);
                return;
            }
        }
    }
    catch (Throwable ex) {
        // Continue with normal handling of the original failure
    }
    if (logger.isErrorEnabled()) {
        if (NativeDetector.inNativeImage()) {
            // Depending on how early the failure was, logging may not work in a
            // native image so we output the stack trace directly to System.out
            // instead.
            System.out.println("Application run failed");
            failure.printStackTrace(System.out);
        }
        else {
            logger.error("Application run failed", failure);
        }
        registerLoggedException(failure);
    }
}
|
Called after the context has been refreshed.
@param context the application context
@param args the application arguments
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 842
|
[
"exceptionReporters",
"failure"
] |
void
| true
| 5
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
future
|
/**
 * @return future that will complete with the request result (response or failure)
 */
abstract CompletableFuture<?> future();
|
@return Future that will complete with the request response or failure.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 908
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
createKeyStore
|
/**
 * Create a {@link KeyStore} populated from the given PEM store: the private key
 * plus its certificate chain when a key is present, otherwise the certificates
 * alone (trust-store style).
 *
 * @param name a short name used in the error message
 * @param pemSslStore the source PEM store, may be {@code null}
 * @return the populated key store, or {@code null} when {@code pemSslStore} is {@code null}
 * @throws IllegalStateException if the store cannot be created
 */
private static @Nullable KeyStore createKeyStore(String name, @Nullable PemSslStore pemSslStore) {
    if (pemSslStore == null) {
        return null;
    }
    try {
        List<X509Certificate> certificates = pemSslStore.certificates();
        Assert.state(!ObjectUtils.isEmpty(certificates), "Certificates must not be empty");
        String alias = getAlias(pemSslStore);
        KeyStore store = createKeyStore(pemSslStore.type());
        PrivateKey privateKey = pemSslStore.privateKey();
        if (privateKey != null) {
            // The certificates act as the chain for the private key entry.
            addPrivateKey(store, privateKey, alias, pemSslStore.password(), certificates);
        }
        else {
            // No key: store the certificates as trusted entries.
            addCertificates(store, certificates, alias);
        }
        return store;
    }
    catch (Exception ex) {
        // Wrap everything so callers get a single, descriptive exception type.
        throw new IllegalStateException("Unable to create %s store: %s".formatted(name, ex.getMessage()), ex);
    }
}
|
Create a new {@link PemSslStoreBundle} instance.
@param pemKeyStore the PEM key store
@param pemTrustStore the PEM trust store
@since 3.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStoreBundle.java
| 89
|
[
"name",
"pemSslStore"
] |
KeyStore
| true
| 4
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
nth
|
/**
 * Gets the element at index `n` of `array`. If `n` is negative, the nth
 * element from the end is returned.
 *
 * @param {Array} array The array to query.
 * @param {number} [n=0] The index of the element to return.
 * @returns {*} Returns the nth element of `array`.
 */
function nth(array, n) {
  if (!(array && array.length)) {
    return undefined;
  }
  return baseNth(array, toInteger(n));
}
|
Gets the element at index `n` of `array`. If `n` is negative, the nth
element from the end is returned.
@static
@memberOf _
@since 4.11.0
@category Array
@param {Array} array The array to query.
@param {number} [n=0] The index of the element to return.
@returns {*} Returns the nth element of `array`.
@example
var array = ['a', 'b', 'c', 'd'];
_.nth(array, 1);
// => 'b'
_.nth(array, -2);
// => 'c';
|
javascript
|
lodash.js
| 7,774
|
[
"array",
"n"
] | false
| 3
| 7.6
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
readBytes
|
/**
 * Process the bytes of the given input stream using the given processor,
 * stopping when the stream is exhausted or the processor signals completion.
 *
 * @param input the input stream to process
 * @param processor the object to which to pass the bytes of the stream
 * @return the result of the byte processor
 * @throws IOException if an I/O error occurs
 */
@CanIgnoreReturnValue // some processors won't return a useful result
@ParametricNullness
@J2ktIncompatible
public static <T extends @Nullable Object> T readBytes(
    InputStream input, ByteProcessor<T> processor) throws IOException {
  checkNotNull(input);
  checkNotNull(processor);
  byte[] buf = createBuffer();
  // Keep feeding chunks until EOF (-1) or the processor asks to stop (false).
  int read = input.read(buf);
  while (read != -1 && processor.processBytes(buf, 0, read)) {
    read = input.read(buf);
  }
  return processor.getResult();
}
|
Process the bytes of the given input stream using the given processor.
@param input the input stream to process
@param processor the object to which to pass the bytes of the stream
@return the result of the byte processor
@throws IOException if an I/O error occurs
@since 14.0
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 895
|
[
"input",
"processor"
] |
T
| true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
getAll
|
/**
 * Return all {@link ConfigurationProperties @ConfigurationProperties} beans contained
 * in the given application context. Both directly annotated beans, as well as beans
 * that have {@link ConfigurationProperties @ConfigurationProperties} annotated
 * factory methods are included.
 *
 * @param applicationContext the source application context
 * @return a map of all configuration properties beans keyed by the bean name
 */
private static Map<String, ConfigurationPropertiesBean> getAll(ConfigurableApplicationContext applicationContext) {
    Map<String, ConfigurationPropertiesBean> propertiesBeans = new LinkedHashMap<>();
    ConfigurableListableBeanFactory beanFactory = applicationContext.getBeanFactory();
    Iterator<String> beanNames = beanFactory.getBeanNamesIterator();
    while (beanNames.hasNext()) {
        String beanName = beanNames.next();
        if (isConfigurationPropertiesBean(beanFactory, beanName)) {
            try {
                Object bean = beanFactory.getBean(beanName);
                ConfigurationPropertiesBean propertiesBean = get(applicationContext, bean, beanName);
                if (propertiesBean != null) {
                    propertiesBeans.put(beanName, propertiesBean);
                }
            }
            catch (Exception ex) {
                // Ignore -- best-effort collection: skip beans that fail to resolve
            }
        }
    }
    return propertiesBeans;
}
|
Return all {@link ConfigurationProperties @ConfigurationProperties} beans contained
in the given application context. Both directly annotated beans, as well as beans
that have {@link ConfigurationProperties @ConfigurationProperties} annotated
factory methods are included.
@param applicationContext the source application context
@return a map of all configuration properties beans keyed by the bean name
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 152
|
[
"applicationContext"
] | true
| 5
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
equals
|
/**
 * Two instances are equal when both their {@code home} and {@code classloader}
 * fields are equal.
 */
@Override
public boolean equals(@Nullable Object obj) {
    if (!(obj instanceof LocationInfo)) {
        return false;
    }
    LocationInfo other = (LocationInfo) obj;
    return home.equals(other.home) && classloader.equals(other.classloader);
}
|
Recursively scan the given directory, adding resources for each file encountered. Symlinks
which have already been traversed in the current tree path will be skipped to eliminate
cycles; otherwise symlinks are traversed.
@param directory the root of the directory to scan
@param packagePrefix resource path prefix inside {@code classloader} for any files found
under {@code directory}
@param currentPath canonical files already visited in the current directory tree path, for
cycle elimination
|
java
|
android/guava/src/com/google/common/reflect/ClassPath.java
| 550
|
[
"obj"
] | true
| 3
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
serializeUnionOrIntersectionConstituents
|
/**
 * Serializes the constituents of a union or intersection type for decorator
 * metadata: reduces to a single common serialized constructor reference when
 * all constituents agree, otherwise falls back to the global `Object`
 * (or `void 0` when the constituents are elided entirely).
 *
 * @param types the constituent type nodes
 * @param isIntersection whether the constituents form an intersection (vs a union)
 */
function serializeUnionOrIntersectionConstituents(types: readonly TypeNode[], isIntersection: boolean): SerializedTypeNode {
    // Note when updating logic here also update `getEntityNameForDecoratorMetadata` in checker.ts so that aliases can be marked as referenced
    let serializedType: SerializedTypeNode | undefined;
    for (let typeNode of types) {
        typeNode = skipTypeParentheses(typeNode);
        if (typeNode.kind === SyntaxKind.NeverKeyword) {
            if (isIntersection) return factory.createVoidZero(); // Reduce to `never` in an intersection
            continue; // Elide `never` in a union
        }
        if (typeNode.kind === SyntaxKind.UnknownKeyword) {
            if (!isIntersection) return factory.createIdentifier("Object"); // Reduce to `unknown` in a union
            continue; // Elide `unknown` in an intersection
        }
        if (typeNode.kind === SyntaxKind.AnyKeyword) {
            return factory.createIdentifier("Object"); // Reduce to `any` in a union or intersection
        }
        if (!strictNullChecks && ((isLiteralTypeNode(typeNode) && typeNode.literal.kind === SyntaxKind.NullKeyword) || typeNode.kind === SyntaxKind.UndefinedKeyword)) {
            continue; // Elide null and undefined from unions for metadata, just like what we did prior to the implementation of strict null checks
        }
        const serializedConstituent = serializeTypeNode(typeNode);
        if (isIdentifier(serializedConstituent) && serializedConstituent.escapedText === "Object") {
            // One of the individual constituents is the global object, return immediately
            return serializedConstituent;
        }
        // If there exists a union constituent that is not a `void 0` expression, check if the common type is an identifier.
        // anything more complex and we will just default to Object
        if (serializedType) {
            // Different types
            if (!equateSerializedTypeNodes(serializedType, serializedConstituent)) {
                return factory.createIdentifier("Object");
            }
        }
        else {
            // Initialize the union type
            serializedType = serializedConstituent;
        }
    }
    // If we were able to find common type, use it
    return serializedType ?? (factory.createVoidZero()); // Fallback is only hit if all union constituents are null/undefined/never
}
|
Serializes a type node for use with decorator type metadata.
Types are serialized in the following fashion:
- Void types point to "undefined" (e.g. "void 0")
- Function and Constructor types point to the global "Function" constructor.
- Interface types with a call or construct signature types point to the global
"Function" constructor.
- Array and Tuple types point to the global "Array" constructor.
- Type predicates and booleans point to the global "Boolean" constructor.
- String literal types and strings point to the global "String" constructor.
- Enum and number types point to the global "Number" constructor.
- Symbol types point to the global "Symbol" constructor.
- Type references to classes (or class-like variables) point to the constructor for the class.
- Anything else points to the global "Object" constructor.
@param node The type node to serialize.
|
typescript
|
src/compiler/transformers/typeSerializer.ts
| 402
|
[
"types",
"isIntersection"
] | true
| 15
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
clear
|
/**
 * Removes all elements, nulling out array slots so the previously queued
 * elements become eligible for garbage collection.
 */
@Override
public void clear() {
    int index = size;
    while (index > 0) {
        queue[--index] = null;
    }
    size = 0;
}
|
Returns an iterator over the elements contained in this collection, <i>in no particular
order</i>.
<p>The iterator is <i>fail-fast</i>: If the MinMaxPriorityQueue is modified at any time after
the iterator is created, in any way except through the iterator's own remove method, the
iterator will generally throw a {@link ConcurrentModificationException}. Thus, in the face of
concurrent modification, the iterator fails quickly and cleanly, rather than risking arbitrary,
non-deterministic behavior at an undetermined time in the future.
<p>Note that the fail-fast behavior of an iterator cannot be guaranteed as it is, generally
speaking, impossible to make any hard guarantees in the presence of unsynchronized concurrent
modification. Fail-fast iterators throw {@code ConcurrentModificationException} on a
best-effort basis. Therefore, it would be wrong to write a program that depended on this
exception for its correctness: <i>the fail-fast behavior of iterators should be used only to
detect bugs.</i>
@return an iterator over the elements contained in this collection
|
java
|
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
| 904
|
[] |
void
| true
| 2
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
byteToHex
|
/**
 * Converts a byte into hexadecimal digits using the default (little-endian, LSB0)
 * byte and bit ordering, writing them into a copy of {@code dstInit}.
 *
 * @param src the byte to convert
 * @param srcPos the position in {@code src}, in bits, from where to start the conversion
 * @param dstInit the initial value for the result String
 * @param dstPos the position in the result where to copy the hex digits
 * @param nHexs the number of hex digits to produce; must fit within the byte starting at {@code srcPos}
 * @return the resulting String
 * @throws IllegalArgumentException if {@code (nHexs - 1) * 4 + srcPos >= 8}
 * @throws StringIndexOutOfBoundsException if {@code dstInit.length() < dstPos}
 */
public static String byteToHex(final byte src, final int srcPos, final String dstInit, final int dstPos, final int nHexs) {
    if (0 == nHexs) {
        return dstInit;
    }
    // Each hex digit consumes 4 bits; the last digit's lowest bit must stay inside the byte.
    if ((nHexs - 1) * 4 + srcPos >= Byte.SIZE) {
        throw new IllegalArgumentException("(nHexs - 1) * 4 + srcPos >= 8");
    }
    final StringBuilder sb = new StringBuilder(dstInit);
    int append = sb.length();
    for (int i = 0; i < nHexs; i++) {
        final int shift = i * 4 + srcPos;
        // Extract the next nibble, least-significant bits first (LSB0 ordering).
        final int bits = 0xF & src >> shift;
        if (dstPos + i == append) {
            // Writing exactly at the current end: append and grow.
            ++append;
            sb.append(intToHexDigit(bits));
        } else {
            // Otherwise overwrite the existing character in place.
            sb.setCharAt(dstPos + i, intToHexDigit(bits));
        }
    }
    return sb.toString();
}
|
Converts a byte into an array of char using the default (little-endian, LSB0) byte and bit ordering.
@param src the byte to convert.
@param srcPos the position in {@code src}, in bits, from where to start the conversion.
@param dstInit the initial value for the result String.
@param dstPos the position in {@code dst} where to copy the result.
@param nHexs the number of chars to copy to {@code dst}, must be smaller or equal to the width of the input (from srcPos to MSB).
@return {@code dst}.
@throws IllegalArgumentException if {@code (nHexs - 1) * 4 + srcPos >= 8}.
@throws StringIndexOutOfBoundsException if {@code dst.init.length() < dstPos}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 524
|
[
"src",
"srcPos",
"dstInit",
"dstPos",
"nHexs"
] |
String
| true
| 5
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
download_file_from_github
|
def download_file_from_github(
    reference: str, path: str, output_file: Path, github_token: str | None = None, timeout: int = 60
) -> bool:
    """
    Downloads a file from the GitHub repository of Apache Airflow using the GitHub API.

    In case of any error different from 404, it will exit the process with error code 1.

    :param reference: tag to download from
    :param path: path of the file relative to the repository root
    :param output_file: Path where the file should be downloaded
    :param github_token: GitHub token to use for authentication
    :param timeout: timeout in seconds for the download request, default is 60 seconds
    :return: whether the file was successfully downloaded (False if the file is missing)
    """
    import requests
    url = f"https://api.github.com/repos/apache/airflow/contents/{path}?ref={reference}"
    get_console().print(f"[info]Downloading {url} to {output_file}")
    if not get_dry_run():
        # The "raw" media type makes the contents API return the file body directly
        # instead of a JSON envelope with base64-encoded content.
        headers = {"Accept": "application/vnd.github.v3.raw"}
        if github_token:
            headers["Authorization"] = f"Bearer {github_token}"
            headers["X-GitHub-Api-Version"] = "2022-11-28"
        try:
            response = requests.get(url, headers=headers, timeout=timeout)
            log_github_rate_limit_error(response)
            if response.status_code == 403:
                get_console().print(
                    f"[error]Access denied to {url}. This may be caused by:\n"
                    f" 1. Network issues or VPN settings\n"
                    f" 2. GitHub API rate limiting\n"
                    f" 3. Invalid or missing GitHub token"
                )
                sys.exit(1)
            if response.status_code == 404:
                # A missing file is an expected, recoverable outcome -- do not abort.
                get_console().print(f"[warning]The {url} has not been found. Skipping")
                return False
            if response.status_code != 200:
                get_console().print(
                    f"[error]{url} could not be downloaded. Status code {response.status_code}"
                )
                sys.exit(1)
            output_file.write_bytes(response.content)
        except requests.Timeout:
            get_console().print(f"[error]The request to {url} timed out after {timeout} seconds.")
            sys.exit(1)
    get_console().print(f"[success]Downloaded {url} to {output_file}")
    return True
|
Downloads a file from the GitHub repository of Apache Airflow using the GitHub API.
In case of any error different from 404, it will exit the process with error code 1.
:param reference: tag to download from
:param path: path of the file relative to the repository root
:param output_file: Path where the file should be downloaded
:param github_token: GitHub token to use for authentication
:param timeout: timeout in seconds for the download request, default is 60 seconds
:return: whether the file was successfully downloaded (False if the file is missing)
|
python
|
dev/breeze/src/airflow_breeze/utils/github.py
| 85
|
[
"reference",
"path",
"output_file",
"github_token",
"timeout"
] |
bool
| true
| 6
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
clear_orphaned_import_errors
|
def clear_orphaned_import_errors(
    self, bundle_name: str, observed_filelocs: set[str], session: Session = NEW_SESSION
):
    """
    Clear import errors for files that no longer exist.

    :param bundle_name: name of the bundle whose import errors are inspected
    :param observed_filelocs: file locations that still exist; errors recorded for
        any other file in the bundle are deleted
    :param session: session for ORM operations
    """
    self.log.debug("Removing old import errors")
    try:
        # Only the filename column is needed to decide whether to delete.
        errors = session.scalars(
            select(ParseImportError)
            .where(ParseImportError.bundle_name == bundle_name)
            .options(load_only(ParseImportError.filename))
        )
        for error in errors:
            if error.filename not in observed_filelocs:
                session.delete(error)
    except Exception:
        # Best-effort cleanup: log and continue rather than break the caller.
        self.log.exception("Error removing old import errors")
|
Clear import errors for files that no longer exist.
:param session: session for ORM operations
|
python
|
airflow-core/src/airflow/dag_processing/manager.py
| 666
|
[
"self",
"bundle_name",
"observed_filelocs",
"session"
] | true
| 3
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
refresh_from_task
|
def refresh_from_task(self, task: Operator, pool_override: str | None = None) -> None:
    """
    Copy common attributes from the given task.

    :param task: The task object to copy from
    :param pool_override: Use the pool_override instead of task's pool
    """
    self.task = task
    self.queue = task.queue
    self.pool = pool_override or task.pool
    self.pool_slots = task.pool_slots
    with contextlib.suppress(Exception):
        # This method is called from the different places, and sometimes the TI is not fully initialized
        self.priority_weight = self.task.weight_rule.get_weight(self)
    self.run_as_user = task.run_as_user
    # Do not set max_tries to task.retries here because max_tries is a cumulative
    # value that needs to be stored in the db.
    self.executor = task.executor
    self.executor_config = task.executor_config
    self.operator = task.task_type
    # operator_name is optional and may not be a plain string on every operator.
    op_name = getattr(task, "operator_name", None)
    self.custom_operator_name = op_name if isinstance(op_name, str) else ""
    # Re-apply cluster policy here so that task default do not overload previous data
    task_instance_mutation_hook(self)
|
Copy common attributes from the given task.
:param task: The task object to copy from
:param pool_override: Use the pool_override instead of task's pool
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 727
|
[
"self",
"task",
"pool_override"
] |
None
| true
| 3
| 7.2
|
apache/airflow
| 43,597
|
sphinx
| false
|
hexRingPosToH3
|
/**
 * Returns the neighbor index at the given ring position, accepting and
 * producing the string form of H3 indexes.
 *
 * @param h3Address origin index in string form
 * @param ringPos position of the neighbour index
 * @return the neighbour at the given position, in string form
 */
public static String hexRingPosToH3(String h3Address, int ringPos) {
    // Convert to the numeric form, delegate to the long-based overload,
    // then convert the result back to string form.
    final long origin = stringToH3(h3Address);
    final long neighbor = hexRingPosToH3(origin, ringPos);
    return h3ToString(neighbor);
}
|
Returns the neighbor index at the given position.
@param h3Address Origin index
@param ringPos position of the neighbour index
@return the actual neighbour at the given position
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 416
|
[
"h3Address",
"ringPos"
] |
String
| true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
topicIdValues
|
/**
 * Use when {@link Admin#deleteTopics(TopicCollection, DeleteTopicsOptions)} used a TopicIdCollection.
 *
 * @return a map from topic IDs to futures which can be used to check the status of
 *         individual deletions if the deleteTopics request used topic IDs
 *         (may be {@code null} otherwise -- depends on how the field is populated elsewhere)
 */
public Map<Uuid, KafkaFuture<Void>> topicIdValues() {
    return topicIdFutures;
}
|
Use when {@link Admin#deleteTopics(TopicCollection, DeleteTopicsOptions)} used a TopicIdCollection
@return a map from topic IDs to futures which can be used to check the status of
individual deletions if the deleteTopics request used topic IDs. Otherwise return null.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java
| 56
|
[] | true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getAspectClassLoader
|
/**
 * Expose the bean ClassLoader of the underlying BeanFactory when it is a
 * {@link ConfigurableBeanFactory}, falling back to the default ClassLoader.
 */
@Override
public @Nullable ClassLoader getAspectClassLoader() {
    if (this.beanFactory instanceof ConfigurableBeanFactory cbf) {
        return cbf.getBeanClassLoader();
    }
    return ClassUtils.getDefaultClassLoader();
}
|
Create a BeanFactoryAspectInstanceFactory, providing a type that AspectJ should
introspect to create AJType metadata. Use if the BeanFactory may consider the type
to be a subclass (as when using CGLIB), and the information should relate to a superclass.
@param beanFactory the BeanFactory to obtain instance(s) from
@param name the name of the bean
@param type the type that should be introspected by AspectJ
({@code null} indicates resolution through {@link BeanFactory#getType} via the bean name)
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/BeanFactoryAspectInstanceFactory.java
| 95
|
[] |
ClassLoader
| true
| 2
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
visitParenthesizedExpression
|
function visitParenthesizedExpression(node: ParenthesizedExpression): Expression {
const innerExpression = skipOuterExpressions(node.expression, ~(OuterExpressionKinds.Assertions | OuterExpressionKinds.ExpressionsWithTypeArguments));
if (isAssertionExpression(innerExpression) || isSatisfiesExpression(innerExpression)) {
// Make sure we consider all nested cast expressions, e.g.:
// (<any><number><any>-A).x;
const expression = visitNode(node.expression, visitor, isExpression);
Debug.assert(expression);
// We have an expression of the form: (<Type>SubExpr). Emitting this as (SubExpr)
// is really not desirable. We would like to emit the subexpression as-is. Omitting
// the parentheses, however, could cause change in the semantics of the generated
// code if the casted expression has a lower precedence than the rest of the
// expression.
//
// To preserve comments, we return a "PartiallyEmittedExpression" here which will
// preserve the position information of the original expression.
//
// Due to the auto-parenthesization rules used by the visitor and factory functions
// we can safely elide the parentheses here, as a new synthetic
// ParenthesizedExpression will be inserted if we remove parentheses too
// aggressively.
//
// If there are leading comments on the expression itself, the emitter will handle ASI
// for return, throw, and yield by re-introducing parenthesis during emit on an as-need
// basis.
return factory.createPartiallyEmittedExpression(expression, node);
}
return visitEachChild(node, visitor, context);
}
|
Determines whether to emit an accessor declaration. We should not emit the
declaration if it does not have a body and is abstract.
@param node The declaration node.
|
typescript
|
src/compiler/transformers/ts.ts
| 1,687
|
[
"node"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getAllDecoratorsOfMethod
|
function getAllDecoratorsOfMethod(method: MethodDeclaration | AccessorDeclaration, useLegacyDecorators: boolean): AllDecorators | undefined {
if (!method.body) {
return undefined;
}
const decorators = getDecorators(method);
const parameters = useLegacyDecorators ? getDecoratorsOfParameters(method) : undefined;
if (!some(decorators) && !some(parameters)) {
return undefined;
}
return { decorators, parameters };
}
|
Gets an AllDecorators object containing the decorators for the method and its parameters.
@param method The class method member.
|
typescript
|
src/compiler/transformers/utilities.ts
| 762
|
[
"method",
"useLegacyDecorators"
] | true
| 5
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
dot
|
def dot(self, other: AnyArrayLike | DataFrame) -> Series | np.ndarray:
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each columns of a DataFrame, or the Series and
each columns of an array.
It can also be called using `self @ other`.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
Return the dot product of the Series and other if other is a
Series, the Series of the dot product of Series and each rows of
other if other is a DataFrame or a numpy.ndarray between the Series
and each columns of the numpy array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
The Series and other has to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
"""
if isinstance(other, (Series, ABCDataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(index=common)
right = other.reindex(index=common)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, ABCDataFrame):
common_type = find_common_type([self.dtypes] + list(other.dtypes))
return self._constructor(
np.dot(lvals, rvals), index=other.columns, copy=False, dtype=common_type
).__finalize__(self, method="dot")
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
|
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
one, or the Series and each columns of a DataFrame, or the Series and
each columns of an array.
It can also be called using `self @ other`.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
Return the dot product of the Series and other if other is a
Series, the Series of the dot product of Series and each rows of
other if other is a DataFrame or a numpy.ndarray between the Series
and each columns of the numpy array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
The Series and other has to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
|
python
|
pandas/core/series.py
| 2,953
|
[
"self",
"other"
] |
Series | np.ndarray
| true
| 10
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
splitPath
|
function splitPath(path: string[]): [parentPath: string[], fieldName: string] {
const selectionPath = [...path]
const fieldName = selectionPath.pop()
if (!fieldName) {
throw new Error('unexpected empty path')
}
return [selectionPath, fieldName]
}
|
Given the validation error and arguments rendering tree, applies corresponding
formatting to an error tree and adds all relevant messages.
@param error
@param args
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyValidationError.ts
| 625
|
[
"path"
] | true
| 2
| 6.72
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
// "[]:12345" requires 8 extra bytes.
StringBuilder builder = new StringBuilder(host.length() + 8);
if (host.indexOf(':') >= 0) {
builder.append('[').append(host).append(']');
} else {
builder.append(host);
}
if (hasPort()) {
builder.append(':').append(port);
}
return builder.toString();
}
|
Rebuild the host:port string, including brackets if necessary.
|
java
|
android/guava/src/com/google/common/net/HostAndPort.java
| 295
|
[] |
String
| true
| 3
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
scanJsDocToken
|
function scanJsDocToken(): JSDocSyntaxKind {
fullStartPos = tokenStart = pos;
tokenFlags = TokenFlags.None;
if (pos >= end) {
return token = SyntaxKind.EndOfFileToken;
}
const ch = codePointUnchecked(pos);
pos += charSize(ch);
switch (ch) {
case CharacterCodes.tab:
case CharacterCodes.verticalTab:
case CharacterCodes.formFeed:
case CharacterCodes.space:
while (pos < end && isWhiteSpaceSingleLine(charCodeUnchecked(pos))) {
pos++;
}
return token = SyntaxKind.WhitespaceTrivia;
case CharacterCodes.at:
return token = SyntaxKind.AtToken;
case CharacterCodes.carriageReturn:
if (charCodeUnchecked(pos) === CharacterCodes.lineFeed) {
pos++;
}
// falls through
case CharacterCodes.lineFeed:
tokenFlags |= TokenFlags.PrecedingLineBreak;
return token = SyntaxKind.NewLineTrivia;
case CharacterCodes.asterisk:
return token = SyntaxKind.AsteriskToken;
case CharacterCodes.openBrace:
return token = SyntaxKind.OpenBraceToken;
case CharacterCodes.closeBrace:
return token = SyntaxKind.CloseBraceToken;
case CharacterCodes.openBracket:
return token = SyntaxKind.OpenBracketToken;
case CharacterCodes.closeBracket:
return token = SyntaxKind.CloseBracketToken;
case CharacterCodes.openParen:
return token = SyntaxKind.OpenParenToken;
case CharacterCodes.closeParen:
return token = SyntaxKind.CloseParenToken;
case CharacterCodes.lessThan:
return token = SyntaxKind.LessThanToken;
case CharacterCodes.greaterThan:
return token = SyntaxKind.GreaterThanToken;
case CharacterCodes.equals:
return token = SyntaxKind.EqualsToken;
case CharacterCodes.comma:
return token = SyntaxKind.CommaToken;
case CharacterCodes.dot:
return token = SyntaxKind.DotToken;
case CharacterCodes.backtick:
return token = SyntaxKind.BacktickToken;
case CharacterCodes.hash:
return token = SyntaxKind.HashToken;
case CharacterCodes.backslash:
pos--;
const extendedCookedChar = peekExtendedUnicodeEscape();
if (extendedCookedChar >= 0 && isIdentifierStart(extendedCookedChar, languageVersion)) {
tokenValue = scanExtendedUnicodeEscape(/*shouldEmitInvalidEscapeError*/ true) + scanIdentifierParts();
return token = getIdentifierToken();
}
const cookedChar = peekUnicodeEscape();
if (cookedChar >= 0 && isIdentifierStart(cookedChar, languageVersion)) {
pos += 6;
tokenFlags |= TokenFlags.UnicodeEscape;
tokenValue = String.fromCharCode(cookedChar) + scanIdentifierParts();
return token = getIdentifierToken();
}
pos++;
return token = SyntaxKind.Unknown;
}
if (isIdentifierStart(ch, languageVersion)) {
let char = ch;
while (pos < end && isIdentifierPart(char = codePointUnchecked(pos), languageVersion) || char === CharacterCodes.minus) pos += charSize(char);
tokenValue = text.substring(tokenStart, pos);
if (char === CharacterCodes.backslash) {
tokenValue += scanIdentifierParts();
}
return token = getIdentifierToken();
}
else {
return token = SyntaxKind.Unknown;
}
}
|
Unconditionally back up and scan a template expression portion.
|
typescript
|
src/compiler/scanner.ts
| 3,841
|
[] | true
| 15
| 6.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getAsText
|
@Override
public String getAsText() {
if (Boolean.TRUE.equals(getValue())) {
return (this.trueString != null ? this.trueString : VALUE_TRUE);
}
else if (Boolean.FALSE.equals(getValue())) {
return (this.falseString != null ? this.falseString : VALUE_FALSE);
}
else {
return "";
}
}
|
Create a new CustomBooleanEditor instance,
with configurable String values for true and false.
<p>The "allowEmpty" parameter states if an empty String should
be allowed for parsing, i.e. get interpreted as null value.
Else, an IllegalArgumentException gets thrown in that case.
@param trueString the String value that represents true:
for example, "true" (VALUE_TRUE), "on" (VALUE_ON),
"yes" (VALUE_YES) or some custom value
@param falseString the String value that represents false:
for example, "false" (VALUE_FALSE), "off" (VALUE_OFF),
"no" (VALUE_NO) or some custom value
@param allowEmpty if empty strings should be allowed
@see #VALUE_TRUE
@see #VALUE_FALSE
@see #VALUE_ON
@see #VALUE_OFF
@see #VALUE_YES
@see #VALUE_NO
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomBooleanEditor.java
| 157
|
[] |
String
| true
| 5
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
is_numeric_dtype
|
def is_numeric_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of a numeric dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a numeric dtype.
See Also
--------
api.types.is_integer_dtype: Check whether the provided array or dtype
is of an integer dtype.
api.types.is_unsigned_integer_dtype: Check whether the provided array
or dtype is of an unsigned integer dtype.
api.types.is_signed_integer_dtype: Check whether the provided array
or dtype is of a signed integer dtype.
Examples
--------
>>> from pandas.api.types import is_numeric_dtype
>>> is_numeric_dtype(str)
False
>>> is_numeric_dtype(int)
True
>>> is_numeric_dtype(float)
True
>>> is_numeric_dtype(np.uint64)
True
>>> is_numeric_dtype(np.datetime64)
False
>>> is_numeric_dtype(np.timedelta64)
False
>>> is_numeric_dtype(np.array(["a", "b"]))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
>>> is_numeric_dtype(pd.Index([1, 2.0]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
"""
return _is_dtype_type(
arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)
) or _is_dtype(
arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric
)
|
Check whether the provided array or dtype is of a numeric dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a numeric dtype.
See Also
--------
api.types.is_integer_dtype: Check whether the provided array or dtype
is of an integer dtype.
api.types.is_unsigned_integer_dtype: Check whether the provided array
or dtype is of an unsigned integer dtype.
api.types.is_signed_integer_dtype: Check whether the provided array
or dtype is of a signed integer dtype.
Examples
--------
>>> from pandas.api.types import is_numeric_dtype
>>> is_numeric_dtype(str)
False
>>> is_numeric_dtype(int)
True
>>> is_numeric_dtype(float)
True
>>> is_numeric_dtype(np.uint64)
True
>>> is_numeric_dtype(np.datetime64)
False
>>> is_numeric_dtype(np.timedelta64)
False
>>> is_numeric_dtype(np.array(["a", "b"]))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
>>> is_numeric_dtype(pd.Index([1, 2.0]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
|
python
|
pandas/core/dtypes/common.py
| 1,246
|
[
"arr_or_dtype"
] |
bool
| true
| 3
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_build_provider_distributions
|
def _build_provider_distributions(
provider_id: str,
package_version_suffix: str,
distribution_format: str,
skip_tag_check: bool,
skip_deleting_generated_files: bool,
) -> bool:
"""
Builds provider distribution.
:param provider_id: id of the provider package
:param package_version_suffix: suffix to append to the package version
:param distribution_format: format of the distribution to build (wheel or sdist)
:param skip_tag_check: whether to skip tag check
:param skip_deleting_generated_files: whether to skip deleting generated files
:return: True if package was built, False if it was skipped.
"""
if not skip_tag_check:
should_skip, package_version_suffix = should_skip_the_package(provider_id, package_version_suffix)
if should_skip:
return False
get_console().print()
with ci_group(f"Preparing provider package [special]{provider_id}"):
get_console().print()
get_console().print(
f"[info]Provider {provider_id} building in-place with suffix: '{package_version_suffix}'."
)
with apply_version_suffix_to_provider_pyproject_toml(
provider_id, package_version_suffix
) as pyproject_toml_file:
provider_root_dir = pyproject_toml_file.parent
cleanup_build_remnants(provider_root_dir)
build_provider_distribution(
provider_id=provider_id,
distribution_format=distribution_format,
target_provider_root_sources_path=provider_root_dir,
)
move_built_distributions_and_cleanup(
provider_root_dir,
AIRFLOW_DIST_PATH,
skip_cleanup=skip_deleting_generated_files,
delete_only_build_and_dist_folders=True,
)
return True
|
Builds provider distribution.
:param provider_id: id of the provider package
:param package_version_suffix: suffix to append to the package version
:param distribution_format: format of the distribution to build (wheel or sdist)
:param skip_tag_check: whether to skip tag check
:param skip_deleting_generated_files: whether to skip deleting generated files
:return: True if package was built, False if it was skipped.
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 1,021
|
[
"provider_id",
"package_version_suffix",
"distribution_format",
"skip_tag_check",
"skip_deleting_generated_files"
] |
bool
| true
| 3
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
symlink
|
function symlink(target, path, type, callback) {
if (callback === undefined) {
callback = makeCallback(type);
type = undefined;
} else {
validateOneOf(type, 'type', ['dir', 'file', 'junction', null, undefined]);
}
if (permission.isEnabled()) {
// The permission model's security guarantees fall apart in the presence of
// relative symbolic links. Thus, we have to prevent their creation.
if (BufferIsBuffer(target)) {
if (!isAbsolute(BufferToString(target))) {
callback(new ERR_ACCESS_DENIED('relative symbolic link target'));
return;
}
} else if (typeof target !== 'string' || !isAbsolute(toPathIfFileURL(target))) {
callback(new ERR_ACCESS_DENIED('relative symbolic link target'));
return;
}
}
target = getValidatedPath(target, 'target');
path = getValidatedPath(path);
if (isWindows && type == null) {
let absoluteTarget;
try {
// Symlinks targets can be relative to the newly created path.
// Calculate absolute file name of the symlink target, and check
// if it is a directory. Ignore resolve error to keep symlink
// errors consistent between platforms if invalid path is
// provided.
absoluteTarget = pathModule.resolve(path, '..', target);
} catch {
// Continue regardless of error.
}
if (absoluteTarget !== undefined) {
stat(absoluteTarget, (err, stat) => {
const resolvedType = !err && stat.isDirectory() ? 'dir' : 'file';
const resolvedFlags = stringToSymlinkType(resolvedType);
const destination = preprocessSymlinkDestination(target,
resolvedType,
path);
const req = new FSReqCallback();
req.oncomplete = callback;
binding.symlink(
destination,
path,
resolvedFlags,
req,
);
});
return;
}
}
const destination = preprocessSymlinkDestination(target, type, path);
const flags = stringToSymlinkType(type);
const req = new FSReqCallback();
req.oncomplete = callback;
binding.symlink(destination, path, flags, req);
}
|
Creates the link called `path` pointing to `target`.
@param {string | Buffer | URL} target
@param {string | Buffer | URL} path
@param {string | null} [type]
@param {(err?: Error) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 1,758
|
[
"target",
"path",
"type",
"callback"
] | false
| 15
| 6.16
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
destroy
|
@Override
public void destroy() {
}
|
Return the description for the given request. By default this method will return a
description based on the request {@code servletPath} and {@code pathInfo}.
@param request the source request
@return the description
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/support/ErrorPageFilter.java
| 293
|
[] |
void
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
nunique_ints
|
def nunique_ints(values: ArrayLike) -> int:
"""
Return the number of unique values for integer array-likes.
Significantly faster than pandas.unique for long enough sequences.
No checks are done to ensure input is integral.
Parameters
----------
values : 1d array-like
Returns
-------
int : The number of unique values in ``values``
"""
if len(values) == 0:
return 0
values = _ensure_data(values)
# bincount requires intp
result = (np.bincount(values.ravel().astype("intp")) != 0).sum()
return result
|
Return the number of unique values for integer array-likes.
Significantly faster than pandas.unique for long enough sequences.
No checks are done to ensure input is integral.
Parameters
----------
values : 1d array-like
Returns
-------
int : The number of unique values in ``values``
|
python
|
pandas/core/algorithms.py
| 440
|
[
"values"
] |
int
| true
| 2
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
determineTargetClass
|
public static @Nullable Class<?> determineTargetClass(
ConfigurableListableBeanFactory beanFactory, @Nullable String beanName) {
if (beanName == null) {
return null;
}
if (beanFactory.containsBeanDefinition(beanName)) {
BeanDefinition bd = beanFactory.getMergedBeanDefinition(beanName);
Class<?> targetClass = (Class<?>) bd.getAttribute(ORIGINAL_TARGET_CLASS_ATTRIBUTE);
if (targetClass != null) {
return targetClass;
}
}
return beanFactory.getType(beanName);
}
|
Determine the original target class for the specified bean, if possible,
otherwise falling back to a regular {@code getType} lookup.
@param beanFactory the containing ConfigurableListableBeanFactory
@param beanName the name of the bean
@return the original target class as stored in the bean definition, if any
@since 4.2.3
@see org.springframework.beans.factory.BeanFactory#getType(String)
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AutoProxyUtils.java
| 160
|
[
"beanFactory",
"beanName"
] | true
| 4
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
moveResourceAttributes
|
private static void moveResourceAttributes(Map<String, Object> attributes, Map<String, Object> resourceAttributes) {
Set<String> ecsResourceFields = EcsOTelResourceAttributes.LATEST;
Iterator<Map.Entry<String, Object>> attributeIterator = attributes.entrySet().iterator();
while (attributeIterator.hasNext()) {
Map.Entry<String, Object> entry = attributeIterator.next();
if (ecsResourceFields.contains(entry.getKey())) {
resourceAttributes.put(entry.getKey(), entry.getValue());
attributeIterator.remove();
}
}
}
|
Renames specific ECS keys in the given document to their OpenTelemetry-compatible counterparts using logic compatible with the
{@link org.elasticsearch.ingest.IngestPipelineFieldAccessPattern#FLEXIBLE} access pattern and based on the {@code RENAME_KEYS} map.
<p>This method performs the following operations:
<ul>
<li>For each key in the {@code RENAME_KEYS} map, it checks if a corresponding field exists in the document.</li>
<li>If the field exists, it removes it from the document and adds a new field with the corresponding name from the
{@code RENAME_KEYS} map and the same value. If a field's parent objects do not exist, it will progressively build
each parent object instead of concatenating the field names together.</li>
<li>If the key is nested (contains dots), it recursively removes empty parent fields after renaming.</li>
</ul>
@param document the document to process
|
java
|
modules/ingest-otel/src/main/java/org/elasticsearch/ingest/otel/NormalizeForStreamProcessor.java
| 389
|
[
"attributes",
"resourceAttributes"
] |
void
| true
| 3
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
substringBefore
|
public static String substringBefore(final String str, final int find) {
if (isEmpty(str)) {
return str;
}
final int pos = str.indexOf(find);
if (pos == INDEX_NOT_FOUND) {
return str;
}
return str.substring(0, pos);
}
|
Gets the substring before the first occurrence of a separator. The separator is not returned.
<p>
A {@code null} string input will return {@code null}. An empty ("") string input will return the empty string.
</p>
<p>
If nothing is found, the string input is returned.
</p>
<pre>
StringUtils.substringBefore(null, *) = null
StringUtils.substringBefore("", *) = ""
StringUtils.substringBefore("abc", 'a') = ""
StringUtils.substringBefore("abcba", 'b') = "a"
StringUtils.substringBefore("abc", 'c') = "ab"
StringUtils.substringBefore("abc", 'd') = "abc"
</pre>
@param str the String to get a substring from, may be null.
@param find the character (Unicode code point) to find.
@return the substring before the first occurrence of the specified character, {@code null} if null String input.
@since 3.12.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,363
|
[
"str",
"find"
] |
String
| true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
collectPropertiesToMerge
|
protected List<PropertiesHolder> collectPropertiesToMerge(Locale locale) {
String[] basenames = StringUtils.toStringArray(getBasenameSet());
List<PropertiesHolder> holders = new ArrayList<>(basenames.length);
for (int i = basenames.length - 1; i >= 0; i--) {
List<String> filenames = calculateAllFilenames(basenames[i], locale);
for (int j = filenames.size() - 1; j >= 0; j--) {
String filename = filenames.get(j);
PropertiesHolder propHolder = getProperties(filename);
if (propHolder.getProperties() != null) {
holders.add(propHolder);
}
}
}
return holders;
}
|
Determine the properties to merge based on the specified basenames.
@param locale the locale
@return the list of properties holders
@since 6.1.4
@see #getBasenameSet()
@see #calculateAllFilenames
@see #mergeProperties
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 277
|
[
"locale"
] | true
| 4
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_validate_skipfooter_arg
|
def _validate_skipfooter_arg(skipfooter: int) -> int:
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
# Incompatible return value type (got "Union[int, integer[Any]]", expected "int")
return skipfooter # type: ignore[return-value]
|
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
|
python
|
pandas/io/parsers/python_parser.py
| 1,529
|
[
"skipfooter"
] |
int
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
inclusiveBetween
|
public static <T> void inclusiveBetween(final T start, final T end, final Comparable<T> value, final String message, final Object... values) {
// TODO when breaking BC, consider returning value
if (value.compareTo(start) < 0 || value.compareTo(end) > 0) {
throw new IllegalArgumentException(getMessage(message, values));
}
}
|
Validate that the specified argument object fall between the two
inclusive values specified; otherwise, throws an exception with the
specified message.
<pre>Validate.inclusiveBetween(0, 2, 1, "Not in boundaries");</pre>
@param <T> the type of the argument object.
@param start the inclusive start value, not null.
@param end the inclusive end value, not null.
@param value the object to validate, not null.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@throws IllegalArgumentException if the value falls outside the boundaries.
@see #inclusiveBetween(Object, Object, Comparable)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 377
|
[
"start",
"end",
"value",
"message"
] |
void
| true
| 3
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
handleResponse
|
ApiResult<K, V> handleResponse(Node broker, Set<K> keys, AbstractResponse response);
|
Callback that is invoked when a request returns successfully.
The handler should parse the response, check for errors, and return a
result which indicates which keys (if any) have either been completed or
failed with an unrecoverable error.
It is also possible that the response indicates an incorrect target brokerId
(e.g. in the case of a NotLeader error when the request is bound for a partition
leader). In this case the key will be "unmapped" from the target brokerId
and lookup will be retried.
Note that keys which received a retriable error should be left out of the
result. They will be retried automatically.
@param broker the broker that the associated request was sent to
@param keys the set of keys from the associated request
@param response the response received from the broker
@return result indicating key completion, failure, and unmapping
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java
| 72
|
[
"broker",
"keys",
"response"
] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
convertToDashedElement
|
private CharSequence convertToDashedElement(CharSequence element) {
return convertElement(element, true, ElementsParser::isValidChar);
}
|
Return an element in the name in the given form.
@param elementIndex the element index
@param form the form to return
@return the last element
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 180
|
[
"element"
] |
CharSequence
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
enterIfInterruptibly
|
@SuppressWarnings("GoodTime") // should accept a java.time.Duration
public boolean enterIfInterruptibly(Guard guard, long time, TimeUnit unit)
throws InterruptedException {
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
ReentrantLock lock = this.lock;
if (!lock.tryLock(time, unit)) {
return false;
}
boolean satisfied = false;
try {
return satisfied = guard.isSatisfied();
} finally {
if (!satisfied) {
lock.unlock();
}
}
}
|
Enters this monitor if the guard is satisfied. Blocks at most the given time acquiring the
lock, but does not wait for the guard to be satisfied, and may be interrupted.
@return whether the monitor was entered, which guarantees that the guard is now satisfied
|
java
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
| 777
|
[
"guard",
"time",
"unit"
] | true
| 4
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
deleteShareGroups
|
default DeleteShareGroupsResult deleteShareGroups(Collection<String> groupIds) {
return deleteShareGroups(groupIds, new DeleteShareGroupsOptions());
}
|
Delete share groups from the cluster with the default options.
@param groupIds Collection of share group ids which are to be deleted.
@return The DeleteShareGroupsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 2,033
|
[
"groupIds"
] |
DeleteShareGroupsResult
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
write
|
@Override
public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
if ((offset < 0) || (length < 0) || (offset > srcs.length - length))
throw new IndexOutOfBoundsException();
int totalWritten = 0;
int i = offset;
while (i < offset + length) {
if (srcs[i].hasRemaining() || hasPendingWrites()) {
int written = write(srcs[i]);
if (written > 0) {
totalWritten += written;
}
}
if (!srcs[i].hasRemaining() && !hasPendingWrites()) {
i++;
} else {
// if we are unable to write the current buffer to socketChannel we should break,
// as we might have reached max socket send buffer size.
break;
}
}
return totalWritten;
}
|
Writes a sequence of bytes to this channel from the subsequence of the given buffers.
@param srcs The buffers from which bytes are to be retrieved
@param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length.
@param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset.
@return returns no.of bytes written , possibly zero.
@throws IOException If some other I/O error occurs
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 753
|
[
"srcs",
"offset",
"length"
] | true
| 10
| 8.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
rbf_kernel
|
def rbf_kernel(X, Y=None, gamma=None):
"""Compute the rbf (gaussian) kernel between X and Y.
.. code-block:: text
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel : ndarray of shape (n_samples_X, n_samples_Y)
The RBF kernel.
Examples
--------
>>> from sklearn.metrics.pairwise import rbf_kernel
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> rbf_kernel(X, Y)
array([[0.71, 0.51],
[0.51, 0.71]])
"""
xp, _ = get_namespace(X, Y)
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
# exponentiate K in-place when using numpy
K = _modify_in_place_if_numpy(xp, xp.exp, K, out=K)
return K
|
Compute the rbf (gaussian) kernel between X and Y.
.. code-block:: text
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, n_features)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=None
If None, defaults to 1.0 / n_features.
Returns
-------
kernel : ndarray of shape (n_samples_X, n_samples_Y)
The RBF kernel.
Examples
--------
>>> from sklearn.metrics.pairwise import rbf_kernel
>>> X = [[0, 0, 0], [1, 1, 1]]
>>> Y = [[1, 0, 0], [1, 1, 0]]
>>> rbf_kernel(X, Y)
array([[0.71, 0.51],
[0.51, 0.71]])
|
python
|
sklearn/metrics/pairwise.py
| 1,571
|
[
"X",
"Y",
"gamma"
] | false
| 2
| 7.84
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
load_dotenv
|
def load_dotenv(
path: str | os.PathLike[str] | None = None, load_defaults: bool = True
) -> bool:
"""Load "dotenv" files to set environment variables. A given path takes
precedence over ``.env``, which takes precedence over ``.flaskenv``. After
loading and combining these files, values are only set if the key is not
already set in ``os.environ``.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location.
:param load_defaults: Search for and load the default ``.flaskenv`` and
``.env`` files.
:return: ``True`` if at least one env var was loaded.
.. versionchanged:: 3.1
Added the ``load_defaults`` parameter. A given path takes precedence
over default files.
.. versionchanged:: 2.0
The current directory is not changed to the location of the
loaded file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
try:
import dotenv
except ImportError:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env files present. Install python-dotenv"
" to use them.",
fg="yellow",
err=True,
)
return False
data: dict[str, str | None] = {}
if load_defaults:
for default_name in (".flaskenv", ".env"):
if not (default_path := dotenv.find_dotenv(default_name, usecwd=True)):
continue
data |= dotenv.dotenv_values(default_path, encoding="utf-8")
if path is not None and os.path.isfile(path):
data |= dotenv.dotenv_values(path, encoding="utf-8")
for key, value in data.items():
if key in os.environ or value is None:
continue
os.environ[key] = value
return bool(data) # True if at least one env var was loaded.
|
Load "dotenv" files to set environment variables. A given path takes
precedence over ``.env``, which takes precedence over ``.flaskenv``. After
loading and combining these files, values are only set if the key is not
already set in ``os.environ``.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location.
:param load_defaults: Search for and load the default ``.flaskenv`` and
``.env`` files.
:return: ``True`` if at least one env var was loaded.
.. versionchanged:: 3.1
Added the ``load_defaults`` parameter. A given path takes precedence
over default files.
.. versionchanged:: 2.0
The current directory is not changed to the location of the
loaded file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
|
python
|
src/flask/cli.py
| 698
|
[
"path",
"load_defaults"
] |
bool
| true
| 12
| 8.08
|
pallets/flask
| 70,946
|
sphinx
| false
|
tryDrainReferenceQueues
|
void tryDrainReferenceQueues() {
if (tryLock()) {
try {
maybeDrainReferenceQueues();
} finally {
unlock();
}
}
}
|
Cleanup collected entries when the lock is available.
|
java
|
android/guava/src/com/google/common/collect/MapMakerInternalMap.java
| 1,362
|
[] |
void
| true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.