function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
_translate_train_sizes
|
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like of shape (n_ticks,)
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.floating):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError(
"train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples, n_max_required_samples)
)
train_sizes_abs = (train_sizes_abs * n_max_training_samples).astype(
dtype=int, copy=False
)
train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples)
else:
if (
n_min_required_samples <= 0
or n_max_required_samples > n_max_training_samples
):
raise ValueError(
"train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (
n_max_training_samples,
n_min_required_samples,
n_max_required_samples,
)
)
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn(
"Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than the size of "
"'train_sizes': %d instead of %d." % (train_sizes_abs.shape[0], n_ticks),
RuntimeWarning,
)
return train_sizes_abs
|
Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like of shape (n_ticks,)
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array of shape (n_unique_ticks,)
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
|
python
|
sklearn/model_selection/_validation.py
| 2,072
|
[
"train_sizes",
"n_max_training_samples"
] | false
| 8
| 7.44
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
onStartup
|
@Override
public void onStartup(ServletContext servletContext) throws ServletException {
servletContext.setAttribute(LoggingApplicationListener.REGISTER_SHUTDOWN_HOOK_PROPERTY, false);
// Logger initialization is deferred in case an ordered
// LogServletContextInitializer is being used
this.logger = LogFactory.getLog(getClass());
WebApplicationContext rootApplicationContext = createRootApplicationContext(servletContext);
if (rootApplicationContext != null) {
servletContext.addListener(new SpringBootContextLoaderListener(rootApplicationContext, servletContext));
}
else {
this.logger.debug("No ContextLoaderListener registered, as createRootApplicationContext() did not "
+ "return an application context");
}
}
|
Set if the {@link ErrorPageFilter} should be registered. Set to {@code false} if
error page mappings should be handled through the server and not Spring Boot.
@param registerErrorPageFilter if the {@link ErrorPageFilter} should be registered.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/support/SpringBootServletInitializer.java
| 103
|
[
"servletContext"
] |
void
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
process
|
private void process(final StreamsOnTasksAssignedCallbackCompletedEvent event) {
if (requestManagers.streamsMembershipManager.isEmpty()) {
log.warn("An internal error occurred; the Streams membership manager was not present, so the notification " +
"of the onTasksAssigned callback execution could not be sent");
return;
}
requestManagers.streamsMembershipManager.get().onTasksAssignedCallbackCompleted(event);
}
|
Process event indicating whether the AcknowledgeCommitCallbackHandler is configured by the user.
@param event Event containing a boolean to indicate if the callback handler is configured or not.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
| 696
|
[
"event"
] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
getChunk
|
static byte[] getChunk(InputStream is) throws IOException {
byte[] buf = new byte[MAX_CHUNK_SIZE];
int chunkSize = 0;
while (chunkSize < MAX_CHUNK_SIZE) {
int read = is.read(buf, chunkSize, MAX_CHUNK_SIZE - chunkSize);
if (read == -1) {
break;
}
chunkSize += read;
}
if (chunkSize < MAX_CHUNK_SIZE) {
buf = Arrays.copyOf(buf, chunkSize);
}
return buf;
}
|
This method fetches the database file for the given database from the passed-in source, then indexes that database
file into the .geoip_databases Elasticsearch index, deleting any old versions of the database from the index if they exist.
@param name The name of the database to be downloaded and indexed into an Elasticsearch index
@param checksum The checksum to compare to the computed checksum of the downloaded file
@param source The supplier of an InputStream that will actually download the file
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java
| 369
|
[
"is"
] | true
| 4
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
initApplicationEventMulticaster
|
protected void initApplicationEventMulticaster() {
ConfigurableListableBeanFactory beanFactory = getBeanFactory();
if (beanFactory.containsLocalBean(APPLICATION_EVENT_MULTICASTER_BEAN_NAME)) {
this.applicationEventMulticaster =
beanFactory.getBean(APPLICATION_EVENT_MULTICASTER_BEAN_NAME, ApplicationEventMulticaster.class);
if (logger.isTraceEnabled()) {
logger.trace("Using ApplicationEventMulticaster [" + this.applicationEventMulticaster + "]");
}
}
else {
this.applicationEventMulticaster = new SimpleApplicationEventMulticaster(beanFactory);
beanFactory.registerSingleton(APPLICATION_EVENT_MULTICASTER_BEAN_NAME, this.applicationEventMulticaster);
if (logger.isTraceEnabled()) {
logger.trace("No '" + APPLICATION_EVENT_MULTICASTER_BEAN_NAME + "' bean, using " +
"[" + this.applicationEventMulticaster.getClass().getSimpleName() + "]");
}
}
}
|
Initialize the {@link ApplicationEventMulticaster}.
<p>Uses {@link SimpleApplicationEventMulticaster} if none defined in the context.
@see #APPLICATION_EVENT_MULTICASTER_BEAN_NAME
@see org.springframework.context.event.SimpleApplicationEventMulticaster
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 852
|
[] |
void
| true
| 4
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parsePort
|
async function parsePort(host: string | undefined, strPort: string | undefined): Promise<number> {
if (strPort) {
let range: { start: number; end: number } | undefined;
if (strPort.match(/^\d+$/)) {
return parseInt(strPort, 10);
} else if (range = parseRange(strPort)) {
const port = await findFreePort(host, range.start, range.end);
if (port !== undefined) {
return port;
}
// Remote-SSH extension relies on this exact port error message, treat as an API
console.warn(`--port: Could not find free port in range: ${range.start} - ${range.end} (inclusive).`);
process.exit(1);
} else {
console.warn(`--port "${strPort}" is not a valid number or range. Ranges must be in the form 'from-to' with 'from' an integer larger than 0 and not larger than 'end'.`);
process.exit(1);
}
}
return 8000;
}
|
If `--port` is specified and describes a single port, connect to that port.
If `--port`describes a port range
then find a free port in that range. Throw error if no
free port available in range.
In absence of specified ports, connect to port 8000.
|
typescript
|
src/server-main.ts
| 170
|
[
"host",
"strPort"
] | true
| 7
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| true
|
|
removeAll
|
public static boolean[] removeAll(final boolean[] array, final int... indices) {
return (boolean[]) removeAll((Object) array, indices);
}
|
Removes the elements at the specified positions from the specified array. All remaining elements are shifted to the left.
<p>
This method returns a new array with the same elements of the input array except those at the specified positions. The component type of the returned
array is always the same as that of the input array.
</p>
<p>
If the input array is {@code null}, an IndexOutOfBoundsException will be thrown, because in that case no valid index can be specified.
</p>
<pre>
ArrayUtils.removeAll([true, false, true], 0, 2) = [false]
ArrayUtils.removeAll([true, false, true], 1, 2) = [true]
</pre>
@param array the array to remove the element from, may not be {@code null}.
@param indices the positions of the elements to be removed.
@return A new array containing the existing elements except those at the specified positions.
@throws IndexOutOfBoundsException if any index is out of range (index < 0 || index >= array.length), or if the array is {@code null}.
@since 3.0.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,964
|
[
"array"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
always
|
static PropertySourceOptions always(Options options) {
if (options == Options.NONE) {
return ALWAYS_NONE;
}
return new AlwaysPropertySourceOptions(options);
}
|
Create a new {@link PropertySourceOptions} instance that always returns the
same options regardless of the property source.
@param options the options to return
@return a new {@link PropertySourceOptions} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigData.java
| 143
|
[
"options"
] |
PropertySourceOptions
| true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
setCc
|
@Override
public void setCc(String... cc) throws MailParseException {
try {
this.helper.setCc(cc);
}
catch (MessagingException ex) {
throw new MailParseException(ex);
}
}
|
Return the JavaMail MimeMessage that this MimeMailMessage is based on.
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMailMessage.java
| 126
|
[] |
void
| true
| 2
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
transform
|
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set, using minibatches of size batch_size if X is
sparse.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Projection of X in the first principal components.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
... [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, n_components=2)
>>> ipca.transform(X) # doctest: +SKIP
"""
if sparse.issparse(X):
n_samples = X.shape[0]
output = []
for batch in gen_batches(
n_samples, self.batch_size_, min_batch_size=self.n_components or 0
):
output.append(super().transform(X[batch].toarray()))
return np.vstack(output)
else:
return super().transform(X)
|
Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set, using minibatches of size batch_size if X is
sparse.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Projection of X in the first principal components.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
... [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, n_components=2)
>>> ipca.transform(X) # doctest: +SKIP
|
python
|
sklearn/decomposition/_incremental_pca.py
| 378
|
[
"self",
"X"
] | false
| 5
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_weighted_cluster_center
|
def _weighted_cluster_center(self, X):
"""Calculate and store the centroids/medoids of each cluster.
This requires `X` to be a raw feature array, not precomputed
distances. Rather than return outputs directly, this helper method
instead stores them in the `self.{centroids, medoids}_` attributes.
The choice for which attributes are calculated and stored is mediated
by the value of `self.store_centers`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The feature array that the estimator was fit with.
"""
# Number of non-noise clusters
n_clusters = len(set(self.labels_) - {-1, -2})
mask = np.empty((X.shape[0],), dtype=np.bool_)
make_centroids = self.store_centers in ("centroid", "both")
make_medoids = self.store_centers in ("medoid", "both")
if make_centroids:
self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
if make_medoids:
self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
# Need to handle iteratively seen each cluster may have a different
# number of samples, hence we can't create a homogeneous 3D array.
for idx in range(n_clusters):
mask = self.labels_ == idx
data = X[mask]
strength = self.probabilities_[mask]
if make_centroids:
self.centroids_[idx] = np.average(data, weights=strength, axis=0)
if make_medoids:
# TODO: Implement weighted argmin PWD backend
dist_mat = pairwise_distances(
data, metric=self.metric, **self._metric_params
)
dist_mat = dist_mat * strength
medoid_index = np.argmin(dist_mat.sum(axis=1))
self.medoids_[idx] = data[medoid_index]
return
|
Calculate and store the centroids/medoids of each cluster.
This requires `X` to be a raw feature array, not precomputed
distances. Rather than return outputs directly, this helper method
instead stores them in the `self.{centroids, medoids}_` attributes.
The choice for which attributes are calculated and stored is mediated
by the value of `self.store_centers`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The feature array that the estimator was fit with.
|
python
|
sklearn/cluster/_hdbscan/hdbscan.py
| 923
|
[
"self",
"X"
] | false
| 6
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
createSystemModuleBody
|
function createSystemModuleBody(node: SourceFile, dependencyGroups: DependencyGroup[]) {
// Shape of the body in system modules:
//
// function (exports) {
// <list of local aliases for imports>
// <hoisted variable declarations>
// <hoisted function declarations>
// return {
// setters: [
// <list of setter function for imports>
// ],
// execute: function() {
// <module statements>
// }
// }
// <temp declarations>
// }
//
// i.e:
//
// import {x} from 'file1'
// var y = 1;
// export function foo() { return y + x(); }
// console.log(y);
//
// Will be transformed to:
//
// function(exports) {
// function foo() { return y + file_1.x(); }
// exports("foo", foo);
// var file_1, y;
// return {
// setters: [
// function(v) { file_1 = v }
// ],
// execute(): function() {
// y = 1;
// console.log(y);
// }
// };
// }
const statements: Statement[] = [];
// We start a new lexical environment in this function body, but *not* in the
// body of the execute function. This allows us to emit temporary declarations
// only in the outer module body and not in the inner one.
startLexicalEnvironment();
// Add any prologue directives.
const ensureUseStrict = getStrictOptionValue(compilerOptions, "alwaysStrict") || isExternalModule(currentSourceFile);
const statementOffset = factory.copyPrologue(node.statements, statements, ensureUseStrict, topLevelVisitor);
// var __moduleName = context_1 && context_1.id;
statements.push(
factory.createVariableStatement(
/*modifiers*/ undefined,
factory.createVariableDeclarationList([
factory.createVariableDeclaration(
"__moduleName",
/*exclamationToken*/ undefined,
/*type*/ undefined,
factory.createLogicalAnd(
contextObject,
factory.createPropertyAccessExpression(contextObject, "id"),
),
),
]),
),
);
// Visit the synthetic external helpers import declaration if present
visitNode(moduleInfo.externalHelpersImportDeclaration, topLevelVisitor, isStatement);
// Visit the statements of the source file, emitting any transformations into
// the `executeStatements` array. We do this *before* we fill the `setters` array
// as we both emit transformations as well as aggregate some data used when creating
// setters. This allows us to reduce the number of times we need to loop through the
// statements of the source file.
const executeStatements = visitNodes(node.statements, topLevelVisitor, isStatement, statementOffset);
// Emit early exports for function declarations.
addRange(statements, hoistedStatements);
// We emit hoisted variables early to align roughly with our previous emit output.
// Two key differences in this approach are:
// - Temporary variables will appear at the top rather than at the bottom of the file
insertStatementsAfterStandardPrologue(statements, endLexicalEnvironment());
const exportStarFunction = addExportStarIfNeeded(statements)!; // TODO: GH#18217
const modifiers = node.transformFlags & TransformFlags.ContainsAwait ?
factory.createModifiersFromModifierFlags(ModifierFlags.Async) :
undefined;
const moduleObject = factory.createObjectLiteralExpression([
factory.createPropertyAssignment("setters", createSettersArray(exportStarFunction, dependencyGroups)),
factory.createPropertyAssignment(
"execute",
factory.createFunctionExpression(
modifiers,
/*asteriskToken*/ undefined,
/*name*/ undefined,
/*typeParameters*/ undefined,
/*parameters*/ [],
/*type*/ undefined,
factory.createBlock(executeStatements, /*multiLine*/ true),
),
),
], /*multiLine*/ true);
statements.push(factory.createReturnStatement(moduleObject));
return factory.createBlock(statements, /*multiLine*/ true);
}
|
Adds the statements for the module body function for the source file.
@param node The source file for the module.
@param dependencyGroups The grouped dependencies of the module.
|
typescript
|
src/compiler/transformers/module/system.ts
| 310
|
[
"node",
"dependencyGroups"
] | false
| 3
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toPrimitive
|
public static double[] toPrimitive(final Double[] array) {
if (array == null) {
return null;
}
if (array.length == 0) {
return EMPTY_DOUBLE_ARRAY;
}
final double[] result = new double[array.length];
for (int i = 0; i < array.length; i++) {
result[i] = array[i].doubleValue();
}
return result;
}
|
Converts an array of object Doubles to primitives.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array a {@link Double} array, may be {@code null}.
@return a {@code double} array, {@code null} if null array input.
@throws NullPointerException if an array element is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,957
|
[
"array"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
hextetsToIPv6String
|
private static String hextetsToIPv6String(int[] hextets) {
// While scanning the array, handle these state transitions:
// start->num => "num" start->gap => "::"
// num->num => ":num" num->gap => "::"
// gap->num => "num" gap->gap => ""
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
return buf.toString();
}
|
Convert a list of hextets into a human-readable IPv6 address.
<p>In order for "::" compression to work, the input should contain negative sentinel values in
place of the elided zeroes.
@param hextets {@code int[]} array of eight 16-bit hextets, or -1s
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 537
|
[
"hextets"
] |
String
| true
| 6
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
get_slice_bound
|
def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
The label for which to calculate the slice bound.
side : {'left', 'right'}
if 'left' return leftmost position of given label.
if 'right' return one-past-the-rightmost position of given label.
Returns
-------
int
Index of label.
See Also
--------
Index.get_loc : Get integer location, slice or boolean mask for requested
label.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.get_slice_bound(3, "left")
3
>>> idx.get_slice_bound(3, "right")
4
If ``label`` is non-unique in the index, an error will be raised.
>>> idx_duplicate = pd.Index(["a", "b", "a", "c", "d"])
>>> idx_duplicate.get_slice_bound("a", "left")
Traceback (most recent call last):
KeyError: Cannot get left slice bound for non-unique label: 'a'
"""
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg, must be either "
f"'left' or 'right': {side}"
)
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
raise KeyError(
f"Cannot get {side} slice bound for non-monotonic index "
f"with a missing label {original_label!r}. "
"Either sort the index or specify an existing label."
) from None
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array, which
# is OK as long as they are representable by a slice.
assert is_bool_dtype(slc.dtype)
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
f"label: {original_label!r}"
)
if isinstance(slc, slice):
if side == "left":
return slc.start
else:
return slc.stop
else:
if side == "right":
return slc + 1
else:
return slc
|
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
The label for which to calculate the slice bound.
side : {'left', 'right'}
if 'left' return leftmost position of given label.
if 'right' return one-past-the-rightmost position of given label.
Returns
-------
int
Index of label.
See Also
--------
Index.get_loc : Get integer location, slice or boolean mask for requested
label.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.get_slice_bound(3, "left")
3
>>> idx.get_slice_bound(3, "right")
4
If ``label`` is non-unique in the index, an error will be raised.
>>> idx_duplicate = pd.Index(["a", "b", "a", "c", "d"])
>>> idx_duplicate.get_slice_bound("a", "left")
Traceback (most recent call last):
KeyError: Cannot get left slice bound for non-unique label: 'a'
|
python
|
pandas/core/indexes/base.py
| 6,821
|
[
"self",
"label",
"side"
] |
int
| true
| 10
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getLineNumber
|
public int getLineNumber() {
Throwable cause = getCause();
if (cause instanceof SAXParseException parseEx) {
return parseEx.getLineNumber();
}
return -1;
}
|
Return the line number in the XML resource that failed.
@return the line number if available (in case of a SAXParseException); -1 else
@see org.xml.sax.SAXParseException#getLineNumber()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/XmlBeanDefinitionStoreException.java
| 53
|
[] | true
| 2
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
completeQuietly
|
void completeQuietly(final Utils.ThrowingRunnable function,
final String msg,
final AtomicReference<Throwable> firstException) {
try {
function.run();
} catch (TimeoutException e) {
log.debug("Timeout expired before the {} operation could complete.", msg);
} catch (Exception e) {
firstException.compareAndSet(null, e);
}
}
|
This method can be used by cases where the caller has an event that needs to both block for completion but
also process background events. For some events, in order to fully process the associated logic, the
{@link ConsumerNetworkThread background thread} needs assistance from the application thread to complete.
If the application thread simply blocked on the event after submitting it, the processing would deadlock.
The logic herein is basically a loop that performs two tasks in each iteration:
<ol>
<li>Process background events, if any</li>
<li><em>Briefly</em> wait for {@link CompletableApplicationEvent an event} to complete</li>
</ol>
<p/>
Each iteration gives the application thread an opportunity to process background events, which may be
necessary to complete the overall processing.
@param future Event that contains a {@link CompletableFuture}; it is on this future that the
application thread will wait for completion
@param timer Overall timer that bounds how long to wait for the event to complete
@param ignoreErrorEventException Predicate to ignore background errors.
Any exceptions found while processing background events that match the predicate won't be propagated.
@return {@code true} if the event completed within the timeout, {@code false} otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,329
|
[
"function",
"msg",
"firstException"
] |
void
| true
| 3
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
generateReturnCode
|
@Override
public CodeBlock generateReturnCode(
GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode) {
CodeBlock.Builder code = CodeBlock.builder();
code.addStatement("return $L", BEAN_DEFINITION_VARIABLE);
return code.build();
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 238
|
[
"generationContext",
"beanRegistrationCode"
] |
CodeBlock
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toStringArray
|
public static String[] toStringArray(final Object[] array) {
return toStringArray(array, "null");
}
|
Returns an array containing the string representation of each element in the argument array.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array the {@code Object[]} to be processed, may be {@code null}.
@return {@code String[]} of the same size as the source with its element's string representation, {@code null} if null array input.
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 9,283
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkStrictModeWithStatement
|
function checkStrictModeWithStatement(node: WithStatement) {
// Grammar checking for withStatement
if (inStrictMode) {
errorOnFirstToken(node, Diagnostics.with_statements_are_not_allowed_in_strict_mode);
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 2,729
|
[
"node"
] | false
| 2
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
startTask
|
private void startTask(ProjectId projectId, Runnable onFailure) {
persistentTasksService.sendProjectStartRequest(
projectId,
getTaskId(projectId, projectResolver.supportsMultipleProjects()),
GEOIP_DOWNLOADER,
new GeoIpTaskParams(),
MasterNodeRequest.INFINITE_MASTER_NODE_TIMEOUT,
ActionListener.wrap(r -> logger.debug("Started geoip downloader task"), e -> {
Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e;
if (t instanceof ResourceAlreadyExistsException == false) {
logger.warn("failed to create geoip downloader task", e);
onFailure.run();
}
})
);
}
|
Check if a processor is a pipeline processor containing at least a geoip processor. This method also updates
pipelineHasGeoProcessorById with a result for any pipelines it looks at.
@param processor Processor config.
@param downloadDatabaseOnPipelineCreation Should the download_database_on_pipeline_creation of the geoip processor be true or false.
@param pipelineConfigById A Map of pipeline id to PipelineConfiguration
@param pipelineHasGeoProcessorById A Map of pipeline id to Boolean, indicating whether the pipeline references a geoip processor
(true), does not reference a geoip processor (false), or we are currently trying to figure that
out (null).
@return true if a geoip processor is found in the processors of this processor if this processor is a pipeline processor.
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
| 534
|
[
"projectId",
"onFailure"
] |
void
| true
| 3
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
resolveConstructorBoundProperties
|
private Stream<PropertyDescriptor> resolveConstructorBoundProperties(TypeElement declaringElement,
TypeElementMembers members, ExecutableElement bindConstructor) {
Map<String, PropertyDescriptor> candidates = new LinkedHashMap<>();
bindConstructor.getParameters().forEach((parameter) -> {
PropertyDescriptor descriptor = extracted(declaringElement, members, parameter);
register(candidates, descriptor);
});
return candidates.values().stream();
}
|
Return the {@link PropertyDescriptor} instances that are valid candidates for the
specified {@link TypeElement type} based on the specified {@link ExecutableElement
factory method}, if any.
@param type the target type
@param factoryMethod the method that triggered the metadata for that {@code type}
or {@code null}
@return the candidate properties for metadata generation
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptorResolver.java
| 78
|
[
"declaringElement",
"members",
"bindConstructor"
] | true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
setIntrospectionClass
|
/**
 * Set the class to introspect.
 * Needs to be called when the target object changes; clears the cached
 * introspection results when they were computed for a different class.
 * @param clazz the class to introspect
 */
protected void setIntrospectionClass(Class<?> clazz) {
    if (this.cachedIntrospectionResults == null) {
        return;
    }
    if (this.cachedIntrospectionResults.getBeanClass() != clazz) {
        this.cachedIntrospectionResults = null;
    }
}
|
Set the class to introspect.
Needs to be called when the target object changes.
@param clazz the class to introspect
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java
| 152
|
[
"clazz"
] |
void
| true
| 3
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
iterator
|
/**
 * Iterate over the contributors by delegating directly to the root
 * contributor's iterator.
 */
@Override
public Iterator<ConfigDataEnvironmentContributor> iterator() {
    return this.root.iterator();
}
|
Iterate over the {@link ConfigDataEnvironmentContributor contributors},
delegating to the root contributor's iterator.
@return an iterator over the contributors
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributors.java
| 251
|
[] | true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
addCandidateComponentsFromIndex
|
/**
 * Resolve candidate components for the given base package by querying the
 * {@link CandidateComponentsIndex} instead of scanning the classpath.
 * <p>Each include filter is translated into a stereotype via
 * {@code extractStereotype}; the matching types from the index are then read
 * through the metadata reader factory and filtered by the two
 * {@code isCandidateComponent} checks before being turned into bean definitions.
 * @param index the candidate components index
 * @param basePackage the package to check for candidate classes
 * @return the resulting set of scanned bean definitions
 * @throws BeanDefinitionStoreException in case of I/O failure while reading metadata
 * @throws IllegalArgumentException if a stereotype cannot be extracted from an include filter
 */
private Set<BeanDefinition> addCandidateComponentsFromIndex(CandidateComponentsIndex index, String basePackage) {
    Set<BeanDefinition> candidates = new LinkedHashSet<>();
    try {
        // Collect all indexed types matching any of the include filters.
        Set<String> types = new HashSet<>();
        for (TypeFilter filter : this.includeFilters) {
            String stereotype = extractStereotype(filter);
            if (stereotype == null) {
                throw new IllegalArgumentException("Failed to extract stereotype from " + filter);
            }
            types.addAll(index.getCandidateTypes(basePackage, stereotype));
        }
        boolean traceEnabled = logger.isTraceEnabled();
        boolean debugEnabled = logger.isDebugEnabled();
        for (String type : types) {
            MetadataReader metadataReader = getMetadataReaderFactory().getMetadataReader(type);
            // First check: metadata-level filters (exclude/include matching).
            if (isCandidateComponent(metadataReader)) {
                ScannedGenericBeanDefinition sbd = new ScannedGenericBeanDefinition(metadataReader);
                sbd.setSource(metadataReader.getResource());
                // Second check: bean-definition-level criteria.
                if (isCandidateComponent(sbd)) {
                    if (debugEnabled) {
                        logger.debug("Using candidate component class from index: " + type);
                    }
                    candidates.add(sbd);
                }
                else {
                    if (debugEnabled) {
                        logger.debug("Ignored because not a concrete top-level class: " + type);
                    }
                }
            }
            else {
                if (traceEnabled) {
                    logger.trace("Ignored because matching an exclude filter: " + type);
                }
            }
        }
    }
    catch (IOException ex) {
        throw new BeanDefinitionStoreException("I/O failure during classpath scanning", ex);
    }
    return candidates;
}
|
Resolve candidate components for the given base package by querying the
candidate components index instead of scanning the classpath.
@param index the candidate components index
@param basePackage the package to check for candidate classes
@return the resulting set of scanned bean definitions
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
| 403
|
[
"index",
"basePackage"
] | true
| 8
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
poll
|
/**
 * Poll for a heartbeat to send, based on the member and coordinator state.
 * <ul>
 * <li>If there is no known coordinator or the member should skip heartbeating,
 * an empty result is returned (after propagating any fatal coordinator error).</li>
 * <li>If the poll timer has expired and the member is not already leaving the
 * group, a warning is logged, the membership manager is notified, and a
 * leave-style heartbeat is sent with the heartbeat state reset.</li>
 * <li>Otherwise a heartbeat is sent when the member state requires one before
 * the interval expires, or when the request backoff allows it; if neither,
 * the result only carries the time until the next heartbeat.</li>
 * </ul>
 * @param currentTimeMs the current time in milliseconds
 * @return the poll result, containing a heartbeat request if one must be sent
 */
@Override
public NetworkClientDelegate.PollResult poll(long currentTimeMs) {
    if (coordinatorRequestManager.coordinator().isEmpty() || membershipManager.shouldSkipHeartbeat()) {
        membershipManager.onHeartbeatRequestSkipped();
        maybePropagateCoordinatorFatalErrorEvent();
        return NetworkClientDelegate.PollResult.EMPTY;
    }
    pollTimer.update(currentTimeMs);
    if (pollTimer.isExpired() && !membershipManager.isLeavingGroup()) {
        logger.warn("Consumer poll timeout has expired. This means the time between " +
            "subsequent calls to poll() was longer than the configured max.poll.interval.ms, " +
            "which typically implies that the poll loop is spending too much time processing " +
            "messages. You can address this either by increasing max.poll.interval.ms or by " +
            "reducing the maximum size of batches returned in poll() with max.poll.records.");
        membershipManager.onPollTimerExpired();
        NetworkClientDelegate.UnsentRequest leaveHeartbeat = makeHeartbeatRequestAndLogResponse(currentTimeMs);
        // We can ignore the leave response because we can join before or after receiving the response.
        heartbeatRequestState.reset();
        heartbeatState.reset();
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(leaveHeartbeat));
    }
    if (shouldHeartbeatBeforeIntervalExpires() || heartbeatRequestState.canSendRequest(currentTimeMs)) {
        NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequestAndHandleResponse(currentTimeMs);
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(request));
    } else {
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
    }
}
|
This will build a heartbeat request if one must be sent, determined based on the member
state. A heartbeat is sent when all of the following applies:
<ol>
<li>Member is part of the consumer group or wants to join it.</li>
<li>The heartbeat interval has expired, or the member is in a state that indicates
that it should heartbeat without waiting for the interval.</li>
</ol>
This will also determine the maximum wait time until the next poll based on the member's
state.
<ol>
<li>If the member is without a coordinator or is in a failed state, the timer is set
to Long.MAX_VALUE, as there's no need to send a heartbeat.</li>
<li>If the member cannot send a heartbeat due to either exponential backoff, it will
return the remaining time left on the backoff timer.</li>
<li>If the member's heartbeat timer has not expired, It will return the remaining time
left on the heartbeat timer.</li>
<li>If the member can send a heartbeat, the timer is set to the current heartbeat interval.</li>
</ol>
@return {@link org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult} that includes a
heartbeat request if one must be sent, and the time to wait until the next poll.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java
| 363
|
[
"currentTimeMs"
] | true
| 7
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
withBindRestrictions
|
/**
 * Create an updated {@link Bindable} instance that carries the existing bind
 * restrictions plus the given additional ones.
 * @param additionalRestrictions any additional restrictions to apply
 * @return an updated {@link Bindable}
 * @since 2.5.0
 */
public Bindable<T> withBindRestrictions(BindRestriction... additionalRestrictions) {
    EnumSet<BindRestriction> merged = EnumSet.copyOf(this.bindRestrictions);
    for (BindRestriction restriction : additionalRestrictions) {
        merged.add(restriction);
    }
    return new Bindable<>(this.type, this.boxedType, this.value, this.annotations, merged,
            this.bindMethod);
}
|
Create an updated {@link Bindable} instance with additional bind restrictions.
@param additionalRestrictions any additional restrictions to apply
@return an updated {@link Bindable}
@since 2.5.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 225
|
[] | true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
access
|
/**
 * Tests a user's permissions for the file or directory
 * specified by `path`. When `mode` is omitted (i.e. a function is passed in
 * its place), it defaults to `F_OK`.
 * @param {string | Buffer | URL} path
 * @param {number} [mode]
 * @param {(err?: Error) => any} callback
 * @returns {void}
 */
function access(path, mode, callback) {
  // Support the access(path, callback) form: shift the optional mode.
  if (typeof mode === 'function') {
    callback = mode;
    mode = F_OK;
  }

  const validatedPath = getValidatedPath(path);
  const cb = makeCallback(callback);

  const req = new FSReqCallback();
  req.oncomplete = cb;
  binding.access(validatedPath, mode, req);
}
|
Tests a user's permissions for the file or directory
specified by `path`.
@param {string | Buffer | URL} path
@param {number} [mode]
@param {(err?: Error) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 215
|
[
"path",
"mode",
"callback"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
destroySingletons
|
/**
 * Destroy all cached singleton beans in this registry.
 * <p>Disposable beans are destroyed in reverse order of registration, after
 * which the containment/dependency bookkeeping maps are cleared and the
 * singleton cache itself is cleared under the singleton lock.
 */
public void destroySingletons() {
    if (logger.isTraceEnabled()) {
        logger.trace("Destroying singletons in " + this);
    }
    this.singletonsCurrentlyInDestruction = true;
    String[] disposableBeanNames;
    // Snapshot the names under the monitor; the actual destruction below
    // happens outside of it.
    synchronized (this.disposableBeans) {
        disposableBeanNames = StringUtils.toStringArray(this.disposableBeans.keySet());
    }
    // Destroy in reverse registration order.
    for (int i = disposableBeanNames.length - 1; i >= 0; i--) {
        destroySingleton(disposableBeanNames[i]);
    }
    this.containedBeanMap.clear();
    this.dependentBeanMap.clear();
    this.dependenciesForBeanMap.clear();
    this.singletonLock.lock();
    try {
        clearSingletonCache();
    }
    finally {
        this.singletonLock.unlock();
    }
}
|
Destroy all cached singleton beans in this registry, invoking disposable
beans in reverse order of registration and clearing the dependency
bookkeeping and singleton cache.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 693
|
[] |
void
| true
| 3
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
assert_prek_installed
|
def assert_prek_installed():
    """
    Check that prek is installed at least at the minimum required version.

    The minimum version is read from ``minimum_prek_version`` in the
    repository's ``.pre-commit-config.yaml``. Exits the process with code 1
    if prek is missing, cannot be executed, or is older than required;
    returns None otherwise.
    """
    # Local import to make autocomplete work
    import yaml
    from packaging.version import Version
    prek_config = yaml.safe_load((AIRFLOW_ROOT_PATH / ".pre-commit-config.yaml").read_text())
    min_prek_version = prek_config["minimum_prek_version"]
    python_executable = sys.executable
    get_console().print(f"[info]Checking prek installed for {python_executable}[/]")
    need_to_reinstall_prek = False
    try:
        command_result = run_command(
            ["prek", "--version"],
            capture_output=True,
            text=True,
            check=False,
        )
        if command_result.returncode == 0:
            if command_result.stdout:
                # Assumes output of the form "prek <version>" — the second
                # whitespace-separated token is the version string.
                prek_version = command_result.stdout.split(" ")[1].strip()
                if Version(prek_version) >= Version(min_prek_version):
                    get_console().print(
                        f"\n[success]Package prek is installed. "
                        f"Good version {prek_version} (>= {min_prek_version})[/]\n"
                    )
                else:
                    get_console().print(
                        f"\n[error]Package name prek version is wrong. It should be "
                        f"at least {min_prek_version} and is {prek_version}.[/]\n\n"
                    )
                    sys.exit(1)
            else:
                # Version could not be parsed: warn but do not fail.
                get_console().print(
                    "\n[warning]Could not determine version of prek. You might need to update it![/]\n"
                )
        else:
            need_to_reinstall_prek = True
            get_console().print("\n[error]Error checking for prek-installation:[/]\n")
            get_console().print(command_result.stderr)
    except FileNotFoundError as e:
        # "prek" binary not on PATH at all.
        need_to_reinstall_prek = True
        get_console().print(f"\n[error]Error checking for prek installation: [/]\n{e}\n")
    if need_to_reinstall_prek:
        get_console().print("[info]Make sure to install prek. For example by running:\n")
        get_console().print("    uv tool install prek\n")
        get_console().print("Or if you prefer pipx:\n")
        get_console().print("    pipx install prek")
        sys.exit(1)
|
Check if prek is installed in the right version.
:return: True if prek is installed in the right version.
|
python
|
dev/breeze/src/airflow_breeze/utils/run_utils.py
| 207
|
[] | false
| 8
| 7.2
|
apache/airflow
| 43,597
|
unknown
| false
|
|
brokers
|
/**
 * Get all brokers returned in the metadata response.
 * @return the broker nodes
 */
public Collection<Node> brokers() {
    return holder().brokers.values();
}
|
Get all brokers returned in metadata response
@return the brokers
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java
| 229
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
hideStackFrames
|
/**
 * This function removes unnecessary frames from Node.js core errors: the
 * returned wrapper re-captures the stack of any thrown error starting at the
 * wrapper itself, hiding the inner core frames. The unwrapped function stays
 * reachable via the `withoutStackTrace` property.
 * @template {(...args: unknown[]) => unknown} T
 * @param {T} fn
 * @returns {T}
 */
function hideStackFrames(fn) {
  function wrappedFn(...args) {
    try {
      return ReflectApply(fn, this, args);
    } catch (error) {
      // Skip the (wasted) capture when stack traces are disabled
      // (stackTraceLimit === 0); otherwise trim frames below wrappedFn.
      Error.stackTraceLimit && ErrorCaptureStackTrace(error, wrappedFn);
      throw error;
    }
  }
  wrappedFn.withoutStackTrace = fn;
  return wrappedFn;
}
|
This function removes unnecessary frames from Node.js core errors.
@template {(...args: unknown[]) => unknown} T
@param {T} fn
@returns {T}
|
javascript
|
lib/internal/errors.js
| 540
|
[
"fn"
] | false
| 3
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
readLiteral
|
/**
 * Reads a null, boolean, numeric or unquoted string literal value. Numeric
 * values are returned as an Integer, Long, or Double, in that order of
 * preference.
 * @return the literal value
 * @throws JSONException if no literal characters are available
 */
private Object readLiteral() throws JSONException {
    // Read up to the next structural/whitespace character.
    String literal = nextToInternal("{}[]/\\:,=;# \t\f");
    if (literal.isEmpty()) {
        throw syntaxError("Expected literal value");
    }
    else if ("null".equalsIgnoreCase(literal)) {
        return JSONObject.NULL;
    }
    else if ("true".equalsIgnoreCase(literal)) {
        return Boolean.TRUE;
    }
    else if ("false".equalsIgnoreCase(literal)) {
        return Boolean.FALSE;
    }
    /* try to parse as an integral type... */
    if (literal.indexOf('.') == -1) {
        int base = 10;
        String number = literal;
        // "0x"/"0X" prefix -> hexadecimal; single leading "0" -> octal.
        if (number.startsWith("0x") || number.startsWith("0X")) {
            number = number.substring(2);
            base = 16;
        }
        else if (number.startsWith("0") && number.length() > 1) {
            number = number.substring(1);
            base = 8;
        }
        try {
            long longValue = Long.parseLong(number, base);
            // Narrow to Integer when the value fits.
            if (longValue <= Integer.MAX_VALUE && longValue >= Integer.MIN_VALUE) {
                return (int) longValue;
            }
            else {
                return longValue;
            }
        }
        catch (NumberFormatException e) {
            /*
             * This only happens for integral numbers greater than Long.MAX_VALUE,
             * numbers in exponential form (5e-10) and unquoted strings. Fall through
             * to try floating point.
             */
        }
    }
    /* ...next try to parse as a floating point... */
    try {
        return Double.valueOf(literal);
    }
    catch (NumberFormatException ex) {
        // Ignore
    }
    /* ... finally give up. We have an unquoted string */
    return new String(literal); // a new string avoids leaking memory
}
|
Reads a null, boolean, numeric or unquoted string literal value. Numeric values
will be returned as an Integer, Long, or Double, in that order of preference.
@return a literal value
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
| 273
|
[] |
Object
| true
| 14
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
requireMethod
|
/**
 * Throws NullPointerException if {@code method} is {@code null}, otherwise
 * returns it unchanged.
 * @param method the method to test
 * @return the given method
 * @throws NullPointerException if {@code method} is {@code null}
 */
private static Method requireMethod(final Method method) {
    Objects.requireNonNull(method, "method");
    return method;
}
|
Throws NullPointerException if {@code method} is {@code null}.
@param method The method to test.
@return The given method.
@throws NullPointerException if {@code method} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
| 236
|
[
"method"
] |
Method
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getCanonicalName
|
/**
 * Converts a class name to canonical form, turning JVM array descriptors such
 * as {@code "[I"} into {@code "int[]"} and {@code "[Ljava.lang.String;"} into
 * {@code "java.lang.String[]"}. Names that are not array descriptors are
 * returned unchanged (after whitespace removal); {@code $} separators of
 * inner classes are untouched.
 * @param name the name of the class; may be null, in which case null is returned
 * @return the canonical form of the class name
 * @throws IllegalArgumentException if the descriptor is malformed or exceeds
 *         {@code MAX_DIMENSIONS} array dimensions
 */
private static String getCanonicalName(final String name) {
    String className = StringUtils.deleteWhitespace(name);
    if (className == null) {
        return null;
    }
    // Count leading '[' characters: one per array dimension.
    int dim = 0;
    final int len = className.length();
    while (dim < len && className.charAt(dim) == '[') {
        dim++;
        if (dim > MAX_DIMENSIONS) {
            throw new IllegalArgumentException(String.format("Maximum array dimension %d exceeded", MAX_DIMENSIONS));
        }
    }
    // Nothing after the brackets: not a valid descriptor.
    if (dim >= len) {
        throw new IllegalArgumentException(String.format("Invalid class name %s", name));
    }
    if (dim < 1) {
        // No brackets at all: plain class name, return as-is.
        return className;
    }
    className = className.substring(dim);
    if (className.startsWith("L")) {
        // Object array element: "Lcom.example.Foo;"
        if (!className.endsWith(";") || className.length() < 3) {
            throw new IllegalArgumentException(String.format("Invalid class name %s", name));
        }
        className = className.substring(1, className.length() - 1);
    } else if (className.length() == 1) {
        // Single-character primitive descriptor, e.g. "I" -> "int".
        final String primitive = REVERSE_ABBREVIATION_MAP.get(className.substring(0, 1));
        if (primitive == null) {
            throw new IllegalArgumentException(String.format("Invalid class name %s", name));
        }
        className = primitive;
    } else {
        throw new IllegalArgumentException(String.format("Invalid class name %s", name));
    }
    // Append "[]" once per dimension.
    final StringBuilder canonicalClassNameBuffer = new StringBuilder(className.length() + dim * 2);
    canonicalClassNameBuffer.append(className);
    for (int i = 0; i < dim; i++) {
        canonicalClassNameBuffer.append("[]");
    }
    return canonicalClassNameBuffer.toString();
}
|
Converts a given name of class into canonical format. If name of class is not a name of array class it returns
unchanged name.
<p>
The method does not change the {@code $} separators in case the class is inner class.
</p>
<p>
Example:
<ul>
<li>{@code getCanonicalName("[I") = "int[]"}</li>
<li>{@code getCanonicalName("[Ljava.lang.String;") = "java.lang.String[]"}</li>
<li>{@code getCanonicalName("java.lang.String") = "java.lang.String"}</li>
</ul>
</p>
@param name the name of class.
@return canonical form of class name.
@throws IllegalArgumentException if the class name is invalid.
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 496
|
[
"name"
] |
String
| true
| 13
| 9.36
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
load64
|
/**
 * Load 8 bytes from {@code input} starting at {@code offset}, interpreted as a
 * little-endian long. The array must contain at least 8 bytes from
 * {@code offset} (inclusive); this is only asserted, not checked, at runtime.
 * @param input the input bytes
 * @param offset the offset into the array at which to start
 * @return a long of the concatenated 8 bytes
 */
static long load64(byte[] input, int offset) {
    // We don't want this in production code as this is the most critical part of the loop.
    assert input.length >= offset + 8;
    // Delegates to the fast (unsafe) version or the fallback.
    return byteArray.getLongLittleEndian(input, offset);
}
|
Load 8 bytes into long in a little endian manner, from the substring between position and
position + 8. The array must have at least 8 bytes from offset (inclusive).
@param input the input bytes
@param offset the offset into the array at which to start
@return a long of a concatenated 8 bytes
|
java
|
android/guava/src/com/google/common/hash/LittleEndianByteArray.java
| 51
|
[
"input",
"offset"
] | true
| 1
| 7.2
|
google/guava
| 51,352
|
javadoc
| false
|
|
genericArrayTypeToString
|
/**
 * Formats a {@link GenericArrayType} as a {@link String} by formatting its
 * component type and appending {@code "[]"}.
 * @param genericArrayType the {@link GenericArrayType} to format
 * @return the formatted String
 */
private static String genericArrayTypeToString(final GenericArrayType genericArrayType) {
    return toString(genericArrayType.getGenericComponentType()) + "[]";
}
|
Formats a {@link GenericArrayType} as a {@link String}.
@param genericArrayType {@link GenericArrayType} to format.
@return String.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 583
|
[
"genericArrayType"
] |
String
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
millis
|
/**
 * Convert this value to milliseconds.
 * @return this duration, expressed in milliseconds
 */
public long millis() {
    return timeUnit.toMillis(duration);
}
|
@return the number of {@link #timeUnit()} units this value contains
|
java
|
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
| 130
|
[] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
convertProperties
|
/**
 * Convert the given merged properties, converting property values where
 * necessary: each value is passed through {@code convertProperty} and the
 * original is replaced only when the converted value differs.
 * @param props the Properties to convert
 * @see #processProperties
 */
protected void convertProperties(Properties props) {
    for (Enumeration<?> names = props.propertyNames(); names.hasMoreElements(); ) {
        String name = (String) names.nextElement();
        String originalValue = props.getProperty(name);
        String convertedValue = convertProperty(name, originalValue);
        if (!ObjectUtils.nullSafeEquals(originalValue, convertedValue)) {
            props.setProperty(name, convertedValue);
        }
    }
}
|
Convert the given merged properties, converting property values
if necessary. The result will then be processed.
<p>The default implementation will invoke {@link #convertPropertyValue}
for each property value, replacing the original with the converted value.
@param props the Properties to convert
@see #processProperties
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/PropertyResourceConfigurer.java
| 101
|
[
"props"
] |
void
| true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
ohlc
|
def ohlc(self) -> DataFrame:
    """
    Compute open, high, low and close values of a group, excluding missing values.

    For multiple groupings, the result index will be a MultiIndex.

    Returns
    -------
    DataFrame
        Open, high, low and close values within each group.

    See Also
    --------
    DataFrame.agg : Aggregate using one or more operations over the specified axis.
    DataFrame.resample : Resample time-series data.
    DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns.

    Examples
    --------
    For SeriesGroupBy:

    >>> lst = [
    ...     "SPX",
    ...     "CAC",
    ...     "SPX",
    ...     "CAC",
    ...     "SPX",
    ...     "CAC",
    ...     "SPX",
    ...     "CAC",
    ... ]
    >>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst)
    >>> ser
    SPX     3.4
    CAC     9.0
    SPX     7.2
    CAC     5.2
    SPX     8.8
    CAC     9.4
    SPX     0.1
    CAC     0.5
    dtype: float64
    >>> ser.groupby(level=0).ohlc()
         open  high  low  close
    CAC   9.0   9.4  0.5    0.5
    SPX   3.4   8.8  0.1    0.1

    For DataFrameGroupBy:

    >>> data = {
    ...     2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2, 1],
    ...     2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0],
    ... }
    >>> df = pd.DataFrame(
    ...     data, index=["SPX", "CAC", "SPX", "CAC", "SPX", "CAC", "SPX", "CAC"]
    ... )
    >>> df
         2022  2023
    SPX   1.2   3.4
    CAC   2.3   9.0
    SPX   8.9   7.2
    CAC   4.5   5.2
    SPX   4.4   8.8
    CAC   3.0   9.4
    SPX   2.0   8.2
    CAC   1.0   1.0
    >>> df.groupby(level=0).ohlc()
        2022                 2023
        open high  low close open high  low close
    CAC  2.3  4.5  1.0   1.0  9.0  9.4  1.0   1.0
    SPX  1.2  8.9  1.2   2.0  3.4  8.8  3.4   8.2

    For Resampler:

    >>> ser = pd.Series(
    ...     [1, 3, 2, 4, 3, 5],
    ...     index=pd.DatetimeIndex(
    ...         [
    ...             "2023-01-01",
    ...             "2023-01-10",
    ...             "2023-01-15",
    ...             "2023-02-01",
    ...             "2023-02-10",
    ...             "2023-02-15",
    ...         ]
    ...     ),
    ... )
    >>> ser.resample("MS").ohlc()
                open  high  low  close
    2023-01-01     1     3    1      2
    2023-02-01     4     5    3      5
    """
    if self.obj.ndim == 1:
        # SeriesGroupBy: aggregate directly through the cython "ohlc" kernel.
        obj = self._selected_obj
        is_numeric = is_numeric_dtype(obj.dtype)
        if not is_numeric:
            raise DataError("No numeric types to aggregate")
        res_values = self._grouper._cython_operation(
            "aggregate", obj._values, "ohlc", axis=0, min_count=-1
        )
        agg_names = ["open", "high", "low", "close"]
        result = self.obj._constructor_expanddim(
            res_values, index=self._grouper.result_index, columns=agg_names
        )
        return result
    # DataFrameGroupBy: apply ohlc to each column's SeriesGroupBy.
    result = self._apply_to_column_groupbys(lambda sgb: sgb.ohlc())
    return result
|
Compute open, high, low and close values of a group, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Returns
-------
DataFrame
Open, high, low and close values within each group.
See Also
--------
DataFrame.agg : Aggregate using one or more operations over the specified axis.
DataFrame.resample : Resample time-series data.
DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns.
Examples
--------
For SeriesGroupBy:
>>> lst = [
... "SPX",
... "CAC",
... "SPX",
... "CAC",
... "SPX",
... "CAC",
... "SPX",
... "CAC",
... ]
>>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst)
>>> ser
SPX 3.4
CAC 9.0
SPX 7.2
CAC 5.2
SPX 8.8
CAC 9.4
SPX 0.1
CAC 0.5
dtype: float64
>>> ser.groupby(level=0).ohlc()
open high low close
CAC 9.0 9.4 0.5 0.5
SPX 3.4 8.8 0.1 0.1
For DataFrameGroupBy:
>>> data = {
... 2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2, 1],
... 2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0],
... }
>>> df = pd.DataFrame(
... data, index=["SPX", "CAC", "SPX", "CAC", "SPX", "CAC", "SPX", "CAC"]
... )
>>> df
2022 2023
SPX 1.2 3.4
CAC 2.3 9.0
SPX 8.9 7.2
CAC 4.5 5.2
SPX 4.4 8.8
CAC 3.0 9.4
SPX 2.0 8.2
CAC 1.0 1.0
>>> df.groupby(level=0).ohlc()
2022 2023
open high low close open high low close
CAC 2.3 4.5 1.0 1.0 9.0 9.4 1.0 1.0
SPX 1.2 8.9 1.2 2.0 3.4 8.8 3.4 8.2
For Resampler:
>>> ser = pd.Series(
... [1, 3, 2, 4, 3, 5],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").ohlc()
open high low close
2023-01-01 1 3 1 2
2023-02-01 4 5 3 5
|
python
|
pandas/core/groupby/groupby.py
| 3,408
|
[
"self"
] |
DataFrame
| true
| 3
| 8.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
_matches_client_defaults
|
def _matches_client_defaults(cls, var: Any, attrname: str) -> bool:
"""
Check if a field value matches client_defaults and should be excluded.
This implements the hierarchical defaults optimization where values that match
client_defaults are omitted from individual task serialization.
:param var: The value to check
:param attrname: The attribute name
:return: True if value matches client_defaults and should be excluded
"""
try:
# Get cached client defaults for tasks
task_defaults = cls.generate_client_defaults()
# Check if this field is in client_defaults and values match
if attrname in task_defaults and var == task_defaults[attrname]:
return True
except Exception:
# If anything goes wrong with client_defaults, fall back to normal logic
pass
return False
|
Check if a field value matches client_defaults and should be excluded.
This implements the hierarchical defaults optimization where values that match
client_defaults are omitted from individual task serialization.
:param var: The value to check
:param attrname: The attribute name
:return: True if value matches client_defaults and should be excluded
|
python
|
airflow-core/src/airflow/serialization/serialized_objects.py
| 1,692
|
[
"cls",
"var",
"attrname"
] |
bool
| true
| 3
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
collect_node_descendants
|
def collect_node_descendants(
graph: torch.fx.Graph,
) -> dict[torch.fx.Node, OrderedSet[torch.fx.Node]]:
"""
Collects the descendants of each node in the graph.
Args:
graph (torch.fx.Graph): The graph to collect descendants from.
Returns:
dict[torch.fx.Node, OrderedSet[torch.fx.Node]]: A dictionary mapping each node to its descendants.
"""
node_descendants: dict[torch.fx.Node, OrderedSet[torch.fx.Node]] = (
collections.defaultdict(OrderedSet)
)
outdegree = collections.defaultdict(int)
queue = []
for node in graph.nodes:
n_outdegree = len(node.users)
if n_outdegree == 0:
queue.append(node)
else:
outdegree[node] = len(node.users)
while queue:
node = queue.pop()
for input_node in node.all_input_nodes:
node_descendants[input_node] |= node_descendants[node]
node_descendants[input_node].add(node)
outdegree[input_node] -= 1
if outdegree[input_node] == 0:
queue.append(input_node)
return node_descendants
|
Collects the descendants of each node in the graph.
Args:
graph (torch.fx.Graph): The graph to collect descendants from.
Returns:
dict[torch.fx.Node, OrderedSet[torch.fx.Node]]: A dictionary mapping each node to its descendants.
|
python
|
torch/_inductor/fx_passes/bucketing.py
| 234
|
[
"graph"
] |
dict[torch.fx.Node, OrderedSet[torch.fx.Node]]
| true
| 7
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
parseCacheAnnotations
|
/**
 * Parse the cache definition for the given method, based on an annotation
 * type understood by this parser, into Spring's metadata attribute classes.
 * @param method the annotated method
 * @return the configured caching operations, or {@code null} if the method
 * is not cacheable
 * @see AnnotationCacheOperationSource#findCacheOperations(Method)
 */
@Nullable Collection<CacheOperation> parseCacheAnnotations(Method method);
|
Parse the cache definition for the given method,
based on an annotation type understood by this parser.
<p>This essentially parses a known cache annotation into Spring's metadata
attribute class. Returns {@code null} if the method is not cacheable.
@param method the annotated method
@return the configured caching operation, or {@code null} if none found
@see AnnotationCacheOperationSource#findCacheOperations(Method)
|
java
|
spring-context/src/main/java/org/springframework/cache/annotation/CacheAnnotationParser.java
| 79
|
[
"method"
] | true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
set_operator_weight
|
def set_operator_weight(op_name: str, weight: float) -> None:
    """Set the selection weight for a specific operator.

    Args:
        op_name: The registered operator name (e.g., "add", "arg") OR fully-qualified torch op
            (e.g., "torch.nn.functional.relu", "torch.matmul")
        weight: New relative selection weight (must be > 0)
    """
    if weight <= 0:
        raise ValueError("Operator weight must be > 0")
    # First, look the operator up under its registry key.
    operator = _global_registry.get(op_name)
    if operator is None:
        # Fall back to matching on the fully-qualified torch op name.
        operator = next(
            (
                candidate
                for candidate in _global_registry.list_operators().values()
                if getattr(candidate, "torch_op_name", None) == op_name
            ),
            None,
        )
    if operator is None:
        raise KeyError(f"Operator '{op_name}' not found by registry name or torch op name")
    operator.weight = float(weight)
|
Set the selection weight for a specific operator.
Args:
op_name: The registered operator name (e.g., "add", "arg") OR fully-qualified torch op
(e.g., "torch.nn.functional.relu", "torch.matmul")
weight: New relative selection weight (must be > 0)
|
python
|
tools/experimental/torchfuzz/operators/registry.py
| 177
|
[
"op_name",
"weight"
] |
None
| true
| 5
| 6.56
|
pytorch/pytorch
| 96,034
|
google
| false
|
apply
|
def apply(
    self,
    f,
    align_keys: list[str] | None = None,
    **kwargs,
) -> Self:
    """
    Iterate over the blocks, collect and create a new BlockManager.

    Parameters
    ----------
    f : str or callable
        Name of the Block method to apply, or a callable applied to each block.
    align_keys: List[str] or None, default None
        Names of kwargs whose values must be realigned to each block's columns
        before the call.
    **kwargs
        Keywords to pass to `f`

    Returns
    -------
    BlockManager
    """
    assert "filter" not in kwargs
    align_keys = align_keys or []
    result_blocks: list[Block] = []
    # fillna: Series/DataFrame is responsible for making sure value is aligned
    aligned_args = {k: kwargs[k] for k in align_keys}
    for b in self.blocks:
        if aligned_args:
            # Slice each alignable argument down to this block's columns.
            for k, obj in aligned_args.items():
                if isinstance(obj, (ABCSeries, ABCDataFrame)):
                    # The caller is responsible for ensuring that
                    # obj.axes[-1].equals(self.items)
                    if obj.ndim == 1:
                        kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
                    else:
                        kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
                else:
                    # otherwise we have an ndarray
                    kwargs[k] = obj[b.mgr_locs.indexer]
        # f may be a callable applied to the block, or the name of a Block method.
        if callable(f):
            applied = b.apply(f, **kwargs)
        else:
            applied = getattr(b, f)(**kwargs)
        result_blocks = extend_blocks(applied, result_blocks)
    out = type(self).from_blocks(result_blocks, self.axes)
    return out
|
Iterate over the blocks, collect and create a new BlockManager.
Parameters
----------
f : str or callable
Name of the Block method to apply.
align_keys: List[str] or None, default None
**kwargs
Keywords to pass to `f`
Returns
-------
BlockManager
|
python
|
pandas/core/internals/managers.py
| 395
|
[
"self",
"f",
"align_keys"
] |
Self
| true
| 11
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
reauthenticationLatencyMs
|
/**
 * Return the number of milliseconds that elapsed while re-authenticating this
 * session from the perspective of this instance, if applicable, otherwise
 * null. Delegates to the channel's authenticator.
 * @return the re-authentication latency in milliseconds, or null if not applicable
 */
public Long reauthenticationLatencyMs() {
    return authenticator.reauthenticationLatencyMs();
}
|
Return the number of milliseconds that elapsed while re-authenticating this
session from the perspective of this instance, if applicable, otherwise null.
The server-side perspective will yield a lower value than the client-side
perspective of the same re-authentication because the client-side observes an
additional network round-trip.
@return the number of milliseconds that elapsed while re-authenticating this
session from the perspective of this instance, if applicable,
otherwise null
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 628
|
[] |
Long
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
_coerce_indexer_frozen
|
def _coerce_indexer_frozen(array_like, categories, copy: bool = False) -> np.ndarray:
    """
    Coerce the array-like indexer to the smallest integer dtype that can encode all
    of the given categories, and freeze the result.

    Parameters
    ----------
    array_like : array-like
    categories : array-like
    copy : bool
        Whether to copy the coerced array before freezing it.

    Returns
    -------
    np.ndarray
        Non-writeable.
    """
    indexer = coerce_indexer_dtype(array_like, categories)
    if copy:
        indexer = indexer.copy()
    # Freeze so downstream code cannot mutate the codes in place.
    indexer.flags.writeable = False
    return indexer
|
Coerce the array-like indexer to the smallest integer dtype that can encode all
of the given categories.
Parameters
----------
array_like : array-like
categories : array-like
copy : bool
Returns
-------
np.ndarray
Non-writeable.
|
python
|
pandas/core/indexes/multi.py
| 4,411
|
[
"array_like",
"categories",
"copy"
] |
np.ndarray
| true
| 2
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
slice_locs
|
def slice_locs(
self,
start: SliceType = None,
end: SliceType = None,
step: int | None = None,
) -> tuple[int, int]:
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
Returns
-------
tuple[int, int]
Returns a tuple of two integers representing the slice locations for the
input labels within the index.
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list("abcd"))
>>> idx.slice_locs(start="b", end="c")
(1, 3)
>>> idx = pd.Index(list("bcde"))
>>> idx.slice_locs(start="a", end="c")
(0, 2)
"""
inc = step is None or step >= 0
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
# GH 16785: If start and end happen to be date strings with UTC offsets
# attempt to parse and check that the offsets are the same
if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
try:
ts_start = Timestamp(start)
ts_end = Timestamp(end)
except (ValueError, TypeError):
pass
else:
if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
raise ValueError("Both dates must have the same UTC offset")
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, "left")
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, "right")
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
|
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
Returns
-------
tuple[int, int]
Returns a tuple of two integers representing the slice locations for the
input labels within the index.
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list("abcd"))
>>> idx.slice_locs(start="b", end="c")
(1, 3)
>>> idx = pd.Index(list("bcde"))
>>> idx.slice_locs(start="a", end="c")
(0, 2)
|
python
|
pandas/core/indexes/base.py
| 6,910
|
[
"self",
"start",
"end",
"step"
] |
tuple[int, int]
| true
| 14
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
min
|
public static double min(final double a, final double b) {
if (Double.isNaN(a)) {
return b;
}
if (Double.isNaN(b)) {
return a;
}
return Math.min(a, b);
}
|
Gets the minimum of two {@code double} values.
<p>NaN is only returned if all numbers are NaN as per IEEE-754r.</p>
@param a value 1.
@param b value 2.
@return the smallest of the values.
|
java
|
src/main/java/org/apache/commons/lang3/math/IEEE754rUtils.java
| 173
|
[
"a",
"b"
] | true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
nextHook
|
function nextHook(): null | Hook {
const hook = currentHook;
if (hook !== null) {
currentHook = hook.next;
}
return hook;
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-debug-tools/src/ReactDebugHooks.js
| 153
|
[] | false
| 2
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
findResource
|
@Override
public URL findResource(String name) {
if (!this.hasJarUrls) {
return super.findResource(name);
}
Optimizations.enable(false);
try {
return super.findResource(name);
}
finally {
Optimizations.disable();
}
}
|
Create a new {@link LaunchedClassLoader} instance.
@param urls the URLs from which to load classes and resources
@param parent the parent class loader for delegation
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrlClassLoader.java
| 66
|
[
"name"
] |
URL
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
offset
|
public long offset() {
return this.offset;
}
|
The offset of the record in the topic/partition.
@return the offset of the record, or -1 if {{@link #hasOffset()}} returns false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/RecordMetadata.java
| 69
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
unescapeJava
|
public static final String unescapeJava(final String input) {
return UNESCAPE_JAVA.translate(input);
}
|
Unescapes any Java literals found in the {@link String}.
For example, it will turn a sequence of {@code '\'} and
{@code 'n'} into a newline character, unless the {@code '\'}
is preceded by another {@code '\'}.
@param input the {@link String} to unescape, may be null
@return a new unescaped {@link String}, {@code null} if null string input
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 742
|
[
"input"
] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
adjacentNodes
|
@Override
public Set<N> adjacentNodes() {
if (orderedNodeConnections == null) {
return Collections.unmodifiableSet(adjacentNodeValues.keySet());
} else {
return new AbstractSet<N>() {
@Override
public UnmodifiableIterator<N> iterator() {
Iterator<NodeConnection<N>> nodeConnections = orderedNodeConnections.iterator();
Set<N> seenNodes = new HashSet<>();
return new AbstractIterator<N>() {
@Override
protected @Nullable N computeNext() {
while (nodeConnections.hasNext()) {
NodeConnection<N> nodeConnection = nodeConnections.next();
boolean added = seenNodes.add(nodeConnection.node);
if (added) {
return nodeConnection.node;
}
}
return endOfData();
}
};
}
@Override
public int size() {
return adjacentNodeValues.size();
}
@Override
public boolean contains(@Nullable Object obj) {
return adjacentNodeValues.containsKey(obj);
}
};
}
}
|
All node connections in this graph, in edge insertion order.
<p>Note: This field and {@link #adjacentNodeValues} cannot be combined into a single
LinkedHashMap because one target node may be mapped to both a predecessor and a successor. A
LinkedHashMap combines two such edges into a single node-value pair, even though the edges may
not have been inserted consecutively.
|
java
|
android/guava/src/com/google/common/graph/DirectedGraphConnections.java
| 233
|
[] | true
| 4
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
wakeupTrigger
|
WakeupTrigger wakeupTrigger() {
return wakeupTrigger;
}
|
Get the current subscription. or an empty set if no such call has
been made.
@return The set of topics currently subscribed to
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,868
|
[] |
WakeupTrigger
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
read_array
|
def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
max_header_size=_MAX_HEADER_SIZE):
"""
Read an array from an NPY file.
Parameters
----------
fp : file_like object
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
Whether to allow writing pickled data. Default: False
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2.
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
Returns
-------
array : ndarray
The array from the data on disk.
Raises
------
ValueError
If the data is invalid, or allow_pickle=False and the file contains
an object array.
"""
if allow_pickle:
# Effectively ignore max_header_size, since `allow_pickle` indicates
# that the input is fully trusted.
max_header_size = 2**64
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(
fp, version, max_header_size=max_header_size)
if len(shape) == 0:
count = 1
else:
count = numpy.multiply.reduce(shape, dtype=numpy.int64)
# Now read the actual data.
if dtype.hasobject:
# The array contained Python objects. We need to unpickle the data.
if not allow_pickle:
raise ValueError("Object arrays cannot be loaded when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
try:
array = pickle.load(fp, **pickle_kwargs)
except UnicodeError as err:
# Friendlier error message
raise UnicodeError("Unpickling a python object failed: %r\n"
"You may need to pass the encoding= option "
"to numpy.load" % (err,)) from err
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
array = numpy.fromfile(fp, dtype=dtype, count=count)
else:
# This is not a real file. We have to read it the
# memory-intensive way.
# crc32 module fails on reads greater than 2 ** 32 bytes,
# breaking large reads from gzip streams. Chunk reads to
# BUFFER_SIZE bytes to avoid issue and reduce memory overhead
# of the read. In non-chunked case count < max_read_count, so
# only one read is performed.
# Use np.ndarray instead of np.empty since the latter does
# not correctly instantiate zero-width string dtypes; see
# https://github.com/numpy/numpy/pull/6430
array = numpy.ndarray(count, dtype=dtype)
if dtype.itemsize > 0:
# If dtype.itemsize == 0 then there's nothing more to read
max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
for i in range(0, count, max_read_count):
read_count = min(max_read_count, count - i)
read_size = int(read_count * dtype.itemsize)
data = _read_bytes(fp, read_size, "array data")
array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype,
count=read_count)
if array.size != count:
raise ValueError(
"Failed to read all data for array. "
f"Expected {shape} = {count} elements, "
f"could only read {array.size} elements. "
"(file seems not fully written?)"
)
if fortran_order:
array = array.reshape(shape[::-1])
array = array.transpose()
else:
array = array.reshape(shape)
return array
|
Read an array from an NPY file.
Parameters
----------
fp : file_like object
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
Whether to allow writing pickled data. Default: False
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2.
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
Returns
-------
array : ndarray
The array from the data on disk.
Raises
------
ValueError
If the data is invalid, or allow_pickle=False and the file contains
an object array.
|
python
|
numpy/lib/_format_impl.py
| 781
|
[
"fp",
"allow_pickle",
"pickle_kwargs",
"max_header_size"
] | false
| 15
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
maybeTruncateReason
|
public static String maybeTruncateReason(final String reason) {
if (reason.length() > 255) {
return reason.substring(0, 255);
} else {
return reason;
}
}
|
Ensures that the provided {@code reason} remains within a range of 255 chars.
@param reason This is the reason that is sent to the broker over the wire
as a part of {@code JoinGroupRequest} or {@code LeaveGroupRequest} messages.
@return a provided reason as is or truncated reason if it exceeds the 255 chars threshold.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java
| 78
|
[
"reason"
] |
String
| true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
_cum_func
|
def _cum_func(
func: Callable,
values: np.ndarray,
*,
skipna: bool = True,
) -> np.ndarray:
"""
Accumulations for 1D datetimelike arrays.
Parameters
----------
func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate
values : np.ndarray
Numpy array with the values (can be of any dtype that support the
operation). Values is changed is modified inplace.
skipna : bool, default True
Whether to skip NA.
"""
try:
fill_value = {
np.maximum.accumulate: np.iinfo(np.int64).min,
np.cumsum: 0,
np.minimum.accumulate: np.iinfo(np.int64).max,
}[func]
except KeyError as err:
raise ValueError(
f"No accumulation for {func} implemented on BaseMaskedArray"
) from err
mask = isna(values)
y = values.view("i8")
y[mask] = fill_value
if not skipna:
mask = np.maximum.accumulate(mask)
# GH 57956
result = func(y, axis=0)
result[mask] = iNaT
if values.dtype.kind in "mM":
return result.view(values.dtype.base)
return result
|
Accumulations for 1D datetimelike arrays.
Parameters
----------
func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate
values : np.ndarray
Numpy array with the values (can be of any dtype that support the
operation). Values is changed is modified inplace.
skipna : bool, default True
Whether to skip NA.
|
python
|
pandas/core/array_algos/datetimelike_accumulations.py
| 19
|
[
"func",
"values",
"skipna"
] |
np.ndarray
| true
| 3
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
is_datetime64_any_dtype
|
def is_datetime64_any_dtype(arr_or_dtype) -> bool:
"""
Check whether the provided array or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
bool
Whether or not the array or dtype is of the datetime64 dtype.
See Also
--------
api.types.is_datetime64_dtype : Check whether an array-like or dtype is of the
datetime64 dtype.
api.is_datetime64_ns_dtype : Check whether the provided array or dtype is of the
datetime64[ns] dtype.
api.is_datetime64tz_dtype : Check whether an array-like or dtype is of a
DatetimeTZDtype dtype.
Examples
--------
>>> from pandas.api.types import is_datetime64_any_dtype
>>> from pandas.api.types import DatetimeTZDtype
>>> is_datetime64_any_dtype(str)
False
>>> is_datetime64_any_dtype(int)
False
>>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
True
>>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_any_dtype(np.array(["a", "b"]))
False
>>> is_datetime64_any_dtype(np.array([1, 2]))
False
>>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
True
>>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
True
"""
if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)):
# GH#33400 fastpath for dtype object
return arr_or_dtype.kind == "M"
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
except TypeError:
return False
return (
lib.is_np_dtype(tipo, "M")
or isinstance(tipo, DatetimeTZDtype)
or (isinstance(tipo, ExtensionDtype) and tipo.kind == "M")
)
|
Check whether the provided array or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
bool
Whether or not the array or dtype is of the datetime64 dtype.
See Also
--------
api.types.is_datetime64_dtype : Check whether an array-like or dtype is of the
datetime64 dtype.
api.is_datetime64_ns_dtype : Check whether the provided array or dtype is of the
datetime64[ns] dtype.
api.is_datetime64tz_dtype : Check whether an array-like or dtype is of a
DatetimeTZDtype dtype.
Examples
--------
>>> from pandas.api.types import is_datetime64_any_dtype
>>> from pandas.api.types import DatetimeTZDtype
>>> is_datetime64_any_dtype(str)
False
>>> is_datetime64_any_dtype(int)
False
>>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
True
>>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_any_dtype(np.array(["a", "b"]))
False
>>> is_datetime64_any_dtype(np.array([1, 2]))
False
>>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]"))
True
>>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]"))
True
|
python
|
pandas/core/dtypes/common.py
| 999
|
[
"arr_or_dtype"
] |
bool
| true
| 6
| 7.92
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
bootstrap_plot
|
def bootstrap_plot(
series: Series,
fig: Figure | None = None,
size: int = 50,
samples: int = 500,
**kwds,
) -> Figure:
"""
Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
less than or equal to the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.figure.Figure
Matplotlib figure.
See Also
--------
DataFrame.plot : Basic plotting for DataFrame objects.
Series.plot : Basic plotting for Series objects.
Examples
--------
This example draws a basic bootstrap plot for a Series.
.. plot::
:context: close-figs
>>> s = pd.Series(np.random.uniform(size=100))
>>> pd.plotting.bootstrap_plot(s) # doctest: +SKIP
<Figure size 640x480 with 6 Axes>
"""
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.bootstrap_plot(
series=series, fig=fig, size=size, samples=samples, **kwds
)
|
Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
less than or equal to the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.figure.Figure
Matplotlib figure.
See Also
--------
DataFrame.plot : Basic plotting for DataFrame objects.
Series.plot : Basic plotting for Series objects.
Examples
--------
This example draws a basic bootstrap plot for a Series.
.. plot::
:context: close-figs
>>> s = pd.Series(np.random.uniform(size=100))
>>> pd.plotting.bootstrap_plot(s) # doctest: +SKIP
<Figure size 640x480 with 6 Axes>
|
python
|
pandas/plotting/_misc.py
| 439
|
[
"series",
"fig",
"size",
"samples"
] |
Figure
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
negativeBuckets
|
@Override
public ExponentialHistogram.Buckets negativeBuckets() {
return negativeBuckets;
}
|
Attempts to add a bucket to the positive or negative range of this histogram.
<br>
Callers must adhere to the following rules:
<ul>
<li>All buckets for the negative values range must be provided before the first one from the positive values range.</li>
<li>For both the negative and positive ranges, buckets must be provided with their indices in ascending order.</li>
<li>It is not allowed to provide the same bucket more than once.</li>
<li>It is not allowed to add empty buckets ({@code count <= 0}).</li>
</ul>
If any of these rules are violated, this call will fail with an exception.
If the bucket cannot be added because the maximum capacity has been reached, the call will not modify the state
of this histogram and will return {@code false}.
@param index the index of the bucket to add
@param count the count to associate with the given bucket
@param isPositive {@code true} if the bucket belongs to the positive range, {@code false} if it belongs to the negative range
@return {@code true} if the bucket was added, {@code false} if it could not be added due to insufficient capacity
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
| 196
|
[] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
hitRate
|
public double hitRate() {
long requestCount = requestCount();
return (requestCount == 0) ? 1.0 : (double) hitCount / requestCount;
}
|
Returns the ratio of cache requests which were hits. This is defined as {@code hitCount /
requestCount}, or {@code 1.0} when {@code requestCount == 0}. Note that {@code hitRate +
missRate =~ 1.0}.
|
java
|
android/guava/src/com/google/common/cache/CacheStats.java
| 123
|
[] | true
| 2
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
makeFindCoordinatorRequest
|
NetworkClientDelegate.UnsentRequest makeFindCoordinatorRequest(final long currentTimeMs) {
coordinatorRequestState.onSendAttempt(currentTimeMs);
FindCoordinatorRequestData data = new FindCoordinatorRequestData()
.setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id())
.setKey(this.groupId);
NetworkClientDelegate.UnsentRequest unsentRequest = new NetworkClientDelegate.UnsentRequest(
new FindCoordinatorRequest.Builder(data),
Optional.empty()
);
return unsentRequest.whenComplete((clientResponse, throwable) -> {
getAndClearFatalError();
if (clientResponse != null) {
FindCoordinatorResponse response = (FindCoordinatorResponse) clientResponse.responseBody();
onResponse(clientResponse.receivedTimeMs(), response);
} else {
onFailedResponse(unsentRequest.handler().completionTimeMs(), throwable);
}
});
}
|
Poll for the FindCoordinator request.
If we don't need to discover a coordinator, this method will return a PollResult with Long.MAX_VALUE backoff time and an empty list.
If we are still backing off from a previous attempt, this method will return a PollResult with the remaining backoff time and an empty list.
Otherwise, this returns will return a PollResult with a singleton list of UnsentRequest and Long.MAX_VALUE backoff time.
Note that this method does not involve any actual network IO, and it only determines if we need to send a new request or not.
@param currentTimeMs current time in ms.
@return {@link NetworkClientDelegate.PollResult}. This will not be {@code null}.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java
| 113
|
[
"currentTimeMs"
] | true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
parseStringItems
|
private Map<String, String> parseStringItems(JSONObject json) throws JSONException {
Map<String, String> result = new HashMap<>();
for (Iterator<?> iterator = json.keys(); iterator.hasNext();) {
String key = (String) iterator.next();
Object value = json.get(key);
if (value instanceof String string) {
result.put(key, string);
}
}
return result;
}
|
Returns the defaults applicable to the service.
@return the defaults of the service
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrServiceMetadata.java
| 216
|
[
"json"
] | true
| 3
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "RandomUtils [random=" + random() + "]";
}
|
Generates a random long within the specified range.
@param startInclusive the smallest value that can be returned, must be non-negative.
@param endExclusive the upper bound (not included).
@throws IllegalArgumentException if {@code startInclusive > endExclusive} or if {@code startInclusive} is negative.
@return the random long.
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 454
|
[] |
String
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_last_ti
|
def get_last_ti(self, dag: SerializedDAG, session: Session = NEW_SESSION) -> TI | None:
"""Get Last TI from the dagrun to build and pass Execution context object from server to then run callbacks."""
tis = self.get_task_instances(session=session)
# tis from a dagrun may not be a part of dag.partial_subset,
# since dag.partial_subset is a subset of the dag.
# This ensures that we will only use the accessible TI
# context for the callback.
if dag.partial:
tis = [ti for ti in tis if not ti.state == State.NONE]
# filter out removed tasks
tis = natsorted(
(ti for ti in tis if ti.state != TaskInstanceState.REMOVED),
key=lambda ti: ti.task_id,
)
if not tis:
return None
ti = tis[-1] # get last TaskInstance of DagRun
return ti
|
Get Last TI from the dagrun to build and pass Execution context object from server to then run callbacks.
|
python
|
airflow-core/src/airflow/models/dagrun.py
| 1,409
|
[
"self",
"dag",
"session"
] |
TI | None
| true
| 3
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
coordinatorNotAvailable
|
public static <T> RequestFuture<T> coordinatorNotAvailable() {
return failure(Errors.COORDINATOR_NOT_AVAILABLE.exception());
}
|
Convert from a request future of one type to another type
@param adapter The adapter which does the conversion
@param <S> The type of the future adapted to
@return The new future
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 243
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
adopt_or_reset_orphaned_tasks
|
def adopt_or_reset_orphaned_tasks(self, session: Session = NEW_SESSION) -> int:
"""
Adopt or reset any TaskInstance in resettable state if its SchedulerJob is no longer running.
:return: the number of TIs reset
"""
self.log.info("Adopting or resetting orphaned tasks for active dag runs")
timeout = conf.getint("scheduler", "scheduler_health_check_threshold")
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
result = session.execute(
update(Job)
.where(
Job.job_type == "SchedulerJob",
Job.state == JobState.RUNNING,
Job.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.values(state=JobState.FAILED)
)
num_failed: int = getattr(result, "rowcount", 0)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + "_end", num_failed)
query = (
select(TI)
.options(lazyload(TI.dag_run)) # avoids double join to dag_run
.where(TI.state.in_(State.adoptable_states))
.join(TI.queued_by_job)
.where(Job.state.is_distinct_from(JobState.RUNNING))
.join(TI.dag_run)
.where(DagRun.state == DagRunState.RUNNING)
.options(load_only(TI.dag_id, TI.task_id, TI.run_id))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_adopt_or_reset_query = with_row_locks(
query, of=TI, session=session, skip_locked=True
)
tis_to_adopt_or_reset: list[TaskInstance] = list(
session.scalars(tis_to_adopt_or_reset_query)
)
to_reset: list[TaskInstance] = []
exec_to_tis = self._executor_to_tis(tis_to_adopt_or_reset, session)
for executor, tis in exec_to_tis.items():
to_reset.extend(executor.try_adopt_task_instances(tis))
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = None
ti.queued_by_job_id = None
for ti in set(tis_to_adopt_or_reset) - set(to_reset):
ti.queued_by_job_id = self.job.id
# If old ti from Airflow 2 and last_heartbeat_at is None, set last_heartbeat_at to now
if ti.last_heartbeat_at is None:
ti.last_heartbeat_at = timezone.utcnow()
# If old ti from Airflow 2 and dag_run.conf is None, set dag_run.conf to {}
if ti.dag_run.conf is None:
ti.dag_run.conf = {}
Stats.incr("scheduler.orphaned_tasks.cleared", len(to_reset))
Stats.incr("scheduler.orphaned_tasks.adopted", len(tis_to_adopt_or_reset) - len(to_reset))
if to_reset:
task_instance_str = "\n\t".join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session
# commit (or if passed a session, let caller decide when to commit
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
|
Adopt or reset any TaskInstance in resettable state if its SchedulerJob is no longer running.
:return: the number of TIs reset
|
python
|
airflow-core/src/airflow/jobs/scheduler_job_runner.py
| 2,527
|
[
"self",
"session"
] |
int
| true
| 9
| 6.64
|
apache/airflow
| 43,597
|
unknown
| false
|
checkInvalidTopics
|
private void checkInvalidTopics(Cluster cluster) {
if (!cluster.invalidTopics().isEmpty()) {
log.error("Metadata response reported invalid topics {}", cluster.invalidTopics());
invalidTopics = new HashSet<>(cluster.invalidTopics());
}
}
|
Updates the partition-leadership info in the metadata. Update is done by merging existing metadata with the input leader information and nodes.
This is called whenever partition-leadership updates are returned in a response from broker(ex - ProduceResponse & FetchResponse).
Note that the updates via Metadata RPC are handled separately in ({@link #update}).
Both partitionLeader and leaderNodes override the existing metadata. Non-overlapping metadata is kept as it is.
@param partitionLeaders map of new leadership information for partitions.
@param leaderNodes a list of nodes for leaders in the above map.
@return a set of partitions, for which leaders were updated.
|
java
|
clients/src/main/java/org/apache/kafka/clients/Metadata.java
| 469
|
[
"cluster"
] |
void
| true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
containsSingleton
|
boolean containsSingleton(String beanName);
|
Check if this registry contains a singleton instance with the given name.
<p>Only checks already instantiated singletons; does not return {@code true}
for singleton bean definitions which have not been instantiated yet.
<p>The main purpose of this method is to check manually registered singletons
(see {@link #registerSingleton}). Can also be used to check whether a
singleton defined by a bean definition has already been created.
<p>To check whether a bean factory contains a bean definition with a given name,
use ListableBeanFactory's {@code containsBeanDefinition}. Calling both
{@code containsBeanDefinition} and {@code containsSingleton} answers
whether a specific bean factory contains a local bean instance with the given name.
<p>Use BeanFactory's {@code containsBean} for general checks whether the
factory knows about a bean with a given name (whether manually registered singleton
instance or created by bean definition), also checking ancestor factories.
<p><b>NOTE:</b> This lookup method is not aware of FactoryBean prefixes or aliases.
You need to resolve the canonical bean name first before checking the singleton status.
@param beanName the name of the bean to look for
@return if this bean factory contains a singleton instance with the given name
@see #registerSingleton
@see org.springframework.beans.factory.ListableBeanFactory#containsBeanDefinition
@see org.springframework.beans.factory.BeanFactory#containsBean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/SingletonBeanRegistry.java
| 110
|
[
"beanName"
] | true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
currentLeader
|
public synchronized LeaderAndEpoch currentLeader(TopicPartition topicPartition) {
Optional<MetadataResponse.PartitionMetadata> maybeMetadata = partitionMetadataIfCurrent(topicPartition);
if (maybeMetadata.isEmpty())
return new LeaderAndEpoch(Optional.empty(), Optional.ofNullable(lastSeenLeaderEpochs.get(topicPartition)));
MetadataResponse.PartitionMetadata partitionMetadata = maybeMetadata.get();
Optional<Integer> leaderEpochOpt = partitionMetadata.leaderEpoch;
Optional<Node> leaderNodeOpt = partitionMetadata.leaderId.flatMap(metadataSnapshot::nodeById);
return new LeaderAndEpoch(leaderNodeOpt, leaderEpochOpt);
}
|
@return a mapping from topic names to topic IDs for all topics with valid IDs in the cache
|
java
|
clients/src/main/java/org/apache/kafka/clients/Metadata.java
| 295
|
[
"topicPartition"
] |
LeaderAndEpoch
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
appendExportsOfVariableStatement
|
function appendExportsOfVariableStatement(statements: Statement[] | undefined, node: VariableStatement, exportSelf: boolean): Statement[] | undefined {
if (moduleInfo.exportEquals) {
return statements;
}
for (const decl of node.declarationList.declarations) {
if (decl.initializer || exportSelf) {
statements = appendExportsOfBindingElement(statements, decl, exportSelf);
}
}
return statements;
}
|
Appends the exports of a VariableStatement to a statement list, returning the statement
list.
@param statements A statement list to which the down-level export statements are to be
appended. If `statements` is `undefined`, a new array is allocated if statements are
appended.
@param node The VariableStatement whose exports are to be recorded.
@param exportSelf A value indicating whether to also export each VariableDeclaration of
`nodes` declaration list.
|
typescript
|
src/compiler/transformers/module/system.ts
| 1,074
|
[
"statements",
"node",
"exportSelf"
] | true
| 4
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
withUpperBounds
|
public WildcardTypeBuilder withUpperBounds(final Type... bounds) {
this.upperBounds = bounds;
return this;
}
|
Specify upper bounds of the wildcard type to build.
@param bounds to set.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 218
|
[] |
WildcardTypeBuilder
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
__call__
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
length_scale**2
)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
if evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
python
|
sklearn/gaussian_process/kernels.py
| 1,525
|
[
"self",
"X",
"Y",
"eval_gradient"
] | false
| 10
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
eye
|
def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
device : str, optional
The device on which to place the created array. Default: None.
For Array-API interoperability only, so must be ``"cpu"`` if passed.
.. versionadded:: 2.0.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> import numpy as np
>>> np.eye(2, dtype=np.int_)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
if like is not None:
return _eye_with_like(
like, N, M=M, k=k, dtype=dtype, order=order, device=device
)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order, device=device)
if k >= M:
return m
# Ensure M and k are integers, so we don't get any surprise casting
# results in the expressions `M-k` and `M+1` used below. This avoids
# a problem with inputs with type (for example) np.uint64.
M = operator.index(M)
k = operator.index(k)
if k >= 0:
i = k
else:
i = (-k) * M
m[:M - k].flat[i::M + 1] = 1
return m
|
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
device : str, optional
The device on which to place the created array. Default: None.
For Array-API interoperability only, so must be ``"cpu"`` if passed.
.. versionadded:: 2.0.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> import numpy as np
>>> np.eye(2, dtype=np.int_)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
|
python
|
numpy/lib/_twodim_base_impl.py
| 178
|
[
"N",
"M",
"k",
"dtype",
"order",
"device",
"like"
] | false
| 6
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
instantiate
|
public @Nullable T instantiate(@Nullable ClassLoader classLoader, String name) {
return instantiate(TypeSupplier.forName(classLoader, name));
}
|
Instantiate the given set of class name, injecting constructor arguments as
necessary.
@param classLoader the source classloader
@param name the class name to instantiate
@return an instantiated instance
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/Instantiator.java
| 149
|
[
"classLoader",
"name"
] |
T
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
nextIndex
|
int nextIndex(int index) {
return (index + 1 < size) ? index + 1 : -1;
}
|
Constructs a new instance of {@code ObjectCountHashMap} with the specified capacity.
@param capacity the initial capacity of this {@code ObjectCountHashMap}.
|
java
|
android/guava/src/com/google/common/collect/ObjectCountHashMap.java
| 177
|
[
"index"
] | true
| 2
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
nextInTable
|
boolean nextInTable() {
while (nextTableIndex >= 0) {
if ((nextEntry = currentTable.get(nextTableIndex--)) != null) {
if (advanceTo(nextEntry) || nextInChain()) {
return true;
}
}
}
return false;
}
|
Finds the next entry in the current table. Returns true if an entry was found.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 4,261
|
[] | true
| 5
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
rangeEndTimestamp
|
private long rangeEndTimestamp() {
return BASE_TIMESTAMP + (long) (batchSize * deltaTime * queryRange);
}
|
Calculates the upper bound for the timestamp range query based on {@code batchSize},
{@code deltaTime}, and {@code queryRange}.
@return the computed upper bound for the timestamp range query
|
java
|
benchmarks/src/main/java/org/elasticsearch/benchmark/search/query/range/DateFieldMapperDocValuesSkipperBenchmark.java
| 269
|
[] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
lastIndexOfIgnoreCase
|
@Deprecated
public static int lastIndexOfIgnoreCase(final CharSequence str, final CharSequence searchStr, final int startPos) {
return Strings.CI.lastIndexOf(str, searchStr, startPos);
}
|
Case in-sensitive find of the last index within a CharSequence from the specified position.
<p>
A {@code null} CharSequence will return {@code -1}. A negative start position returns {@code -1}. An empty ("") search CharSequence always matches unless
the start position is negative. A start position greater than the string length searches the whole string. The search starts at the startPos and works
backwards; matches starting after the start position are ignored.
</p>
<pre>
StringUtils.lastIndexOfIgnoreCase(null, *, *) = -1
StringUtils.lastIndexOfIgnoreCase(*, null, *) = -1
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "A", 8) = 7
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "B", 8) = 5
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "AB", 8) = 4
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "B", 9) = 5
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "B", -1) = -1
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "A", 0) = 0
StringUtils.lastIndexOfIgnoreCase("aabaabaa", "B", 0) = -1
</pre>
@param str the CharSequence to check, may be null.
@param searchStr the CharSequence to find, may be null.
@param startPos the start position.
@return the last index of the search CharSequence (always ≤ startPos), -1 if no match or {@code null} input.
@since 2.5
@since 3.0 Changed signature from lastIndexOfIgnoreCase(String, String, int) to lastIndexOfIgnoreCase(CharSequence, CharSequence, int)
@deprecated Use {@link Strings#lastIndexOf(CharSequence, CharSequence, int) Strings.CI.lastIndexOf(CharSequence, CharSequence, int)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 4,992
|
[
"str",
"searchStr",
"startPos"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
describeCluster
|
default DescribeClusterResult describeCluster() {
return describeCluster(new DescribeClusterOptions());
}
|
Get information about the nodes in the cluster, using the default options.
<p>
This is a convenience method for {@link #describeCluster(DescribeClusterOptions)} with default options.
See the overload for more details.
@return The DescribeClusterResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 342
|
[] |
DescribeClusterResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
getDefaultNativeImageArguments
|
protected List<String> getDefaultNativeImageArguments(String applicationClassName) {
List<String> args = new ArrayList<>();
args.add("-H:Class=" + applicationClassName);
args.add("--no-fallback");
return args;
}
|
Return the native image arguments to use.
<p>By default, the main class to use, as well as standard application flags
are added.
<p>If the returned list is empty, no {@code native-image.properties} is
contributed.
@param applicationClassName the fully qualified class name of the application
entry point
@return the native image options to contribute
|
java
|
spring-context/src/main/java/org/springframework/context/aot/ContextAotProcessor.java
| 136
|
[
"applicationClassName"
] | true
| 1
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toMillisInt
|
public static int toMillisInt(final Duration duration) {
Objects.requireNonNull(duration, "duration");
// intValue() does not do a narrowing conversion here
return LONG_TO_INT_RANGE.fit(Long.valueOf(duration.toMillis())).intValue();
}
|
Converts a Duration to milliseconds bound to an int (instead of a long).
<p>
Handy for low-level APIs that take millisecond timeouts in ints rather than longs.
</p>
<ul>
<li>If the duration milliseconds are greater than {@link Integer#MAX_VALUE}, then return
{@link Integer#MAX_VALUE}.</li>
<li>If the duration milliseconds are lesser than {@link Integer#MIN_VALUE}, then return
{@link Integer#MIN_VALUE}.</li>
</ul>
@param duration The duration to convert, not null.
@return int milliseconds.
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationUtils.java
| 252
|
[
"duration"
] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
stripAll
|
public static String[] stripAll(final String[] strs, final String stripChars) {
final int strsLen = ArrayUtils.getLength(strs);
if (strsLen == 0) {
return strs;
}
return ArrayUtils.setAll(new String[strsLen], i -> strip(strs[i], stripChars));
}
|
Strips any of a set of characters from the start and end of every String in an array.
<p>
Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<p>
A new array is returned each time, except for length zero. A {@code null} array will return {@code null}. An empty array will return itself. A
{@code null} array entry will be ignored. A {@code null} stripChars will strip whitespace as defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.stripAll(null, *) = null
StringUtils.stripAll([], *) = []
StringUtils.stripAll(["abc", " abc"], null) = ["abc", "abc"]
StringUtils.stripAll(["abc ", null], null) = ["abc", null]
StringUtils.stripAll(["abc ", null], "yz") = ["abc ", null]
StringUtils.stripAll(["yabcz", null], "yz") = ["abc", null]
</pre>
@param strs the array to remove characters from, may be null.
@param stripChars the characters to remove, null treated as whitespace.
@return the stripped Strings, {@code null} if null array input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,918
|
[
"strs",
"stripChars"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isExcluded
|
private boolean isExcluded(RegisteredBean registeredBean) {
if (isImplicitlyExcluded(registeredBean)) {
return true;
}
for (BeanRegistrationExcludeFilter excludeFilter : this.excludeFilters) {
if (excludeFilter.isExcludedFromAotProcessing(registeredBean)) {
logger.trace(LogMessage.format(
"Excluding registered bean '%s' from bean factory %s due to %s",
registeredBean.getBeanName(),
ObjectUtils.identityToString(registeredBean.getBeanFactory()),
excludeFilter.getClass().getName()));
return true;
}
}
return false;
}
|
Return a {@link BeanDefinitionMethodGenerator} for the given
{@link RegisteredBean} or {@code null} if the registered bean is excluded
by a {@link BeanRegistrationExcludeFilter}. The resulting
{@link BeanDefinitionMethodGenerator} will include all
{@link BeanRegistrationAotProcessor} provided contributions.
@param registeredBean the registered bean
@return a new {@link BeanDefinitionMethodGenerator} instance or {@code null}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanDefinitionMethodGeneratorFactory.java
| 116
|
[
"registeredBean"
] | true
| 3
| 7.12
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
setCacheOperationSources
|
public void setCacheOperationSources(CacheOperationSource... cacheOperationSources) {
Assert.notEmpty(cacheOperationSources, "At least 1 CacheOperationSource needs to be specified");
this.cacheOperationSource = (cacheOperationSources.length > 1 ?
new CompositeCacheOperationSource(cacheOperationSources) : cacheOperationSources[0]);
}
|
Set one or more cache operation sources which are used to find the cache
attributes. If more than one source is provided, they will be aggregated
using a {@link CompositeCacheOperationSource}.
@see #setCacheOperationSource
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 175
|
[] |
void
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get_rows
|
def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]:
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= infer_nrows:
break
self.buffer = iter(buffer_rows)
return detect_rows
|
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
|
python
|
pandas/io/parsers/python_parser.py
| 1,420
|
[
"self",
"infer_nrows",
"skiprows"
] |
list[str]
| true
| 5
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
throwIfInPreparedState
|
private void throwIfInPreparedState() {
if (transactionManager != null &&
transactionManager.isTransactional() &&
transactionManager.isPrepared()
) {
throw new IllegalStateException("Cannot perform operation while the transaction is in a prepared state. " +
"Only commitTransaction(), abortTransaction(), or completeTransaction() are permitted.");
}
}
|
Throws an exception if the transaction is in a prepared state.
In a two-phase commit (2PC) flow, once a transaction enters the prepared state,
only commit, abort, or complete operations are allowed.
@throws IllegalStateException if any other operation is attempted in the prepared state.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
| 1,080
|
[] |
void
| true
| 4
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
getPackageName
|
public static String getPackageName(String className) {
if (StringUtils.isEmpty(className)) {
return StringUtils.EMPTY;
}
int i = 0;
// Strip array encoding
while (className.charAt(i) == '[') {
i++;
}
className = className.substring(i);
// Strip Object type encoding
if (className.charAt(0) == 'L' && className.charAt(className.length() - 1) == ';') {
className = className.substring(1);
}
i = className.lastIndexOf(PACKAGE_SEPARATOR_CHAR);
if (i == -1) {
return StringUtils.EMPTY;
}
return className.substring(0, i);
}
|
Gets the package name from a {@link String}.
<p>
The string passed in is assumed to be a class name.
</p>
<p>
If the class is unpackaged, return an empty string.
</p>
@param className the className to get the package name for, may be {@code null}.
@return the package name or an empty string.
|
java
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
| 809
|
[
"className"
] |
String
| true
| 6
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
commitAsync
|
@Override
public synchronized void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
ensureNotClosed();
committed.putAll(offsets);
if (callback != null) {
callback.onComplete(offsets, null);
}
}
|
Sets the maximum number of records returned in a single call to {@link #poll(Duration)}.
@param maxPollRecords the max.poll.records.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java
| 352
|
[
"offsets",
"callback"
] |
void
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
constrainLng
|
private static double constrainLng(double lng) {
while (lng > Math.PI) {
lng = lng - Constants.M_2PI;
}
while (lng < -Math.PI) {
lng = lng + Constants.M_2PI;
}
return lng;
}
|
constrainLng makes sure longitudes are in the proper bounds
@param lng The origin lng value
@return The corrected lng value
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java
| 118
|
[
"lng"
] | true
| 3
| 7.28
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
visitImportDeclaration
|
function visitImportDeclaration(node: ImportDeclaration): VisitResult<Statement | undefined> {
let statements: Statement[] | undefined;
if (node.importClause) {
hoistVariableDeclaration(getLocalNameForExternalImport(factory, node, currentSourceFile)!); // TODO: GH#18217
}
return singleOrMany(appendExportsOfImportDeclaration(statements, node));
}
|
Visits an ImportDeclaration node.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/system.ts
| 753
|
[
"node"
] | true
| 2
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
readMoreChars
|
private void readMoreChars() throws IOException {
// Possibilities:
// 1) array has space available on right-hand side (between limit and capacity)
// 2) array has space available on left-hand side (before position)
// 3) array has no space available
//
// In case 2 we shift the existing chars to the left, and in case 3 we create a bigger
// array, then they both become case 1.
if (availableCapacity(charBuffer) == 0) {
if (charBuffer.position() > 0) {
// (2) There is room in the buffer. Move existing bytes to the beginning.
Java8Compatibility.flip(charBuffer.compact());
} else {
// (3) Entire buffer is full, need bigger buffer.
charBuffer = grow(charBuffer);
}
}
// (1) Read more characters into free space at end of array.
int limit = charBuffer.limit();
int numChars = reader.read(charBuffer.array(), limit, availableCapacity(charBuffer));
if (numChars == -1) {
endOfInput = true;
} else {
Java8Compatibility.limit(charBuffer, limit + numChars);
}
}
|
Handle the case of underflow caused by needing more input characters.
|
java
|
android/guava/src/com/google/common/io/ReaderInputStream.java
| 203
|
[] |
void
| true
| 4
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
check_and_track
|
def check_and_track(self, proxy_node: Proxy) -> bool:
"""
Check if a tensor can be added as a subgraph output without causing aliasing issues.
Given a proxy node, extracts its example tensor value and checks if its storage
aliases with any previously tracked storages (from inputs or other outputs).
If there's no aliasing conflict, the tensor's storage is added to the tracked set.
Args:
proxy_node: An FX proxy node whose example_value is the tensor to check.
Returns:
True if the tensor doesn't alias with tracked storages (safe to add as output),
False if it aliases (should be filtered out).
"""
from torch._higher_order_ops.utils import _collect_fake_inputs
from torch.multiprocessing.reductions import StorageWeakRef
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
example_value = _collect_fake_inputs([proxy_node])[0]
# Non-tensor outputs (e.g., symints) don't have aliasing concerns
if not isinstance(example_value, torch.Tensor):
return True
# Check if any storage aliases with existing inputs/outputs
tensor_storages = get_tensor_storages(example_value)
if tensor_storages & self.excluded_storages:
return False
# Track this tensor's storage (for wrapper subclasses, inner storages were already checked)
if not is_traceable_wrapper_subclass(example_value):
if not (example_value.is_sparse or example_value.is_sparse_csr):
self.excluded_storages.add(
StorageWeakRef(example_value._typed_storage())
)
return True
|
Check if a tensor can be added as a subgraph output without causing aliasing issues.
Given a proxy node, extracts its example tensor value and checks if its storage
aliases with any previously tracked storages (from inputs or other outputs).
If there's no aliasing conflict, the tensor's storage is added to the tracked set.
Args:
proxy_node: An FX proxy node whose example_value is the tensor to check.
Returns:
True if the tensor doesn't alias with tracked storages (safe to add as output),
False if it aliases (should be filtered out).
|
python
|
torch/_dynamo/variables/higher_order_ops.py
| 503
|
[
"self",
"proxy_node"
] |
bool
| true
| 6
| 8.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
get
|
@Override
public <T> @Nullable T get(Object key, Callable<T> valueLoader) {
try {
return valueLoader.call();
}
catch (Exception ex) {
throw new ValueRetrievalException(key, valueLoader, ex);
}
}
|
Create a {@link NoOpCache} instance with the specified name.
@param name the name of the cache
|
java
|
spring-context/src/main/java/org/springframework/cache/support/NoOpCache.java
| 73
|
[
"key",
"valueLoader"
] |
T
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_generate_kernel_inputs_key
|
def _generate_kernel_inputs_key(kernel_inputs: KernelInputs) -> str:
"""
Generate a key based on input node properties and scalars.
The key includes dtype, size, and stride information for each input node,
plus scalar values as key=value pairs separated by & signs.
"""
# Get node information using existing methods
dtypes = kernel_inputs.dtypes()
shapes = kernel_inputs.shapes_hinted()
strides = kernel_inputs.strides_hinted()
# Create tuple of (dtype, shape_list, stride_list) for each node
node_info = tuple(
(dtype, list(shape), list(stride))
for dtype, shape, stride in zip(dtypes, shapes, strides)
)
# Create base key from node information
fmt_key = str(node_info)
# Add scalar information if present
if kernel_inputs._scalars:
# Sort scalars for consistent key generation and join with &
scalar_parts = [
f"{key}={value}"
for key, value in sorted(kernel_inputs._scalars.items())
]
scalars_key = "&".join(scalar_parts)
fmt_key = f"{fmt_key}+{scalars_key}"
return f"{fmt_key}"
|
Generate a key based on input node properties and scalars.
The key includes dtype, size, and stride information for each input node,
plus scalar values as key=value pairs separated by & signs.
|
python
|
torch/_inductor/lookup_table/choices.py
| 59
|
[
"kernel_inputs"
] |
str
| true
| 2
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
handshakeFinished
|
private void handshakeFinished() throws IOException {
// SSLEngine.getHandshakeStatus is transient and it doesn't record FINISHED status properly.
// It can move from FINISHED status to NOT_HANDSHAKING after the handshake is completed.
// Hence we also need to check handshakeResult.getHandshakeStatus() if the handshake finished or not
if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) {
//we are complete if we have delivered the last packet
//remove OP_WRITE if we are complete, otherwise we still have data to write
if (netWriteBuffer.hasRemaining())
key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
else {
SSLSession session = sslEngine.getSession();
state = session.getProtocol().equals(TLS13) ? State.POST_HANDSHAKE : State.READY;
key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE);
log.debug("SSL handshake completed successfully with peerHost '{}' peerPort {} peerPrincipal '{}' protocol '{}' cipherSuite '{}'",
session.getPeerHost(), session.getPeerPort(), peerPrincipal(), session.getProtocol(), session.getCipherSuite());
metadataRegistry.registerCipherInformation(
new CipherInformation(session.getCipherSuite(), session.getProtocol()));
}
log.trace("SSLHandshake FINISHED channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {} ",
channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position());
} else {
throw new IOException("NOT_HANDSHAKING during handshake");
}
}
|
Checks if the handshake status is finished
Sets the interestOps for the selectionKey.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 453
|
[] |
void
| true
| 4
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
close
|
function close(fd, callback = defaultCloseCallback) {
if (callback !== defaultCloseCallback)
callback = makeCallback(callback);
const req = new FSReqCallback();
req.oncomplete = callback;
binding.close(fd, req);
}
|
Closes the file descriptor.
@param {number} fd
@param {(err?: Error) => any} [callback]
@returns {void}
|
javascript
|
lib/fs.js
| 498
|
[
"fd"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
optDouble
|
public double optDouble(String name, double fallback) {
Object object = opt(name);
Double result = JSON.toDouble(object);
return result != null ? result : fallback;
}
|
Returns the value mapped by {@code name} if it exists and is a double or can be
coerced to a double. Returns {@code fallback} otherwise.
@param name the name of the property
@param fallback a fallback value
@return the value or {@code fallback}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 461
|
[
"name",
"fallback"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
size
|
@Override
public long size() {
return count;
}
|
Returns the number of samples represented in this histogram. If you want to know how many
centroids are being used, try centroids().size().
@return the number of samples that have been added.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLTreeDigest.java
| 206
|
[] | true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.