function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
getStateType
|
private Class<?> getStateType() {
S state = getState();
if (state instanceof Enum<?> enumState) {
return enumState.getDeclaringClass();
}
return state.getClass();
}
|
Return the changed availability state.
@return the availability state
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/availability/AvailabilityChangeEvent.java
| 61
|
[] | true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getDatabaseType
|
public static String getDatabaseType(final Path database) throws IOException {
final long fileSize = Files.size(database);
try (InputStream in = Files.newInputStream(database)) {
// read the last BUFFER_SIZE bytes (or the fileSize, whichever is smaller)
final long skip = fileSize > BUFFER_SIZE ? fileSize - BUFFER_SIZE : 0;
final long skipped = in.skip(skip);
if (skipped != skip) {
throw new IOException("failed to skip [" + skip + "] bytes while reading [" + database + "]");
}
final byte[] tail = new byte[BUFFER_SIZE];
int read = 0;
int actualBytesRead;
do {
actualBytesRead = in.read(tail, read, BUFFER_SIZE - read);
read += actualBytesRead;
} while (actualBytesRead > 0);
// find the database_type header
int metadataOffset = -1;
int markerOffset = 0;
for (int i = 0; i < tail.length; i++) {
byte b = tail[i];
if (b == DATABASE_TYPE_MARKER[markerOffset]) {
markerOffset++;
} else {
markerOffset = 0;
}
if (markerOffset == DATABASE_TYPE_MARKER.length) {
metadataOffset = i + 1;
break;
}
}
if (metadataOffset == -1) {
throw new IOException("database type marker not found");
}
// read the database type
final int offsetByte = fromBytes(tail[metadataOffset]);
final int type = offsetByte >>> 5;
if (type != 2) { // 2 is the type indicator in the mmdb format for a UTF-8 string
throw new IOException("type must be UTF-8 string");
}
int size = offsetByte & 0x1f;
if (size == 29) {
// then we need to read in yet another byte and add it onto this size
// this can actually occur in practice, a 29+ character type description isn't that hard to imagine
size = 29 + fromBytes(tail[metadataOffset + 1]);
metadataOffset += 1;
} else if (size >= 30) {
// we'd need to read two or three more bytes to get the size, but this means the type length is >=285
throw new IOException("database_type too long [size indicator == " + size + "]");
}
return new String(tail, metadataOffset + 1, size, StandardCharsets.UTF_8);
}
}
|
Read the database type from the database. We do this manually instead of relying on the built-in mechanism to avoid reading the
entire database into memory merely to read the type. This is especially important to maintain on master nodes where pipelines are
validated. If we read the entire database into memory, we could potentially run into low-memory constraints on such nodes where
loading this data would otherwise be wasteful if they are not also ingest nodes.
@return the database type
@throws IOException if an I/O exception occurs reading the database type
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/MMDBUtil.java
| 41
|
[
"database"
] |
String
| true
| 10
| 7.12
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
translateInputToOutputLocationList
|
static DebugLocationsVector
translateInputToOutputLocationList(const BinaryFunction &BF,
const DebugLocationsVector &InputLL) {
DebugLocationsVector OutputLL;
// If the function hasn't changed - there's nothing to update.
if (!BF.isEmitted())
return InputLL;
for (const DebugLocationEntry &Entry : InputLL) {
DebugAddressRangesVector OutRanges =
BF.translateInputToOutputRange({Entry.LowPC, Entry.HighPC});
if (!OutRanges.empty() && !OutputLL.empty()) {
if (OutRanges.front().LowPC == OutputLL.back().HighPC &&
Entry.Expr == OutputLL.back().Expr) {
OutputLL.back().HighPC =
std::max(OutputLL.back().HighPC, OutRanges.front().HighPC);
OutRanges.erase(OutRanges.begin());
}
}
llvm::transform(OutRanges, std::back_inserter(OutputLL),
[&Entry](const DebugAddressRange &R) {
return DebugLocationEntry{R.LowPC, R.HighPC, Entry.Expr};
});
}
// Sort and merge adjacent entries with identical locations.
llvm::stable_sort(
OutputLL, [](const DebugLocationEntry &A, const DebugLocationEntry &B) {
return A.LowPC < B.LowPC;
});
DebugLocationsVector MergedLL;
uint64_t PrevHighPC = 0;
const SmallVectorImpl<uint8_t> *PrevExpr = nullptr;
for (const DebugLocationEntry &Entry : OutputLL) {
if (Entry.LowPC <= PrevHighPC && *PrevExpr == Entry.Expr) {
MergedLL.back().HighPC = std::max(Entry.HighPC, MergedLL.back().HighPC);
} else {
const uint64_t Begin = std::max(Entry.LowPC, PrevHighPC);
const uint64_t End = std::max(Begin, Entry.HighPC);
MergedLL.emplace_back(DebugLocationEntry{Begin, End, Entry.Expr});
}
PrevHighPC = MergedLL.back().HighPC;
PrevExpr = &MergedLL.back().Expr;
}
return MergedLL;
}
|
Similar to translateInputToOutputRanges() but operates on location lists.
|
cpp
|
bolt/lib/Rewrite/DWARFRewriter.cpp
| 133
|
[] | true
| 9
| 6.08
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
size
|
long size() throws IOException;
|
Return the size of this block.
@return the block size
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/DataBlock.java
| 38
|
[] | true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isServletApplication
|
private static boolean isServletApplication() {
for (String servletIndicatorClass : SERVLET_INDICATOR_CLASSES) {
if (!ClassUtils.isPresent(servletIndicatorClass, null)) {
return false;
}
}
return true;
}
|
Deduce the {@link WebApplicationType} from the current classpath.
@return the deduced web application
@since 4.0.1
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/WebApplicationType.java
| 73
|
[] | true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
endNode
|
function endNode(): void {
if (parent.children) {
mergeChildren(parent.children, parent);
sortChildren(parent.children);
}
parent = parentsStack.pop()!;
trackedEs5Classes = trackedEs5ClassesStack.pop();
}
|
Call after calling `startNode` and adding children to it.
|
typescript
|
src/services/navigationBar.ts
| 288
|
[] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
collate
|
public static List<Configurations> collate(Collection<Configurations> configurations) {
LinkedList<Configurations> collated = new LinkedList<>();
for (Configurations configuration : sortConfigurations(configurations)) {
if (collated.isEmpty() || collated.getLast().getClass() != configuration.getClass()) {
collated.add(configuration);
}
else {
collated.set(collated.size() - 1, collated.getLast().merge(configuration));
}
}
return collated;
}
|
Collate the given configuration by sorting and merging them.
@param configurations the source configuration
@return the collated configurations
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/annotation/Configurations.java
| 166
|
[
"configurations"
] | true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
clearAssignmentAndLeaveGroup
|
private void clearAssignmentAndLeaveGroup() {
subscriptions.unsubscribe();
clearAssignment();
// Transition to ensure that a heartbeat request is sent out to effectively leave the
// group (even in the case where the member had no assignment to release or when the
// callback execution failed.)
transitionToSendingLeaveGroup(false);
}
|
Transition to {@link MemberState#PREPARE_LEAVING} to release the assignment. Once completed,
transition to {@link MemberState#LEAVING} to send the heartbeat request and leave the group.
This is expected to be invoked when the user calls the unsubscribe API or is closing the consumer.
@param runCallbacks {@code true} to insert the step to execute the {@link ConsumerRebalanceListener} callback,
{@code false} to skip
@return Future that will complete when the callback execution completes and the heartbeat
to leave the group has been sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 627
|
[] |
void
| true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
indexOf
|
public static int indexOf(final Object[] array, final Object objectToFind) {
return indexOf(array, objectToFind, 0);
}
|
Finds the index of the given object in the array.
<p>
This method returns {@link #INDEX_NOT_FOUND} ({@code -1}) for a {@code null} input array.
</p>
@param array the array to search for the object, may be {@code null}.
@param objectToFind the object to find, may be {@code null}.
@return the index of the object within the array, {@link #INDEX_NOT_FOUND} ({@code -1}) if not found or {@code null} array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 2,701
|
[
"array",
"objectToFind"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
initializeClusterIPC
|
function initializeClusterIPC() {
if (process.argv[1] && process.env.NODE_UNIQUE_ID) {
const cluster = require('cluster');
cluster._setupWorker();
// Make sure it's not accidentally inherited by child processes.
delete process.env.NODE_UNIQUE_ID;
}
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 595
|
[] | false
| 3
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
expireAfterAccess
|
@Deprecated // GoodTime
@CanIgnoreReturnValue
public CacheBuilder<K, V> expireAfterAccess(long duration, TimeUnit unit) {
checkState(
expireAfterAccessNanos == UNSET_INT,
"expireAfterAccess was already set to %s ns",
expireAfterAccessNanos);
checkArgument(duration >= 0, "duration cannot be negative: %s %s", duration, unit);
this.expireAfterAccessNanos = unit.toNanos(duration);
return this;
}
|
Specifies that each entry should be automatically removed from the cache once a fixed duration
has elapsed after the entry's creation, the most recent replacement of its value, or its last
access. Access time is reset by all cache read and write operations (including {@code
Cache.asMap().get(Object)} and {@code Cache.asMap().put(K, V)}), but not by {@code
containsKey(Object)}, nor by operations on the collection-views of {@link Cache#asMap}. So, for
example, iterating through {@code Cache.asMap().entrySet()} does not reset access time for the
entries you retrieve.
<p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long)
maximumSize}{@code (0)}, ignoring any otherwise-specified maximum size or weight. This can be
useful in testing, or to disable caching temporarily without a code change.
<p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or
write operations. Expired entries are cleaned up as part of the routine maintenance described
in the class javadoc.
<p>If you can represent the duration as a {@link Duration} (which should be preferred when
feasible), use {@link #expireAfterAccess(Duration)} instead.
@param duration the length of time after an entry is last accessed that it should be
automatically removed
@param unit the unit that {@code duration} is expressed in
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalArgumentException if {@code duration} is negative
@throws IllegalStateException if {@link #expireAfterAccess} was already set
@deprecated Use {@link #expireAfterAccess(Duration)} instead.
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 836
|
[
"duration",
"unit"
] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
newReference
|
public static <V extends @Nullable Object> AtomicReference<V> newReference(
@ParametricNullness V initialValue) {
return new AtomicReference<>(initialValue);
}
|
Creates an {@code AtomicReference} instance with the given initial value.
@param initialValue the initial value
@return a new {@code AtomicReference} with the given initial value
|
java
|
android/guava/src/com/google/common/util/concurrent/Atomics.java
| 47
|
[
"initialValue"
] | true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
state_from_response
|
def state_from_response(response: dict[str, Any]) -> str:
"""
Get state from response dictionary.
:param response: response from AWS API
:return: execution state of the cluster step
"""
return response["Step"]["Status"]["State"]
|
Get state from response dictionary.
:param response: response from AWS API
:return: execution state of the cluster step
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
| 620
|
[
"response"
] |
str
| true
| 1
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
applyAsLong
|
long applyAsLong(T t, U u) throws E;
|
Applies this function to the given arguments.
@param t the first function argument
@param u the second function argument
@return the function result
@throws E Thrown when the function fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableToLongBiFunction.java
| 58
|
[
"t",
"u"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
visitArrayLiteralExpression
|
function visitArrayLiteralExpression(node: ArrayLiteralExpression): Expression {
if (some(node.elements, isSpreadElement)) {
// We are here because we contain a SpreadElementExpression.
return transformAndSpreadElements(node.elements, /*isArgumentList*/ false, !!node.multiLine, /*hasTrailingComma*/ !!node.elements.hasTrailingComma);
}
return visitEachChild(node, visitor, context);
}
|
Visits an ArrayLiteralExpression that contains a spread element.
@param node An ArrayLiteralExpression node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 4,308
|
[
"node"
] | true
| 2
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
repackage
|
private void repackage(JarFile sourceJar, File destination, Libraries libraries,
@Nullable FileTime lastModifiedTime) throws IOException {
try (JarWriter writer = new JarWriter(destination, lastModifiedTime)) {
write(sourceJar, libraries, writer, lastModifiedTime != null);
}
if (lastModifiedTime != null) {
destination.setLastModified(lastModifiedTime.toMillis());
}
}
|
Repackage to the given destination so that it can be launched using '
{@literal java -jar}'.
@param destination the destination file (may be the same as the source)
@param libraries the libraries required to run the archive
@param lastModifiedTime an optional last modified time to apply to the archive and
its contents
@throws IOException if the file cannot be repackaged
@since 4.0.0
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Repackager.java
| 138
|
[
"sourceJar",
"destination",
"libraries",
"lastModifiedTime"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
duplicated
|
def duplicated(self, keep: DropKeep = "first") -> Series:
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(["llama", "cow", "llama", "beetle", "llama"])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep="first")
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep="last")
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
res = self._duplicated(keep=keep)
result = self._constructor(res, index=self.index, copy=False)
return result.__finalize__(self, method="duplicated")
|
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> animals = pd.Series(["llama", "cow", "llama", "beetle", "llama"])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep="first")
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep="last")
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
|
python
|
pandas/core/series.py
| 2,339
|
[
"self",
"keep"
] |
Series
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
matrix_norm
|
def matrix_norm(x, /, *, keepdims=False, ord="fro"):
"""
Computes the matrix norm of a matrix (or a stack of matrices) ``x``.
This function is Array API compatible.
Parameters
----------
x : array_like
Input array having shape (..., M, N) and whose two innermost
dimensions form ``MxN`` matrices.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in
the result as dimensions with size one. Default: False.
ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional
The order of the norm. For details see the table under ``Notes``
in `numpy.linalg.norm`.
See Also
--------
numpy.linalg.norm : Generic norm function
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.matrix_norm(b)
7.745966692414834
>>> LA.matrix_norm(b, ord='fro')
7.745966692414834
>>> LA.matrix_norm(b, ord=np.inf)
9.0
>>> LA.matrix_norm(b, ord=-np.inf)
2.0
>>> LA.matrix_norm(b, ord=1)
7.0
>>> LA.matrix_norm(b, ord=-1)
6.0
>>> LA.matrix_norm(b, ord=2)
7.3484692283495345
>>> LA.matrix_norm(b, ord=-2)
1.8570331885190563e-016 # may vary
"""
x = asanyarray(x)
return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord)
|
Computes the matrix norm of a matrix (or a stack of matrices) ``x``.
This function is Array API compatible.
Parameters
----------
x : array_like
Input array having shape (..., M, N) and whose two innermost
dimensions form ``MxN`` matrices.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in
the result as dimensions with size one. Default: False.
ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional
The order of the norm. For details see the table under ``Notes``
in `numpy.linalg.norm`.
See Also
--------
numpy.linalg.norm : Generic norm function
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.matrix_norm(b)
7.745966692414834
>>> LA.matrix_norm(b, ord='fro')
7.745966692414834
>>> LA.matrix_norm(b, ord=np.inf)
9.0
>>> LA.matrix_norm(b, ord=-np.inf)
2.0
>>> LA.matrix_norm(b, ord=1)
7.0
>>> LA.matrix_norm(b, ord=-1)
6.0
>>> LA.matrix_norm(b, ord=2)
7.3484692283495345
>>> LA.matrix_norm(b, ord=-2)
1.8570331885190563e-016 # may vary
|
python
|
numpy/linalg/_linalg.py
| 3,440
|
[
"x",
"keepdims",
"ord"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
setBasicColor
|
function setBasicColor(styleCode: number): void {
// const theme = themeService.getColorTheme();
let colorType: 'foreground' | 'background' | undefined;
let colorIndex: number | undefined;
if (styleCode >= 30 && styleCode <= 37) {
colorIndex = styleCode - 30;
colorType = 'foreground';
} else if (styleCode >= 90 && styleCode <= 97) {
colorIndex = (styleCode - 90) + 8; // High-intensity (bright)
colorType = 'foreground';
} else if (styleCode >= 40 && styleCode <= 47) {
colorIndex = styleCode - 40;
colorType = 'background';
} else if (styleCode >= 100 && styleCode <= 107) {
colorIndex = (styleCode - 100) + 8; // High-intensity (bright)
colorType = 'background';
}
if (colorIndex !== undefined && colorType) {
changeColor(colorType, ansiColorIdentifiers[colorIndex]?.colorValue);
}
}
|
Calculate and set styling for basic bright and dark ANSI color codes. Uses
theme colors if available. Automatically distinguishes between foreground
and background colors; does not support color-clearing codes 39 and 49.
@param styleCode Integer color code on one of the following ranges:
[30-37, 90-97, 40-47, 100-107]. If not on one of these ranges, will do
nothing.
|
typescript
|
extensions/notebook-renderers/src/ansi.ts
| 356
|
[
"styleCode"
] | true
| 14
| 6.88
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return "(principal=" + (principal == null ? "<any>" : principal) +
", host=" + (host == null ? "<any>" : host) +
", operation=" + operation +
", permissionType=" + permissionType + ")";
}
|
Returns a string describing an ANY or UNKNOWN field, or null if there is
no such field.
|
java
|
clients/src/main/java/org/apache/kafka/common/acl/AccessControlEntryData.java
| 75
|
[] |
String
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
String toString(ToStringFormat format, boolean upperCase) {
String string = this.string[format.ordinal()];
if (string == null) {
string = buildToString(format);
this.string[format.ordinal()] = string;
}
return (!upperCase) ? string : string.toUpperCase(Locale.ENGLISH);
}
|
Returns {@code true} if this element is an ancestor (immediate or nested parent) of
the specified name.
@param name the name to check
@return {@code true} if this name is an ancestor
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 555
|
[
"format",
"upperCase"
] |
String
| true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
needsRefresh
|
@Contract("null, _ -> true")
public static boolean needsRefresh(@Nullable InjectionMetadata metadata, Class<?> clazz) {
return (metadata == null || metadata.needsRefresh(clazz));
}
|
Check whether the given injection metadata needs to be refreshed.
@param metadata the existing metadata instance
@param clazz the current target class
@return {@code true} indicating a refresh, {@code false} otherwise
@see #needsRefresh(Class)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/InjectionMetadata.java
| 186
|
[
"metadata",
"clazz"
] | true
| 2
| 7.2
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
cdf
|
public abstract double cdf(double x);
|
Returns the fraction of all points added which are ≤ x. Points
that are exactly equal get half credit (i.e. we use the mid-point
rule)
@param x The cutoff for the cdf.
@return The fraction of all data which is less or equal to x.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java
| 144
|
[
"x"
] | true
| 1
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getLinesBetweenNodes
|
function getLinesBetweenNodes(parent: Node, node1: Node, node2: Node): number {
if (getEmitFlags(parent) & EmitFlags.NoIndentation) {
return 0;
}
parent = skipSynthesizedParentheses(parent);
node1 = skipSynthesizedParentheses(node1);
node2 = skipSynthesizedParentheses(node2);
// Always use a newline for synthesized code if the synthesizer desires it.
if (getStartsOnNewLine(node2)) {
return 1;
}
if (currentSourceFile && !nodeIsSynthesized(parent) && !nodeIsSynthesized(node1) && !nodeIsSynthesized(node2)) {
if (preserveSourceNewlines) {
return getEffectiveLines(
includeComments =>
getLinesBetweenRangeEndAndRangeStart(
node1,
node2,
currentSourceFile!,
includeComments,
),
);
}
return rangeEndIsOnSameLineAsRangeStart(node1, node2, currentSourceFile) ? 0 : 1;
}
return 0;
}
|
Emits a list without brackets or raising events.
NOTE: You probably don't want to call this directly and should be using `emitList` or `emitExpressionList` instead.
|
typescript
|
src/compiler/emitter.ts
| 5,179
|
[
"parent",
"node1",
"node2"
] | true
| 9
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
shutDownSharedReactorSchedulers
|
protected void shutDownSharedReactorSchedulers(ServletContext servletContext) {
if (Schedulers.class.getClassLoader() == servletContext.getClassLoader()) {
Schedulers.shutdownNow();
}
}
|
Shuts down the reactor {@link Schedulers} that were initialized by
{@code Schedulers.boundedElastic()} (or similar). The default implementation
{@link Schedulers#shutdownNow()} schedulers if they were initialized on this web
application's class loader.
@param servletContext the web application's servlet context
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/support/SpringBootServletInitializer.java
| 149
|
[
"servletContext"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
filterTo
|
public FilterResult filterTo(RecordFilter filter, ByteBuffer destinationBuffer, BufferSupplier decompressionBufferSupplier) {
return filterTo(batches(), filter, destinationBuffer, decompressionBufferSupplier);
}
|
Filter the records into the provided ByteBuffer.
@param filter The filter function
@param destinationBuffer The byte buffer to write the filtered records to
@param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. For small
record batches, allocating a potentially large buffer (64 KB for LZ4) will
dominate the cost of decompressing and iterating over the records in the
batch. As such, a supplier that reuses buffers will have a significant
performance impact.
@return A FilterResult with a summary of the output (for metrics) and potentially an overflow buffer
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java
| 138
|
[
"filter",
"destinationBuffer",
"decompressionBufferSupplier"
] |
FilterResult
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
cutedsl
|
def cutedsl(self, kernel_name: str, source_code: str):
"""
Compile CuteDSL (CUTLASS Python DSL) kernels.
Args:
kernel_name: Name of the kernel to be defined
source_code: Source code of the CuteDSL kernel, as a string
Note:
CuteDSL currently requires source files to do its compilation, there we
use the PyCodeCache to write the source code to a file and load it.
"""
from torch._inductor.codegen.cutedsl.cutedsl_kernel import (
CuteDSLKernelWrapper,
MAIN_SUFFIX,
)
kernel_code_log.info("CuteDSL Kernel:\n%s", source_code)
def task():
key, path = torch._inductor.codecache.PyCodeCache.write(source_code)
mod = torch._inductor.codecache.PyCodeCache.load_by_key_path(key, path)
# Find our special entry point named function
main_func_name = f"{kernel_name}_{MAIN_SUFFIX}"
if not hasattr(mod, main_func_name):
available = [name for name in dir(mod) if callable(getattr(mod, name))]
raise RuntimeError(
f"Could not find CuteDSL main kernel function '{main_func_name}'. Available callables: {available}"
)
return CuteDSLKernelWrapper(getattr(mod, main_func_name), kernel_path=path)
if get_compile_threads() <= 1:
return task()
else:
future = self.submit(task)
return LambdaFuture(lambda: future.result())
|
Compile CuteDSL (CUTLASS Python DSL) kernels.
Args:
kernel_name: Name of the kernel to be defined
source_code: Source code of the CuteDSL kernel, as a string
Note:
CuteDSL currently requires source files to do its compilation, there we
use the PyCodeCache to write the source code to a file and load it.
|
python
|
torch/_inductor/async_compile.py
| 565
|
[
"self",
"kernel_name",
"source_code"
] | true
| 4
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
getOutputChannel
|
function getOutputChannel(): vscode.OutputChannel {
if (!_channel) {
_channel = vscode.window.createOutputChannel('Gulp Auto Detection');
}
return _channel;
}
|
Check if the given filename is a file.
If returns false in case the file does not exist or
the file stats cannot be accessed/queried or it
is no file at all.
@param filename
the filename to the checked
@returns
true in case the file exists, in any other case false.
|
typescript
|
extensions/gulp/src/main.ts
| 73
|
[] | true
| 2
| 7.92
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
where
|
def where(cond, left_op, right_op, use_numexpr: bool = True):
"""
Evaluate the where condition cond on left_op and right_op.
Parameters
----------
cond : np.ndarray[bool]
left_op : return if cond is True
right_op : return if cond is False
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
assert _where is not None
if use_numexpr:
return _where(cond, left_op, right_op)
else:
return _where_standard(cond, left_op, right_op)
|
Evaluate the where condition cond on left_op and right_op.
Parameters
----------
cond : np.ndarray[bool]
left_op : return if cond is True
right_op : return if cond is False
use_numexpr : bool, default True
Whether to try to use numexpr.
|
python
|
pandas/core/computation/expressions.py
| 247
|
[
"cond",
"left_op",
"right_op",
"use_numexpr"
] | true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
pollOnClose
|
@Override
public NetworkClientDelegate.PollResult pollOnClose(long currentTimeMs) {
if (membershipManager.isLeavingGroup()) {
NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequestAndLogResponse(currentTimeMs);
return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), List.of(request));
}
return EMPTY;
}
|
Generate a heartbeat request to leave the group if the state is still LEAVING when this is
called to close the consumer.
<p/>
Note that when closing the consumer, even though an event to Unsubscribe is generated
(triggers callbacks and sends leave group), it could be the case that the Unsubscribe event
processing does not complete in time and moves on to close the managers (ex. calls to
close with zero timeout). So we could end up on this pollOnClose with the member in
{@link MemberState#PREPARE_LEAVING} (ex. app thread did not have the time to process the
event to execute callbacks), or {@link MemberState#LEAVING} (ex. the leave request could
not be sent due to coordinator not available at that time). In all cases, the pollOnClose
will be triggered right before sending the final requests, so we ensure that we generate
the request to leave if needed.
@param currentTimeMs The current system time in milliseconds at which the method was called
@return PollResult containing the request to send
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java
| 411
|
[
"currentTimeMs"
] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isAnyNodeConnecting
|
private boolean isAnyNodeConnecting() {
for (Node node : metadataUpdater.fetchNodes()) {
if (connectionStates.isConnecting(node.idString())) {
return true;
}
}
return false;
}
|
Return true if there's at least one connection establishment is currently underway
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 1,158
|
[] | true
| 2
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
configure_s3_resources
|
def configure_s3_resources(self, config: dict) -> None:
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
"""
s3_operations = config.pop("S3Operations", None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get("S3CreateBucket", [])
upload_ops = s3_operations.get("S3Upload", [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op["Bucket"])
for op in upload_ops:
if op["Tar"]:
self.tar_and_s3_upload(op["Path"], op["Key"], op["Bucket"])
else:
self.s3_hook.load_file(op["Path"], op["Key"], op["Bucket"])
|
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 190
|
[
"self",
"config"
] |
None
| true
| 6
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
generateConnectionId
|
public static String generateConnectionId(Socket socket, int processorId, int connectionIndex) {
String localHost = socket.getLocalAddress().getHostAddress();
int localPort = socket.getLocalPort();
String remoteHost = socket.getInetAddress().getHostAddress();
int remotePort = socket.getPort();
return localHost + ":" + localPort + "-" + remoteHost + ":" + remotePort + "-" + processorId + "-" + connectionIndex;
}
|
Generates a unique connection ID for the given socket.
@param socket The socket for which the connection ID is to be generated.
@param processorId The ID of the server processor that will handle this connection.
@param connectionIndex The index to be used in the connection ID to ensure uniqueness.
@return A string representing the unique connection ID.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/ServerConnectionId.java
| 122
|
[
"socket",
"processorId",
"connectionIndex"
] |
String
| true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
deprecate_option
|
def deprecate_option(
key: str,
category: type[Warning],
msg: str | None = None,
rkey: str | None = None,
removal_ver: str | None = None,
) -> None:
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that if `rkey` is checked. If they
do not exist, any subsequence access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key : str
Name of the option to be deprecated.
must be a fully-qualified option name (e.g "x.y.z.rkey").
category : Warning
Warning class for the deprecation.
msg : str, optional
Warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey : str, optional
Name of an option to reroute access to.
If specified, any referenced `key` will be
re-routed to `rkey` including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver : str, optional
Specifies the version in which this option will
be removed. used by the default message if no `msg` is specified.
Raises
------
OptionError
If the specified key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError(f"Option '{key}' has already been defined as deprecated.")
_deprecated_options[key] = DeprecatedOption(key, category, msg, rkey, removal_ver)
|
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that if `rkey` is checked. If they
do not exist, any subsequence access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key : str
Name of the option to be deprecated.
must be a fully-qualified option name (e.g "x.y.z.rkey").
category : Warning
Warning class for the deprecation.
msg : str, optional
Warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey : str, optional
Name of an option to reroute access to.
If specified, any referenced `key` will be
re-routed to `rkey` including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver : str, optional
Specifies the version in which this option will
be removed. used by the default message if no `msg` is specified.
Raises
------
OptionError
If the specified key has already been deprecated.
|
python
|
pandas/_config/config.py
| 596
|
[
"key",
"category",
"msg",
"rkey",
"removal_ver"
] |
None
| true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
failure_message_from_response
|
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
fail_details = response["Step"]["Status"].get("FailureDetails")
if fail_details:
return (
f"for reason {fail_details.get('Reason')} "
f"with message {fail_details.get('Message')} and log file {fail_details.get('LogFile')}"
)
return None
|
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
| 630
|
[
"response"
] |
str | None
| true
| 2
| 7.76
|
apache/airflow
| 43,597
|
sphinx
| false
|
stream
|
public static <O> FailableStream<O> stream(final Collection<O> collection) {
return new FailableStream<>(collection.stream());
}
|
Converts the given collection into a {@link FailableStream}. The {@link FailableStream} consists of the
collections elements. Shortcut for
<pre>
Functions.stream(collection.stream());</pre>
@param collection The collection, which is being converted into a {@link FailableStream}.
@param <O> The collections element type. (In turn, the result streams element type.)
@return The created {@link FailableStream}.
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 558
|
[
"collection"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
applyAsBoolean
|
boolean applyAsBoolean(T t) throws E;
|
Applies this function to the given arguments.
@param t the first function argument
@return the function result
@throws E Thrown when the function fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableToBooleanFunction.java
| 53
|
[
"t"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getCurrentPatchForPatchVersions
|
async function getCurrentPatchForPatchVersions(patchMajorMinor: { major: number; minor: number }): Promise<number> {
// TODO: could we add the name of the branch, as well as the relevant versions => faster
// $ npm view '@prisma/client@3.0.x' version --json
// [
// "3.0.1",
// "3.0.2"
// ]
// We retry a few times if it fails
// npm can have some hiccups
const remoteVersionsString = await pRetry(
async () => {
return await runResult('.', 'npm view @prisma/client@* version --json')
},
{
retries: 6,
onFailedAttempt: (e) => {
console.error(e)
},
},
)
let versions = JSON.parse(remoteVersionsString)
// inconsistent npm api
if (!Array.isArray(versions)) {
versions = [versions]
}
const relevantVersions: Array<{
major: number
minor: number
patch: number
}> = versions
.map((v) => {
const match = semverRegex.exec(v)
if (match?.groups) {
return {
major: Number(match.groups.major),
minor: Number(match.groups.minor),
patch: Number(match.groups.patch),
}
}
return null
})
.filter((group) => group && group.minor === patchMajorMinor.minor && group.major === patchMajorMinor.major)
if (relevantVersions.length === 0) {
return 0
}
// sort descending by patch
relevantVersions.sort((a, b) => {
return a.patch < b.patch ? 1 : -1
})
return relevantVersions[0].patch
}
|
Takes the max dev version + 1
For now supporting X.Y.Z-dev.#
@param packages Local package definitions
|
typescript
|
scripts/ci/publish.ts
| 249
|
[
"patchMajorMinor"
] | true
| 7
| 6.8
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
addOrSuppress
|
private static Exception addOrSuppress(Exception firstException, Exception e) {
if (firstException == null) {
firstException = e;
} else {
firstException.addSuppressed(e);
}
return firstException;
}
|
Closes all given {@link Closeable}s. Some of the {@linkplain Closeable}s may be null; they are
ignored. After everything is closed, the method either throws the first exception it hit
while closing with other exceptions added as suppressed, or completes normally if there were
no exceptions.
@param objects objects to close
|
java
|
libs/core/src/main/java/org/elasticsearch/core/IOUtils.java
| 130
|
[
"firstException",
"e"
] |
Exception
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
join
|
public static String join(String separator, boolean... array) {
checkNotNull(separator);
if (array.length == 0) {
return "";
}
// For pre-sizing a builder, just get the right order of magnitude
StringBuilder builder = new StringBuilder(array.length * 7);
builder.append(array[0]);
for (int i = 1; i < array.length; i++) {
builder.append(separator).append(array[i]);
}
return builder.toString();
}
|
Returns a string containing the supplied {@code boolean} values separated by {@code separator}.
For example, {@code join("-", false, true, false)} returns the string {@code
"false-true-false"}.
@param separator the text that should appear between consecutive values in the resulting string
(but not at the start or end)
@param array an array of {@code boolean} values, possibly empty
|
java
|
android/guava/src/com/google/common/primitives/Booleans.java
| 286
|
[
"separator"
] |
String
| true
| 3
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
initialize
|
private static <C extends ConfigurableApplicationContext> void initialize(
C applicationContext, String... initializerClassNames) {
Log logger = LogFactory.getLog(AotApplicationContextInitializer.class);
ClassLoader classLoader = applicationContext.getClassLoader();
logger.debug("Initializing ApplicationContext with AOT");
for (String initializerClassName : initializerClassNames) {
logger.trace(LogMessage.format("Applying %s", initializerClassName));
instantiateInitializer(initializerClassName, classLoader).initialize(applicationContext);
}
}
|
Factory method to create a new {@link AotApplicationContextInitializer}
instance that delegates to other initializers loaded from the given set
of class names.
@param <C> the application context type
@param initializerClassNames the class names of the initializers to load
@return a new {@link AotApplicationContextInitializer} instance
|
java
|
spring-context/src/main/java/org/springframework/context/aot/AotApplicationContextInitializer.java
| 64
|
[
"applicationContext"
] |
void
| true
| 1
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
baseOffset
|
@Override
public long baseOffset() {
return buffer.getLong(BASE_OFFSET_OFFSET);
}
|
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas.
@return The base timestamp
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 179
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
update_crawler
|
def update_crawler(self, **crawler_kwargs) -> bool:
"""
Update crawler configurations.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.update_crawler`
:param crawler_kwargs: Keyword args that define the configurations used for the crawler
:return: True if crawler was updated and false otherwise
"""
crawler_name = crawler_kwargs["Name"]
current_crawler = self.get_crawler(crawler_name)
tags_updated = (
self.update_tags(crawler_name, crawler_kwargs.pop("Tags")) if "Tags" in crawler_kwargs else False
)
update_config = {
key: value
for key, value in crawler_kwargs.items()
if current_crawler.get(key, None) != crawler_kwargs.get(key)
}
if update_config:
self.log.info("Updating crawler: %s", crawler_name)
self.glue_client.update_crawler(**crawler_kwargs)
self.log.info("Updated configurations: %s", update_config)
return True
return tags_updated
|
Update crawler configurations.
.. seealso::
- :external+boto3:py:meth:`Glue.Client.update_crawler`
:param crawler_kwargs: Keyword args that define the configurations used for the crawler
:return: True if crawler was updated and false otherwise
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_crawler.py
| 77
|
[
"self"
] |
bool
| true
| 3
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
entryIterator
|
@Override
Iterator<Entry<K, V>> entryIterator() {
return new Itr<Entry<K, V>>() {
@Override
Entry<K, V> output(@ParametricNullness K key, @ParametricNullness V value) {
return immutableEntry(key, value);
}
};
}
|
Returns an iterator across all key-value map entries, used by {@code entries().iterator()} and
{@code values().iterator()}. The default behavior, which traverses the values for one key, the
values for a second key, and so on, suffices for most {@code AbstractMapBasedMultimap}
implementations.
@return an iterator across map entries
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 1,264
|
[] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
checkPath
|
private boolean checkPath(String path, String[] paths) {
if (paths.length == 0) {
return false;
}
int endx = Arrays.binarySearch(forbiddenPaths, path, comparison.pathComparator());
if (endx < -1 && comparison.isParent(forbiddenPaths[-endx - 2], path) || endx >= 0) {
return false;
}
int ndx = Arrays.binarySearch(paths, path, comparison.pathComparator());
if (ndx < -1) {
return comparison.isParent(paths[-ndx - 2], path);
}
return ndx >= 0;
}
|
@return the "canonical" form of the given {@code path}, to be used for entitlement checks.
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileAccessTree.java
| 376
|
[
"path",
"paths"
] | true
| 6
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
northPolarH3Address
|
public static String northPolarH3Address(int res) {
return h3ToString(northPolarH3(res));
}
|
Find the h3 address containing the North Pole at the given resolution.
@param res the provided resolution.
@return the h3 address containing the North Pole.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 550
|
[
"res"
] |
String
| true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
inplace_column_scale
|
def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features. It should be
of CSC or CSR format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed feature-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_column_scale(csr, scale)
>>> csr.todense()
matrix([[16, 3, 4],
[ 0, 0, 10],
[ 0, 0, 0],
[ 0, 0, 0]])
"""
if sp.issparse(X) and X.format == "csc":
inplace_csr_row_scale(X.T, scale)
elif sp.issparse(X) and X.format == "csr":
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
|
Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
Matrix to normalize using the variance of the features. It should be
of CSC or CSR format.
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64}
Array of precomputed feature-wise values to use for scaling.
Examples
--------
>>> from sklearn.utils import sparsefuncs
>>> from scipy import sparse
>>> import numpy as np
>>> indptr = np.array([0, 3, 4, 4, 4])
>>> indices = np.array([0, 1, 2, 2])
>>> data = np.array([8, 1, 2, 5])
>>> scale = np.array([2, 3, 2])
>>> csr = sparse.csr_matrix((data, indices, indptr))
>>> csr.todense()
matrix([[8, 1, 2],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0]])
>>> sparsefuncs.inplace_column_scale(csr, scale)
>>> csr.todense()
matrix([[16, 3, 4],
[ 0, 0, 10],
[ 0, 0, 0],
[ 0, 0, 0]])
|
python
|
sklearn/utils/sparsefuncs.py
| 294
|
[
"X",
"scale"
] | false
| 6
| 7.68
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
verify_config
|
def verify_config() -> None:
"""
Verify main mkdocs.yml content to make sure it uses the latest language names.
"""
typer.echo("Verifying mkdocs.yml")
config = get_en_config()
updated_config = get_updated_config_content()
if config != updated_config:
typer.secho(
"docs/en/mkdocs.yml outdated from docs/language_names.yml, "
"update language_names.yml and run "
"python ./scripts/docs.py update-languages",
color=typer.colors.RED,
)
raise typer.Abort()
typer.echo("Valid mkdocs.yml ✅")
|
Verify main mkdocs.yml content to make sure it uses the latest language names.
|
python
|
scripts/docs.py
| 379
|
[] |
None
| true
| 2
| 7.04
|
tiangolo/fastapi
| 93,264
|
unknown
| false
|
toInteger
|
public Integer toInteger() {
return Integer.valueOf(intValue());
}
|
Gets this mutable as an instance of Integer.
@return an Integer instance containing the value from this mutable, never null.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableInt.java
| 363
|
[] |
Integer
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getCustomEditor
|
private @Nullable PropertyEditor getCustomEditor(String propertyName, @Nullable Class<?> requiredType) {
CustomEditorHolder holder =
(this.customEditorsForPath != null ? this.customEditorsForPath.get(propertyName) : null);
return (holder != null ? holder.getPropertyEditor(requiredType) : null);
}
|
Get custom editor that has been registered for the given property.
@param propertyName the property path to look for
@param requiredType the type to look for
@return the custom editor, or {@code null} if none specific for this property
|
java
|
spring-beans/src/main/java/org/springframework/beans/PropertyEditorRegistrySupport.java
| 394
|
[
"propertyName",
"requiredType"
] |
PropertyEditor
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
validValues
|
List<Object> validValues(String name, Map<String, Object> parsedConfig);
|
The valid values for the configuration given the current configuration values.
@param name The name of the configuration
@param parsedConfig The parsed configuration values
@return The list of valid values. To function properly, the returned objects should have the type
defined for the configuration using the recommender.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 938
|
[
"name",
"parsedConfig"
] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
transformClassBody
|
function transformClassBody(node: ClassExpression | ClassDeclaration, extendsClauseElement: ExpressionWithTypeArguments | undefined): Block {
const statements: Statement[] = [];
const name = factory.getInternalName(node);
const constructorLikeName = isIdentifierANonContextualKeyword(name) ? factory.getGeneratedNameForNode(name) : name;
startLexicalEnvironment();
addExtendsHelperIfNeeded(statements, node, extendsClauseElement);
addConstructor(statements, node, constructorLikeName, extendsClauseElement);
addClassMembers(statements, node);
// Create a synthetic text range for the return statement.
const closingBraceLocation = createTokenRange(skipTrivia(currentText, node.members.end), SyntaxKind.CloseBraceToken);
// The following partially-emitted expression exists purely to align our sourcemap
// emit with the original emitter.
const outer = factory.createPartiallyEmittedExpression(constructorLikeName);
setTextRangeEnd(outer, closingBraceLocation.end);
setEmitFlags(outer, EmitFlags.NoComments);
const statement = factory.createReturnStatement(outer);
setTextRangePos(statement, closingBraceLocation.pos);
setEmitFlags(statement, EmitFlags.NoComments | EmitFlags.NoTokenSourceMaps);
statements.push(statement);
insertStatementsAfterStandardPrologue(statements, endLexicalEnvironment());
const block = factory.createBlock(setTextRange(factory.createNodeArray(statements), /*location*/ node.members), /*multiLine*/ true);
setEmitFlags(block, EmitFlags.NoComments);
return block;
}
|
Transforms a ClassExpression or ClassDeclaration into a function body.
@param node A ClassExpression or ClassDeclaration node.
@param extendsClauseElement The expression for the class `extends` clause.
|
typescript
|
src/compiler/transformers/es2015.ts
| 1,090
|
[
"node",
"extendsClauseElement"
] | true
| 2
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
name
|
public MetricName name() {
return this.name;
}
|
Get the name of this metric.
@return the metric name; never null
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequency.java
| 46
|
[] |
MetricName
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
transform
|
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
|
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
|
python
|
sklearn/cluster/_feature_agglomeration.py
| 24
|
[
"self",
"X"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
captureRanges
|
public Map<String, Object> captureRanges(String text) {
return innerCaptures(text, cfg -> cfg::rangeExtracter);
}
|
Matches and returns the ranges of any named captures.
@param text the text to match and extract values from.
@return a map containing field names and their respective ranges that matched or null if the pattern didn't match
|
java
|
libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
| 208
|
[
"text"
] | true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
get_exception_info
|
def get_exception_info(self, exception, token):
"""
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
"""
start, end = token.position
context_lines = 10
line = 0
upto = 0
source_lines = []
before = during = after = ""
for num, next in enumerate(linebreak_iter(self.source)):
if start >= upto and end <= next:
line = num
before = self.source[upto:start]
during = self.source[start:end]
after = self.source[end:next]
source_lines.append((num, self.source[upto:next]))
upto = next
total = len(source_lines)
top = max(1, line - context_lines)
bottom = min(total, line + 1 + context_lines)
# In some rare cases exc_value.args can be empty or an invalid
# string.
try:
message = str(exception.args[0])
except (IndexError, UnicodeDecodeError):
message = "(Could not get exception message)"
return {
"message": message,
"source_lines": source_lines[top:bottom],
"before": before,
"during": during,
"after": after,
"top": top,
"bottom": bottom,
"total": total,
"line": line,
"name": self.origin.name,
"start": start,
"end": end,
}
|
Return a dictionary containing contextual line information of where
the exception occurred in the template. The following information is
provided:
message
The message of the exception raised.
source_lines
The lines before, after, and including the line the exception
occurred on.
line
The line number the exception occurred on.
before, during, after
The line the exception occurred on split into three parts:
1. The content before the token that raised the error.
2. The token that raised the error.
3. The content after the token that raised the error.
total
The number of lines in source_lines.
top
The line number where source_lines starts.
bottom
The line number where source_lines ends.
start
The start position of the token in the template source.
end
The end position of the token in the template source.
|
python
|
django/template/base.py
| 215
|
[
"self",
"exception",
"token"
] | false
| 4
| 6.16
|
django/django
| 86,204
|
unknown
| false
|
|
configure
|
public <T extends SimpleAsyncTaskScheduler> T configure(T taskScheduler) {
PropertyMapper map = PropertyMapper.get();
map.from(this.threadNamePrefix).to(taskScheduler::setThreadNamePrefix);
map.from(this.concurrencyLimit).to(taskScheduler::setConcurrencyLimit);
map.from(this.virtualThreads).to(taskScheduler::setVirtualThreads);
map.from(this.taskTerminationTimeout).as(Duration::toMillis).to(taskScheduler::setTaskTerminationTimeout);
map.from(this.taskDecorator).to(taskScheduler::setTaskDecorator);
if (!CollectionUtils.isEmpty(this.customizers)) {
this.customizers.forEach((customizer) -> customizer.customize(taskScheduler));
}
return taskScheduler;
}
|
Configure the provided {@link SimpleAsyncTaskScheduler} instance using this
builder.
@param <T> the type of task scheduler
@param taskScheduler the {@link SimpleAsyncTaskScheduler} to configure
@return the task scheduler instance
@see #build()
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskSchedulerBuilder.java
| 203
|
[
"taskScheduler"
] |
T
| true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
result
|
function result(object, path, defaultValue) {
path = castPath(path, object);
var index = -1,
length = path.length;
// Ensure the loop is entered when path is empty.
if (!length) {
length = 1;
object = undefined;
}
while (++index < length) {
var value = object == null ? undefined : object[toKey(path[index])];
if (value === undefined) {
index = length;
value = defaultValue;
}
object = isFunction(value) ? value.call(object) : value;
}
return object;
}
|
This method is like `_.get` except that if the resolved value is a
function it's invoked with the `this` binding of its parent object and
its result is returned.
@static
@since 0.1.0
@memberOf _
@category Object
@param {Object} object The object to query.
@param {Array|string} path The path of the property to resolve.
@param {*} [defaultValue] The value returned for `undefined` resolved values.
@returns {*} Returns the resolved value.
@example
var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] };
_.result(object, 'a[0].b.c1');
// => 3
_.result(object, 'a[0].b.c2');
// => 4
_.result(object, 'a[0].b.c3', 'default');
// => 'default'
_.result(object, 'a[0].b.c3', _.constant('default'));
// => 'default'
|
javascript
|
lodash.js
| 13,730
|
[
"object",
"path",
"defaultValue"
] | false
| 6
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getLog
|
@Override
public Log getLog(Supplier<Log> destination) {
synchronized (this.lines) {
DeferredLog logger = new DeferredLog(destination, this.lines);
this.loggers.add(logger);
return logger;
}
}
|
Create a new {@link DeferredLog} for the given destination.
@param destination the ultimate log destination
@return a deferred log instance that will switch to the destination when
appropriate.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLogs.java
| 70
|
[
"destination"
] |
Log
| true
| 1
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
firstNonNull
|
public static <T> T firstNonNull(@Nullable T first, @Nullable T second) {
if (first != null) {
return first;
}
if (second != null) {
return second;
}
throw new NullPointerException("Both parameters are null");
}
|
Returns the first of two given parameters that is not {@code null}, if either is, or otherwise
throws a {@link NullPointerException}.
<p>To find the first non-null element in an iterable, use {@code Iterables.find(iterable,
Predicates.notNull())}. For varargs, use {@code Iterables.find(Arrays.asList(a, b, c, ...),
Predicates.notNull())}, static importing as necessary.
<p><b>Note:</b> if {@code first} is represented as an {@link Optional}, this can be
accomplished with {@link Optional#or(Object) first.or(second)}. That approach also allows for
lazy evaluation of the fallback instance, using {@link Optional#or(Supplier)
first.or(supplier)}.
<p><b>Java 9 users:</b> use {@code java.util.Objects.requireNonNullElse(first, second)}
instead.
@return {@code first} if it is non-null; otherwise {@code second} if it is non-null
@throws NullPointerException if both {@code first} and {@code second} are null
@since 18.0 (since 3.0 as {@code Objects.firstNonNull()}).
|
java
|
android/guava/src/com/google/common/base/MoreObjects.java
| 60
|
[
"first",
"second"
] |
T
| true
| 3
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
iterate_file_descriptors_safely
|
def iterate_file_descriptors_safely(fds_iter, source_data,
hub_method, *args, **kwargs):
"""Apply hub method to fds in iter, remove from list if failure.
Some file descriptors may become stale through OS reasons
or possibly other reasons, so safely manage our lists of FDs.
:param fds_iter: the file descriptors to iterate and apply hub_method
:param source_data: data source to remove FD if it renders OSError
:param hub_method: the method to call with each fd and kwargs
:*args to pass through to the hub_method;
with a special syntax string '*fd*' represents a substitution
for the current fd object in the iteration (for some callers).
:**kwargs to pass through to the hub method (no substitutions needed)
"""
def _meta_fd_argument_maker():
# uses the current iterations value for fd
call_args = args
if "*fd*" in call_args:
call_args = [fd if arg == "*fd*" else arg for arg in args]
return call_args
# Track stale FDs for cleanup possibility
stale_fds = []
for fd in fds_iter:
# Handle using the correct arguments to the hub method
hub_args, hub_kwargs = _meta_fd_argument_maker(), kwargs
try: # Call the hub method
hub_method(fd, *hub_args, **hub_kwargs)
except (OSError, FileNotFoundError):
logger.warning(
"Encountered OSError when accessing fd %s ",
fd, exc_info=True)
stale_fds.append(fd) # take note of stale fd
# Remove now defunct fds from the managed list
if source_data:
for fd in stale_fds:
try:
if hasattr(source_data, 'remove'):
source_data.remove(fd)
else: # then not a list/set ... try dict
source_data.pop(fd, None)
except ValueError:
logger.warning("ValueError trying to invalidate %s from %s",
fd, source_data)
|
Apply hub method to fds in iter, remove from list if failure.
Some file descriptors may become stale through OS reasons
or possibly other reasons, so safely manage our lists of FDs.
:param fds_iter: the file descriptors to iterate and apply hub_method
:param source_data: data source to remove FD if it renders OSError
:param hub_method: the method to call with each fd and kwargs
:*args to pass through to the hub_method;
with a special syntax string '*fd*' represents a substitution
for the current fd object in the iteration (for some callers).
:**kwargs to pass through to the hub method (no substitutions needed)
|
python
|
celery/concurrency/asynpool.py
| 198
|
[
"fds_iter",
"source_data",
"hub_method"
] | false
| 8
| 6.4
|
celery/celery
| 27,741
|
sphinx
| false
|
|
getOrCreateEnvironment
|
private ConfigurableEnvironment getOrCreateEnvironment() {
if (this.environment != null) {
return this.environment;
}
WebApplicationType webApplicationType = this.properties.getWebApplicationType();
ConfigurableEnvironment environment = this.applicationContextFactory.createEnvironment(webApplicationType);
if (environment == null && this.applicationContextFactory != ApplicationContextFactory.DEFAULT) {
environment = ApplicationContextFactory.DEFAULT.createEnvironment(webApplicationType);
}
return (environment != null) ? environment : new ApplicationEnvironment();
}
|
Run the Spring application, creating and refreshing a new
{@link ApplicationContext}.
@param args the application arguments (usually passed from a Java main method)
@return a running {@link ApplicationContext}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 475
|
[] |
ConfigurableEnvironment
| true
| 5
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
from
|
public <V> Member<V> from(@Nullable V value) {
return from((instance) -> value);
}
|
Add members from a static value. One of the {@code Member.using(...)} methods
must be used to complete the configuration.
@param <V> the value type
@param value the member value
@return the added {@link Member} which may be configured further
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 272
|
[
"value"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
fromarrays
|
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""
Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [ma.getdata(x) for x in arraylist]
masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist]
_array = np.rec.fromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = list(zip(*masklist))
if fill_value is not None:
_array.fill_value = fill_value
return _array
|
Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
|
python
|
numpy/ma/mrecords.py
| 494
|
[
"arraylist",
"dtype",
"shape",
"formats",
"names",
"titles",
"aligned",
"byteorder",
"fill_value"
] | false
| 2
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_check_values_indices_shape_match
|
def _check_values_indices_shape_match(
values: np.ndarray, index: Index, columns: Index
) -> None:
"""
Check that the shape implied by our axes matches the actual shape of the
data.
"""
if values.shape[1] != len(columns) or values.shape[0] != len(index):
# Could let this raise in Block constructor, but we get a more
# helpful exception message this way.
if values.shape[0] == 0 < len(index):
raise ValueError("Empty data passed with indices specified.")
passed = values.shape
implied = (len(index), len(columns))
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
|
Check that the shape implied by our axes matches the actual shape of the
data.
|
python
|
pandas/core/internals/construction.py
| 344
|
[
"values",
"index",
"columns"
] |
None
| true
| 4
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
evaluate
|
protected @Nullable Object evaluate(TypedStringValue value) {
Object result = doEvaluate(value.getValue());
if (!ObjectUtils.nullSafeEquals(result, value.getValue())) {
value.setDynamic();
}
return result;
}
|
Evaluate the given value as an expression, if necessary.
@param value the candidate value (may be an expression)
@return the resolved value
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionValueResolver.java
| 271
|
[
"value"
] |
Object
| true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
adviceChanged
|
@Override
protected void adviceChanged() {
super.adviceChanged();
if (this.singleton) {
logger.debug("Advice has changed; re-caching singleton instance");
synchronized (this) {
this.singletonInstance = null;
}
}
}
|
Blow away and recache singleton on an advice change.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
| 567
|
[] |
void
| true
| 2
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get
|
public StructuredLogFormatter<E> get(String format) {
StructuredLogFormatter<E> formatter = this.commonFormatters.get(this.instantiator, format);
formatter = (formatter != null) ? formatter : getUsingClassName(format);
if (formatter != null) {
return formatter;
}
throw new IllegalArgumentException(
"Unknown format '%s'. Values can be a valid fully-qualified class name or one of the common formats: %s"
.formatted(format, this.commonFormatters.getCommonNames()));
}
|
Get a new {@link StructuredLogFormatter} instance for the specified format.
@param format the format requested (either a {@link CommonStructuredLogFormat} ID
or a fully-qualified class name)
@return a new {@link StructuredLogFormatter} instance
@throws IllegalArgumentException if the format is unknown
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/StructuredLogFormatterFactory.java
| 123
|
[
"format"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
maybeThrowInterruptException
|
private void maybeThrowInterruptException() {
if (Thread.interrupted()) {
throw new InterruptException(new InterruptedException());
}
}
|
Check whether there is pending request. This includes both requests that
have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
@return A boolean indicating whether there is pending request
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 535
|
[] |
void
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
@Override
public String toString() {
ToStringCreator creator = new ToStringCreator(this);
creator.append("key", this.key);
creator.append("options", this.options);
creator.append("protocol", this.protocol);
creator.append("stores", this.stores);
return creator.toString();
}
|
Get an {@link SslBundle} for the given {@link JksSslBundleProperties}.
@param properties the source properties
@param resourceLoader the resource loader used to load content
@return an {@link SslBundle} instance
@since 3.3.5
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/PropertiesSslBundle.java
| 183
|
[] |
String
| true
| 1
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toDashedForm
|
public static String toDashedForm(String name) {
StringBuilder result = new StringBuilder(name.length());
boolean inIndex = false;
for (int i = 0; i < name.length(); i++) {
char ch = name.charAt(i);
if (inIndex) {
result.append(ch);
if (ch == ']') {
inIndex = false;
}
}
else {
if (ch == '[') {
inIndex = true;
result.append(ch);
}
else {
ch = (ch != '_') ? ch : '-';
if (Character.isUpperCase(ch) && !result.isEmpty() && result.charAt(result.length() - 1) != '-') {
result.append('-');
}
result.append(Character.toLowerCase(ch));
}
}
}
return result.toString();
}
|
Return the specified Java Bean property name in dashed form.
@param name the source name
@return the dashed from
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/DataObjectPropertyName.java
| 37
|
[
"name"
] |
String
| true
| 9
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
inContainer
|
static boolean inContainer(final String dirPrefix) {
final String value = readFile(dirPrefix + "/proc/1/environ", "container");
if (value != null) {
return !value.isEmpty();
}
return fileExists(dirPrefix + "/.dockerenv") || fileExists(dirPrefix + "/run/.containerenv");
}
|
Tests whether we are running in a container like Docker or Podman.
<p>
<em>The following may change if we find better detection logic.</em>
</p>
<p>
We roughly follow the logic in SystemD:
</p>
<p>
<a href=
"https://github.com/systemd/systemd/blob/0747e3b60eb4496ee122066c844210ce818d76d9/src/basic/virt.c#L692">https://github.com/systemd/systemd/blob/0747e3b60eb4496ee122066c844210ce818d76d9/src/basic/virt.c#L692</a>
</p>
<p>
We check the `container` environment variable of process 1:
</p>
<ol>
<li>If the variable is empty, we return false. This includes the case, where the container developer wants to hide the fact that the application runs in
a container.</li>
<li>If the variable is not empty, we return true.</li>
<li>If the variable is absent, we continue.</li>
<li>We check files in the container. According to SystemD:/
<ol>
<li>/.dockerenv is used by Docker.</li>
<li>/run/.containerenv is used by PodMan.</li>
</ol>
</li>
</ol>
@return Whether we are running in a container like Docker or Podman.
@see <a href="https://github.com/systemd/systemd/blob/0747e3b60eb4496ee122066c844210ce818d76d9/src/basic/virt.c#L692">SystemD virt.c</a>
|
java
|
src/main/java/org/apache/commons/lang3/RuntimeEnvironment.java
| 103
|
[
"dirPrefix"
] | true
| 3
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
registered
|
def registered(self, *taskinfoitems):
"""Return all registered tasks per worker.
>>> app.control.inspect().registered()
{'celery@node1': ['task1', 'task1']}
>>> app.control.inspect().registered('serializer', 'max_retries')
{'celery@node1': ['task_foo [serializer=json max_retries=3]', 'tasb_bar [serializer=json max_retries=3]']}
Arguments:
taskinfoitems (Sequence[str]): List of :class:`~celery.app.task.Task`
attributes to include.
Returns:
Dict: Dictionary ``{HOSTNAME: [TASK1_INFO, ...]}``.
"""
return self._request('registered', taskinfoitems=taskinfoitems)
|
Return all registered tasks per worker.
>>> app.control.inspect().registered()
{'celery@node1': ['task1', 'task1']}
>>> app.control.inspect().registered('serializer', 'max_retries')
{'celery@node1': ['task_foo [serializer=json max_retries=3]', 'tasb_bar [serializer=json max_retries=3]']}
Arguments:
taskinfoitems (Sequence[str]): List of :class:`~celery.app.task.Task`
attributes to include.
Returns:
Dict: Dictionary ``{HOSTNAME: [TASK1_INFO, ...]}``.
|
python
|
celery/app/control.py
| 256
|
[
"self"
] | false
| 1
| 6.88
|
celery/celery
| 27,741
|
google
| false
|
|
values
|
private XContentBuilder values(float[] values) throws IOException {
if (values == null) {
return nullValue();
}
startArray();
for (float f : values) {
value(f);
}
endArray();
return this;
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 521
|
[
"values"
] |
XContentBuilder
| true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
successfulAsList
|
@SafeVarargs
public static <V extends @Nullable Object> ListenableFuture<List<@Nullable V>> successfulAsList(
ListenableFuture<? extends V>... futures) {
/*
* Another way to express this signature would be to bound <V> by @NonNull and accept
* LF<? extends @Nullable V>. That might be better: There's currently no difference between the
* outputs users get when calling this with <Foo> and calling it with <@Nullable Foo>. The only
* difference is that calling it with <Foo> won't work when an input Future has a @Nullable
* type. So why even make that error possible by giving callers the choice?
*
* On the other hand, the current signature is consistent with the similar allAsList method. And
* eventually this method may go away entirely in favor of an API like
* whenAllComplete().collectSuccesses(). That API would have a signature more like the current
* one.
*/
return new ListFuture<V>(ImmutableList.copyOf(futures), false);
}
|
Creates a new {@code ListenableFuture} whose value is a list containing the values of all its
successful input futures. The list of results is in the same order as the input list, and if
any of the provided futures fails or is canceled, its corresponding position will contain
{@code null} (which is indistinguishable from the future having a successful value of {@code
null}).
<p>The list of results is in the same order as the input list.
<p>This differs from {@link #allAsList(ListenableFuture[])} in that it's tolerant of failed
futures for any of the items, representing them as {@code null} in the result list.
<p>Canceling this future will attempt to cancel all the component futures.
@param futures futures to combine
@return a future that provides a list of the results of the component futures
@since 10.0
|
java
|
android/guava/src/com/google/common/util/concurrent/Futures.java
| 849
|
[] | true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
groupId
|
public String groupId() {
return groupId;
}
|
@return Group ID of the group the member is part of (or wants to be part of).
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 262
|
[] |
String
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
customizers
|
public ThreadPoolTaskSchedulerBuilder customizers(ThreadPoolTaskSchedulerCustomizer... customizers) {
Assert.notNull(customizers, "'customizers' must not be null");
return customizers(Arrays.asList(customizers));
}
|
Set the {@link ThreadPoolTaskSchedulerCustomizer
threadPoolTaskSchedulerCustomizers} that should be applied to the
{@link ThreadPoolTaskScheduler}. Customizers are applied in the order that they
were added after builder configuration has been applied. Setting this value will
replace any previously configured customizers.
@param customizers the customizers to set
@return a new builder instance
@see #additionalCustomizers(ThreadPoolTaskSchedulerCustomizer...)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskSchedulerBuilder.java
| 142
|
[] |
ThreadPoolTaskSchedulerBuilder
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
register_index_accessor
|
def register_index_accessor(name: str) -> Callable[[TypeT], TypeT]:
"""
Register a custom accessor on Index objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for Index.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single Index object
* raises an AttributeError if the Index object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_index_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(isinstance(x, int) for x in pandas_obj):
... raise AttributeError("The index must only be an integer value")
... self._obj = pandas_obj
...
... def even(self):
... return [x for x in self._obj if x % 2 == 0]
>>> df = pd.DataFrame.from_dict(
... {"row1": {"1": 1, "2": "a"}, "row2": {"1": 2, "2": "b"}}, orient="index"
... )
>>> df.index.int_accessor
Traceback (most recent call last):
...
AttributeError: The index must only be an integer value.
>>> df = pd.DataFrame(
... {"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}, index=[1, 2, 5, 8]
... )
>>> df.index.int_accessor.even()
[2, 8]
"""
from pandas import Index
return _register_accessor(name, Index)
|
Register a custom accessor on Index objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for Index.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single Index object
* raises an AttributeError if the Index object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_index_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(isinstance(x, int) for x in pandas_obj):
... raise AttributeError("The index must only be an integer value")
... self._obj = pandas_obj
...
... def even(self):
... return [x for x in self._obj if x % 2 == 0]
>>> df = pd.DataFrame.from_dict(
... {"row1": {"1": 1, "2": "a"}, "row2": {"1": 2, "2": "b"}}, orient="index"
... )
>>> df.index.int_accessor
Traceback (most recent call last):
...
AttributeError: The index must only be an integer value.
>>> df = pd.DataFrame(
... {"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}, index=[1, 2, 5, 8]
... )
>>> df.index.int_accessor.even()
[2, 8]
|
python
|
pandas/core/accessor.py
| 519
|
[
"name"
] |
Callable[[TypeT], TypeT]
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
trySubstituteNamespaceExportedName
|
function trySubstituteNamespaceExportedName(node: Identifier): Expression | undefined {
// If this is explicitly a local name, do not substitute.
if (enabledSubstitutions & applicableSubstitutions && !isGeneratedIdentifier(node) && !isLocalName(node)) {
// If we are nested within a namespace declaration, we may need to qualifiy
// an identifier that is exported from a merged namespace.
const container = resolver.getReferencedExportContainer(node, /*prefixLocals*/ false);
if (container && container.kind !== SyntaxKind.SourceFile) {
const substitute = (applicableSubstitutions & TypeScriptSubstitutionFlags.NamespaceExports && container.kind === SyntaxKind.ModuleDeclaration) ||
(applicableSubstitutions & TypeScriptSubstitutionFlags.NonQualifiedEnumMembers && container.kind === SyntaxKind.EnumDeclaration);
if (substitute) {
return setTextRange(
factory.createPropertyAccessExpression(factory.getGeneratedNameForNode(container), node),
/*location*/ node,
);
}
}
}
return undefined;
}
|
Hooks node substitutions.
@param hint A hint as to the intended usage of the node.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,684
|
[
"node"
] | true
| 10
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
opj_uint_floorlog2
|
static INLINE OPJ_UINT32 opj_uint_floorlog2(OPJ_UINT32 a)
{
OPJ_UINT32 l;
for (l = 0; a > 1; ++l) {
a >>= 1;
}
return l;
}
|
Get logarithm of an integer and round downwards
@return Returns log2(a)
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 248
|
[
"a"
] | true
| 2
| 6.4
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
poll_for_termination
|
def poll_for_termination(self, app_id: str) -> None:
"""
Pool for spark application termination.
:param app_id: id of the spark application to monitor
"""
state = self.hook.get_spark_state(app_id)
while AppState(state) not in AnalyticDBSparkHook.TERMINAL_STATES:
self.log.debug("Application with id %s is in state: %s", app_id, state)
time.sleep(self.polling_interval)
state = self.hook.get_spark_state(app_id)
self.log.info("Application with id %s terminated with state: %s", app_id, state)
self.log.info(
"Web ui address is %s for application with id %s",
self.hook.get_spark_web_ui_address(app_id),
app_id,
)
self.log.info(self.hook.get_spark_log(app_id))
if AppState(state) != AppState.COMPLETED:
raise AirflowException(f"Application {app_id} did not succeed")
|
Pool for spark application termination.
:param app_id: id of the spark application to monitor
|
python
|
providers/alibaba/src/airflow/providers/alibaba/cloud/operators/analyticdb_spark.py
| 64
|
[
"self",
"app_id"
] |
None
| true
| 3
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
execute
|
def execute(self, context: Context) -> dict:
"""
Trigger a Dag Run for the Dag in the Amazon MWAA environment.
:param context: the Context object
:return: dict with information about the Dag run
For details of the returned dict, see :py:meth:`botocore.client.MWAA.invoke_rest_api`
"""
response = self.hook.invoke_rest_api(
env_name=self.env_name,
path=f"/dags/{self.trigger_dag_id}/dagRuns",
method="POST",
body={
"dag_run_id": self.trigger_run_id,
"logical_date": self.logical_date,
"data_interval_start": self.data_interval_start,
"data_interval_end": self.data_interval_end,
"conf": self.conf,
"note": self.note,
},
airflow_version=self.airflow_version,
)
dag_run_id = response["RestApiResponse"]["dag_run_id"]
self.log.info("DAG run %s of DAG %s created", dag_run_id, self.trigger_dag_id)
task_description = f"DAG run {dag_run_id} of DAG {self.trigger_dag_id} to complete"
if self.deferrable:
self.log.info("Deferring for %s", task_description)
self.defer(
trigger=MwaaDagRunCompletedTrigger(
external_env_name=self.env_name,
external_dag_id=self.trigger_dag_id,
external_dag_run_id=dag_run_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
self.log.info("Waiting for %s", task_description)
api_kwargs = {
"Name": self.env_name,
"Path": f"/dags/{self.trigger_dag_id}/dagRuns/{dag_run_id}",
"Method": "GET",
}
self.hook.get_waiter("mwaa_dag_run_complete").wait(
**api_kwargs,
WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
)
return response
|
Trigger a Dag Run for the Dag in the Amazon MWAA environment.
:param context: the Context object
:return: dict with information about the Dag run
For details of the returned dict, see :py:meth:`botocore.client.MWAA.invoke_rest_api`
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/operators/mwaa.py
| 137
|
[
"self",
"context"
] |
dict
| true
| 3
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
describe_task_execution
|
def describe_task_execution(self, task_execution_arn: str) -> dict:
"""
Get description for the specified ``task_execution_arn``.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.describe_task_execution`
:param task_execution_arn: TaskExecutionArn
:return: AWS metadata about a task execution.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
"""
return self.get_conn().describe_task_execution(TaskExecutionArn=task_execution_arn)
|
Get description for the specified ``task_execution_arn``.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.describe_task_execution`
:param task_execution_arn: TaskExecutionArn
:return: AWS metadata about a task execution.
:raises AirflowBadRequest: If ``task_execution_arn`` is empty.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
| 267
|
[
"self",
"task_execution_arn"
] |
dict
| true
| 1
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
splitName
|
function splitName(fullPath: string): { name: string; parentPath: string } {
const p = fullPath.indexOf('/') !== -1 ? posix : win32;
const name = p.basename(fullPath);
const parentPath = p.dirname(fullPath);
if (name.length) {
return { name, parentPath };
}
// only the root segment
return { name: parentPath, parentPath: '' };
}
|
Splits a recent label in name and parent path, supporting both '/' and '\' and workspace suffixes.
If the location is remote, the remote name is included in the name part.
|
typescript
|
src/vs/base/common/labels.ts
| 464
|
[
"fullPath"
] | true
| 3
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
stream
|
public static Stream<KeyStoreEntry> stream(
KeyStore keyStore,
Function<GeneralSecurityException, ? extends RuntimeException> exceptionHandler
) {
try {
return Collections.list(keyStore.aliases()).stream().map(a -> new KeyStoreEntry(keyStore, a, exceptionHandler));
} catch (KeyStoreException e) {
throw exceptionHandler.apply(e);
}
}
|
Creates a {@link X509ExtendedTrustManager} based on the provided certificates
@param certificates the certificates to trust
@return a trust manager that trusts the provided certificates
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java
| 199
|
[
"keyStore",
"exceptionHandler"
] | true
| 2
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
initializeAdvisorChain
|
private synchronized void initializeAdvisorChain() throws AopConfigException, BeansException {
if (!this.advisorChainInitialized && !ObjectUtils.isEmpty(this.interceptorNames)) {
if (this.beanFactory == null) {
throw new IllegalStateException("No BeanFactory available anymore (probably due to serialization) " +
"- cannot resolve interceptor names " + Arrays.toString(this.interceptorNames));
}
// Globals can't be last unless we specified a targetSource using the property...
if (this.interceptorNames[this.interceptorNames.length - 1].endsWith(GLOBAL_SUFFIX) &&
this.targetName == null && this.targetSource == EMPTY_TARGET_SOURCE) {
throw new AopConfigException("Target required after globals");
}
// Materialize interceptor chain from bean names.
for (String name : this.interceptorNames) {
if (name.endsWith(GLOBAL_SUFFIX)) {
if (!(this.beanFactory instanceof ListableBeanFactory lbf)) {
throw new AopConfigException(
"Can only use global advisors or interceptors with a ListableBeanFactory");
}
addGlobalAdvisors(lbf, name.substring(0, name.length() - GLOBAL_SUFFIX.length()));
}
else {
// If we get here, we need to add a named interceptor.
// We must check if it's a singleton or prototype.
Object advice;
if (this.singleton || this.beanFactory.isSingleton(name)) {
// Add the real Advisor/Advice to the chain.
advice = this.beanFactory.getBean(name);
}
else {
// It's a prototype Advice or Advisor: replace with a prototype.
// Avoid unnecessary creation of prototype bean just for advisor chain initialization.
advice = new PrototypePlaceholderAdvisor(name);
}
addAdvisorOnChainCreation(advice);
}
}
this.advisorChainInitialized = true;
}
}
|
Create the advisor (interceptor) chain. Advisors that are sourced
from a BeanFactory will be refreshed each time a new prototype instance
is added. Interceptors added programmatically through the factory API
are unaffected by such changes.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
| 408
|
[] |
void
| true
| 11
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
onMemberEpochUpdated
|
@Override
public void onMemberEpochUpdated(Optional<Integer> memberEpochOpt, String memberId) {
this.memberId = Uuid.fromString(memberId);
}
|
The method checks whether the leader for a topicIdPartition has changed.
@param nodeId The previous leader for the partition.
@param topicIdPartition The TopicIdPartition to check.
@return Returns true if leader information is available and leader has changed.
If the leader information is not available or if the leader has not changed, it returns false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,145
|
[
"memberEpochOpt",
"memberId"
] |
void
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
task_render
|
def task_render(args, dag: DAG | None = None) -> None:
    """Resolve the templated fields of one task instance and print each rendered value."""
    if not dag:
        dag = get_bagged_dag(args.bundle_name, args.dag_id)
    # Round-trip through serialization so we render against the serialized task shape.
    round_tripped = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
    task_instance, _ = _get_ti(
        round_tripped.get_task(task_id=args.task_id),
        args.map_index,
        logical_date_or_run_id=args.logical_date_or_run_id,
        create_if_necessary="memory",
    )
    with create_session() as session:
        context = task_instance.get_template_context(session=session)
    real_task = dag.get_task(args.task_id)
    # TODO (GH-52141): After sdk separation, ti.get_template_context() would
    # contain serialized operators, but we need the real operators for
    # rendering. This does not make sense and eventually we should rewrite
    # this entire function so "ti" is a RuntimeTaskInstance instead, but for
    # now we'll just manually fix it to contain the right objects.
    context["task"] = real_task
    context["ti"].task = real_task
    real_task.render_template_fields(context)
    for field in context["task"].template_fields:
        header = (
            "# ----------------------------------------------------------\n"
            f"# property: {field}\n"
            "# ----------------------------------------------------------\n"
        )
        # The rendered value is appended verbatim, without any dedenting.
        print(header + str(getattr(context["task"], field)))
|
Render and displays templated fields for a given task.
|
python
|
airflow-core/src/airflow/cli/commands/task_command.py
| 430
|
[
"args",
"dag"
] |
None
| true
| 3
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
select
|
def select(
    self,
    key: str,
    where=None,
    start=None,
    stop=None,
    columns=None,
    iterator: bool = False,
    chunksize: int | None = None,
    auto_close: bool = False,
):
    """
    Retrieve pandas object stored in file, optionally based on where criteria.

    .. warning::

        Pandas uses PyTables for reading and writing HDF5 files, which allows
        serializing object-dtype data with pickle when using the "fixed" format.
        Loading pickled data received from untrusted sources can be unsafe.

        See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    key : str
        Object being retrieved from file.
    where : list or None
        List of Term (or convertible) objects, optional.
    start : int or None
        Row number to start selection.
    stop : int, default None
        Row number to stop selection.
    columns : list or None
        A list of columns that if not None, will limit the return columns.
    iterator : bool or False
        Returns an iterator.
    chunksize : int or None
        Number of rows to include in iteration, return an iterator.
    auto_close : bool or False
        Should automatically close the store when finished.

    Returns
    -------
    object
        Retrieved object from file.

    See Also
    --------
    HDFStore.select_as_coordinates : Returns the selection as an index.
    HDFStore.select_column : Returns a single column from the table.
    HDFStore.select_as_multiple : Retrieves pandas objects from multiple tables.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
    >>> store = pd.HDFStore("store.h5", "w")  # doctest: +SKIP
    >>> store.put("data", df)  # doctest: +SKIP
    >>> store.get("data")  # doctest: +SKIP
    >>> print(store.keys())  # doctest: +SKIP
    ['/data1', '/data2']
    >>> store.select("/data1")  # doctest: +SKIP
       A  B
    0  1  2
    1  3  4
    >>> store.select("/data1", where="columns == A")  # doctest: +SKIP
       A
    0  1
    1  3
    >>> store.close()  # doctest: +SKIP
    """
    group = self.get_node(key)
    if group is None:
        raise KeyError(f"No object named {key} in the file")

    # create the storer and axes
    # _ensure_term normalizes the 'where' criteria; scope_level=1 resolves
    # free variables in Term strings against the caller's frame.
    where = _ensure_term(where, scope_level=1)
    s = self._create_storer(group)
    s.infer_axes()

    # function to call on iteration; TableIterator invokes it per chunk
    # with the chunk's start/stop row bounds.
    def func(_start, _stop, _where):
        return s.read(start=_start, stop=_stop, where=_where, columns=columns)

    # create the iterator
    # TableIterator handles chunked vs. one-shot reads and, when requested,
    # closing the store once iteration is exhausted (auto_close).
    it = TableIterator(
        self,
        s,
        func,
        where=where,
        nrows=s.nrows,
        start=start,
        stop=stop,
        iterator=iterator,
        chunksize=chunksize,
        auto_close=auto_close,
    )

    return it.get_result()
|
Retrieve pandas object stored in file, optionally based on where criteria.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
Object being retrieved from file.
where : list or None
List of Term (or convertible) objects, optional.
start : int or None
Row number to start selection.
stop : int, default None
Row number to stop selection.
columns : list or None
A list of columns that if not None, will limit the return columns.
iterator : bool or False
Returns an iterator.
chunksize : int or None
Number of rows to include in iteration, return an iterator.
auto_close : bool or False
Should automatically close the store when finished.
Returns
-------
object
Retrieved object from file.
See Also
--------
HDFStore.select_as_coordinates : Returns the selection as an index.
HDFStore.select_column : Returns a single column from the table.
HDFStore.select_as_multiple : Retrieves pandas objects from multiple tables.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data", df) # doctest: +SKIP
>>> store.get("data") # doctest: +SKIP
>>> print(store.keys()) # doctest: +SKIP
['/data1', '/data2']
>>> store.select("/data1") # doctest: +SKIP
A B
0 1 2
1 3 4
>>> store.select("/data1", where="columns == A") # doctest: +SKIP
A
0 1
1 3
>>> store.close() # doctest: +SKIP
|
python
|
pandas/io/pytables.py
| 839
|
[
"self",
"key",
"where",
"start",
"stop",
"columns",
"iterator",
"chunksize",
"auto_close"
] | true
| 2
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
namespaceElementVisitorWorker
|
/**
 * Specialized visitor for the immediate children of a namespace body.
 *
 * ES module syntax (imports/exports) is illegal inside a namespace, so those
 * nodes are dropped entirely; nodes containing TypeScript syntax or carrying
 * an `export` modifier are delegated to the TypeScript visitor; everything
 * else passes through unchanged.
 *
 * @param node The node to visit.
 */
function namespaceElementVisitorWorker(node: Node): VisitResult<Node | undefined> {
    switch (node.kind) {
        case SyntaxKind.ExportDeclaration:
        case SyntaxKind.ImportDeclaration:
        case SyntaxKind.ImportClause:
            // do not emit ES6 imports and exports since they are illegal inside a namespace
            return undefined;
        case SyntaxKind.ImportEqualsDeclaration:
            if ((node as ImportEqualsDeclaration).moduleReference.kind === SyntaxKind.ExternalModuleReference) {
                return undefined;
            }
            break;
    }
    if (node.transformFlags & TransformFlags.ContainsTypeScript || hasSyntacticModifier(node, ModifierFlags.Export)) {
        return visitTypeScript(node);
    }
    return node;
}
|
Specialized visitor that visits the immediate children of a namespace.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/ts.ts
| 518
|
[
"node"
] | true
| 9
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
of
|
/**
 * Streams the elements of the given enumeration in order.
 *
 * @param <E> The enumeration element type.
 * @param enumeration The enumeration to stream; it is consumed as the stream is traversed.
 * @return a new sequential, ordered stream.
 */
public static <E> Stream<E> of(final Enumeration<E> enumeration) {
    // Long.MAX_VALUE signals an unknown size; ORDERED preserves enumeration
    // order, and 'false' yields a sequential (non-parallel) stream.
    return StreamSupport.stream(new EnumerationSpliterator<>(Long.MAX_VALUE, Spliterator.ORDERED, enumeration), false);
}
|
Streams the elements of the given enumeration in order.
@param <E> The enumeration element type.
@param enumeration The enumeration to stream.
@return a new stream.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 687
|
[
"enumeration"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_truncate_datetime_for_granularity
|
def _truncate_datetime_for_granularity(
self,
dt: datetime,
granularity: Literal["hourly", "daily"],
) -> datetime:
"""
Truncate datetime based on granularity for planned tasks grouping.
Args:
dt: The datetime to truncate
granularity: Either "hourly" or "daily"
Returns:
Truncated datetime
"""
if granularity == "hourly":
return dt.replace(minute=0, second=0, microsecond=0)
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
|
Truncate datetime based on granularity for planned tasks grouping.
Args:
dt: The datetime to truncate
granularity: Either "hourly" or "daily"
Returns:
Truncated datetime
|
python
|
airflow-core/src/airflow/api_fastapi/core_api/services/ui/calendar.py
| 297
|
[
"self",
"dt",
"granularity"
] |
datetime
| true
| 2
| 7.28
|
apache/airflow
| 43,597
|
google
| false
|
acknowledgeBatchIfImplicitAcknowledgement
|
/**
 * Acknowledges every record in the current fetch with {@code AcknowledgeType.ACCEPT}
 * when the consumer is configured for IMPLICIT acknowledgement mode; does nothing
 * in any other mode.
 */
private void acknowledgeBatchIfImplicitAcknowledgement() {
    // If IMPLICIT, acknowledge all records
    if (acknowledgementMode == ShareAcknowledgementMode.IMPLICIT) {
        currentFetch.acknowledgeAll(AcknowledgeType.ACCEPT);
    }
}
|
If the acknowledgement mode is IMPLICIT, acknowledges all records in the current batch.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,145
|
[] |
void
| true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
update
|
def update(self, func, default=None, testing_value=None,
           missing_values='', locked=False):
    """
    Set StringConverter attributes directly.

    Parameters
    ----------
    func : function
        Conversion function.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given,
        `StringConverter` tries to supply a reasonable default value.
    testing_value : str, optional
        A string representing a standard input value of the converter.
        This string is used to help defining a reasonable default
        value.
    missing_values : {sequence of str, None}, optional
        Sequence of strings indicating a missing value. If ``None``, then
        the existing `missing_values` are cleared. The default is ``''``.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent
        automatic upgrade or not. Default is False.

    Notes
    -----
    `update` takes the same parameters as the constructor of
    `StringConverter`, except that `func` does not accept a `dtype`
    whereas `dtype_or_func` in the constructor does.
    """
    self.func = func
    self._locked = locked
    # Don't reset the default to None if we can avoid it
    if default is not None:
        self.default = default
        self.type = self._dtypeortype(self._getdtype(default))
    else:
        # No default supplied: probe the converter with a sample input to
        # infer the output type ('1' is used when testing_value is falsy).
        try:
            tester = func(testing_value or '1')
        except (TypeError, ValueError):
            # Probe failed; _getdtype(None) supplies the fallback dtype.
            tester = None
        self.type = self._dtypeortype(self._getdtype(tester))
    # Add the missing values to the existing set or clear it.
    if missing_values is None:
        # Clear all missing values even though the ctor initializes it to
        # set(['']) when the argument is None.
        self.missing_values = set()
    else:
        # NOTE(review): np.iterable() is True for plain strings, so a bare
        # string like 'NA' is iterated per-character here rather than being
        # wrapped as ['NA'] — confirm callers pass sequences of strings.
        if not np.iterable(missing_values):
            missing_values = [missing_values]
        if not all(isinstance(v, str) for v in missing_values):
            raise TypeError("missing_values must be strings or unicode")
        self.missing_values.update(missing_values)
|
Set StringConverter attributes directly.
Parameters
----------
func : function
Conversion function.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given,
`StringConverter` tries to supply a reasonable default value.
testing_value : str, optional
A string representing a standard input value of the converter.
This string is used to help defining a reasonable default
value.
missing_values : {sequence of str, None}, optional
Sequence of strings indicating a missing value. If ``None``, then
the existing `missing_values` are cleared. The default is ``''``.
locked : bool, optional
Whether the StringConverter should be locked to prevent
automatic upgrade or not. Default is False.
Notes
-----
`update` takes the same parameters as the constructor of
`StringConverter`, except that `func` does not accept a `dtype`
whereas `dtype_or_func` in the constructor does.
|
python
|
numpy/lib/_iotools.py
| 766
|
[
"self",
"func",
"default",
"testing_value",
"missing_values",
"locked"
] | false
| 8
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
fromMap
|
/**
 * Converts the map representation of an object of type {@code V} to an object of
 * type {@code V}. Each entry presumably maps an attribute name to a version value
 * (the serialized form of a BaseVersionRange) — confirm against implementations.
 *
 * @param baseVersionRangeMap the map representation to convert
 * @return the reconstructed object of type {@code V}
 */
V fromMap(Map<String, Short> baseVersionRangeMap);
|
Convert the map representation of an object of type <V> to an object of type <V>.
@param baseVersionRangeMap the map representation of a BaseVersionRange object.
@return the object of type <V>
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/Features.java
| 115
|
[
"baseVersionRangeMap"
] |
V
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
_get_fab_migration_version
|
def _get_fab_migration_version(*, session: Session) -> str | None:
    """
    Look up the FAB alembic revision currently recorded in the metadata database.

    The query goes straight to the ``alembic_version_fab`` table because the FAB
    provider and FABDBManager may not even be installed.

    :param session: sqlalchemy session for connection to airflow metadata database
    :return: The current FAB migration revision, or None if not found
    """
    stmt = text("SELECT version_num FROM alembic_version_fab LIMIT 1")
    try:
        first_row = session.execute(stmt).fetchone()
    except Exception:
        # The table may not exist (FAB never installed) or the query may fail
        # for some other database reason; either way report "no version".
        return None
    return first_row[0] if first_row else None
|
Get the current FAB migration version from the database.
This intentionally queries the db directly, as the FAB provider and FABDBManager may not even be installed.
:param session: sqlalchemy session for connection to airflow metadata database
:return: The current FAB migration revision, or None if not found
|
python
|
airflow-core/src/airflow/utils/db.py
| 1,221
|
[
"session"
] |
str | None
| true
| 2
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
findCommonAncestorIndex
|
/**
 * Locates the first frame in {@code callStack} whose class and method names both
 * match the supplied pair.
 *
 * @param callStack the stack trace to scan, starting from its first element
 * @param className the class name to match
 * @param methodName the method name to match
 * @return the index of the first matching frame, or {@code -1} if none matches
 */
private static int findCommonAncestorIndex(StackTraceElement[] callStack, String className, String methodName) {
    int index = 0;
    for (StackTraceElement frame : callStack) {
        if (className.equals(frame.getClassName()) && methodName.equals(frame.getMethodName())) {
            return index;
        }
        index++;
    }
    return -1;
}
|
Rewrite the call stack of the specified {@code exception} so that it matches
the current call stack up to (included) the specified method invocation.
<p>Clone the specified exception. If the exception is not {@code serializable},
the original exception is returned. If no common ancestor can be found, returns
the original exception.
<p>Used to make sure that a cached exception has a valid invocation context.
@param exception the exception to merge with the current call stack
@param className the class name of the common ancestor
@param methodName the method name of the common ancestor
@return a clone exception with a rewritten call stack composed of the current call
stack up to (included) the common ancestor specified by the {@code className} and
{@code methodName} arguments, followed by stack trace elements of the specified
{@code exception} after the common ancestor.
|
java
|
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/CacheResultInterceptor.java
| 157
|
[
"callStack",
"className",
"methodName"
] | true
| 4
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toString
|
/**
 * Returns the string representation of the wrapped cause.
 */
@Override
public String toString() {
    return this.cause.toString();
}
|
Return the original cause of the error.
@return the error cause
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/error/Error.java
| 93
|
[] |
String
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
firstKey
|
/**
 * Return the value of the first key found in the parameters object.
 *
 * Only own properties of `parameters` are considered: inherited members such
 * as `toString` or `constructor` never match, so the function cannot return a
 * non-string value picked up from the prototype chain.
 *
 * @param parameters map of parameter names to values
 * @param keys candidate keys, checked in order
 * @returns the value for the first key that is present, or null when none is
 */
function firstKey(parameters: Record<string, string>, ...keys: string[]): string | null {
  for (const key of keys) {
    // `key in parameters` would also match prototype members; restrict to own keys.
    if (Object.prototype.hasOwnProperty.call(parameters, key)) {
      return parameters[key]
    }
  }
  return null
}
|
Return the value of the first key found in the parameters object
@param parameters
@param keys
|
typescript
|
packages/adapter-mssql/src/connection-string.ts
| 282
|
[
"parameters",
"...keys"
] | true
| 2
| 6.4
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
isNotInGroup
|
/**
 * Checks whether the member is currently outside the group, i.e. in one of the
 * UNSUBSCRIBED, FENCED, FATAL or STALE states.
 *
 * @return true if the member is not part of the group
 */
private boolean isNotInGroup() {
    switch (state) {
        case UNSUBSCRIBED:
        case FENCED:
        case FATAL:
        case STALE:
            return true;
        default:
            return false;
    }
}
|
@return True if the member is preparing to leave the group (waiting for callbacks), or
leaving (sending last heartbeat). This is used to skip proactively leaving the group when
the poll timer expires.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 358
|
[] | true
| 4
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.