function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
finish
|
// Block until all commands in the thread-default command queue have completed.
// Returns CL_SUCCESS on success, or the error from resolving the queue.
inline cl_int finish(void)
{
    cl_int status;
    // Resolve the thread-default command queue; propagate failure immediately.
    CommandQueue defaultQueue = CommandQueue::getDefault(&status);
    if (status != CL_SUCCESS)
        return status;
    return defaultQueue.finish();
}
|
Blocking copy operation between iterators and a buffer.
|
cpp
|
3rdparty/include/opencl/1.2/CL/cl.hpp
| 6,547
|
[] | true
| 2
| 6.4
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
create
|
/**
 * Create a new jar URL with no explicit entry location.
 * @param file the jar file
 * @param nestedEntryName the nested entry name or {@code null}
 * @return a jar file URL
 */
public static URL create(File file, String nestedEntryName) {
    // Delegate to the three-argument variant with a null entry location.
    return create(file, nestedEntryName, null);
}
|
Create a new jar URL.
@param file the jar file
@param nestedEntryName the nested entry name or {@code null}
@return a jar file URL
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrl.java
| 60
|
[
"file",
"nestedEntryName"
] |
URL
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
initializeFrozenIntrinsics
|
// Opt-in hardening: freeze the JS intrinsics when the --frozen-intrinsics
// CLI flag is set. The feature is experimental, so a warning is emitted.
function initializeFrozenIntrinsics() {
  if (getOptionValue('--frozen-intrinsics')) {
    // Fix: the warning previously misspelled the feature name as
    // "Frozen intristics"; this is user-facing text.
    emitExperimentalWarning('Frozen intrinsics');
    require('internal/freeze_intrinsics')();
  }
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 689
|
[] | false
| 2
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
getBeanFactory
|
@Override
public final ConfigurableListableBeanFactory getBeanFactory() {
    // Read the field once into a local: refresh()/close() may swap it
    // from another thread between the check and the return.
    DefaultListableBeanFactory currentFactory = this.beanFactory;
    if (currentFactory != null) {
        return currentFactory;
    }
    throw new IllegalStateException("BeanFactory not initialized or already closed - " +
            "call 'refresh' before accessing beans via the ApplicationContext");
}
|
Determine whether this context currently holds a bean factory,
i.e. has been refreshed at least once and not been closed yet.
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractRefreshableApplicationContext.java
| 163
|
[] |
ConfigurableListableBeanFactory
| true
| 2
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
customOmitClone
|
/**
 * Used by `_.omit` to customize its `_.cloneDeep` use: plain objects are
 * returned as `undefined` so that `_.cloneDeep` handles them itself.
 *
 * @private
 * @param {*} value The value to inspect.
 * @returns {*} Returns the uncloned value or `undefined` to defer cloning.
 */
function customOmitClone(value) {
  if (isPlainObject(value)) {
    return undefined;
  }
  return value;
}
|
Used by `_.omit` to customize its `_.cloneDeep` use to only clone plain
objects.
@private
@param {*} value The value to inspect.
@param {string} key The key of the property to inspect.
@returns {*} Returns the uncloned value or `undefined` to defer cloning to `_.cloneDeep`.
|
javascript
|
lodash.js
| 5,697
|
[
"value"
] | false
| 2
| 6
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
slice_replace
|
def slice_replace(self, start=None, stop=None, repl=None):
    """
    Replace a positional slice of a string with another value.

    Replaces the characters between ``start`` and ``stop`` in each element
    of the Series/Index with ``repl``, keeping the rest of each string.

    Parameters
    ----------
    start : int, optional
        Left edge of the slice. ``None`` means slice from the start
        of the string.
    stop : int, optional
        Right edge of the slice. ``None`` means slice until the end
        of the string.
    repl : str, optional
        Replacement string. ``None`` replaces the sliced region with an
        empty string.

    Returns
    -------
    Series or Index
        Same type as the original object.

    See Also
    --------
    Series.str.slice : Just slicing without replacement.

    Examples
    --------
    >>> s = pd.Series(["a", "ab", "abc", "abdc", "abcde"])
    >>> s.str.slice_replace(start=1, stop=3, repl="X")
    0      aX
    1      aX
    2      aX
    3     aXc
    4    aXde
    dtype: str
    """
    # Delegate to the backing array's string implementation, then wrap the
    # raw result back into the caller's Series/Index type.
    replaced = self._data.array._str_slice_replace(start, stop, repl)
    return self._wrap_result(replaced)
|
Replace a positional slice of a string with another value.
This function allows replacing specific parts of a string in a Series
or Index by specifying start and stop positions. It is useful for
modifying substrings in a controlled way, such as updating sections of
text based on their positions or patterns.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(["a", "ab", "abc", "abdc", "abcde"])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: str
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl="X")
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: str
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl="X")
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: str
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl="X")
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: str
|
python
|
pandas/core/strings/accessor.py
| 2,040
|
[
"self",
"start",
"stop",
"repl"
] | false
| 1
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
sendPrefetches
|
private void sendPrefetches(Timer timer) {
    // Fire-and-forget: ask the background thread to create fetch requests
    // right before poll() returns. Failures are logged but never propagated,
    // because the consumed position has already been updated at this point.
    try {
        CreateFetchRequestsEvent event = new CreateFetchRequestsEvent(calculateDeadlineMs(timer));
        applicationEventHandler.add(event);
    } catch (Throwable t) {
        log.warn("An unexpected error occurred while pre-fetching data in Consumer.poll(), but was suppressed", t);
    }
}
|
This method signals the background thread to {@link CreateFetchRequestsEvent create fetch requests} for the
pre-fetch case, i.e. right before {@link #poll(Duration)} exits. In the pre-fetch case, the application thread
will not wait for confirmation of the request creation before continuing.
<p/>
At the point this method is called, {@link KafkaConsumer#poll(Duration)} has data ready to return to the user,
which means the consumed position was already updated. In order to prevent potential gaps in records, this
method is designed to suppress all exceptions.
@param timer Provides an upper bound for the event and its {@link CompletableFuture future}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,995
|
[
"timer"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
_shared_cache_lockfile
|
def _shared_cache_lockfile(self) -> Path:
    """Return the path of the lock file guarding the shared memoizer cache.

    Returns:
        The path to the lock file for the shared cache.
    """
    # Single well-known lock file under the common cache directory.
    return Path(cache_dir(), "memoizer_cache.lock")
|
Get the lock file path for the shared memoizer cache.
Returns:
The path to the lock file for the shared cache.
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 280
|
[
"self"
] |
Path
| true
| 1
| 6.4
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
spawnSync
|
// Synchronous counterpart of spawn(): normalizes and validates options,
// wires up per-fd stdio input buffers, then delegates to the native
// child_process.spawnSync binding.
function spawnSync(file, args, options) {
  options = {
    // Null prototype so polluted Object.prototype keys cannot leak into
    // the merged options object.
    __proto__: null,
    maxBuffer: MAX_BUFFER,
    ...normalizeSpawnArguments(file, args, options),
  };
  debug('spawnSync', options);
  // Validate the timeout, if present.
  validateTimeout(options.timeout);
  // Validate maxBuffer, if present.
  validateMaxBuffer(options.maxBuffer);
  // Validate and translate the kill signal, if present.
  options.killSignal = sanitizeKillSignal(options.killSignal);
  options.stdio = getValidStdio(options.stdio || 'pipe', true).stdio;
  if (options.input) {
    // `options.input` is shorthand for feeding data to fd 0; copy the stdio
    // descriptor before mutating it so shared defaults are not modified.
    const stdin = options.stdio[0] = { ...options.stdio[0] };
    stdin.input = options.input;
  }
  // We may want to pass data in on any given fd, ensure it is a valid buffer
  for (let i = 0; i < options.stdio.length; i++) {
    const input = options.stdio[i]?.input;
    if (input != null) {
      const pipe = options.stdio[i] = { ...options.stdio[i] };
      if (isArrayBufferView(input)) {
        pipe.input = input;
      } else if (typeof input === 'string') {
        // Strings are converted using the caller-specified encoding.
        pipe.input = Buffer.from(input, options.encoding);
      } else {
        throw new ERR_INVALID_ARG_TYPE(`options.stdio[${i}]`,
                                       ['Buffer',
                                        'TypedArray',
                                        'DataView',
                                        'string'],
                                       input);
      }
    }
  }
  return child_process.spawnSync(options);
}
|
Spawns a new process synchronously using the given `file`.
@param {string} file
@param {string[]} [args]
@param {{
cwd?: string | URL;
input?: string | Buffer | TypedArray | DataView;
argv0?: string;
stdio?: string | Array;
env?: Record<string, string>;
uid?: number;
gid?: number;
timeout?: number;
killSignal?: string | number;
maxBuffer?: number;
encoding?: string;
shell?: boolean | string;
windowsVerbatimArguments?: boolean;
windowsHide?: boolean;
}} [options]
@returns {{
pid: number;
output: Array;
stdout: Buffer | string;
stderr: Buffer | string;
status: number | null;
signal: string | null;
error: Error;
}}
|
javascript
|
lib/child_process.js
| 866
|
[
"file",
"args",
"options"
] | false
| 9
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
_setitem_single_column
|
def _setitem_single_column(self, loc: int, value, plane_indexer) -> None:
    """
    Set ``value`` into the single column of ``self.obj`` at position ``loc``.

    Parameters
    ----------
    loc : int
        Indexer for column position
    value : scalar or array-like
        The value(s) being assigned into the column.
    plane_indexer : int, slice, listlike[int]
        The indexer we use for setitem along axis=0.
    """
    pi = plane_indexer
    # Full setter: the row indexer covers every row, so the whole column is
    # being replaced. Null setter: the row indexer selects nothing at all.
    is_full_setter = com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj))
    is_null_setter = com.is_empty_slice(pi) or (is_array_like(pi) and len(pi) == 0)
    if is_null_setter:
        # no-op, don't cast dtype later
        return
    elif is_full_setter:
        try:
            self.obj._mgr.column_setitem(
                loc, plane_indexer, value, inplace_only=True
            )
        except (ValueError, TypeError, LossySetitemError) as exc:
            # If we're setting an entire column and we can't do it inplace,
            # then we can use value's dtype (or inferred dtype)
            # instead of object
            dtype = self.obj.dtypes.iloc[loc]
            if dtype not in (np.void, object) and not self.obj.empty:
                # - Exclude np.void, as that is a special case for expansion.
                #   We want to raise for
                #   df = pd.DataFrame({'a': [1, 2]})
                #   df.loc[:, 'a'] = .3
                #   but not for
                #   df = pd.DataFrame({'a': [1, 2]})
                #   df.loc[:, 'b'] = .3
                # - Exclude `object`, as then no upcasting happens.
                # - Exclude empty initial object with enlargement,
                #   as then there's nothing to be inconsistent with.
                raise TypeError(
                    f"Invalid value '{value}' for dtype '{dtype}'"
                ) from exc
            self.obj.isetitem(loc, value)
    else:
        # set value into the column (first attempting to operate inplace, then
        # falling back to casting if necessary)
        dtype = self.obj.dtypes.iloc[loc]
        if dtype == np.void:
            # This means we're expanding, with multiple columns, e.g.
            #   df = pd.DataFrame({'A': [1,2,3], 'B': [4,5,6]})
            #   df.loc[df.index <= 2, ['F', 'G']] = (1, 'abc')
            # Columns F and G will initially be set to np.void.
            # Here, we replace those temporary `np.void` columns with
            # columns of the appropriate dtype, based on `value`.
            self.obj.iloc[:, loc] = construct_1d_array_from_inferred_fill_value(
                value, len(self.obj)
            )
        self.obj._mgr.column_setitem(loc, plane_indexer, value)
|
Parameters
----------
loc : int
Indexer for column position
plane_indexer : int, slice, listlike[int]
The indexer we use for setitem along axis=0.
|
python
|
pandas/core/indexing.py
| 2,122
|
[
"self",
"loc",
"value",
"plane_indexer"
] |
None
| true
| 10
| 6.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_slice_bound
|
def get_slice_bound(
self,
label: Hashable | Sequence[Hashable],
side: Literal["left", "right"],
) -> int:
"""
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
Returns leftmost (one-past-the-rightmost if `side=='right') position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abbc"), list("gefd")])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound("b", side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(("b", "f"), side="right")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if not isinstance(label, tuple):
label = (label,)
return self._partial_tup_index(label, side=side)
|
For an ordered MultiIndex, compute slice bound
that corresponds to given label.
Returns leftmost (one-past-the-rightmost if `side=='right') position
of given label.
Parameters
----------
label : object or tuple of objects
side : {'left', 'right'}
Returns
-------
int
Index of label.
Notes
-----
This method only works if level 0 index of the MultiIndex is lexsorted.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abbc"), list("gefd")])
Get the locations from the leftmost 'b' in the first level
until the end of the multiindex:
>>> mi.get_slice_bound("b", side="left")
1
Like above, but if you get the locations from the rightmost
'b' in the first level and 'f' in the second level:
>>> mi.get_slice_bound(("b", "f"), side="right")
3
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
|
python
|
pandas/core/indexes/multi.py
| 3,083
|
[
"self",
"label",
"side"
] |
int
| true
| 2
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
closeWhileHandlingException
|
/**
 * Release the provided {@link Releasable}s, swallowing any
 * {@link RuntimeException} so that a failure on one releasable does not
 * prevent the remaining ones from being released.
 */
public static void closeWhileHandlingException(Releasable... releasables) {
    for (int i = 0; i < releasables.length; i++) {
        try {
            close(releasables[i]);
        } catch (RuntimeException ignored) {
            // deliberately swallowed: this method is best-effort by contract
        }
    }
}
|
Release the provided {@link Releasable}s, ignoring exceptions.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Releasables.java
| 80
|
[] |
void
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
hasSignedLibrary
|
/**
 * Return whether any written library (other than a {@link JarModeLibrary})
 * is a signed jar file.
 * @param writtenLibraries the libraries that were written, keyed by name
 * @return {@code true} if at least one signed library was found
 * @throws IOException if a library file cannot be read
 */
private boolean hasSignedLibrary(Map<String, Library> writtenLibraries) throws IOException {
    for (Library candidate : writtenLibraries.values()) {
        // JarMode libraries are tooling-provided and not user-signed.
        if (candidate instanceof JarModeLibrary) {
            continue;
        }
        if (FileUtils.isSignedJarFile(candidate.getFile())) {
            return true;
        }
    }
    return false;
}
|
Create a new {@link Repackager} instance.
@param source the source archive file to package
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Repackager.java
| 63
|
[
"writtenLibraries"
] | true
| 3
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
intersection
|
/**
 * Match all classes that <i>all</i> of the given ClassFilters match.
 * @param classFilters the ClassFilters to match (must not be empty)
 * @return a distinct ClassFilter that matches all classes that all of
 * the given ClassFilters match
 */
public static ClassFilter intersection(ClassFilter[] classFilters) {
    Assert.notEmpty(classFilters, "ClassFilter array must not be empty");
    return new IntersectionClassFilter(classFilters);
}
|
Match all classes that <i>all</i> of the given ClassFilters match.
@param classFilters the ClassFilters to match
@return a distinct ClassFilter that matches all classes that both
of the given ClassFilter match
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ClassFilters.java
| 85
|
[
"classFilters"
] |
ClassFilter
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
ensureOpenForRecordBatchWrite
|
/**
 * Guard against writing a record batch header once this builder has been
 * closed or aborted.
 * @throws IllegalStateException if the builder is closed or aborted
 */
private void ensureOpenForRecordBatchWrite() {
    if (isClosed()) {
        throw new IllegalStateException("Tried to write record batch header, but MemoryRecordsBuilder is closed");
    }
    if (aborted) {
        throw new IllegalStateException("Tried to write record batch header, but MemoryRecordsBuilder is aborted");
    }
}
|
Append the record at the next consecutive offset. If no records have been appended yet, use the base
offset of this builder.
@param record The record to add
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 809
|
[] |
void
| true
| 3
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
below
|
/**
 * Constructs a {@link JavaUnicodeEscaper} that escapes all code points
 * below the specified value (exclusive).
 * @param codePoint below which to escape
 * @return the newly created {@link UnicodeEscaper} instance
 */
public static JavaUnicodeEscaper below(final int codePoint) {
    // Escaping "below codePoint" is escaping everything outside
    // [codePoint, Integer.MAX_VALUE].
    return outsideOf(codePoint, Integer.MAX_VALUE);
}
|
Constructs a {@link JavaUnicodeEscaper} below the specified value (exclusive).
@param codePoint
below which to escape.
@return the newly created {@link UnicodeEscaper} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/JavaUnicodeEscaper.java
| 48
|
[
"codePoint"
] |
JavaUnicodeEscaper
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
alias
|
/**
 * The alias used when setting entries in the {@link KeyStore}.
 * @return the alias, or {@code null} if none
 */
@Nullable String alias();
|
The alias used when setting entries in the {@link KeyStore}.
@return the alias
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
| 51
|
[] |
String
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
usingExtractedPairs
|
/**
 * Add JSON name/value pairs by extracting names and values from a series of
 * elements, e.g. {@code usingExtractedPairs(Iterable::forEach, Tag::getName,
 * Tag::getValue)}.
 * @param <E> the element type
 * @param <N> the name type
 * @param <V> the value type
 * @param elements callback used to provide the elements
 * @param nameExtractor {@link Function} used to extract the name
 * @param valueExtractor {@link Function} used to extract the value
 * @return a {@link Member} which may be configured further
 */
public <E, N, V> Member<T> usingExtractedPairs(BiConsumer<T, Consumer<E>> elements,
        Function<E, N> nameExtractor, Function<E, V> valueExtractor) {
    Assert.notNull(elements, "'elements' must not be null");
    Assert.notNull(nameExtractor, "'nameExtractor' must not be null");
    Assert.notNull(valueExtractor, "'valueExtractor' must not be null");
    // Adapt each element into a (name, value) pair and reuse usingPairs();
    // arguments evaluate left-to-right, so the name is extracted first.
    return usingPairs((instance, pairsConsumer) -> elements.accept(instance,
            (element) -> pairsConsumer.accept(nameExtractor.apply(element),
                    valueExtractor.apply(element))));
}
|
Add JSON name/value pairs by extracting values from a series of elements.
Typically used with a {@link Iterable#forEach(Consumer)} call, for example:
<pre class="code">
members.add(Event::getTags).usingExtractedPairs(Iterable::forEach, Tag::getName, Tag::getValue);
</pre>
<p>
When used with a named member, the pairs will be added as a new JSON value
object:
<pre>
{
"name": {
"p1": 1,
"p2": 2
}
}
</pre>
When used with an unnamed member the pairs will be added to the existing JSON
object:
<pre>
{
"p1": 1,
"p2": 2
}
</pre>
@param <E> the element type
@param <N> the name type
@param <V> the value type
@param elements callback used to provide the elements
@param nameExtractor {@link Function} used to extract the name
@param valueExtractor {@link Function} used to extract the value
@return a {@link Member} which may be configured further
@see #usingExtractedPairs(BiConsumer, PairExtractor)
@see #usingPairs(BiConsumer)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 541
|
[
"elements",
"nameExtractor",
"valueExtractor"
] | true
| 1
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
SocketAddress
|
SocketAddress(const char* host, uint16_t port, bool allowNameLookup = false) {
    // The address family must be initialized before use; both setters below
    // take care of that, so simply pick one based on whether a (potentially
    // slow) DNS lookup is permitted.
    if (!allowNameLookup) {
        // Numeric-IP-only path: fails fast instead of resolving a hostname.
        setFromIpPort(host, port);
    } else {
        setFromHostPort(host, port);
    }
}
|
Construct a SocketAddress from a hostname and port.
Note: If the host parameter is not a numeric IP address, hostname
resolution will be performed, which can be quite slow.
Raises std::system_error on error.
@param host The IP address (or hostname, if allowNameLookup is true)
@param port The port (in host byte order)
@param allowNameLookup If true, attempt to perform hostname lookup
if the hostname does not appear to be a numeric IP address.
This is potentially a very slow operation, so is disabled by
default.
|
cpp
|
folly/SocketAddress.h
| 65
|
[
"port"
] | true
| 3
| 6.88
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
rebuildHashTable
|
/**
 * Builds a new open-addressed hash table from the first n objects in elements.
 * Uses linear probing; the mask ({@code length - 1}) assumes
 * {@code newTableSize} is a power of two.
 */
static @Nullable Object[] rebuildHashTable(int newTableSize, Object[] elements, int n) {
    @Nullable Object[] hashTable = new @Nullable Object[newTableSize];
    int mask = hashTable.length - 1;
    for (int i = 0; i < n; i++) {
        // requireNonNull is safe because we ensure that the first n elements have been populated.
        Object element = requireNonNull(elements[i]);
        int probe = Hashing.smear(element.hashCode());
        // Walk forward from the smeared hash until an empty slot is found.
        while (hashTable[probe & mask] != null) {
            probe++;
        }
        hashTable[probe & mask] = element;
    }
    return hashTable;
}
|
Builds a new open-addressed hash table from the first n objects in elements.
|
java
|
guava/src/com/google/common/collect/ImmutableSet.java
| 830
|
[
"newTableSize",
"elements",
"n"
] | true
| 4
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
clientInstanceId
|
@Override
public Uuid clientInstanceId(Duration timeout) {
    // The client instance id only exists when telemetry is enabled.
    if (clientTelemetryReporter.isPresent()) {
        return ClientTelemetryUtils.fetchClientInstanceId(clientTelemetryReporter.get(), timeout);
    }
    throw new IllegalStateException("Telemetry is not enabled. Set config `" + ConsumerConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`.");
}
|
This method sends a commit event to the EventHandler and waits for
the event to finish.
@param timeout max wait time for the blocking operation.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,751
|
[
"timeout"
] |
Uuid
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
getVars
|
/**
 * Find all variable references in the given value.
 * @param value the raw configuration value to scan
 * @param pattern the variable-reference pattern to match against
 * @return one {@link ConfigVariable} per match, in order of occurrence
 */
private static List<ConfigVariable> getVars(String value, Pattern pattern) {
    List<ConfigVariable> found = new ArrayList<>();
    for (Matcher matcher = pattern.matcher(value); matcher.find(); ) {
        found.add(new ConfigVariable(matcher));
    }
    return found;
}
|
Transforms the given configuration data by using the {@link ConfigProvider} instances to
look up values to replace the variables in the pattern.
@param configs the configuration values to be transformed
@return an instance of {@link ConfigTransformerResult}
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigTransformer.java
| 124
|
[
"value",
"pattern"
] | true
| 2
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
make_flex_doc
|
def make_flex_doc(op_name: str, typ: str) -> str:
    """
    Make the appropriate substitutions for the given operation and class-typ
    into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
    to attach to a generated method.

    Parameters
    ----------
    op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
    typ : str {series, 'dataframe']}

    Returns
    -------
    doc : str
    """
    op_name = op_name.replace("__", "")
    op_desc = _op_descriptions[op_name]
    op_desc_op = op_desc["op"]
    assert op_desc_op is not None  # for mypy
    # Human-readable "equivalent to" expression shown in the docstring.
    if op_name.startswith("r"):
        equiv = f"other {op_desc_op} {typ}"
    elif op_name == "divmod":
        equiv = f"{op_name}({typ}, other)"
    else:
        equiv = f"{typ} {op_desc_op} other"
    if typ == "series":
        base_doc = _flex_doc_SERIES
        if op_desc["reverse"]:
            base_doc += _see_also_reverse_SERIES.format(
                reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"]
            )
        doc_no_examples = base_doc.format(
            desc=op_desc["desc"],
            op_name=op_name,
            equiv=equiv,
            series_returns=op_desc["series_returns"],
        )
        # Examples are optional; append them only when present.
        ser_example = op_desc["series_examples"]
        if ser_example:
            doc = doc_no_examples + ser_example
        else:
            doc = doc_no_examples
    elif typ == "dataframe":
        if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]:
            # Comparison ops use a dedicated template that takes no
            # `equiv`/`reverse` substitutions. (Fix: previously `base_doc`
            # was assigned but the module-level template was formatted
            # directly, leaving the local unused.)
            base_doc = _flex_comp_doc_FRAME
            doc = base_doc.format(
                op_name=op_name,
                desc=op_desc["desc"],
            )
        else:
            base_doc = _flex_doc_FRAME
            doc = base_doc.format(
                desc=op_desc["desc"],
                op_name=op_name,
                equiv=equiv,
                reverse=op_desc["reverse"],
            )
    else:
        raise AssertionError("Invalid typ argument.")
    return doc
|
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {series, 'dataframe']}
Returns
-------
doc : str
|
python
|
pandas/core/ops/docstrings.py
| 8
|
[
"op_name",
"typ"
] |
str
| true
| 12
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
listTopics
|
/**
 * List the topics available in the cluster.
 * @param options The options to use when listing the topics.
 * @return The ListTopicsResult.
 */
ListTopicsResult listTopics(ListTopicsOptions options);
|
List the topics available in the cluster.
@param options The options to use when listing the topics.
@return The ListTopicsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 284
|
[
"options"
] |
ListTopicsResult
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
canRetry
|
/**
 * Decide whether a failed produce batch may be retried, based on the
 * partition-level error and this manager's idempotent/transactional state.
 * May mutate sequence/epoch state as a side effect before allowing a retry
 * (resetting in-flight sequences via
 * {@code txnPartitionMap.startSequencesAtBeginning} or requesting an
 * idempotent epoch bump).
 *
 * @param response the partition-level produce response carrying the error
 * @param batch the batch that failed
 * @return {@code true} if the batch should be retried, {@code false} to let it fail
 */
synchronized boolean canRetry(ProduceResponse.PartitionResponse response, ProducerBatch batch) {
    Errors error = response.error;
    // An UNKNOWN_PRODUCER_ID means that we have lost the producer state on the broker. Depending on the log start
    // offset, we may want to retry these, as described for each case below. If none of those apply, then for the
    // idempotent producer, we will locally bump the epoch and reset the sequence numbers of in-flight batches from
    // sequence 0, then retry the failed batch, which should now succeed. For the transactional producer, allow the
    // batch to fail. When processing the failed batch, we will transition to an abortable error and set a flag
    // indicating that we need to bump the epoch (if supported by the broker).
    if (error == Errors.UNKNOWN_PRODUCER_ID) {
        if (response.logStartOffset == -1) {
            // We don't know the log start offset with this response. We should just retry the request until we get it.
            // The UNKNOWN_PRODUCER_ID error code was added along with the new ProduceResponse which includes the
            // logStartOffset. So the '-1' sentinel is not for backward compatibility. Instead, it is possible for
            // a broker to not know the logStartOffset at when it is returning the response because the partition
            // may have moved away from the broker from the time the error was initially raised to the time the
            // response was being constructed. In these cases, we should just retry the request: we are guaranteed
            // to eventually get a logStartOffset once things settle down.
            return true;
        }
        if (batch.sequenceHasBeenReset()) {
            // When the first inflight batch fails due to the truncation case, then the sequences of all the other
            // in flight batches would have been restarted from the beginning. However, when those responses
            // come back from the broker, they would also come with an UNKNOWN_PRODUCER_ID error. In this case, we should not
            // reset the sequence numbers to the beginning.
            return true;
        } else if (lastAckedOffset(batch.topicPartition).orElse(TxnPartitionEntry.NO_LAST_ACKED_SEQUENCE_NUMBER) < response.logStartOffset) {
            // The head of the log has been removed, probably due to the retention time elapsing. In this case,
            // we expect to lose the producer state. For the transactional producer, reset the sequences of all
            // inflight batches to be from the beginning and retry them, so that the transaction does not need to
            // be aborted. For the idempotent producer, bump the epoch to avoid reusing (sequence, epoch) pairs
            if (isTransactional()) {
                txnPartitionMap.startSequencesAtBeginning(batch.topicPartition, this.producerIdAndEpoch);
            } else {
                requestIdempotentEpochBumpForPartition(batch.topicPartition);
            }
            return true;
        }
        if (!isTransactional()) {
            // For the idempotent producer, always retry UNKNOWN_PRODUCER_ID errors. If the batch has the current
            // producer ID and epoch, request a bump of the epoch. Otherwise just retry the produce.
            requestIdempotentEpochBumpForPartition(batch.topicPartition);
            return true;
        }
    } else if (error == Errors.OUT_OF_ORDER_SEQUENCE_NUMBER) {
        if (!hasUnresolvedSequence(batch.topicPartition) &&
                (batch.sequenceHasBeenReset() || !isNextSequence(batch.topicPartition, batch.baseSequence()))) {
            // We should retry the OutOfOrderSequenceException if the batch is _not_ the next batch, ie. its base
            // sequence isn't the lastAckedSequence + 1.
            return true;
        } else if (!isTransactional()) {
            // For the idempotent producer, retry all OUT_OF_ORDER_SEQUENCE_NUMBER errors. If there are no
            // unresolved sequences, or this batch is the one immediately following an unresolved sequence, we know
            // there is actually a gap in the sequences, and we bump the epoch. Otherwise, retry without bumping
            // and wait to see if the sequence resolves
            if (!hasUnresolvedSequence(batch.topicPartition) ||
                    isNextSequenceForUnresolvedPartition(batch.topicPartition, batch.baseSequence())) {
                requestIdempotentEpochBumpForPartition(batch.topicPartition);
            }
            return true;
        }
    }
    // If neither of the above cases are true, retry if the exception is retriable
    return error.exception() instanceof RetriableException;
}
|
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
the lowest sequence number.
@return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
If there are no inflight requests being tracked for this partition, this method will return
RecordBatch.NO_SEQUENCE.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,017
|
[
"response",
"batch"
] | true
| 14
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
na_value_for_dtype
|
def na_value_for_dtype(dtype: DtypeObj, compat: bool = True):
    """
    Return a dtype compat na value

    Parameters
    ----------
    dtype : string / dtype
    compat : bool, default True
        When True, integer dtypes map to 0 and bool maps to False instead
        of NaN.

    Returns
    -------
    np.dtype or a pandas dtype

    Examples
    --------
    >>> na_value_for_dtype(np.dtype("int64"))
    0
    >>> na_value_for_dtype(np.dtype("int64"), compat=False)
    nan
    >>> na_value_for_dtype(np.dtype("float64"))
    nan
    >>> na_value_for_dtype(np.dtype("bool"))
    False
    >>> na_value_for_dtype(np.dtype("datetime64[ns]"))
    np.datetime64('NaT')
    """
    # Extension dtypes declare their own NA sentinel.
    if isinstance(dtype, ExtensionDtype):
        return dtype.na_value
    kind = dtype.kind
    if kind in "mM":
        # datetime64/timedelta64: NaT, preserving the dtype's unit.
        unit = np.datetime_data(dtype)[0]
        return dtype.type("NaT", unit)
    if kind in "iu":
        return 0 if compat else np.nan
    if kind == "b":
        return False if compat else np.nan
    # float, complex, and any remaining kinds fall back to NaN.
    return np.nan
|
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : bool, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype("int64"))
0
>>> na_value_for_dtype(np.dtype("int64"), compat=False)
nan
>>> na_value_for_dtype(np.dtype("float64"))
nan
>>> na_value_for_dtype(np.dtype("complex128"))
nan
>>> na_value_for_dtype(np.dtype("bool"))
False
>>> na_value_for_dtype(np.dtype("datetime64[ns]"))
np.datetime64('NaT')
|
python
|
pandas/core/dtypes/missing.py
| 603
|
[
"dtype",
"compat"
] | true
| 8
| 7.36
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
install_gcc_via_conda
|
def install_gcc_via_conda() -> str:
"""On older systems, this is a quick way to get a modern compiler"""
prefix = os.path.join(cache_dir(), "gcc")
cxx_path = os.path.join(prefix, "bin", "g++")
if not os.path.exists(cxx_path):
log.info("Downloading GCC via conda")
conda = os.environ.get("CONDA_EXE", "conda")
if conda is None:
conda = shutil.which("conda")
if conda is not None:
subprocess.check_call(
[
conda,
"create",
f"--prefix={prefix}",
"--channel=conda-forge",
"--quiet",
"-y",
"python=3.8",
"gxx",
],
stdout=subprocess.PIPE,
)
return cxx_path
|
On older systems, this is a quick way to get a modern compiler
|
python
|
torch/_inductor/cpp_builder.py
| 108
|
[] |
str
| true
| 4
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
validate_docs_version
|
def validate_docs_version() -> None:
"""
Validate the versions of documentation packages in the specified directory.
This script checks the versions of documentation packages in the published directory
when we publish and add back-references to the documentation. the directory is expected to be structured like:
docs-archive/
apache-airflow/
1.10.0/
stable/
stable.txt
apache-airflow-providers-standard/
2.0.0/
stable/
stable.txt
If anything found apart from the expected structure, it will cause error to redirects urls or publishing the documentation to s3
"""
doc_packages = os.listdir(AIRFLOW_SITE_DIRECTORY)
if not doc_packages:
console.print("[red]No documentation packages found in the specified directory.[/red]")
return
package_version_map = {}
for package in doc_packages:
if package in PACKAGES_METADATA_EXCLUDE_NAMES:
console.print(f"[yellow]Skipping excluded package: {package}[/yellow]")
continue
package_path = os.path.join(str(AIRFLOW_SITE_DIRECTORY), package)
versions = [v for v in os.listdir(package_path) if v != "stable" and v != "stable.txt"]
if versions:
package_version_map[package] = get_all_versions(package, versions)
if error_versions:
console.print("[red]Errors found in version validation:[/red]")
for error in error_versions:
console.print(f"[red]{error}[/red]")
console.print(
"[blue]These errors could be due to invalid redirects present in the doc packages.[/blue]"
)
sys.exit(1)
console.print("[green]All versions validated successfully![/green]")
console.print(f"[blue] {json.dumps(package_version_map, indent=2)} [/blue]")
|
Validate the versions of documentation packages in the specified directory.
This script checks the versions of documentation packages in the published directory
when we publish and add back-references to the documentation. the directory is expected to be structured like:
docs-archive/
apache-airflow/
1.10.0/
stable/
stable.txt
apache-airflow-providers-standard/
2.0.0/
stable/
stable.txt
If anything found apart from the expected structure, it will cause error to redirects urls or publishing the documentation to s3
|
python
|
dev/breeze/src/airflow_breeze/utils/docs_version_validation.py
| 39
|
[] |
None
| true
| 8
| 6.24
|
apache/airflow
| 43,597
|
unknown
| false
|
getBeans
|
private static Map<String, Object> getBeans(ListableBeanFactory beanFactory, @Nullable String qualifier) {
Map<String, Object> beans = new LinkedHashMap<>();
beans.putAll(getBeans(beanFactory, Printer.class, qualifier));
beans.putAll(getBeans(beanFactory, Parser.class, qualifier));
beans.putAll(getBeans(beanFactory, Formatter.class, qualifier));
beans.putAll(getBeans(beanFactory, Converter.class, qualifier));
beans.putAll(getBeans(beanFactory, ConverterFactory.class, qualifier));
beans.putAll(getBeans(beanFactory, GenericConverter.class, qualifier));
return beans;
}
|
Add {@link Printer}, {@link Parser}, {@link Formatter}, {@link Converter},
{@link ConverterFactory}, {@link GenericConverter}, and beans from the specified
bean factory.
@param registry the service to register beans with
@param beanFactory the bean factory to get the beans from
@param qualifier the qualifier required on the beans or {@code null}
@return the beans that were added
@since 3.5.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/convert/ApplicationConversionService.java
| 333
|
[
"beanFactory",
"qualifier"
] | true
| 1
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
names
|
public JSONArray names() {
return this.nameValuePairs.isEmpty() ? null : new JSONArray(new ArrayList<>(this.nameValuePairs.keySet()));
}
|
Returns an array containing the string names in this object. This method returns
null if this object contains no mappings.
@return the array
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 688
|
[] |
JSONArray
| true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
size
|
public int size() {
return bitSet.size();
}
|
Returns the number of bits of space actually in use by this {@link BitSet} to represent bit values. The maximum
element in the set is the size - 1st element.
@return the number of bits currently in this bit set.
|
java
|
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
| 509
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
asSupplier
|
public static <T> Supplier<T> asSupplier(final FailableSupplier<T, ?> supplier) {
return () -> get(supplier);
}
|
Converts the given {@link FailableSupplier} into a standard {@link Supplier}.
@param <T> the type supplied by the suppliers
@param supplier a {@link FailableSupplier}
@return a standard {@link Supplier}
|
java
|
src/main/java/org/apache/commons/lang3/function/Failable.java
| 383
|
[
"supplier"
] | true
| 1
| 6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
openBufferedStream
|
public InputStream openBufferedStream() throws IOException {
InputStream in = openStream();
return (in instanceof BufferedInputStream)
? (BufferedInputStream) in
: new BufferedInputStream(in);
}
|
Opens a new buffered {@link InputStream} for reading from this source. The returned stream is
not required to be a {@link BufferedInputStream} in order to allow implementations to simply
delegate to {@link #openStream()} when the stream returned by that method does not benefit from
additional buffering (for example, a {@code ByteArrayInputStream}). This method returns a new,
independent stream each time it is called.
<p>The caller is responsible for ensuring that the returned stream is closed.
@throws IOException if an I/O error occurs while opening the stream
@since 15.0 (in 14.0 with return type {@link BufferedInputStream})
|
java
|
android/guava/src/com/google/common/io/ByteSource.java
| 118
|
[] |
InputStream
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
copyCustomPrologue
|
function copyCustomPrologue(source: readonly Statement[], target: Statement[], statementOffset: number | undefined, visitor?: (node: Node) => VisitResult<Node>, filter: (node: Statement) => boolean = returnTrue): number | undefined {
const numStatements = source.length;
while (statementOffset !== undefined && statementOffset < numStatements) {
const statement = source[statementOffset];
if (getEmitFlags(statement) & EmitFlags.CustomPrologue && filter(statement)) {
append(target, visitor ? visitNode(statement, visitor, isStatement) : statement);
}
else {
break;
}
statementOffset++;
}
return statementOffset;
}
|
Copies only the custom prologue-directives into target statement-array.
@param source origin statements array
@param target result statements array
@param statementOffset The offset at which to begin the copy.
@param visitor Optional callback used to visit any custom prologue directives.
|
typescript
|
src/compiler/factory/nodeFactory.ts
| 6,924
|
[
"source",
"target",
"statementOffset",
"visitor?",
"filter"
] | true
| 7
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
if (!hasTransaction()) {
return "";
}
return producerId + ":" + epoch;
}
|
Returns a serialized string representation of this transaction state.
The format is "producerId:epoch" for an initialized state, or an empty string
for an uninitialized state (where producerId and epoch are both -1).
@return a serialized string representation
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/PreparedTxnState.java
| 105
|
[] |
String
| true
| 2
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
hexRingSize
|
public static int hexRingSize(long h3) {
return H3Index.H3_is_pentagon(h3) ? 5 : 6;
}
|
Returns the number of neighbor indexes.
@param h3 Origin index
@return the number of neighbor indexes from the origin
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 379
|
[
"h3"
] | true
| 2
| 8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
byBrokerId
|
public KafkaFuture<Map<Integer, KafkaFuture<Collection<TransactionListing>>>> byBrokerId() {
KafkaFutureImpl<Map<Integer, KafkaFuture<Collection<TransactionListing>>>> result = new KafkaFutureImpl<>();
future.whenComplete((brokerFutures, exception) -> {
if (brokerFutures != null) {
Map<Integer, KafkaFuture<Collection<TransactionListing>>> brokerFuturesCopy =
new HashMap<>(brokerFutures.size());
brokerFuturesCopy.putAll(brokerFutures);
result.complete(brokerFuturesCopy);
} else {
result.completeExceptionally(exception);
}
});
return result;
}
|
Get a future which returns a map containing the underlying listing future for each broker
in the cluster. This is useful, for example, if a partial listing of transactions is
sufficient, or if you want more granular error details.
@return A future containing a map of futures by broker which complete individually when
their respective transaction listings are available. The top-level future returned
from this method may fail if the admin client is unable to lookup the available
brokers in the cluster.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java
| 68
|
[] | true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
removeRaftVoter
|
@Override
public RemoveRaftVoterResult removeRaftVoter(
int voterId,
Uuid voterDirectoryId,
RemoveRaftVoterOptions options
) {
NodeProvider provider = new LeastLoadedBrokerOrActiveKController();
final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
final long now = time.milliseconds();
final Call call = new Call(
"removeRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) {
@Override
RemoveRaftVoterRequest.Builder createRequest(int timeoutMs) {
return new RemoveRaftVoterRequest.Builder(
new RemoveRaftVoterRequestData().
setClusterId(options.clusterId().orElse(null)).
setVoterId(voterId).
setVoterDirectoryId(voterDirectoryId));
}
@Override
void handleResponse(AbstractResponse response) {
handleNotControllerError(response);
RemoveRaftVoterResponse addResponse = (RemoveRaftVoterResponse) response;
Errors error = Errors.forCode(addResponse.data().errorCode());
if (error != Errors.NONE)
future.completeExceptionally(error.exception(addResponse.data().errorMessage()));
else
future.complete(null);
}
@Override
void handleFailure(Throwable throwable) {
future.completeExceptionally(throwable);
}
};
runnable.call(call, now);
return new RemoveRaftVoterResult(future);
}
|
Forcefully terminates an ongoing transaction for a given transactional ID.
<p>
This API is intended for well-formed but long-running transactions that are known to the
transaction coordinator. It is primarily designed for supporting 2PC (two-phase commit) workflows,
where a coordinator may need to unilaterally terminate a participant transaction that hasn't completed.
</p>
@param transactionalId The transactional ID whose active transaction should be forcefully terminated.
@return a {@link TerminateTransactionResult} that can be used to await the operation result.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 5,013
|
[
"voterId",
"voterDirectoryId",
"options"
] |
RemoveRaftVoterResult
| true
| 2
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
start_go_pipeline
|
def start_go_pipeline(
self,
variables: dict,
go_file: str,
process_line_callback: Callable[[str], None] | None = None,
should_init_module: bool = False,
) -> None:
"""
Start Apache Beam Go pipeline with a source file.
:param variables: Variables passed to the job.
:param go_file: Path to the Go file with your beam pipeline.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
:param should_init_module: If False (default), will just execute a `go run` command. If True, will
init a module and dependencies with a ``go mod init`` and ``go mod tidy``, useful when pulling
source with GCSHook.
:return:
"""
if shutil.which("go") is None:
raise AirflowConfigException(
"You need to have Go installed to run beam go pipeline. See https://go.dev/doc/install "
"installation guide. If you are running airflow in Docker see more info at "
"'https://airflow.apache.org/docs/docker-stack/recipes.html'."
)
try:
from airflow.providers.google.go_module_utils import init_module, install_dependencies
except ImportError:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(
"Failed to import apache-airflow-google-provider. To start a go pipeline, please install the"
" google provider."
)
if "labels" in variables:
variables["labels"] = json.dumps(variables["labels"], separators=(",", ":"))
working_directory = os.path.dirname(go_file)
basename = os.path.basename(go_file)
if should_init_module:
init_module("main", working_directory)
install_dependencies(working_directory)
command_prefix = ["go", "run", basename]
self._start_pipeline(
variables=variables,
command_prefix=command_prefix,
process_line_callback=process_line_callback,
working_directory=working_directory,
)
|
Start Apache Beam Go pipeline with a source file.
:param variables: Variables passed to the job.
:param go_file: Path to the Go file with your beam pipeline.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
:param should_init_module: If False (default), will just execute a `go run` command. If True, will
init a module and dependencies with a ``go mod init`` and ``go mod tidy``, useful when pulling
source with GCSHook.
:return:
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 352
|
[
"self",
"variables",
"go_file",
"process_line_callback",
"should_init_module"
] |
None
| true
| 4
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
resolveArgumentValue
|
private ValueHolder resolveArgumentValue(BeanDefinitionValueResolver resolver, ValueHolder valueHolder) {
if (valueHolder.isConverted()) {
return valueHolder;
}
Object value = resolver.resolveValueIfNecessary("constructor argument", valueHolder.getValue());
ValueHolder resolvedHolder = new ValueHolder(value, valueHolder.getType(), valueHolder.getName());
resolvedHolder.setSource(valueHolder);
return resolvedHolder;
}
|
Resolve arguments for the specified registered bean.
@param registeredBean the registered bean
@return the resolved constructor or factory method arguments
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
| 318
|
[
"resolver",
"valueHolder"
] |
ValueHolder
| true
| 2
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
lookupTopicId
|
private TopicIdPartition lookupTopicId(Uuid topicId, int partitionIndex) {
String topicName = metadata.topicNames().get(topicId);
if (topicName == null) {
topicName = topicNamesMap.remove(new IdAndPartition(topicId, partitionIndex));
}
if (topicName == null) {
log.error("Topic name not found in metadata for topicId {} and partitionIndex {}", topicId, partitionIndex);
return null;
}
return new TopicIdPartition(topicId, partitionIndex, topicName);
}
|
The method checks whether the leader for a topicIdPartition has changed.
@param nodeId The previous leader for the partition.
@param topicIdPartition The TopicIdPartition to check.
@return Returns true if leader information is available and leader has changed.
If the leader information is not available or if the leader has not changed, it returns false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,112
|
[
"topicId",
"partitionIndex"
] |
TopicIdPartition
| true
| 3
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeFrom
|
FileTime removeFrom(FileTime time) {
return FileTime.fromMillis(removeFrom(time.toMillis()));
}
|
Remove the default offset from the given time.
@param time the time to remove the default offset from
@return the time with the default offset removed
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/DefaultTimeZoneOffset.java
| 45
|
[
"time"
] |
FileTime
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
accept
|
public static <T> void accept(final Consumer<T> consumer, final T object) {
if (consumer != null) {
consumer.accept(object);
}
}
|
Applies the given {@link Consumer} action to the object if the consumer is not {@code null}. Otherwise, does nothing.
@param consumer the consumer to consume.
@param object the object to be consumed.
@param <T> the type of the argument the consumer accepts.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/function/Consumers.java
| 42
|
[
"consumer",
"object"
] |
void
| true
| 2
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
partitionEndOffset
|
public synchronized Long partitionEndOffset(TopicPartition tp, IsolationLevel isolationLevel) {
TopicPartitionState topicPartitionState = assignedState(tp);
if (isolationLevel == IsolationLevel.READ_COMMITTED) {
return topicPartitionState.lastStableOffset;
} else {
return topicPartitionState.highWatermark;
}
}
|
Attempt to complete validation with the end offset returned from the OffsetForLeaderEpoch request.
@return Log truncation details if detected and no reset policy is defined.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 651
|
[
"tp",
"isolationLevel"
] |
Long
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
copy
|
@CanIgnoreReturnValue
public static long copy(Readable from, Appendable to) throws IOException {
// The most common case is that from is a Reader (like InputStreamReader or StringReader) so
// take advantage of that.
if (from instanceof Reader) {
// optimize for common output types which are optimized to deal with char[]
if (to instanceof StringBuilder) {
return copyReaderToBuilder((Reader) from, (StringBuilder) to);
} else {
return copyReaderToWriter((Reader) from, asWriter(to));
}
}
checkNotNull(from);
checkNotNull(to);
long total = 0;
CharBuffer buf = createBuffer();
while (from.read(buf) != -1) {
Java8Compatibility.flip(buf);
to.append(buf);
total += buf.remaining();
Java8Compatibility.clear(buf);
}
return total;
}
|
Copies all characters between the {@link Readable} and {@link Appendable} objects. Does not
close or flush either object.
@param from the object to read from
@param to the object to write to
@return the number of characters copied
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/CharStreams.java
| 65
|
[
"from",
"to"
] | true
| 4
| 8.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
findMethodWithMinimalParameters
|
public static @Nullable Method findMethodWithMinimalParameters(Method[] methods, String methodName)
throws IllegalArgumentException {
Method targetMethod = null;
int numMethodsFoundWithCurrentMinimumArgs = 0;
for (Method method : methods) {
if (method.getName().equals(methodName)) {
int numParams = method.getParameterCount();
if (targetMethod == null || numParams < targetMethod.getParameterCount()) {
targetMethod = method;
numMethodsFoundWithCurrentMinimumArgs = 1;
}
else if (!method.isBridge() && targetMethod.getParameterCount() == numParams) {
if (targetMethod.isBridge()) {
// Prefer regular method over bridge...
targetMethod = method;
}
else {
// Additional candidate with same length
numMethodsFoundWithCurrentMinimumArgs++;
}
}
}
}
if (numMethodsFoundWithCurrentMinimumArgs > 1) {
throw new IllegalArgumentException("Cannot resolve method '" + methodName +
"' to a unique method. Attempted to resolve to overloaded method with " +
"the least number of parameters but there were " +
numMethodsFoundWithCurrentMinimumArgs + " candidates.");
}
return targetMethod;
}
|
Find a method with the given method name and minimal parameters (best case: none)
in the given list of methods.
@param methods the methods to check
@param methodName the name of the method to find
@return the Method object, or {@code null} if not found
@throws IllegalArgumentException if methods of the given name were found but
could not be resolved to a unique method with minimal parameters
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 401
|
[
"methods",
"methodName"
] |
Method
| true
| 8
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
automaticConfigProvidersFilter
|
private Predicate<String> automaticConfigProvidersFilter() {
String systemProperty = System.getProperty(AUTOMATIC_CONFIG_PROVIDERS_PROPERTY);
if (systemProperty == null) {
return ignored -> true;
} else {
return Arrays.stream(systemProperty.split(","))
.map(String::trim)
.collect(Collectors.toSet())::contains;
}
}
|
Instantiates given list of config providers and fetches the actual values of config variables from the config providers.
returns a map of config key and resolved values.
@param configProviderProps The map of config provider configs
@param originals The map of raw configs.
@return map of resolved config variable.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 567
|
[] | true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getSymlinkCache
|
function getSymlinkCache(): SymlinkCache {
if (host.getSymlinkCache) {
return host.getSymlinkCache();
}
if (!symlinks) {
symlinks = createSymlinkCache(currentDirectory, getCanonicalFileName);
}
if (files && !symlinks.hasProcessedResolutions()) {
symlinks.setSymlinksFromResolutions(forEachResolvedModule, forEachResolvedTypeReferenceDirective, automaticTypeDirectiveResolutions);
}
return symlinks;
}
|
Get the referenced project if the file is input file from that reference project
|
typescript
|
src/compiler/program.ts
| 4,751
|
[] | true
| 5
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_get_hooks_with_mocked_fab
|
def _get_hooks_with_mocked_fab() -> tuple[
MutableMapping[str, HookInfo | None], dict[str, ConnectionFormWidgetInfo], dict[str, dict]
]:
"""Get hooks with all details w/o FAB needing to be installed."""
from unittest import mock
from airflow.providers_manager import ProvidersManager
def mock_lazy_gettext(txt: str) -> str:
"""Mock for flask_babel.lazy_gettext."""
return txt
def mock_any_of(allowed_values: list) -> HookMetaService.MockEnum:
"""Mock for wtforms.validators.any_of."""
return HookMetaService.MockEnum(allowed_values)
# Before importing ProvidersManager, we need to mock all FAB and WTForms
# dependencies to avoid ImportErrors when FAB is not installed.
import sys
from importlib.util import find_spec
from unittest.mock import MagicMock
for mod_name in [
"wtforms",
"wtforms.csrf",
"wtforms.fields",
"wtforms.fields.simple",
"wtforms.validators",
"flask_babel",
"flask_appbuilder",
"flask_appbuilder.fieldwidgets",
]:
try:
if not find_spec(mod_name):
raise ModuleNotFoundError
except ModuleNotFoundError:
sys.modules[mod_name] = MagicMock()
with (
mock.patch("wtforms.StringField", HookMetaService.MockStringField),
mock.patch("wtforms.fields.StringField", HookMetaService.MockStringField),
mock.patch("wtforms.fields.simple.StringField", HookMetaService.MockStringField),
mock.patch("wtforms.IntegerField", HookMetaService.MockIntegerField),
mock.patch("wtforms.fields.IntegerField", HookMetaService.MockIntegerField),
mock.patch("wtforms.PasswordField", HookMetaService.MockPasswordField),
mock.patch("wtforms.BooleanField", HookMetaService.MockBooleanField),
mock.patch("wtforms.fields.BooleanField", HookMetaService.MockBooleanField),
mock.patch("wtforms.fields.simple.BooleanField", HookMetaService.MockBooleanField),
mock.patch("flask_babel.lazy_gettext", mock_lazy_gettext),
mock.patch("flask_appbuilder.fieldwidgets.BS3TextFieldWidget", HookMetaService.MockAnyWidget),
mock.patch("flask_appbuilder.fieldwidgets.BS3TextAreaFieldWidget", HookMetaService.MockAnyWidget),
mock.patch("flask_appbuilder.fieldwidgets.BS3PasswordFieldWidget", HookMetaService.MockAnyWidget),
mock.patch("wtforms.validators.Optional", HookMetaService.MockOptional),
mock.patch("wtforms.validators.any_of", mock_any_of),
):
pm = ProvidersManager()
return pm.hooks, pm.connection_form_widgets, pm.field_behaviours # Will init providers hooks
|
Get hooks with all details w/o FAB needing to be installed.
|
python
|
airflow-core/src/airflow/api_fastapi/core_api/services/ui/connections.py
| 119
|
[] |
tuple[
MutableMapping[str, HookInfo | None], dict[str, ConnectionFormWidgetInfo], dict[str, dict]
]
| true
| 3
| 6.96
|
apache/airflow
| 43,597
|
unknown
| false
|
collectFetch
|
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
final Fetch<K, V> fetch = Fetch.empty();
final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
int recordsRemaining = fetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final CompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null)
break;
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
// Remove a completedFetch upon a parse with exception if (1) it contains no completedFetch, and
// (2) there are no fetched completedFetch with actual content preceding this exception.
// The first condition ensures that the completedFetches is not stuck with the same completedFetch
// in cases such as the TopicAuthorizationException, and the second condition ensures that no
// potential data loss due to an exception in a following record.
if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
fetchBuffer.poll();
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else if (subscriptions.isPaused(nextInLineFetch.partition)) {
// when the partition is paused we add the records back to the completedFetches queue instead of draining
// them so that they can be returned on a subsequent poll if the partition is resumed at that time
log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
pausedCompletedFetches.add(nextInLineFetch);
fetchBuffer.setNextInLineFetch(null);
} else {
final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
recordsRemaining -= nextFetch.numRecords();
fetch.add(nextFetch);
}
}
} catch (KafkaException e) {
if (fetch.isEmpty())
throw e;
} finally {
// add any polled completed fetches for paused partitions back to the completed fetches queue to be
// re-evaluated in the next poll
fetchBuffer.addAll(pausedCompletedFetches);
}
return fetch;
}
|
Return the fetched {@link ConsumerRecord records}, empty the {@link FetchBuffer record buffer}, and
update the consumed position.
</p>
NOTE: returning an {@link Fetch#empty() empty} fetch guarantees the consumed position is not updated.
@param fetchBuffer {@link FetchBuffer} from which to retrieve the {@link ConsumerRecord records}
@return A {@link Fetch} for the requested partitions
@throws OffsetOutOfRangeException If there is OffsetOutOfRange error in fetchResponse and
the defaultResetPolicy is NONE
@throws TopicAuthorizationException If there is TopicAuthorization error in fetchResponse.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java
| 92
|
[
"fetchBuffer"
] | true
| 12
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
leggauss
|
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = pu._as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0] * deg + [1])
m = legcompanion(c)
x = np.linalg.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy / df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1 / (fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1]) / 2
x = (x - x[::-1]) / 2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
|
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
|
python
|
numpy/polynomial/legendre.py
| 1,470
|
[
"deg"
] | false
| 2
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
hashCode
|
@Override
public int hashCode() {
int normalizedScale = normalizeScale(index(), scale);
int scaleAdjustment = normalizedScale - scale;
long normalizedIndex = adjustScale(index(), scale, scaleAdjustment);
int result = normalizedScale;
result = 31 * result + Long.hashCode(normalizedIndex);
result = 31 * result + Double.hashCode(zeroThreshold());
result = 31 * result + Long.hashCode(count);
return result;
}
|
Collapses all buckets from the given iterator whose lower boundaries are smaller than the zero threshold.
The iterator is advanced to point at the first, non-collapsed bucket.
@param buckets The iterator whose buckets may be collapsed.
@return A potentially updated {@link ZeroBucket} with the collapsed buckets' counts and an adjusted threshold.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java
| 279
|
[] | true
| 1
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
refresh_from_db
|
def refresh_from_db(
self, session: Session = NEW_SESSION, lock_for_update: bool = False, keep_local_changes: bool = False
) -> None:
"""
Refresh the task instance from the database based on the primary key.
:param session: SQLAlchemy ORM Session
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
:param keep_local_changes: Force all attributes to the values from the database if False (the default),
or if True don't overwrite locally set attributes
"""
query = select(
# Select the columns, not the ORM object, to bypass any session/ORM caching layer
*TaskInstance.__table__.columns
).filter_by(
dag_id=self.dag_id,
run_id=self.run_id,
task_id=self.task_id,
map_index=self.map_index,
)
if lock_for_update:
query = query.with_for_update()
source = session.execute(query).mappings().one_or_none()
if source:
target_state: Any = inspect(self)
if target_state is None:
raise RuntimeError(f"Unable to inspect SQLAlchemy state of {type(self)}: {self}")
# To deal with `@hybrid_property` we need to get the names from `mapper.columns`
for attr_name, col in target_state.mapper.columns.items():
if keep_local_changes and target_state.attrs[attr_name].history.has_changes():
continue
set_committed_value(self, attr_name, source[col.name])
# ID may have changed, update SQLAs state and object tracking
newkey = session.identity_key(type(self), (self.id,))
# Delete anything under the new key
if newkey != target_state.key:
old = session.identity_map.get(newkey)
if old is not self and old is not None:
session.expunge(old)
target_state.key = newkey
if target_state.attrs.dag_run.loaded_value is not NO_VALUE:
dr_key = session.identity_key(type(self.dag_run), (self.dag_run.id,))
if (dr := session.identity_map.get(dr_key)) is not None:
set_committed_value(self, "dag_run", dr)
else:
self.state = None
|
Refresh the task instance from the database based on the primary key.
:param session: SQLAlchemy ORM Session
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
:param keep_local_changes: Force all attributes to the values from the database if False (the default),
or if True don't overwrite locally set attributes
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 670
|
[
"self",
"session",
"lock_for_update",
"keep_local_changes"
] |
None
| true
| 13
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
compareModuleSpecifierRelativity
|
function compareModuleSpecifierRelativity(a: ImportFixWithModuleSpecifier, b: ImportFixWithModuleSpecifier, preferences: UserPreferences): Comparison {
if (preferences.importModuleSpecifierPreference === "non-relative" || preferences.importModuleSpecifierPreference === "project-relative") {
return compareBooleans(a.moduleSpecifierKind === "relative", b.moduleSpecifierKind === "relative");
}
return Comparison.EqualTo;
}
|
@returns `Comparison.LessThan` if `a` is better than `b`.
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,442
|
[
"a",
"b",
"preferences"
] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getFormattedExceptionMessage
|
@Override
public String getFormattedExceptionMessage(final String baseMessage) {
final StringBuilder buffer = new StringBuilder(256);
if (baseMessage != null) {
buffer.append(baseMessage);
}
if (!contextValues.isEmpty()) {
if (buffer.length() > 0) {
buffer.append('\n');
}
buffer.append("Exception Context:\n");
int i = 0;
for (final Pair<String, Object> pair : contextValues) {
buffer.append("\t[");
buffer.append(++i);
buffer.append(':');
buffer.append(pair.getKey());
buffer.append("=");
final Object value = pair.getValue();
try {
buffer.append(Objects.toString(value));
} catch (final Exception e) {
buffer.append("Exception thrown on toString(): ");
buffer.append(ExceptionUtils.getStackTrace(e));
}
buffer.append("]\n");
}
buffer.append("---------------------------------");
}
return buffer.toString();
}
|
Builds the message containing the contextual information.
@param baseMessage the base exception message <strong>without</strong> context information appended
@return the exception message <strong>with</strong> context information appended, never null
|
java
|
src/main/java/org/apache/commons/lang3/exception/DefaultExceptionContext.java
| 104
|
[
"baseMessage"
] |
String
| true
| 5
| 7.12
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
iterator
|
@Override
public Iterator<E> iterator() {
return new QueueIterator();
}
|
Returns an iterator over the elements contained in this collection, <i>in no particular
order</i>.
<p>The iterator is <i>fail-fast</i>: If the MinMaxPriorityQueue is modified at any time after
the iterator is created, in any way except through the iterator's own remove method, the
iterator will generally throw a {@link ConcurrentModificationException}. Thus, in the face of
concurrent modification, the iterator fails quickly and cleanly, rather than risking arbitrary,
non-deterministic behavior at an undetermined time in the future.
<p>Note that the fail-fast behavior of an iterator cannot be guaranteed as it is, generally
speaking, impossible to make any hard guarantees in the presence of unsynchronized concurrent
modification. Fail-fast iterators throw {@code ConcurrentModificationException} on a
best-effort basis. Therefore, it would be wrong to write a program that depended on this
exception for its correctness: <i>the fail-fast behavior of iterators should be used only to
detect bugs.</i>
@return an iterator over the elements contained in this collection
|
java
|
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
| 899
|
[] | true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
|
fit
|
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
super()._fit_indicator(self._mask_fit_X)
return self
|
Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
|
python
|
sklearn/impute/_knn.py
| 214
|
[
"self",
"X",
"y"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
getMissingCache
|
@Override
protected @Nullable Cache getMissingCache(String name) {
CacheManager cacheManager = getCacheManager();
Assert.state(cacheManager != null, "No CacheManager set");
// Check the JCache cache again (in case the cache was added at runtime)
javax.cache.Cache<Object, Object> jcache = cacheManager.getCache(name);
if (jcache != null) {
return new JCacheCache(jcache, isAllowNullValues());
}
return null;
}
|
Return whether this cache manager accepts and converts {@code null} values
for all of its caches.
|
java
|
spring-context-support/src/main/java/org/springframework/cache/jcache/JCacheCacheManager.java
| 120
|
[
"name"
] |
Cache
| true
| 2
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
readStaticField
|
public static Object readStaticField(final Class<?> cls, final String fieldName, final boolean forceAccess) throws IllegalAccessException {
final Field field = getField(cls, fieldName, forceAccess);
Validate.notNull(field, "Cannot locate field '%s' on %s", fieldName, cls);
// already forced access above, don't repeat it here:
return readStaticField(field, false);
}
|
Reads the named {@code static} {@link Field}. Superclasses will be considered.
@param cls
the {@link Class} to reflect, must not be {@code null}.
@param fieldName
the field name to obtain.
@param forceAccess
whether to break scope restrictions using the
{@link AccessibleObject#setAccessible(boolean)} method. {@code false} will only
match {@code public} fields.
@return the Field object.
@throws NullPointerException
if the class is {@code null}, or the field could not be found.
@throws IllegalArgumentException
if the field name is {@code null}, blank or empty, or is not {@code static}.
@throws IllegalAccessException
if the field is not made accessible.
@throws SecurityException if an underlying accessible object's method denies the request.
@see SecurityManager#checkPermission
|
java
|
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
| 498
|
[
"cls",
"fieldName",
"forceAccess"
] |
Object
| true
| 1
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
createAcls
|
CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options);
|
Creates access control lists (ACLs) which are bound to specific resources.
<p>
This operation is not transactional so it may succeed for some ACLs while fail for others.
<p>
If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
no changes will be made.
<p>
This operation is supported by brokers with version 0.11.0.0 or higher.
@param acls The ACLs to create
@param options The options to use when creating the ACLs.
@return The CreateAclsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 411
|
[
"acls",
"options"
] |
CreateAclsResult
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
copySortDefinition
|
protected SortDefinition copySortDefinition(SortDefinition sort) {
return new MutableSortDefinition(sort);
}
|
Create a deep copy of the given sort definition,
for use as state holder to compare a modified sort definition against.
<p>Default implementation creates a MutableSortDefinition instance.
Can be overridden in subclasses, in particular in case of custom
extensions to the SortDefinition interface. Is allowed to return
null, which means that no sort state will be held, triggering
actual sorting for each {@code resort} call.
@param sort the current SortDefinition object
@return a deep copy of the SortDefinition object
@see MutableSortDefinition#MutableSortDefinition(SortDefinition)
|
java
|
spring-beans/src/main/java/org/springframework/beans/support/PagedListHolder.java
| 328
|
[
"sort"
] |
SortDefinition
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
wait_for_db_snapshot_state
|
def wait_for_db_snapshot_state(
self, snapshot_id: str, target_state: str, check_interval: int = 30, max_attempts: int = 40
) -> None:
"""
Poll DB Snapshots until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_snapshots`
:param snapshot_id: The ID of the target DB instance snapshot
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
"""
def poke():
return self.get_db_snapshot_state(snapshot_id)
target_state = target_state.lower()
if target_state in ("available", "deleted", "completed"):
waiter = self.conn.get_waiter(f"db_snapshot_{target_state}") # type: ignore
waiter.wait(
DBSnapshotIdentifier=snapshot_id,
WaiterConfig={"Delay": check_interval, "MaxAttempts": max_attempts},
)
else:
self._wait_for_state(poke, target_state, check_interval, max_attempts)
self.log.info("DB snapshot '%s' reached the '%s' state", snapshot_id, target_state)
|
Poll DB Snapshots until target_state is reached; raise AirflowException after max_attempts.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_snapshots`
:param snapshot_id: The ID of the target DB instance snapshot
:param target_state: Wait until this state is reached
:param check_interval: The amount of time in seconds to wait between attempts
:param max_attempts: The maximum number of attempts to be made
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/rds.py
| 70
|
[
"self",
"snapshot_id",
"target_state",
"check_interval",
"max_attempts"
] |
None
| true
| 3
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
addAdvisorInternal
|
private void addAdvisorInternal(int pos, Advisor advisor) throws AopConfigException {
Assert.notNull(advisor, "Advisor must not be null");
if (isFrozen()) {
throw new AopConfigException("Cannot add advisor: Configuration is frozen.");
}
if (pos > this.advisors.size()) {
throw new IllegalArgumentException(
"Illegal position " + pos + " in advisor list with size " + this.advisors.size());
}
this.advisors.add(pos, advisor);
adviceChanged();
}
|
Add all the given advisors to this proxy configuration.
@param advisors the advisors to register
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 403
|
[
"pos",
"advisor"
] |
void
| true
| 3
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
childRouteName
|
function childRouteName(child: AngularRoute): string {
if (child.component) {
return child.component.name;
} else if (child.loadChildren || child.loadComponent) {
return `${child.path} [Lazy]`;
} else {
return 'no-name-route';
}
}
|
Get the display name for a function or class.
@param fn - The function or class to get the name from
@param defaultName - Optional name to check against. If the function name matches this value,
'[Function]' is returned instead
@returns The formatted name: class name, function name with '()', or '[Function]' for anonymous/arrow functions
|
typescript
|
devtools/projects/ng-devtools-backend/src/lib/router-tree.ts
| 219
|
[
"child"
] | true
| 6
| 7.92
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
get_device_list
|
def get_device_list() -> Sequence[Optional[int]]:
"""
Gather the list of devices to be used in the pool.
"""
if not config.autotune_multi_device:
# Don't use multiple devices
return [None]
gpu_type = get_gpu_type()
device_interface = get_interface_for_device(gpu_type)
count = device_interface.device_count()
# If the user specified the visible devices in the env, use those.
if CUDA_VISIBLE_DEVICES in os.environ:
devices = [int(d) for d in os.environ[CUDA_VISIBLE_DEVICES].split(",")]
assert len(devices) <= count
return devices
return list(range(count))
|
Gather the list of devices to be used in the pool.
|
python
|
torch/_inductor/autotune_process.py
| 280
|
[] |
Sequence[Optional[int]]
| true
| 3
| 7.04
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
geoAzimuthRads
|
double geoAzimuthRads(double lat, double lon) {
// algorithm from the original H3 library
final double cosLat = FastMath.cos(lat);
return FastMath.atan2(
cosLat * FastMath.sin(lon - this.lon),
FastMath.cos(this.lat) * FastMath.sin(lat) - FastMath.sin(this.lat) * cosLat * FastMath.cos(lon - this.lon)
);
}
|
Determines the azimuth to the provided LatLng in radians.
@param lat The latitude in radians.
@param lon The longitude in radians.
@return The azimuth in radians.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java
| 74
|
[
"lat",
"lon"
] | true
| 1
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
put
|
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> import numpy as np
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError as e:
raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e
return put(ind, v, mode=mode)
|
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> import numpy as np
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
|
python
|
numpy/_core/fromnumeric.py
| 489
|
[
"a",
"ind",
"v",
"mode"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
acquireAndEnsureOpen
|
private void acquireAndEnsureOpen() {
acquire();
if (this.closed) {
release();
throw new IllegalStateException("This consumer has already been closed.");
}
}
|
Acquire the light lock and ensure that the consumer hasn't been closed.
@throws IllegalStateException If the consumer has been closed
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 2,073
|
[] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
read
|
@Override
public int read() throws IOException {
while (in != null) {
int result = in.read();
if (result != -1) {
return result;
}
advance();
}
return -1;
}
|
Closes the current input stream and opens the next one, if any.
|
java
|
android/guava/src/com/google/common/io/MultiInputStream.java
| 82
|
[] | true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
directives
|
function directives(node: TSESTree.Program | TSESTree.BlockStatement): TSESTree.Node[] {
return takeWhile(looksLikeDirective, node.body);
}
|
@param node a Program or BlockStatement node
@returns the leading sequence of directive nodes in the given node's body
|
typescript
|
.eslint-plugin-local/code-no-unused-expressions.ts
| 91
|
[
"node"
] | true
| 1
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
topicPartition
|
public TopicPartition topicPartition() {
return topicPartition;
}
|
@return Topic partition representing this instance.
|
java
|
clients/src/main/java/org/apache/kafka/common/TopicIdPartition.java
| 77
|
[] |
TopicPartition
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
getMethodsListWithAnnotation
|
public static List<Method> getMethodsListWithAnnotation(final Class<?> cls, final Class<? extends Annotation> annotationCls, final boolean searchSupers,
final boolean ignoreAccess) {
Objects.requireNonNull(cls, "cls");
Objects.requireNonNull(annotationCls, "annotationCls");
final List<Class<?>> classes = searchSupers ? getAllSuperclassesAndInterfaces(cls) : new ArrayList<>();
classes.add(0, cls);
final List<Method> annotatedMethods = new ArrayList<>();
classes.forEach(acls -> {
final Method[] methods = ignoreAccess ? acls.getDeclaredMethods() : acls.getMethods();
Stream.of(methods).filter(method -> method.isAnnotationPresent(annotationCls)).forEachOrdered(annotatedMethods::add);
});
return annotatedMethods;
}
|
Gets all methods of the given class that are annotated with the given annotation.
@param cls the {@link Class} to query.
@param annotationCls the {@link Annotation} that must be present on a method to be matched.
@param searchSupers determines if a lookup in the entire inheritance hierarchy of the given class should be performed.
@param ignoreAccess determines if non-public methods should be considered.
@return a list of Methods (possibly empty).
@throws NullPointerException if either the class or annotation class is {@code null}.
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 447
|
[
"cls",
"annotationCls",
"searchSupers",
"ignoreAccess"
] | true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
split
|
public ConfigDataLocation[] split(String delimiter) {
Assert.state(!this.value.isEmpty(), "Unable to split empty locations");
String[] values = StringUtils.delimitedListToStringArray(toString(), delimiter);
ConfigDataLocation[] result = new ConfigDataLocation[values.length];
for (int i = 0; i < values.length; i++) {
int index = i;
ConfigDataLocation configDataLocation = of(values[index]);
result[i] = configDataLocation.withOrigin(getOrigin());
}
return result;
}
|
Return an array of {@link ConfigDataLocation} elements built by splitting this
{@link ConfigDataLocation} around the specified delimiter.
@param delimiter the delimiter to split on
@return the split locations
@since 2.4.7
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataLocation.java
| 119
|
[
"delimiter"
] | true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
overlaps
|
def overlaps(self, other):
"""
Check elementwise if an Interval overlaps the values in the IntervalArray.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
Parameters
----------
other : IntervalArray
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
>>> data = [(0, 1), (1, 3), (2, 4)]
>>> intervals = pd.arrays.IntervalArray.from_tuples(data)
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, dtype: interval[int64, right]
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed="left"))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed="right"))
array([False, True, False])
"""
if isinstance(other, (IntervalArray, ABCIntervalIndex)):
raise NotImplementedError
if not isinstance(other, Interval):
msg = f"`other` must be Interval-like, got {type(other).__name__}"
raise TypeError(msg)
# equality is okay if both endpoints are closed (overlap at a point)
op1 = le if (self.closed_left and other.closed_right) else lt
op2 = le if (other.closed_left and self.closed_right) else lt
# overlaps is equivalent negation of two interval being disjoint:
# disjoint = (A.left > B.right) or (B.left > A.right)
# (simplifying the negation allows this to be done in less operations)
return op1(self.left, other.right) & op2(other.left, self.right)
|
Check elementwise if an Interval overlaps the values in the IntervalArray.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
Parameters
----------
other : IntervalArray
Interval to check against for an overlap.
Returns
-------
ndarray
Boolean array positionally indicating where an overlap occurs.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
Examples
--------
>>> data = [(0, 1), (1, 3), (2, 4)]
>>> intervals = pd.arrays.IntervalArray.from_tuples(data)
>>> intervals
<IntervalArray>
[(0, 1], (1, 3], (2, 4]]
Length: 3, dtype: interval[int64, right]
>>> intervals.overlaps(pd.Interval(0.5, 1.5))
array([ True, True, False])
Intervals that share closed endpoints overlap:
>>> intervals.overlaps(pd.Interval(1, 3, closed="left"))
array([ True, True, True])
Intervals that only have an open endpoint in common do not overlap:
>>> intervals.overlaps(pd.Interval(1, 2, closed="right"))
array([False, True, False])
|
python
|
pandas/core/arrays/interval.py
| 1,511
|
[
"self",
"other"
] | false
| 7
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
visitExportAssignment
|
function visitExportAssignment(node: ExportAssignment): VisitResult<Statement | undefined> {
// Elide the export assignment if it does not reference a value.
return compilerOptions.verbatimModuleSyntax || resolver.isValueAliasDeclaration(node)
? visitEachChild(node, visitor, context)
: undefined;
}
|
Visits an export assignment, eliding it if it does not contain a clause that resolves
to a value.
@param node The export assignment node.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,325
|
[
"node"
] | true
| 3
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
convertPropertyValue
|
protected String convertPropertyValue(String originalValue) {
return originalValue;
}
|
Convert the given property value from the properties source to the value
which should be applied.
<p>The default implementation simply returns the original value.
Can be overridden in subclasses, for example to detect
encrypted values and decrypt them accordingly.
@param originalValue the original value from the properties source
(properties file or local "properties")
@return the converted value, to be used for processing
@see #setProperties
@see #setLocations
@see #setLocation
@see #convertProperty(String, String)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/PropertyResourceConfigurer.java
| 140
|
[
"originalValue"
] |
String
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
transitionToStale
|
private void transitionToStale() {
transitionTo(MemberState.STALE);
final CompletableFuture<Void> onAllTasksLostCallbackExecuted = requestOnAllTasksLostCallbackInvocation();
staleMemberAssignmentRelease = onAllTasksLostCallbackExecuted.whenComplete((result, error) -> {
if (error != null) {
log.error("Task revocation callback invocation failed " +
"after member left group due to expired poll timer.", error);
}
clearTaskAndPartitionAssignment();
log.debug("Member {} sent leave group heartbeat and released its assignment. It will remain " +
"in {} state until the poll timer is reset, and it will then rejoin the group",
memberId, MemberState.STALE);
});
}
|
Transition to STALE to release assignments because the member has left the group due to
expired poll timer. This will trigger the onAllTasksLost callback. Once the callback
completes, the member will remain stale until the poll timer is reset by an application
poll event. See {@link #maybeRejoinStaleMember()}.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 462
|
[] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
unique
|
def unique(self) -> ArrayLike:
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
Returns
-------
ndarray or ExtensionArray
The unique values returned as a NumPy array. See Notes.
See Also
--------
Series.drop_duplicates : Return Series with duplicate values removed.
unique : Top-level unique method for any 1-d array-like object.
Index.unique : Return Index with unique values from an Index object.
Notes
-----
Returns the unique values as a NumPy array. In case of an
extension-array backed Series, a new
:class:`~api.extensions.ExtensionArray` of that type with just
the unique values is returned. This includes
* Categorical
* Period
* Datetime with Timezone
* Datetime without Timezone
* Timedelta
* Interval
* Sparse
* IntegerNA
See Examples section.
Examples
--------
>>> pd.Series([2, 1, 3, 3], name="A").unique()
array([2, 1, 3])
>>> pd.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).unique()
<DatetimeArray>
['2016-01-01 00:00:00']
Length: 1, dtype: datetime64[us]
>>> pd.Series(
... [pd.Timestamp("2016-01-01", tz="US/Eastern") for _ in range(3)]
... ).unique()
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[us, US/Eastern]
A Categorical will return categories in the order of
appearance and with the same dtype.
>>> pd.Series(pd.Categorical(list("baabc"))).unique()
['b', 'a', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... ).unique()
['b', 'a', 'c']
Categories (3, str): ['a' < 'b' < 'c']
"""
return super().unique()
|
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
Returns
-------
ndarray or ExtensionArray
The unique values returned as a NumPy array. See Notes.
See Also
--------
Series.drop_duplicates : Return Series with duplicate values removed.
unique : Top-level unique method for any 1-d array-like object.
Index.unique : Return Index with unique values from an Index object.
Notes
-----
Returns the unique values as a NumPy array. In case of an
extension-array backed Series, a new
:class:`~api.extensions.ExtensionArray` of that type with just
the unique values is returned. This includes
* Categorical
* Period
* Datetime with Timezone
* Datetime without Timezone
* Timedelta
* Interval
* Sparse
* IntegerNA
See Examples section.
Examples
--------
>>> pd.Series([2, 1, 3, 3], name="A").unique()
array([2, 1, 3])
>>> pd.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).unique()
<DatetimeArray>
['2016-01-01 00:00:00']
Length: 1, dtype: datetime64[us]
>>> pd.Series(
... [pd.Timestamp("2016-01-01", tz="US/Eastern") for _ in range(3)]
... ).unique()
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[us, US/Eastern]
A Categorical will return categories in the order of
appearance and with the same dtype.
>>> pd.Series(pd.Categorical(list("baabc"))).unique()
['b', 'a', 'c']
Categories (3, str): ['a', 'b', 'c']
>>> pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... ).unique()
['b', 'a', 'c']
Categories (3, str): ['a' < 'b' < 'c']
|
python
|
pandas/core/series.py
| 2,155
|
[
"self"
] |
ArrayLike
| true
| 1
| 6.72
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
toString
|
@Override
public String toString() {
return this.options.toString();
}
|
Returns if the given option is contained in this set.
@param option the option to check
@return {@code true} of the option is present
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigData.java
| 218
|
[] |
String
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_compute_interactions
|
def _compute_interactions(self, node):
r"""Compute features allowed by interactions to be inherited by child nodes.
Example: Assume constraints [{0, 1}, {1, 2}].
1 <- Both constraint groups could be applied from now on
/ \
1 2 <- Left split still fulfills both constraint groups.
/ \ / \ Right split at feature 2 has only group {1, 2} from now on.
LightGBM uses the same logic for overlapping groups. See
https://github.com/microsoft/LightGBM/issues/4481 for details.
Parameters:
----------
node : TreeNode
A node that might have children. Based on its feature_idx, the interaction
constraints for possible child nodes are computed.
Returns
-------
allowed_features : ndarray, dtype=uint32
Indices of features allowed to split for children.
interaction_cst_indices : list of ints
Indices of the interaction sets that have to be applied on splits of
child nodes. The fewer sets the stronger the constraint as fewer sets
contain fewer features.
"""
# Note:
# - Case of no interactions is already captured before function call.
# - This is for nodes that are already split and have a
# node.split_info.feature_idx.
allowed_features = set()
interaction_cst_indices = []
for i in node.interaction_cst_indices:
if node.split_info.feature_idx in self.interaction_cst[i]:
interaction_cst_indices.append(i)
allowed_features.update(self.interaction_cst[i])
return (
np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)),
interaction_cst_indices,
)
|
r"""Compute features allowed by interactions to be inherited by child nodes.
Example: Assume constraints [{0, 1}, {1, 2}].
1 <- Both constraint groups could be applied from now on
/ \
1 2 <- Left split still fulfills both constraint groups.
/ \ / \ Right split at feature 2 has only group {1, 2} from now on.
LightGBM uses the same logic for overlapping groups. See
https://github.com/microsoft/LightGBM/issues/4481 for details.
Parameters:
----------
node : TreeNode
A node that might have children. Based on its feature_idx, the interaction
constraints for possible child nodes are computed.
Returns
-------
allowed_features : ndarray, dtype=uint32
Indices of features allowed to split for children.
interaction_cst_indices : list of ints
Indices of the interaction sets that have to be applied on splits of
child nodes. The fewer sets the stronger the constraint as fewer sets
contain fewer features.
|
python
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
| 654
|
[
"self",
"node"
] | false
| 3
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
google
| false
|
|
records
|
Iterable<Record> records();
|
Get an iterator over the records in this log. Note that this generally requires decompression,
and should therefore be used with care.
@return The record iterator
|
java
|
clients/src/main/java/org/apache/kafka/common/record/Records.java
| 92
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
canApply
|
public static boolean canApply(Pointcut pc, Class<?> targetClass) {
return canApply(pc, targetClass, false);
}
|
Can the given pointcut apply at all on the given class?
<p>This is an important test as it can be used to optimize
out a pointcut for a class.
@param pc the static or dynamic pointcut to check
@param targetClass the class to test
@return whether the pointcut can apply on any method
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/AopUtils.java
| 225
|
[
"pc",
"targetClass"
] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
all_estimators
|
def all_estimators(type_filter=None):
"""Get a list of all estimators from `sklearn`.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
Parameters
----------
type_filter : {"classifier", "regressor", "cluster", "transformer"} \
or list of such str, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_estimators
>>> estimators = all_estimators()
>>> type(estimators)
<class 'list'>
>>> type(estimators[0])
<class 'tuple'>
>>> estimators[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
>>> classifiers = all_estimators(type_filter="classifier")
>>> classifiers[:2]
[('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
>>> regressors = all_estimators(type_filter="regressor")
>>> regressors[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostRegressor',
<class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
>>> both = all_estimators(type_filter=["classifier", "regressor"])
>>> both[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
"""
# lazy import to avoid circular imports from sklearn.base
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
ClusterMixin,
RegressorMixin,
TransformerMixin,
)
from sklearn.utils._testing import ignore_warnings
def is_abstract(c):
if not (hasattr(c, "__abstractmethods__")):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
root = str(Path(__file__).parent.parent) # sklearn package
# Ignore deprecation warnings triggered at import time and from walking
# packages
with ignore_warnings(category=FutureWarning):
for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
module_parts = module_name.split(".")
if (
any(part in _MODULE_TO_IGNORE for part in module_parts)
or "._" in module_name
):
continue
module = import_module(module_name)
classes = inspect.getmembers(module, inspect.isclass)
classes = [
(name, est_cls) for name, est_cls in classes if not name.startswith("_")
]
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [
c
for c in all_classes
if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {
"classifier": ClassifierMixin,
"regressor": RegressorMixin,
"transformer": TransformerMixin,
"cluster": ClusterMixin,
}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend(
[est for est in estimators if issubclass(est[1], mixin)]
)
estimators = filtered_estimators
if type_filter:
raise ValueError(
"Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
f" {type_filter!r}."
)
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
|
Get a list of all estimators from `sklearn`.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
Parameters
----------
type_filter : {"classifier", "regressor", "cluster", "transformer"} \
or list of such str, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
Examples
--------
>>> from sklearn.utils.discovery import all_estimators
>>> estimators = all_estimators()
>>> type(estimators)
<class 'list'>
>>> type(estimators[0])
<class 'tuple'>
>>> estimators[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
>>> classifiers = all_estimators(type_filter="classifier")
>>> classifiers[:2]
[('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>),
('BaggingClassifier', <class 'sklearn.ensemble._bagging.BaggingClassifier'>)]
>>> regressors = all_estimators(type_filter="regressor")
>>> regressors[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostRegressor',
<class 'sklearn.ensemble._weight_boosting.AdaBoostRegressor'>)]
>>> both = all_estimators(type_filter=["classifier", "regressor"])
>>> both[:2]
[('ARDRegression', <class 'sklearn.linear_model._bayes.ARDRegression'>),
('AdaBoostClassifier',
<class 'sklearn.ensemble._weight_boosting.AdaBoostClassifier'>)]
|
python
|
sklearn/utils/discovery.py
| 22
|
[
"type_filter"
] | false
| 13
| 6.8
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
beforeSingletonCreation
|
protected void beforeSingletonCreation(String beanName) {
if (!this.inCreationCheckExclusions.contains(beanName) && !this.singletonsCurrentlyInCreation.add(beanName)) {
throw new BeanCurrentlyInCreationException(beanName);
}
}
|
Callback before singleton creation.
<p>The default implementation registers the singleton as currently in creation.
@param beanName the name of the singleton about to be created
@see #isSingletonCurrentlyInCreation
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 539
|
[
"beanName"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
validate_and_load_priority_weight_strategy
|
def validate_and_load_priority_weight_strategy(
priority_weight_strategy: str | PriorityWeightStrategy | None,
) -> PriorityWeightStrategy:
"""
Validate and load a priority weight strategy.
Returns the priority weight strategy if it is valid, otherwise raises an exception.
:param priority_weight_strategy: The priority weight strategy to validate and load.
:meta private:
"""
from airflow._shared.module_loading import qualname
from airflow.serialization.serialized_objects import _get_registered_priority_weight_strategy
if priority_weight_strategy is None:
return _AbsolutePriorityWeightStrategy()
if isinstance(priority_weight_strategy, str):
if priority_weight_strategy in airflow_priority_weight_strategies:
return airflow_priority_weight_strategies[priority_weight_strategy]()
priority_weight_strategy_class = priority_weight_strategy
else:
priority_weight_strategy_class = qualname(priority_weight_strategy)
loaded_priority_weight_strategy = _get_registered_priority_weight_strategy(priority_weight_strategy_class)
if loaded_priority_weight_strategy is None:
raise ValueError(f"Unknown priority strategy {priority_weight_strategy_class}")
return loaded_priority_weight_strategy()
|
Validate and load a priority weight strategy.
Returns the priority weight strategy if it is valid, otherwise raises an exception.
:param priority_weight_strategy: The priority weight strategy to validate and load.
:meta private:
|
python
|
airflow-core/src/airflow/task/priority_strategy.py
| 130
|
[
"priority_weight_strategy"
] |
PriorityWeightStrategy
| true
| 6
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
map
|
public static MappedByteBuffer map(File file) throws IOException {
checkNotNull(file);
return map(file, MapMode.READ_ONLY);
}
|
Fully maps a file read-only in to memory as per {@link
FileChannel#map(java.nio.channels.FileChannel.MapMode, long, long)}.
<p>Files are mapped from offset 0 to its length.
<p>This only works for files ≤ {@link Integer#MAX_VALUE} bytes.
@param file the file to map
@return a read-only buffer reflecting {@code file}
@throws FileNotFoundException if the {@code file} does not exist
@throws IOException if an I/O error occurs
@see FileChannel#map(MapMode, long, long)
@since 2.0
|
java
|
android/guava/src/com/google/common/io/Files.java
| 645
|
[
"file"
] |
MappedByteBuffer
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
_get_git_log_command
|
def _get_git_log_command(
folder_paths: list[Path] | None = None, from_commit: str | None = None, to_commit: str | None = None
) -> list[str]:
"""Get git command to run for the current repo from the current folder.
The current directory should always be the package folder.
:param folder_paths: list of folder paths to check for changes
:param from_commit: if present - base commit from which to start the log from
:param to_commit: if present - final commit which should be the start of the log
:return: git command to run
"""
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
elif to_commit:
raise ValueError("It makes no sense to specify to_commit without from_commit.")
folders = [folder_path.as_posix() for folder_path in folder_paths] if folder_paths else ["."]
git_cmd.extend(["--", *folders])
return git_cmd
|
Get git command to run for the current repo from the current folder.
The current directory should always be the package folder.
:param folder_paths: list of folder paths to check for changes
:param from_commit: if present - base commit from which to start the log from
:param to_commit: if present - final commit which should be the start of the log
:return: git command to run
|
python
|
dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py
| 254
|
[
"folder_paths",
"from_commit",
"to_commit"
] |
list[str]
| true
| 6
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
bigIntegerValue
|
public BigInteger bigIntegerValue() {
BigInteger bigInt = BigInteger.valueOf(value & UNSIGNED_MASK);
if (value < 0) {
bigInt = bigInt.setBit(Long.SIZE - 1);
}
return bigInt;
}
|
Returns the value of this {@code UnsignedLong} as a {@link BigInteger}.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLong.java
| 223
|
[] |
BigInteger
| true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
onEmitNode
|
function onEmitNode(hint: EmitHint, node: Node, emitCallback: (hint: EmitHint, node: Node) => void): void {
if (node.kind === SyntaxKind.SourceFile) {
const id = getOriginalNodeId(node);
currentSourceFile = node as SourceFile;
moduleInfo = moduleInfoMap[id];
exportFunction = exportFunctionsMap[id];
noSubstitution = noSubstitutionMap[id];
contextObject = contextObjectMap[id];
if (noSubstitution) {
delete noSubstitutionMap[id];
}
previousOnEmitNode(hint, node, emitCallback);
currentSourceFile = undefined!;
moduleInfo = undefined!;
exportFunction = undefined!;
contextObject = undefined!;
noSubstitution = undefined;
}
else {
previousOnEmitNode(hint, node, emitCallback);
}
}
|
Hook for node emit notifications.
@param hint A hint as to the intended usage of the node.
@param node The node to emit.
@param emitCallback A callback used to emit the node in the printer.
|
typescript
|
src/compiler/transformers/module/system.ts
| 1,769
|
[
"hint",
"node",
"emitCallback"
] | true
| 4
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
iscomplex
|
def iscomplex(x):
"""
Returns a bool array, where True if input element is complex.
What is tested is whether the input has a non-zero imaginary part, not if
the input type is complex.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray of bools
Output array.
See Also
--------
isreal
iscomplexobj : Return True if x is a complex type or an array of complex
numbers.
Examples
--------
>>> import numpy as np
>>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
array([ True, False, False, False, False, True])
"""
ax = asanyarray(x)
if issubclass(ax.dtype.type, _nx.complexfloating):
return ax.imag != 0
res = zeros(ax.shape, bool)
return res[()] # convert to scalar if needed
|
Returns a bool array, where True if input element is complex.
What is tested is whether the input has a non-zero imaginary part, not if
the input type is complex.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray of bools
Output array.
See Also
--------
isreal
iscomplexobj : Return True if x is a complex type or an array of complex
numbers.
Examples
--------
>>> import numpy as np
>>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
array([ True, False, False, False, False, True])
|
python
|
numpy/lib/_type_check_impl.py
| 176
|
[
"x"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
lastIndexOf
|
public static int lastIndexOf(byte[] array, byte target) {
return lastIndexOf(array, target, 0, array.length);
}
|
Returns the index of the last appearance of the value {@code target} in {@code array}.
@param array an array of {@code byte} values, possibly empty
@param target a primitive {@code byte} value
@return the greatest index {@code i} for which {@code array[i] == target}, or {@code -1} if no
such index exists.
|
java
|
android/guava/src/com/google/common/primitives/Bytes.java
| 143
|
[
"array",
"target"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
nop
|
@SuppressWarnings("unchecked")
static <E extends Throwable> FailableLongToIntFunction<E> nop() {
return NOP;
}
|
Gets the NOP singleton.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongToIntFunction.java
| 41
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "ReplicaInfo(" +
"size=" + size +
", offsetLag=" + offsetLag +
", isFuture=" + isFuture +
')';
}
|
Whether this replica has been created by a AlterReplicaLogDirsRequest
but not yet replaced the current replica on the broker.
@return true if this log is created by AlterReplicaLogDirsRequest and will replace the current log
of the replica at some time in the future.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java
| 63
|
[] |
String
| true
| 1
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
maybeMarkRequiredField
|
private static void maybeMarkRequiredField(String currentFieldName, List<String[]> requiredFields) {
Iterator<String[]> iter = requiredFields.iterator();
while (iter.hasNext()) {
String[] requiredFieldNames = iter.next();
for (String field : requiredFieldNames) {
if (field.equals(currentFieldName)) {
iter.remove();
break;
}
}
}
}
|
Parses a Value from the given {@link XContentParser}
@param parser the parser to build a value from
@param value the value to fill from the parser
@param context a context that is passed along to all declared field parsers
@return the parsed value
@throws IOException if an IOException occurs.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 370
|
[
"currentFieldName",
"requiredFields"
] |
void
| true
| 3
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
matches
|
private static int[] matches(final CharSequence first, final CharSequence second) {
final CharSequence max;
final CharSequence min;
if (first.length() > second.length()) {
max = first;
min = second;
} else {
max = second;
min = first;
}
final int range = Math.max(max.length() / 2 - 1, 0);
final int[] matchIndexes = ArrayFill.fill(new int[min.length()], -1);
final boolean[] matchFlags = new boolean[max.length()];
int matches = 0;
for (int mi = 0; mi < min.length(); mi++) {
final char c1 = min.charAt(mi);
for (int xi = Math.max(mi - range, 0), xn = Math.min(mi + range + 1, max.length()); xi < xn; xi++) {
if (!matchFlags[xi] && c1 == max.charAt(xi)) {
matchIndexes[mi] = xi;
matchFlags[xi] = true;
matches++;
break;
}
}
}
final char[] ms1 = new char[matches];
final char[] ms2 = new char[matches];
for (int i = 0, si = 0; i < min.length(); i++) {
if (matchIndexes[i] != -1) {
ms1[si] = min.charAt(i);
si++;
}
}
for (int i = 0, si = 0; i < max.length(); i++) {
if (matchFlags[i]) {
ms2[si] = max.charAt(i);
si++;
}
}
int transpositions = 0;
for (int mi = 0; mi < ms1.length; mi++) {
if (ms1[mi] != ms2[mi]) {
transpositions++;
}
}
int prefix = 0;
for (int mi = 0; mi < min.length(); mi++) {
if (first.charAt(mi) != second.charAt(mi)) {
break;
}
prefix++;
}
return new int[] { matches, transpositions / 2, prefix, max.length() };
}
|
Converts a String to lower case as per {@link String#toLowerCase(Locale)}.
<p>
A {@code null} input String returns {@code null}.
</p>
<pre>
StringUtils.lowerCase(null, Locale.ENGLISH) = null
StringUtils.lowerCase("", Locale.ENGLISH) = ""
StringUtils.lowerCase("aBc", Locale.ENGLISH) = "abc"
</pre>
@param str the String to lower case, may be null.
@param locale the locale that defines the case transformation rules, must not be null.
@return the lower cased String, {@code null} if null String input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,251
|
[
"first",
"second"
] | true
| 14
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
buildToString("", builder);
return builder.toString();
}
|
Create a new {@link ConfigDataEnvironmentContributor} instance where an existing
child is replaced.
@param existing the existing node that should be replaced
@param replacement the replacement node that should be used instead
@return a new {@link ConfigDataEnvironmentContributor} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 359
|
[] |
String
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
addAdvisor
|
@Override
public void addAdvisor(Advisor advisor) {
int pos = this.advisors.size();
addAdvisor(pos, advisor);
}
|
Remove a proxied interface.
<p>Does nothing if the given interface isn't proxied.
@param ifc the interface to remove from the proxy
@return {@code true} if the interface was removed; {@code false}
if the interface was not found and hence could not be removed
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 301
|
[
"advisor"
] |
void
| true
| 1
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getAndIncrement
|
public byte getAndIncrement() {
final byte last = value;
value++;
return last;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately prior to the increment operation. This method is not thread safe.
@return the value associated with the instance before it was incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableByte.java
| 258
|
[] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getDeclareParentsAdvisor
|
private @Nullable Advisor getDeclareParentsAdvisor(Field introductionField) {
DeclareParents declareParents = introductionField.getAnnotation(DeclareParents.class);
if (declareParents == null) {
// Not an introduction field
return null;
}
if (DeclareParents.class == declareParents.defaultImpl()) {
throw new IllegalStateException("'defaultImpl' attribute must be set on DeclareParents");
}
return new DeclareParentsAdvisor(
introductionField.getType(), declareParents.value(), declareParents.defaultImpl());
}
|
Build a {@link org.springframework.aop.aspectj.DeclareParentsAdvisor}
for the given introduction field.
<p>Resulting Advisors will need to be evaluated for targets.
@param introductionField the field to introspect
@return the Advisor instance, or {@code null} if not an Advisor
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/ReflectiveAspectJAdvisorFactory.java
| 184
|
[
"introductionField"
] |
Advisor
| true
| 3
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fromLittleEndianByteArray
|
public static InetAddress fromLittleEndianByteArray(byte[] addr) throws UnknownHostException {
byte[] reversed = new byte[addr.length];
for (int i = 0; i < addr.length; i++) {
reversed[i] = addr[addr.length - i - 1];
}
return InetAddress.getByAddress(reversed);
}
|
Returns an address from a <b>little-endian ordered</b> byte array (the opposite of what {@link
InetAddress#getByAddress} expects).
<p>IPv4 address byte array must be 4 bytes long and IPv6 byte array must be 16 bytes long.
@param addr the raw IP address in little-endian byte order
@return an InetAddress object created from the raw IP address
@throws UnknownHostException if IP address is of illegal length
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 1,162
|
[
"addr"
] |
InetAddress
| true
| 2
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.