function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
score_samples
|
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : ndarray of shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self)
v = validate_data(self, X, accept_sparse="csr", reset=False)
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
if sp.issparse(v):
data = -2 * v[ind] + 1
if isinstance(data, np.matrix): # v is a sparse matrix
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else: # v is a sparse array
v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
# log(expit(x)) = log(1 / (1 + exp(-x)) = -np.logaddexp(0, -x)
return -v.shape[1] * np.logaddexp(0, -(fe_ - fe))
|
Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : ndarray of shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
|
python
|
sklearn/neural_network/_rbm.py
| 344
|
[
"self",
"X"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
fullmatch
|
def fullmatch(self, pat, case: bool = True, flags: int = 0, na=lib.no_default):
"""
Determine if each string entirely matches a regular expression.
Checks if each string in the Series or Index fully matches the
specified regular expression pattern. This function is useful when the
requirement is for an entire string to conform to a pattern, such as
validating formats like phone numbers or email addresses.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE.
na : scalar, optional
Fill value for missing values. The default depends on dtype of the
array. For the ``"str"`` dtype, ``False`` is used. For object
dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
``pandas.NA`` is used.
Returns
-------
Series/Index/array of boolean values
The function returns a Series, Index, or array of boolean values,
where True indicates that the entire string matches the regular
expression pattern and False indicates that it does not.
See Also
--------
match : Similar, but also returns `True` when only a *prefix* of the string
matches the regular expression.
extract : Extract matched groups.
Examples
--------
>>> ser = pd.Series(["cat", "duck", "dove"])
>>> ser.str.fullmatch(r"d.+")
0 False
1 True
2 True
dtype: bool
"""
result = self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na, returns_string=False)
|
Determine if each string entirely matches a regular expression.
Checks if each string in the Series or Index fully matches the
specified regular expression pattern. This function is useful when the
requirement is for an entire string to conform to a pattern, such as
validating formats like phone numbers or email addresses.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE.
na : scalar, optional
Fill value for missing values. The default depends on dtype of the
array. For the ``"str"`` dtype, ``False`` is used. For object
dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
``pandas.NA`` is used.
Returns
-------
Series/Index/array of boolean values
The function returns a Series, Index, or array of boolean values,
where True indicates that the entire string matches the regular
expression pattern and False indicates that it does not.
See Also
--------
match : Similar, but also returns `True` when only a *prefix* of the string
matches the regular expression.
extract : Extract matched groups.
Examples
--------
>>> ser = pd.Series(["cat", "duck", "dove"])
>>> ser.str.fullmatch(r"d.+")
0 False
1 True
2 True
dtype: bool
|
python
|
pandas/core/strings/accessor.py
| 1,443
|
[
"self",
"pat",
"case",
"flags",
"na"
] | true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
autowireConstructor
|
protected BeanWrapper autowireConstructor(
String beanName, RootBeanDefinition mbd, Constructor<?> @Nullable [] ctors, @Nullable Object @Nullable [] explicitArgs) {
return new ConstructorResolver(this).autowireConstructor(beanName, mbd, ctors, explicitArgs);
}
|
"autowire constructor" (with constructor arguments by type) behavior.
Also applied if explicit constructor argument values are specified,
matching all remaining arguments with beans from the bean factory.
<p>This corresponds to constructor injection: In this mode, a Spring
bean factory is able to host components that expect constructor-based
dependency resolution.
@param beanName the name of the bean
@param mbd the bean definition for the bean
@param ctors the chosen candidate constructors
@param explicitArgs argument values passed in programmatically via the getBean method,
or {@code null} if none (implying the use of constructor argument values from bean definition)
@return a BeanWrapper for the new instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 1,379
|
[
"beanName",
"mbd",
"ctors",
"explicitArgs"
] |
BeanWrapper
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
clearKeyReferenceQueue
|
void clearKeyReferenceQueue() {
while (keyReferenceQueue.poll() != null) {}
}
|
Clears all entries from the key and value reference queues.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 2,427
|
[] |
void
| true
| 2
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
found
|
public ItemsBuilder found(String article) {
return found(article, article);
}
|
Indicate that one or more results were found. For example
{@code found("bean").items("x")} results in the message "found bean x".
@param article the article found
@return an {@link ItemsBuilder}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 226
|
[
"article"
] |
ItemsBuilder
| true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
throwExpectedStartObject
|
private void throwExpectedStartObject(XContentParser parser, XContentParser.Token token) {
throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] Expected START_OBJECT but was: " + token);
}
|
Parses a Value from the given {@link XContentParser}
@param parser the parser to build a value from
@param value the value to fill from the parser
@param context a context that is passed along to all declared field parsers
@return the parsed value
@throws IOException if an IOException occurs.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 330
|
[
"parser",
"token"
] |
void
| true
| 1
| 6.32
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
defaultIfBlank
|
public static <T extends CharSequence> T defaultIfBlank(final T str, final T defaultStr) {
return isBlank(str) ? defaultStr : str;
}
|
Returns either the passed in CharSequence, or if the CharSequence is {@link #isBlank(CharSequence) blank} (whitespaces, empty ({@code ""}) or
{@code null}), the value of {@code defaultStr}.
<p>
Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.defaultIfBlank(null, "NULL") = "NULL"
StringUtils.defaultIfBlank("", "NULL") = "NULL"
StringUtils.defaultIfBlank(" ", "NULL") = "NULL"
StringUtils.defaultIfBlank("bat", "NULL") = "bat"
StringUtils.defaultIfBlank("", null) = null
</pre>
@param <T> the specific kind of CharSequence.
@param str the CharSequence to check, may be null.
@param defaultStr the default CharSequence to return if {@code str} is {@link #isBlank(CharSequence) blank} (whitespaces, empty ({@code ""}) or
{@code null}); may be null.
@return the passed in CharSequence, or the default.
@see StringUtils#defaultString(String, String)
@see #isBlank(CharSequence)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,531
|
[
"str",
"defaultStr"
] |
T
| true
| 2
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getRuntimeMetadata
|
public static String getRuntimeMetadata() {
StringBuilder s = new StringBuilder();
String version;
version = kotlinVersion();
if (version != null) {
s.append(",kt=").append(version);
}
version = scalaVersion();
if (version != null) {
s.append(",sc=").append(version);
}
version = clojureVersion();
if (version != null) {
s.append(",clj=").append(version);
}
version = groovyVersion();
if (version != null) {
s.append(",gy=").append(version);
}
version = jRubyVersion();
if (version != null) {
s.append(",jrb=").append(version);
}
return s.toString();
}
|
Returns runtime information by looking up classes identifying non-Java JVM
languages and appending a key with their name and their major.minor version, if available
|
java
|
client/rest/src/main/java/org/elasticsearch/client/LanguageRuntimeVersions.java
| 33
|
[] |
String
| true
| 6
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
hash
|
@Override
public final HashCode hash() {
munch();
Java8Compatibility.flip(buffer);
if (buffer.remaining() > 0) {
processRemaining(buffer);
Java8Compatibility.position(buffer, buffer.limit());
}
return makeHash();
}
|
This is invoked for the last bytes of the input, which are not enough to fill a whole chunk.
The passed {@code ByteBuffer} is guaranteed to be non-empty.
<p>This implementation simply pads with zeros and delegates to {@link #process(ByteBuffer)}.
|
java
|
android/guava/src/com/google/common/hash/AbstractStreamingHasher.java
| 186
|
[] |
HashCode
| true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
populateDefaults
|
protected void populateDefaults(DocumentDefaultsDefinition defaults, @Nullable DocumentDefaultsDefinition parentDefaults, Element root) {
String lazyInit = root.getAttribute(DEFAULT_LAZY_INIT_ATTRIBUTE);
if (isDefaultValue(lazyInit)) {
// Potentially inherited from outer <beans> sections, otherwise falling back to false.
lazyInit = (parentDefaults != null ? parentDefaults.getLazyInit() : FALSE_VALUE);
}
defaults.setLazyInit(lazyInit);
String merge = root.getAttribute(DEFAULT_MERGE_ATTRIBUTE);
if (isDefaultValue(merge)) {
// Potentially inherited from outer <beans> sections, otherwise falling back to false.
merge = (parentDefaults != null ? parentDefaults.getMerge() : FALSE_VALUE);
}
defaults.setMerge(merge);
String autowire = root.getAttribute(DEFAULT_AUTOWIRE_ATTRIBUTE);
if (isDefaultValue(autowire)) {
// Potentially inherited from outer <beans> sections, otherwise falling back to 'no'.
autowire = (parentDefaults != null ? parentDefaults.getAutowire() : AUTOWIRE_NO_VALUE);
}
defaults.setAutowire(autowire);
if (root.hasAttribute(DEFAULT_AUTOWIRE_CANDIDATES_ATTRIBUTE)) {
defaults.setAutowireCandidates(root.getAttribute(DEFAULT_AUTOWIRE_CANDIDATES_ATTRIBUTE));
}
else if (parentDefaults != null) {
defaults.setAutowireCandidates(parentDefaults.getAutowireCandidates());
}
if (root.hasAttribute(DEFAULT_INIT_METHOD_ATTRIBUTE)) {
defaults.setInitMethod(root.getAttribute(DEFAULT_INIT_METHOD_ATTRIBUTE));
}
else if (parentDefaults != null) {
defaults.setInitMethod(parentDefaults.getInitMethod());
}
if (root.hasAttribute(DEFAULT_DESTROY_METHOD_ATTRIBUTE)) {
defaults.setDestroyMethod(root.getAttribute(DEFAULT_DESTROY_METHOD_ATTRIBUTE));
}
else if (parentDefaults != null) {
defaults.setDestroyMethod(parentDefaults.getDestroyMethod());
}
defaults.setSource(this.readerContext.extractSource(root));
}
|
Populate the given DocumentDefaultsDefinition instance with the default lazy-init,
autowire, dependency check settings, init-method, destroy-method and merge settings.
Support nested 'beans' element use cases by falling back to {@code parentDefaults}
in case the defaults are not explicitly set locally.
@param defaults the defaults to populate
@param parentDefaults the parent BeanDefinitionParserDelegate (if any) defaults to fall back to
@param root the root element of the current bean definition document (or nested beans element)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/BeanDefinitionParserDelegate.java
| 320
|
[
"defaults",
"parentDefaults",
"root"
] |
void
| true
| 13
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
loadBuiltinModule
|
function loadBuiltinModule(id) {
if (!BuiltinModule.canBeRequiredByUsers(id)) {
return;
}
/** @type {import('internal/bootstrap/realm.js').BuiltinModule} */
const mod = BuiltinModule.map.get(id);
debug('load built-in module %s', id);
// compileForPublicLoader() throws if canBeRequiredByUsers is false:
mod.compileForPublicLoader();
return mod;
}
|
Provide one of Node.js' public modules to user code.
@param {string} id - The identifier/specifier of the builtin module to load
@returns {object|undefined}
|
javascript
|
lib/internal/modules/helpers.js
| 117
|
[
"id"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
private_import_across_module
|
def private_import_across_module(file_obj: IO[str]) -> Iterable[tuple[int, str]]:
"""
Checking that a private function is not imported across modules.
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
Line number of import statement, that imports the private function.
msg : str
Explanation of the error.
"""
contents = file_obj.read()
tree = ast.parse(contents)
for node in ast.walk(tree):
if not isinstance(node, (ast.Import, ast.ImportFrom)):
continue
for module in node.names:
module_name = module.name.split(".")[-1]
if module_name in PRIVATE_IMPORTS_TO_IGNORE:
continue
if module_name.startswith("_"):
yield (node.lineno, f"Import of internal function {module_name!r}")
|
Checking that a private function is not imported across modules.
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
Line number of import statement, that imports the private function.
msg : str
Explanation of the error.
|
python
|
scripts/validate_unwanted_patterns.py
| 150
|
[
"file_obj"
] |
Iterable[tuple[int, str]]
| true
| 6
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
get_cmake_cache_variables_from_file
|
def get_cmake_cache_variables_from_file(
cmake_cache_file: IO[str],
) -> dict[str, CMakeValue]:
r"""Gets values in CMakeCache.txt into a dictionary.
Args:
cmake_cache_file: A CMakeCache.txt file object.
Returns:
dict: A ``dict`` containing the value of cached CMake variables.
"""
results = {}
for i, line in enumerate(cmake_cache_file, 1):
line = line.strip()
if not line or line.startswith(("#", "//")):
# Blank or comment line, skip
continue
# Almost any character can be part of variable name and value. As a practical matter, we assume the type must be
# valid if it were a C variable name. It should match the following kinds of strings:
#
# USE_CUDA:BOOL=ON
# "USE_CUDA":BOOL=ON
# USE_CUDA=ON
# USE_CUDA:=ON
# Intel(R) MKL-DNN_SOURCE_DIR:STATIC=/path/to/pytorch/third_party/ideep/mkl-dnn
# "OpenMP_COMPILE_RESULT_CXX_openmp:experimental":INTERNAL=FALSE
matched = re.match(
r'("?)(.+?)\1(?::\s*([a-zA-Z_-][a-zA-Z0-9_-]*)?)?\s*=\s*(.*)', line
)
if matched is None: # Illegal line
raise ValueError(f"Unexpected line {i} in {repr(cmake_cache_file)}: {line}")
_, variable, type_, value = matched.groups()
if type_ is None:
type_ = ""
if type_.upper() in ("INTERNAL", "STATIC"):
# CMake internal variable, do not touch
continue
results[variable] = convert_cmake_value_to_python_value(value, type_)
return results
|
r"""Gets values in CMakeCache.txt into a dictionary.
Args:
cmake_cache_file: A CMakeCache.txt file object.
Returns:
dict: A ``dict`` containing the value of cached CMake variables.
|
python
|
tools/setup_helpers/cmake_utils.py
| 45
|
[
"cmake_cache_file"
] |
dict[str, CMakeValue]
| true
| 7
| 7.76
|
pytorch/pytorch
| 96,034
|
google
| false
|
getConditionClasses
|
@SuppressWarnings("unchecked")
private List<String[]> getConditionClasses(AnnotatedTypeMetadata metadata) {
MultiValueMap<String, @Nullable Object> attributes = metadata.getAllAnnotationAttributes(Conditional.class.getName(), true);
Object values = (attributes != null ? attributes.get("value") : null);
return (List<String[]>) (values != null ? values : Collections.emptyList());
}
|
Return the {@linkplain Condition conditions} that should be applied when
considering the given annotated type.
@param metadata the metadata of the annotated type
@return the ordered list of conditions for that type
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConditionEvaluator.java
| 130
|
[
"metadata"
] | true
| 3
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("RequestOptions{");
boolean comma = false;
if (headers.size() > 0) {
b.append("headers=");
comma = true;
for (int h = 0; h < headers.size(); h++) {
if (h != 0) {
b.append(',');
}
b.append(headers.get(h).toString());
}
}
if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
if (comma) b.append(", ");
comma = true;
b.append("consumerFactory=").append(httpAsyncResponseConsumerFactory);
}
if (warningsHandler != null) {
if (comma) b.append(", ");
comma = true;
b.append("warningsHandler=").append(warningsHandler);
}
return b.append('}').toString();
}
|
get RequestConfig, which can set socketTimeout, connectTimeout
and so on by request
@return RequestConfig
|
java
|
client/rest/src/main/java/org/elasticsearch/client/RequestOptions.java
| 130
|
[] |
String
| true
| 8
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
extractPotentialVariables
|
private Map<String, String> extractPotentialVariables(Map<?, ?> configMap) {
// Variables are tuples of the form "${providerName:[path:]key}". From the configMap we extract the subset of configs with string
// values as potential variables.
Map<String, String> configMapAsString = new HashMap<>();
for (Map.Entry<?, ?> entry : configMap.entrySet()) {
if (entry.getValue() instanceof String)
configMapAsString.put((String) entry.getKey(), (String) entry.getValue());
}
return configMapAsString;
}
|
Get a list of configured instances of the given class specified by the given configuration key. The configuration
may specify either null or an empty string to indicate no configured instances. In both cases, this method
returns an empty list to indicate no configured instances.
@param classNames The list of class names of the instances to create
@param t The interface the class should implement
@param configOverrides Configuration overrides to use.
@return The list of configured instances
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 515
|
[
"configMap"
] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
onHttpSocketEnd
|
function onHttpSocketEnd () {
const parser = this[kParser]
if (parser.statusCode && !parser.shouldKeepAlive) {
// We treat all incoming data so far as a valid response.
parser.onMessageComplete()
return
}
util.destroy(this, new SocketError('other side closed', util.getSocketInfo(this)))
}
|
@param {import ('./client.js')} client
@param {import('net').Socket} socket
@returns
|
javascript
|
deps/undici/src/lib/dispatcher/client-h1.js
| 895
|
[] | false
| 3
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
tryReadSync
|
function tryReadSync(fd, isUserFd, buffer, pos, len) {
let threw = true;
let bytesRead;
try {
bytesRead = fs.readSync(fd, buffer, pos, len);
threw = false;
} finally {
if (threw && !isUserFd) fs.closeSync(fd);
}
return bytesRead;
}
|
Asynchronously reads the entire contents of a file.
@param {string | Buffer | URL | number} path
@param {{
encoding?: string | null;
flag?: string;
signal?: AbortSignal;
} | string} [options]
@param {(
err?: Error,
data?: string | Buffer
) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 408
|
[
"fd",
"isUserFd",
"buffer",
"pos",
"len"
] | false
| 3
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
appendWithSeparators
|
public StrBuilder appendWithSeparators(final Iterable<?> iterable, final String separator) {
if (iterable != null) {
final String sep = Objects.toString(separator, "");
final Iterator<?> it = iterable.iterator();
while (it.hasNext()) {
append(it.next());
if (it.hasNext()) {
append(sep);
}
}
}
return this;
}
|
Appends an iterable placing separators between each value, but
not before the first or after the last.
Appending a null iterable will have no effect.
Each object is appended using {@link #append(Object)}.
@param iterable the iterable to append
@param separator the separator to use, null means no separator
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,403
|
[
"iterable",
"separator"
] |
StrBuilder
| true
| 4
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
compose
|
public <S> RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter) {
final RequestFuture<S> adapted = new RequestFuture<>();
addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(T value) {
adapter.onSuccess(value, adapted);
}
@Override
public void onFailure(RuntimeException e) {
adapter.onFailure(e, adapted);
}
});
return adapted;
}
|
Convert from a request future of one type to another type
@param adapter The adapter which does the conversion
@param <S> The type of the future adapted to
@return The new future
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 201
|
[
"adapter"
] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
__init__
|
def __init__(
self,
config_module: ConfigModule,
test_model_fn_factory: FactoryType,
seed: int,
default: Optional[ConfigType] = None,
sm: SamplingMethod = SamplingMethod.TOGGLE,
test_timeout: int = 3600,
):
"""
Args:
config_module: The module containing the configs to fuzz
test_model_fn_factory: Function that returns a test model, which runs and returns True if successful, or
the outputs if they should be compared with eager
seed: Randomness seed.
default: Default values for the config. Inductor has preset based on know failures.
sm: How type value samples are generated, default TOGGLE.
test_timeout: max time a test can take.
"""
self.seed = seed
self.test_timeout = test_timeout
self.detailed_results: dict[ComboType, dict[str, Any]] = {}
self.config_module = config_module
self.test_model_fn_factory = test_model_fn_factory
self.fields: dict[str, _ConfigEntry] = self.config_module._config
self.sample = SamplingMethod.dispatch(sm)
if default is None:
if self.config_module.__name__ in MODULE_DEFAULTS:
self.default = MODULE_DEFAULTS[self.config_module.__name__]
else:
raise ValueError("No default passed to ConfigFuzzer.")
else:
self.default = default
|
Args:
config_module: The module containing the configs to fuzz
test_model_fn_factory: Function that returns a test model, which runs and returns True if successful, or
the outputs if they should be compared with eager
seed: Randomness seed.
default: Default values for the config. Inductor has preset based on know failures.
sm: How type value samples are generated, default TOGGLE.
test_timeout: max time a test can take.
|
python
|
torch/_inductor/fuzzer.py
| 587
|
[
"self",
"config_module",
"test_model_fn_factory",
"seed",
"default",
"sm",
"test_timeout"
] | true
| 5
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
markAsInitialized
|
private void markAsInitialized(LoggerContext loggerContext) {
loggerContext.setExternalContext(LoggingSystem.class.getName());
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 493
|
[
"loggerContext"
] |
void
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
throwUnchecked
|
public static <T extends Throwable> T throwUnchecked(final T throwable) {
if (isUnchecked(throwable)) {
throw asRuntimeException(throwable);
}
return throwable;
}
|
Tests whether the specified {@link Throwable} is unchecked and throws it if so.
@param <T> The Throwable type.
@param throwable the throwable to test and throw or return.
@return the given throwable.
@since 3.14.0
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
| 1,046
|
[
"throwable"
] |
T
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
load64Safely
|
static long load64Safely(byte[] input, int offset, int length) {
long result = 0;
// Due to the way we shift, we can stop iterating once we've run out of data, the rest
// of the result already being filled with zeros.
// This loop is critical to performance, so please check HashBenchmark if altering it.
int limit = min(length, 8);
for (int i = 0; i < limit; i++) {
// Shift value left while iterating logically through the array.
result |= (input[offset + i] & 0xFFL) << (i * 8);
}
return result;
}
|
Similar to load64, but allows offset + 8 > input.length, padding the result with zeroes. This
has to explicitly reverse the order of the bytes as it packs them into the result which makes
it slower than the native version.
@param input the input bytes
@param offset the offset into the array at which to start reading
@param length the number of bytes from the input to read
@return a long of a concatenated 8 bytes
|
java
|
android/guava/src/com/google/common/hash/LittleEndianByteArray.java
| 68
|
[
"input",
"offset",
"length"
] | true
| 2
| 8.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
fuzz_inputs_specs
|
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for multi_head_attention_forward.
MHA requires:
- query, key, value: (seq_len, batch, embed_dim)
- in_proj_weight: (3*embed_dim, embed_dim) for combined QKV projection
- in_proj_bias: (3*embed_dim,) optional
- out_proj_weight: (embed_dim, embed_dim)
- out_proj_bias: (embed_dim,) optional
For simplicity, we'll use the combined in_proj_weight path.
IMPORTANT: The order of optional parameters matters for codegen!
We must ensure that when we have 6 inputs, they are in the order:
query, key, value, in_proj_weight, in_proj_bias, out_proj_weight
NOT: query, key, value, in_proj_weight, out_proj_weight, out_proj_bias
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError(
"MultiHeadAttentionForwardOperator can only produce TensorSpec outputs"
)
if len(output_spec.size) < 3:
raise ValueError("MHA output must have at least 3 dimensions")
# Output shape: (seq_len, batch, embed_dim)
seq_len, batch, embed_dim = output_spec.size[:3]
# Query, key, value have the same shape as output
query_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
key_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
value_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
# in_proj_weight: (3*embed_dim, embed_dim)
in_proj_weight_spec = TensorSpec(
size=(3 * embed_dim, embed_dim),
stride=(embed_dim, 1),
dtype=output_spec.dtype,
)
# out_proj_weight: (embed_dim, embed_dim)
out_proj_weight_spec = TensorSpec(
size=(embed_dim, embed_dim),
stride=(embed_dim, 1),
dtype=output_spec.dtype,
)
# For simplicity and correctness, always generate all required tensors
# This avoids ambiguity in the codegen about which optional parameters are present
# We'll use a simplified signature: query, key, value, in_proj_weight, out_proj_weight only
specs = [
query_spec,
key_spec,
value_spec,
in_proj_weight_spec,
out_proj_weight_spec,
]
from typing import cast
return cast(list[Spec], specs)
|
Generate input specs for multi_head_attention_forward.
MHA requires:
- query, key, value: (seq_len, batch, embed_dim)
- in_proj_weight: (3*embed_dim, embed_dim) for combined QKV projection
- in_proj_bias: (3*embed_dim,) optional
- out_proj_weight: (embed_dim, embed_dim)
- out_proj_bias: (embed_dim,) optional
For simplicity, we'll use the combined in_proj_weight path.
IMPORTANT: The order of optional parameters matters for codegen!
We must ensure that when we have 6 inputs, they are in the order:
query, key, value, in_proj_weight, in_proj_bias, out_proj_weight
NOT: query, key, value, in_proj_weight, out_proj_weight, out_proj_bias
|
python
|
tools/experimental/torchfuzz/operators/nn_functional.py
| 1,056
|
[
"self",
"output_spec"
] |
list[Spec]
| true
| 3
| 6.64
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
handleFilePaths
|
function handleFilePaths({ result, currentPath, context }) {
for (let i = 0; i < result.length; i++) {
const resultPath = pathModule.join(currentPath, result[i]);
const relativeResultPath = pathModule.relative(context.basePath, resultPath);
const stat = binding.internalModuleStat(resultPath);
ArrayPrototypePush(context.readdirResults, relativeResultPath);
if (stat === 1) {
ArrayPrototypePush(context.pathsQueue, resultPath);
}
}
}
|
Synchronously creates a directory.
@param {string | Buffer | URL} path
@param {{
recursive?: boolean;
mode?: string | number;
} | number} [options]
@returns {string | void}
|
javascript
|
lib/fs.js
| 1,420
|
[] | false
| 3
| 7.12
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
declareField
|
public void declareField(Parser<Value, Context> p, ParseField parseField, ValueType type) {
if (parseField == null) {
throw new IllegalArgumentException("[parseField] is required");
}
if (type == null) {
throw new IllegalArgumentException("[type] is required");
}
FieldParser fieldParser = new FieldParser(p, type.supportedTokens(), parseField, type);
for (String fieldValue : parseField.getAllNamesIncludedDeprecated()) {
if (RestApiVersion.minimumSupported().matches(parseField.getForRestApiVersion())) {
Map<String, FieldParser> nameToParserMap = fieldParserMap.computeIfAbsent(
RestApiVersion.minimumSupported(),
(v) -> new HashMap<>()
);
FieldParser previousValue = nameToParserMap.putIfAbsent(fieldValue, fieldParser);
if (previousValue != null) {
throw new IllegalArgumentException("Parser already registered for name=[" + fieldValue + "]. " + previousValue);
}
}
if (RestApiVersion.current().matches(parseField.getForRestApiVersion())) {
Map<String, FieldParser> nameToParserMap = fieldParserMap.computeIfAbsent(RestApiVersion.current(), (v) -> new HashMap<>());
FieldParser previousValue = nameToParserMap.putIfAbsent(fieldValue, fieldParser);
if (previousValue != null) {
throw new IllegalArgumentException("Parser already registered for name=[" + fieldValue + "]. " + previousValue);
}
}
}
}
|
Declares a field on this parser, registering a {@code FieldParser} for every
(possibly deprecated) name of the given {@link ParseField}, once per REST API
version that the field supports.
@param p the parser invoked when the field is encountered
@param parseField the field being declared; must not be null
@param type the value type of the field; must not be null
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
| 396
|
[
"p",
"parseField",
"type"
] |
void
| true
| 7
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
partition
|
/**
 * Returns the partition this record is destined for, or {@code null} when no
 * partition was specified.
 */
public Integer partition() {
    return this.partition;
}
|
@return The partition to which the record will be sent (or null if no partition was specified)
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/ProducerRecord.java
| 184
|
[] |
Integer
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
findTraceDuration
|
/**
 * Computes the duration of the whole trace (latest span end minus earliest
 * span start), since the response data does not carry it directly.
 */
function findTraceDuration(view: DataFrameView<TraceRow>): number {
  let earliestStart = Infinity;
  let latestEnd = 0;
  for (let index = 0; index < view.length; index++) {
    const row = view.get(index);
    const end = row.startTime + row.duration;
    if (row.startTime < earliestStart) {
      earliestStart = row.startTime;
    }
    if (end > latestEnd) {
      latestEnd = end;
    }
  }
  return latestEnd - earliestStart;
}
|
Get the duration of the whole trace as it isn't a part of the response data.
Note: Seems like this should be the same as just longest span, but this is probably safer.
|
typescript
|
packages/grafana-o11y-ds-frontend/src/createNodeGraphFrames.ts
| 197
|
[
"view"
] | true
| 4
| 6.88
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
evalModuleEntryPoint
|
/**
 * Evaluate an ESM entry point and return the promise that gets fulfilled after
 * it finishes evaluation. Printing the result is not supported for ESM.
 * @param {string} source Source code of the ESM entry point.
 * @param {boolean} print Whether the result should be printed (must be falsy).
 * @returns {Promise}
 */
function evalModuleEntryPoint(source, print) {
  if (print) {
    // ESM evaluation has no synchronous completion value to print.
    throw new ERR_EVAL_ESM_CANNOT_PRINT();
  }
  RegExpPrototypeExec(/^/, ''); // Necessary to reset RegExp statics before user code runs.
  return require('internal/modules/run_main').runEntryPointWithESMLoader(
    (loader) => loader.eval(source, getEvalModuleUrl(), true),
  );
}
|
Evaluate an ESM entry point and return the promise that gets fulfilled after
it finishes evaluation.
@param {string} source Source code the ESM
@param {boolean} print Whether the result should be printed.
@returns {Promise}
|
javascript
|
lib/internal/process/execution.js
| 71
|
[
"source",
"print"
] | false
| 2
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
parseLocationDouble
|
/**
 * Leniently parses a string into a {@link Double}, returning {@code null} for
 * null/blank input or for input that does not parse as a double.
 */
static Double parseLocationDouble(final String latlon) {
    // Guard clause: reject null or blank input outright.
    if (latlon == null || Strings.hasText(latlon) == false) {
        return null;
    }
    try {
        return Double.parseDouble(latlon.trim());
    } catch (NumberFormatException e) {
        logger.trace("Unable to parse non-compliant location string [{}]", latlon);
        return null;
    }
}
|
Lax-ly parses a string that contains a double into a Double (or null, if such parsing isn't possible).
@param latlon a potentially empty (or null) string that is expected to contain a parsable double
@return the parsed double
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IpinfoIpDataLookups.java
| 174
|
[
"latlon"
] |
Double
| true
| 4
| 8.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
loadSource
|
/**
 * Get the source code of a module, using cached values when present. Used for
 * TypeScript, JavaScript and JSON loading. After this returns, mod[kFormat]
 * and mod[kModuleSource] are set (mod[kURL] too when load hooks ran).
 * @param {Module} mod Module instance whose source is potentially already cached.
 * @param {string} filename Absolute path to the file of the module.
 * @param {string|undefined} formatFromNode Format determined by Node's own resolution.
 * @returns {{source: string, format?: string}}
 */
function loadSource(mod, filename, formatFromNode) {
  if (mod[kFormat] === undefined) {
    mod[kFormat] = formatFromNode;
  }
  // If the module was loaded before, just return.
  if (mod[kModuleSource] !== undefined) {
    return { source: mod[kModuleSource], format: mod[kFormat] };
  }
  // Fast path: no hooks, just load it and return.
  if (!loadHooks.length) {
    const source = defaultLoadImpl(filename, formatFromNode);
    return { source, format: formatFromNode };
  }
  // Hooks are keyed by URL; compute it lazily only when hooks must run.
  if (mod[kURL] === undefined) {
    mod[kURL] = convertCJSFilenameToURL(filename);
  }
  const defaultLoad = getDefaultLoad(mod[kURL], filename);
  const loadResult = loadWithHooks(mod[kURL], mod[kFormat], /* importAttributes */ undefined,
                                   getCjsConditionsArray(), defaultLoad, validateLoadStrict);
  // Reset the module properties with load hook results.
  if (loadResult.format !== undefined) {
    mod[kFormat] = loadResult.format;
  }
  mod[kModuleSource] = loadResult.source;
  return { source: mod[kModuleSource], format: mod[kFormat] };
}
|
Get the source code of a module, using cached ones if it's cached. This is used
for TypeScript, JavaScript and JSON loading.
After this returns, mod[kFormat], mod[kModuleSource] and mod[kURL] will be set.
@param {Module} mod Module instance whose source is potentially already cached.
@param {string} filename Absolute path to the file of the module.
@returns {{source: string, format?: string}}
|
javascript
|
lib/internal/modules/cjs/loader.js
| 1,774
|
[
"mod",
"filename",
"formatFromNode"
] | false
| 6
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
encode
|
def encode(self, obj):
    '''Encodes a given object to an ARFF file.

    :param obj: the object containing the ARFF information.
    :return: the ARFF file as an string.
    '''
    # str.join accepts any iterable, so stream the encoded rows directly
    # instead of materializing an intermediate list first.
    return '\n'.join(self.iter_encode(obj))
|
Encodes a given object to an ARFF file.
:param obj: the object containing the ARFF information.
:return: the ARFF file as an string.
|
python
|
sklearn/externals/_arff.py
| 971
|
[
"self",
"obj"
] | false
| 1
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
sphinx
| false
|
|
delete_nodegroup
|
def delete_nodegroup(self, clusterName: str, nodegroupName: str) -> dict:
    """
    Delete an Amazon EKS managed node group from a specified cluster.

    .. seealso::
        - :external+boto3:py:meth:`EKS.Client.delete_nodegroup`

    :param clusterName: The name of the Amazon EKS Cluster that is associated with your nodegroup.
    :param nodegroupName: The name of the nodegroup to delete.
    :return: Returns descriptive information about the deleted EKS Managed Nodegroup.
    """
    response = self.conn.delete_nodegroup(clusterName=clusterName, nodegroupName=nodegroupName)
    # Log the names echoed back by the API rather than the inputs.
    nodegroup = response.get("nodegroup")
    self.log.info(
        "Deleted Amazon EKS managed node group named %s from Amazon EKS cluster %s.",
        nodegroup.get("nodegroupName"),
        nodegroup.get("clusterName"),
    )
    return response
|
Delete an Amazon EKS managed node group from a specified cluster.
.. seealso::
- :external+boto3:py:meth:`EKS.Client.delete_nodegroup`
:param clusterName: The name of the Amazon EKS Cluster that is associated with your nodegroup.
:param nodegroupName: The name of the nodegroup to delete.
:return: Returns descriptive information about the deleted EKS Managed Nodegroup.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
| 265
|
[
"self",
"clusterName",
"nodegroupName"
] |
dict
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
of
|
/**
 * Streams the given collection, treating {@code null} as empty.
 *
 * @param <E>        the type of elements in the collection.
 * @param collection the collection to stream, or {@code null}.
 * @return {@link Collection#stream()}, or {@link Stream#empty()} for {@code null}.
 */
public static <E> Stream<E> of(final Collection<E> collection) {
    if (collection == null) {
        return Stream.empty();
    }
    return collection.stream();
}
|
Delegates to {@link Collection#stream()} or returns {@link Stream#empty()} if the collection is null.
@param <E> the type of elements in the collection.
@param collection the collection to stream or null.
@return {@link Collection#stream()} or {@link Stream#empty()} if the collection is null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 675
|
[
"collection"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
startsWithAny
|
/**
 * Tests whether the sequence starts with any of the provided case-sensitive
 * prefixes. Deprecated pass-through: delegates to {@code Strings.CS.startsWithAny}.
 */
@Deprecated
public static boolean startsWithAny(final CharSequence sequence, final CharSequence... searchStrings) {
    return Strings.CS.startsWithAny(sequence, searchStrings);
}
|
Tests if a CharSequence starts with any of the provided case-sensitive prefixes.
<pre>
StringUtils.startsWithAny(null, null) = false
StringUtils.startsWithAny(null, new String[] {"abc"}) = false
StringUtils.startsWithAny("abcxyz", null) = false
StringUtils.startsWithAny("abcxyz", new String[] {""}) = true
StringUtils.startsWithAny("abcxyz", new String[] {"abc"}) = true
StringUtils.startsWithAny("abcxyz", new String[] {null, "xyz", "abc"}) = true
StringUtils.startsWithAny("abcxyz", null, "xyz", "ABCX") = false
StringUtils.startsWithAny("ABCXYZ", null, "xyz", "abc") = false
</pre>
@param sequence the CharSequence to check, may be null.
@param searchStrings the case-sensitive CharSequence prefixes, may be empty or contain {@code null}.
@return {@code true} if the input {@code sequence} is {@code null} AND no {@code searchStrings} are provided, or the input {@code sequence} begins with
any of the provided case-sensitive {@code searchStrings}.
@see StringUtils#startsWith(CharSequence, CharSequence)
@since 2.5
@since 3.0 Changed signature from startsWithAny(String, String[]) to startsWithAny(CharSequence, CharSequence...)
@deprecated Use {@link Strings#startsWithAny(CharSequence, CharSequence...) Strings.CS.startsWithAny(CharSequence, CharSequence...)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,742
|
[
"sequence"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
run
|
/**
 * Runs all benchmark suites. Also resolves the Firebug panel element (when a
 * 'FirebugUI' iframe exists) into the module-level `fbPanel` so output can be
 * read from it.
 */
function run() {
  // Chained assignment: locate the 'FirebugUI' element, descend into its
  // content window/document, then grab 'fbPanel1'. Any missing link in the
  // chain short-circuits and leaves fbPanel falsy.
  fbPanel = (fbPanel = root.document && document.getElementById('FirebugUI')) &&
    (fbPanel = (fbPanel = fbPanel.contentWindow || fbPanel.contentDocument).document || fbPanel) &&
    fbPanel.getElementById('fbPanel1');
  log('\nSit back and relax, this may take a while.');
  // Kick off the first suite; subsequent suites are chained asynchronously.
  suites[0].run({ 'async': true });
}
|
Runs all benchmark suites.
@private (@public in the browser)
|
javascript
|
perf/perf.js
| 194
|
[] | false
| 6
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
match
|
/**
 * Creates a 'match' outcome carrying the given explanatory message.
 */
public static ConditionOutcome match(String message) {
    return new ConditionOutcome(true, message);
}
|
Create a new {@link ConditionOutcome} instance for 'match'. For more consistent
messages consider using {@link #match(ConditionMessage)}.
@param message the message
@return the {@link ConditionOutcome}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionOutcome.java
| 72
|
[
"message"
] |
ConditionOutcome
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
notBlank
|
/**
 * Validates that the character sequence is not blank, delegating to the
 * message-taking overload with the default exception message.
 */
public static <T extends CharSequence> T notBlank(final T chars) {
    return notBlank(chars, DEFAULT_NOT_BLANK_EX_MESSAGE);
}
|
<p>Validates that the specified argument character sequence is
neither {@code null}, a length of zero (no characters), empty
nor whitespace; otherwise throwing an exception.
<pre>Validate.notBlank(myString);</pre>
<p>The message in the exception is "The validated character
sequence is blank".
@param <T> the character sequence type.
@param chars the character sequence to check, validated not null by this method.
@return the validated character sequence (never {@code null} method for chaining).
@throws NullPointerException if the character sequence is {@code null}.
@throws IllegalArgumentException if the character sequence is blank.
@see #notBlank(CharSequence, String, Object...)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 780
|
[
"chars"
] |
T
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getRawJSDocModifierFlagsNoCache
|
/**
 * Collects modifier flags contributed by JSDoc tags on `node` (access tags only
 * in JS files; `@deprecated` in any file). Parameters and parentless nodes get
 * no flags.
 */
function getRawJSDocModifierFlagsNoCache(node: Node): ModifierFlags {
    // Guard clause: only parented, non-parameter nodes carry JSDoc modifiers.
    if (!node.parent || isParameter(node)) {
        return ModifierFlags.None;
    }
    let flags = ModifierFlags.None;
    if (isInJSFile(node)) {
        if (getJSDocPublicTagNoCache(node)) flags |= ModifierFlags.JSDocPublic;
        if (getJSDocPrivateTagNoCache(node)) flags |= ModifierFlags.JSDocPrivate;
        if (getJSDocProtectedTagNoCache(node)) flags |= ModifierFlags.JSDocProtected;
        if (getJSDocReadonlyTagNoCache(node)) flags |= ModifierFlags.JSDocReadonly;
        if (getJSDocOverrideTagNoCache(node)) flags |= ModifierFlags.JSDocOverride;
    }
    if (getJSDocDeprecatedTagNoCache(node)) flags |= ModifierFlags.Deprecated;
    return flags;
}
|
Gets the ModifierFlags for syntactic modifiers on the provided node. The modifiers will be cached on the node to improve performance.
NOTE: This function does not use `parent` pointers and will not include modifiers from JSDoc.
@internal
|
typescript
|
src/compiler/utilities.ts
| 7,249
|
[
"node"
] | true
| 10
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isInitialized
|
/**
 * Tests whether this instance is initialized. A failed or cancelled
 * initialization is cached and never reported as initialized.
 * Fix: restore the thread's interrupt status when {@link InterruptedException}
 * is caught instead of silently swallowing it.
 *
 * @return true if initialization completed successfully, otherwise false.
 */
@Override
public synchronized boolean isInitialized() {
    if (future == null || !future.isDone()) {
        return false;
    }
    try {
        // The future is done, so get() returns immediately; an exception here
        // means initialization failed or was cancelled.
        future.get();
        return true;
    } catch (CancellationException | ExecutionException e) {
        return false;
    } catch (InterruptedException e) {
        // Preserve the interrupt for callers further up the stack.
        Thread.currentThread().interrupt();
        return false;
    }
}
|
Tests whether this instance is initialized. Once initialized, always returns true.
If initialization failed then the failure will be cached and this will never return
true.
@return true if initialization completed successfully, otherwise false.
@since 3.14.0
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/BackgroundInitializer.java
| 339
|
[] | true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getWriteIndexName
|
/**
 * Resolves the name of the data stream's write index — the failure-store write
 * index when {@code failureStore} is true, otherwise the regular one. Returns
 * {@code null} for a null data stream or an absent failure write index.
 */
@Nullable
private String getWriteIndexName(DataStream dataStream, boolean failureStore) {
    if (dataStream == null) {
        return null;
    }
    if (failureStore == false) {
        return dataStream.getWriteIndex().getName();
    }
    // The failure store may not have a write index yet.
    return dataStream.getWriteFailureIndex() == null ? null : dataStream.getWriteFailureIndex().getName();
}
|
Resolves the name of the data stream's write index, or of its failure-store
write index when {@code failureStore} is true.
@param dataStream the data stream, may be null
@param failureStore whether to resolve the failure-store write index instead of the regular one
@return the write index name, or null when the data stream (or the failure write index) is absent
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,135
|
[
"dataStream",
"failureStore"
] |
String
| true
| 4
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
validateAll
|
/**
 * Validates the given configuration values against this definition.
 * Every defined key gets a {@link ConfigValue}; dependents referenced but never
 * defined get an invisible error entry before delegating to {@code validate}.
 *
 * @param props the current configuration values
 * @return map from config name to its (possibly updated) {@link ConfigValue}
 */
public Map<String, ConfigValue> validateAll(Map<String, String> props) {
    final Map<String, ConfigValue> configValues = new HashMap<>();
    for (String configName : configKeys.keySet()) {
        configValues.put(configName, new ConfigValue(configName));
    }
    for (String undefinedKey : undefinedDependentConfigs()) {
        ConfigValue undefinedValue = new ConfigValue(undefinedKey);
        undefinedValue.addErrorMessage(undefinedKey + " is referred in the dependents, but not defined.");
        undefinedValue.visible(false);
        configValues.put(undefinedKey, undefinedValue);
    }
    return validate(parseForValidate(props, configValues), configValues);
}
|
Validate the current configuration values with the configuration definition.
@param props the current configuration values
@return List of Config, each Config contains the updated configuration information given
the current configuration values.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 566
|
[
"props"
] | true
| 1
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
negate
|
/**
 * Returns a predicate that yields the logical negation of this predicate for
 * the same pair of inputs.
 */
default FailableBiPredicate<T, U, E> negate() {
    return (t, u) -> !test(t, u);
}
|
Returns a predicate that negates this predicate.
@return a predicate that negates this predicate.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableBiPredicate.java
| 85
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
quantile_compat
|
def quantile_compat(
    values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str
) -> ArrayLike:
    """
    Compute the quantiles of the given values for each quantile in `qs`.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
    qs : np.ndarray[float64]
    interpolation : str

    Returns
    -------
    np.ndarray or ExtensionArray
    """
    # ExtensionArrays implement their own quantile logic; delegate directly.
    if not isinstance(values, np.ndarray):
        return values._quantile(qs, interpolation)
    fill_value = na_value_for_dtype(values.dtype, compat=False)
    mask = isna(values)
    return quantile_with_mask(values, mask, fill_value, qs, interpolation)
|
Compute the quantiles of the given values for each quantile in `qs`.
Parameters
----------
values : np.ndarray or ExtensionArray
qs : np.ndarray[float64]
interpolation : str
Returns
-------
np.ndarray or ExtensionArray
|
python
|
pandas/core/array_algos/quantile.py
| 20
|
[
"values",
"qs",
"interpolation"
] |
ArrayLike
| true
| 3
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
allOf
|
/**
 * Returns a new KafkaFuture that completes when all of the given futures have
 * completed. If any future fails, the returned future fails with that cause;
 * with multiple failures the propagated cause is arbitrary.
 */
public static KafkaFuture<Void> allOf(KafkaFuture<?>... futures) {
    KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
    CompletableFuture.allOf(Arrays.stream(futures)
        .map(kafkaFuture -> {
            // Safe since KafkaFuture's only subclass is KafkaFuture for which toCompletionStage()
            // always return a CF.
            return (CompletableFuture<?>) kafkaFuture.toCompletionStage();
        })
        .toArray(CompletableFuture[]::new)).whenComplete((value, ex) -> {
            if (ex == null) {
                result.complete(value);
            } else {
                // Have to unwrap the CompletionException which allOf() introduced
                result.completeExceptionally(ex.getCause());
            }
        });
    return result;
}
|
Returns a new KafkaFuture that is completed when all the given futures have completed. If
any future throws an exception, the returned future returns it. If multiple futures throw
an exception, which one gets returned is arbitrarily chosen.
|
java
|
clients/src/main/java/org/apache/kafka/common/KafkaFuture.java
| 72
|
[] | true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
addCodesToBuffer
|
/**
 * Appends up to three packed ASCII codes to the module-level code buffer.
 * `codes` packs bytes as OptionalThirdCode << 16 | OptionalSecondCode << 8 | Code;
 * a NUL value is skipped entirely.
 */
function addCodesToBuffer(codes: number): void {
	// NUL is ignored, this is used for archaic characters to avoid using a Map
	// for the data
	if (codes === AsciiCode.NUL) {
		return;
	}
	codeBuffer[codeBufferLength++] = codes & 0xFF;
	const secondByte = codes >> 8;
	if (secondByte) {
		codeBuffer[codeBufferLength++] = secondByte & 0xFF;
	}
	const thirdByte = codes >> 16;
	if (thirdByte) {
		codeBuffer[codeBufferLength++] = thirdByte & 0xFF;
	}
}
|
Appends the packed ASCII code(s) to the shared code buffer, advancing the
buffer length by one byte per non-zero code. A NUL value is ignored entirely.
@param codes Packed codes in the format OptionalThirdCode << 16 | OptionalSecondCode << 8 | Code
|
typescript
|
src/vs/base/common/naturalLanguage/korean.ts
| 101
|
[
"codes"
] | true
| 4
| 7.2
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
soarDistance
|
/**
 * Calculates the SOAR distance for a vector against a centroid, validating that
 * all three vectors share the same dimensionality before delegating to the
 * platform-specific implementation.
 *
 * @param v1 the vector
 * @param centroid the centroid
 * @param originalResidual the residual with the actually nearest centroid
 * @param soarLambda the lambda parameter
 * @param rnorm distance to the nearest centroid
 * @return the soar distance
 */
public static float soarDistance(float[] v1, float[] centroid, float[] originalResidual, float soarLambda, float rnorm) {
    checkDimensions(v1.length, centroid.length);
    checkDimensions(originalResidual.length, v1.length);
    return IMPL.soarDistance(v1, centroid, originalResidual, soarLambda, rnorm);
}

/** Rejects mismatched vector lengths with a descriptive message. */
private static void checkDimensions(final int left, final int right) {
    if (left != right) {
        throw new IllegalArgumentException("vector dimensions differ: " + left + "!=" + right);
    }
}
|
calculates the soar distance for a vector and a centroid
@param v1 the vector
@param centroid the centroid
@param originalResidual the residual with the actually nearest centroid
@param soarLambda the lambda parameter
@param rnorm distance to the nearest centroid
@return the soar distance
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
| 277
|
[
"v1",
"centroid",
"originalResidual",
"soarLambda",
"rnorm"
] | true
| 3
| 7.12
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
toBooleanDefaultIfNull
|
/**
 * Converts a {@link Boolean} to a primitive boolean, substituting
 * {@code valueIfNull} for {@code null}.
 *
 * @param bool        the Boolean object to convert to primitive.
 * @param valueIfNull the value to return when {@code bool} is {@code null}.
 * @return {@code true} or {@code false}.
 */
public static boolean toBooleanDefaultIfNull(final Boolean bool, final boolean valueIfNull) {
    return bool != null ? bool.booleanValue() : valueIfNull;
}
|
Converts a Boolean to a boolean handling {@code null}.
<pre>
BooleanUtils.toBooleanDefaultIfNull(Boolean.TRUE, false) = true
BooleanUtils.toBooleanDefaultIfNull(Boolean.TRUE, true) = true
BooleanUtils.toBooleanDefaultIfNull(Boolean.FALSE, true) = false
BooleanUtils.toBooleanDefaultIfNull(Boolean.FALSE, false) = false
BooleanUtils.toBooleanDefaultIfNull(null, true) = true
BooleanUtils.toBooleanDefaultIfNull(null, false) = false
</pre>
@param bool the boolean object to convert to primitive
@param valueIfNull the boolean value to return if the parameter {@code bool} is {@code null}
@return {@code true} or {@code false}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 562
|
[
"bool",
"valueIfNull"
] | true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkBigIntSuffix
|
/**
 * Finishes scanning a numeric literal: when the next character is `n` the
 * literal is a BigInt (binary/octal forms are normalized to base 10 via
 * parsePseudoBigInt); otherwise `tokenValue` is converted to its numeric
 * string form and a NumericLiteral is produced. Mutates the scanner's
 * `tokenValue` and `pos` state.
 */
function checkBigIntSuffix(): SyntaxKind {
    if (charCodeUnchecked(pos) === CharacterCodes.n) {
        tokenValue += "n";
        // Use base 10 instead of base 2 or base 8 for shorter literals
        if (tokenFlags & TokenFlags.BinaryOrOctalSpecifier) {
            tokenValue = parsePseudoBigInt(tokenValue) + "n";
        }
        pos++;
        return SyntaxKind.BigIntLiteral;
    }
    else { // not a bigint, so can convert to number in simplified form
        // Number() may not support 0b or 0o, so use parseInt() instead
        const numericValue = tokenFlags & TokenFlags.BinarySpecifier
            ? parseInt(tokenValue.slice(2), 2) // skip "0b"
            : tokenFlags & TokenFlags.OctalSpecifier
                ? parseInt(tokenValue.slice(2), 8) // skip "0o"
                : +tokenValue;
        tokenValue = "" + numericValue;
        return SyntaxKind.NumericLiteral;
    }
}
|
Checks for a trailing BigInt suffix (`n`) on the numeric literal just scanned,
updating 'tokenValue' and returning BigIntLiteral when present, NumericLiteral otherwise.
|
typescript
|
src/compiler/scanner.ts
| 1,869
|
[] | true
| 6
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getBoolean
|
/**
 * Returns the value mapped by {@code name}, coerced to a boolean.
 *
 * @param name the name of the property
 * @return the boolean value
 * @throws JSONException if the mapping doesn't exist or cannot be coerced.
 */
public boolean getBoolean(String name) throws JSONException {
    final Object value = get(name);
    final Boolean coerced = JSON.toBoolean(value);
    if (coerced == null) {
        throw JSON.typeMismatch(name, value, "boolean");
    }
    return coerced;
}
|
Returns the value mapped by {@code name} if it exists and is a boolean or can be
coerced to a boolean.
@param name the name of the property
@return the value
@throws JSONException if the mapping doesn't exist or cannot be coerced to a
boolean.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 395
|
[
"name"
] | true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isInitialized
|
/**
 * Tests whether every child {@code BackgroundInitializer} is initialized.
 * An initializer with no children is never considered initialized.
 */
@Override
public boolean isInitialized() {
    return !childInitializers.isEmpty()
            && childInitializers.values().stream().allMatch(BackgroundInitializer::isInitialized);
}
|
Tests whether this all child {@code BackgroundInitializer} objects are initialized. Once initialized, always returns true.
@return Whether all child {@code BackgroundInitializer} objects instance are initialized. Once initialized, always returns true. If there are no child
{@code BackgroundInitializer} objects return false.
@since 3.14.0
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/MultiBackgroundInitializer.java
| 355
|
[] | true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
merge_commands
|
def merge_commands(
    base_commands: list[CLICommand], commands_will_be_merged: list[CLICommand]
) -> list[CLICommand]:
    """
    Merge group commands with existing commands which extends base_commands with will_be_merged commands.

    Args:
        base_commands: List of base commands to be extended.
        commands_will_be_merged: List of group commands to be merged with base_commands.

    Returns:
        List of merged commands.
    """
    # Idiom fixes: membership via the dict itself (not .keys()), a set instead
    # of an O(n) list for merged-name tracking, and no redundant list() copies.
    merge_command_map: dict[str, CLICommand] = {}
    new_commands: list[CLICommand] = []
    for command in commands_will_be_merged:
        if isinstance(command, ActionCommand):
            # Incoming action commands are carried over as-is.
            new_commands.append(command)
        if isinstance(command, GroupCommand):
            merge_command_map[command.name] = command
    merged_names: set[str] = set()
    # Common commands: extend or replace base commands that share a name.
    for command in base_commands:
        if command.name in merge_command_map:
            merged_command = merge_command_map[command.name]
            if isinstance(command, GroupCommand):
                # Merge common group command with existing group command
                combined_subcommands = list(command.subcommands)
                combined_subcommands.extend(merged_command.subcommands)
                new_commands.append(
                    GroupCommand(
                        name=command.name,
                        help=command.help,
                        subcommands=combined_subcommands,
                        api_operation=merged_command.api_operation,
                        description=merged_command.description,
                        epilog=command.epilog,
                    )
                )
            elif isinstance(command, ActionCommand):
                # A merged group command replaces a base action command of the same name.
                new_commands.append(merged_command)
            merged_names.add(command.name)
        else:
            new_commands.append(command)
    # Discrete commands: group commands that did not match any base command.
    new_commands.extend(
        merged for merged in merge_command_map.values() if merged.name not in merged_names
    )
    return new_commands
|
Merge group commands with existing commands which extends base_commands with will_be_merged commands.
Args:
base_commands: List of base commands to be extended.
commands_will_be_merged: List of group commands to be merged with base_commands.
Returns:
List of merged commands.
|
python
|
airflow-ctl/src/airflowctl/ctl/cli_config.py
| 721
|
[
"base_commands",
"commands_will_be_merged"
] |
list[CLICommand]
| true
| 9
| 7.84
|
apache/airflow
| 43,597
|
google
| false
|
define
|
/**
 * Defines a new configuration with no special validation logic, delegating to
 * the overload that accepts a {@code Validator} with a {@code null} validator.
 *
 * @param name The name of the config parameter
 * @param type The type of the config
 * @param defaultValue The default value to use if this config isn't present
 * @param importance The importance of this config
 * @param documentation The documentation string for the config
 * @return This ConfigDef so you can chain calls
 */
public ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation) {
    return define(name, type, defaultValue, null, importance, documentation);
}
|
Define a new configuration with no special validation logic
@param name The name of the config parameter
@param type The type of the config
@param defaultValue The default value to use if this config isn't present
@param importance The importance of this config: is this something you will likely need to change.
@param documentation The documentation string for the config
@return This ConfigDef so you can chain calls
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 414
|
[
"name",
"type",
"defaultValue",
"importance",
"documentation"
] |
ConfigDef
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
_trigger_dag
|
def _trigger_dag(
    dag_id: str,
    dag_bag: DBDagBag,
    *,
    triggered_by: DagRunTriggeredByType,
    triggering_user_name: str | None = None,
    run_after: datetime | None = None,
    run_id: str | None = None,
    conf: dict | str | None = None,
    logical_date: datetime | None = None,
    replace_microseconds: bool = True,
    session: Session = NEW_SESSION,
) -> DagRun | None:
    """
    Trigger a manual DAG run.

    :param dag_id: DAG ID
    :param dag_bag: DAG Bag model
    :param triggered_by: the entity which triggers the dag_run
    :param triggering_user_name: the user name who triggers the dag_run
    :param run_after: the datetime before which dag cannot run
    :param run_id: ID of the run
    :param conf: configuration (dict, or a JSON string that will be parsed)
    :param logical_date: logical date of the run; must be timezone-aware when given
    :param replace_microseconds: whether microseconds should be zeroed
    :return: the newly created DagRun (queued)
    :raises DagNotFound: if no DAG exists for ``dag_id``
    :raises ValueError: if ``logical_date`` is naive or precedes the DAG's start_date
    :raises DagRunAlreadyExists: if a run with the resolved run_id already exists
    """
    if (dag := dag_bag.get_latest_version_of_dag(dag_id, session=session)) is None:
        raise DagNotFound(f"Dag id {dag_id} not found")
    # Default run_after to "now" (UTC) when not supplied.
    run_after = run_after or timezone.coerce_datetime(timezone.utcnow())
    coerced_logical_date: datetime | None = None
    if logical_date:
        if not timezone.is_localized(logical_date):
            raise ValueError("The logical date should be localized")
        if replace_microseconds:
            logical_date = logical_date.replace(microsecond=0)
        # Enforce the DAG's default_args start_date lower bound, when present.
        if dag.default_args and "start_date" in dag.default_args:
            min_dag_start_date = dag.default_args["start_date"]
            if min_dag_start_date and logical_date < min_dag_start_date:
                raise ValueError(
                    f"Logical date [{logical_date.isoformat()}] should be >= start_date "
                    f"[{min_dag_start_date.isoformat()}] from DAG's default_args"
                )
        coerced_logical_date = timezone.coerce_datetime(logical_date)
        data_interval: DataInterval | None = dag.timetable.infer_manual_data_interval(
            run_after=timezone.coerce_datetime(run_after)
        )
    else:
        data_interval = None
    # Without an explicit run_id, let the timetable generate a MANUAL one.
    run_id = run_id or dag.timetable.generate_run_id(
        run_type=DagRunType.MANUAL,
        run_after=timezone.coerce_datetime(run_after),
        data_interval=data_interval,
    )
    # This intentionally does not use 'session' in the current scope because it
    # may be rolled back when this function exits with an exception (due to how
    # provide_session is implemented). This would make the DagRun object in the
    # DagRunAlreadyExists expire and unusable.
    if dag_run := DagRun.find_duplicate(dag_id=dag_id, run_id=run_id):
        raise DagRunAlreadyExists(dag_run)
    run_conf = None
    if conf:
        # Accept either an already-parsed dict or a JSON string.
        run_conf = conf if isinstance(conf, dict) else json.loads(conf)
    dag_run = dag.create_dagrun(
        run_id=run_id,
        logical_date=coerced_logical_date,
        data_interval=data_interval,
        run_after=run_after,
        conf=run_conf,
        run_type=DagRunType.MANUAL,
        triggered_by=triggered_by,
        triggering_user_name=triggering_user_name,
        state=DagRunState.QUEUED,
        session=session,
    )
    return dag_run
|
Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param triggered_by: the entity which triggers the dag_run
:param triggering_user_name: the user name who triggers the dag_run
:param run_after: the datetime before which dag cannot run
:param run_id: ID of the run
:param conf: configuration
:param logical_date: logical date of the run
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags
|
python
|
airflow-core/src/airflow/api/common/trigger_dag.py
| 42
|
[
"dag_id",
"dag_bag",
"triggered_by",
"triggering_user_name",
"run_after",
"run_id",
"conf",
"logical_date",
"replace_microseconds",
"session"
] |
DagRun | None
| true
| 15
| 8.16
|
apache/airflow
| 43,597
|
sphinx
| false
|
put
|
/**
 * Puts the property's environment-variable name into {@code properties} when
 * the value is non-empty; empty or null values are skipped.
 */
private void put(Properties properties, LoggingSystemProperty property, @Nullable String value) {
    if (!StringUtils.hasLength(value)) {
        return;
    }
    properties.put(property.getEnvironmentVariableName(), value);
}
|
Apply log file details to {@code LOG_PATH} and {@code LOG_FILE} map entries.
@param properties the properties to apply to
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/LogFile.java
| 95
|
[
"properties",
"property",
"value"
] |
void
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getBreakContext
|
/**
 * Gets a context for a `break` statement by walking the break-context chain.
 * With a label, the context whose label matches wins; without one, the nearest
 * breakable context wins.
 * @param {CodePathState} state A state to get.
 * @param {string} label The label of a `break` statement.
 * @returns {LoopContext|SwitchContext} A context for a `break` statement.
 */
function getBreakContext(state, label) {
    for (let context = state.breakContext; context; context = context.upper) {
        const matches = label ? context.label === label : context.breakable;
        if (matches) {
            return context;
        }
    }
    /* c8 ignore next */
    return null;
}
|
Gets a context for a `break` statement.
@param {CodePathState} state A state to get.
@param {string} label The label of a `break` statement.
@returns {LoopContext|SwitchContext} A context for a `break` statement.
|
javascript
|
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-state.js
| 69
|
[
"state",
"label"
] | false
| 4
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
outputMethod
|
function outputMethod(quotePreference: QuotePreference, signature: Signature, modifiers: NodeArray<Modifier> | undefined, name: PropertyName, body?: Block): void {
const method = createSignatureDeclarationFromSignature(SyntaxKind.MethodDeclaration, context, quotePreference, signature, body, name, modifiers, optional && !!(preserveOptional & PreserveOptionalFlags.Method), enclosingDeclaration, importAdder) as MethodDeclaration;
if (method) addClassElement(method);
}
|
Creates a MethodDeclaration from the given signature and adds it to the class
under construction when creation succeeds; a no-op otherwise.
@param quotePreference The preferred quote style for generated strings.
@param signature The signature to materialize as a method declaration.
@param modifiers Modifiers to attach to the generated method, if any.
@param name The property name for the generated method.
@param body Optional method body; omitted for signature-only output.
|
typescript
|
src/services/codefixes/helpers.ts
| 326
|
[
"quotePreference",
"signature",
"modifiers",
"name",
"body?"
] | true
| 3
| 7.04
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
validate
|
/**
 * Validates a single config entry (recursively including its dependents).
 * When the key has a recommender, its recommended values are intersected with
 * any previously recorded ones and its visibility is applied; recommender
 * failures are recorded as error messages rather than thrown.
 */
private void validate(String name, Map<String, Object> parsed, Map<String, ConfigValue> configs) {
    // Unknown names (not part of this definition) are silently skipped.
    if (!configKeys.containsKey(name)) {
        return;
    }
    ConfigKey key = configKeys.get(name);
    ConfigValue value = configs.get(name);
    if (key.recommender != null) {
        try {
            List<Object> recommendedValues = key.recommender.validValues(name, parsed);
            List<Object> originalRecommendedValues = value.recommendedValues();
            // Keep only recommendations compatible with what was already recorded.
            if (!originalRecommendedValues.isEmpty()) {
                Set<Object> originalRecommendedValueSet = new HashSet<>(originalRecommendedValues);
                recommendedValues.removeIf(o -> !originalRecommendedValueSet.contains(o));
            }
            value.recommendedValues(recommendedValues);
            value.visible(key.recommender.visible(name, parsed));
        } catch (ConfigException e) {
            value.addErrorMessage(e.getMessage());
        }
    }
    configs.put(name, value);
    // Dependents are validated recursively after their parent.
    for (String dependent: key.dependents) {
        validate(dependent, parsed, configs);
    }
}
|
Validate the current configuration values with the configuration definition.
@param props the current configuration values
@return List of Config, each Config contains the updated configuration information given
the current configuration values.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 667
|
[
"name",
"parsed",
"configs"
] |
void
| true
| 5
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
isMethodOnIntroducedInterface
|
protected final boolean isMethodOnIntroducedInterface(MethodInvocation mi) {
Boolean rememberedResult = this.rememberedMethods.get(mi.getMethod());
if (rememberedResult != null) {
return rememberedResult;
}
else {
// Work it out and cache it.
boolean result = implementsInterface(mi.getMethod().getDeclaringClass());
this.rememberedMethods.put(mi.getMethod(), result);
return result;
}
}
|
Is this method on an introduced interface?
@param mi the method invocation
@return whether the invoked method is on an introduced interface
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/IntroductionInfoSupport.java
| 94
|
[
"mi"
] | true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
none
|
static Options none() {
return of();
}
|
Factory method used if there are no expected options.
@return a new {@link Options} instance
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 244
|
[] |
Options
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
tryParseSemicolon
|
function tryParseSemicolon() {
if (!canParseSemicolon()) {
return false;
}
if (token() === SyntaxKind.SemicolonToken) {
// consume the semicolon if it was explicitly provided.
nextToken();
}
return true;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 2,577
|
[] | false
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
equals
|
@Override
public boolean equals(final Object obj) {
if (obj == this) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
@SuppressWarnings("unchecked") // OK because we checked the class above
final
Range<T> range = (Range<T>) obj;
return minimum.equals(range.minimum) &&
maximum.equals(range.maximum);
}
|
Compares this range to another object to test if they are equal.
<p>To be equal, the minimum and maximum values must be equal, which
ignores any differences in the comparator.</p>
@param obj the reference object with which to compare.
@return true if this object is equal.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 303
|
[
"obj"
] | true
| 5
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_assemble_from_unit_mappings
|
def _assemble_from_unit_mappings(
arg, errors: DateTimeErrorChoices, utc: bool
) -> Series:
"""
assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`
utc : bool
Whether to convert/localize timestamps to UTC.
Returns
-------
Series
"""
from pandas import (
DataFrame,
to_numeric,
to_timedelta,
)
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ["year", "month", "day"]
req = set(required) - set(unit_rev.keys())
if len(req):
_required = ",".join(sorted(req))
raise ValueError(
"to assemble mappings requires at least that "
f"[year, month, day] be specified: [{_required}] is missing"
)
# keys we don't recognize
excess = set(unit_rev.keys()) - set(_unit_map.values())
if len(excess):
_excess = ",".join(sorted(excess))
raise ValueError(
f"extra keys have been passed to the datetime assemblage: [{_excess}]"
)
def coerce(values):
# we allow coercion to if errors allows
values = to_numeric(values, errors=errors)
# prevent prevision issues in case of float32 # GH#60506
if is_float_dtype(values.dtype):
values = values.astype("float64")
# prevent overflow in case of int8 or int16
if is_integer_dtype(values.dtype):
values = values.astype("int64")
return values
values = (
coerce(arg[unit_rev["year"]]) * 10000
+ coerce(arg[unit_rev["month"]]) * 100
+ coerce(arg[unit_rev["day"]])
)
try:
values = to_datetime(values, format="%Y%m%d", errors=errors, utc=utc)
except (TypeError, ValueError) as err:
raise ValueError(f"cannot assemble the datetimes: {err}") from err
units: list[UnitChoices] = ["h", "m", "s", "ms", "us", "ns"]
for u in units:
value = unit_rev.get(u)
if value is not None and value in arg:
try:
values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
except (TypeError, ValueError) as err:
raise ValueError(
f"cannot assemble the datetimes [{value}]: {err}"
) from err
return values
|
assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'raise', 'coerce'}, default 'raise'
- If :const:`'raise'`, then invalid parsing will raise an exception
- If :const:`'coerce'`, then invalid parsing will be set as :const:`NaT`
utc : bool
Whether to convert/localize timestamps to UTC.
Returns
-------
Series
|
python
|
pandas/core/tools/datetimes.py
| 1,100
|
[
"arg",
"errors",
"utc"
] |
Series
| true
| 11
| 6.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_set_names
|
def _set_names(self, names, *, level=None) -> None:
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError("Names should be list-like for a MultiIndex")
names = list(names)
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = (self._get_level_number(lev) for lev in level)
# set the name
for lev, name in zip(level, names, strict=True):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError(
f"{type(self).__name__}.name must be a hashable type"
)
self._names[lev] = name
# If .levels has been accessed, the .name of each level in our cache
# will be stale.
self._reset_cache("levels")
|
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
|
python
|
pandas/core/indexes/multi.py
| 1,589
|
[
"self",
"names",
"level"
] |
None
| true
| 12
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
mask_or
|
def mask_or(m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> import numpy as np
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False])
"""
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return _shrink_mask(m1) if shrink else m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if dtype1 != dtype2:
raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'")
if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
|
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> import numpy as np
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False])
|
python
|
numpy/ma/core.py
| 1,748
|
[
"m1",
"m2",
"copy",
"shrink"
] | false
| 10
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
fromPairs
|
function fromPairs(pairs) {
var index = -1,
length = pairs == null ? 0 : pairs.length,
result = {};
while (++index < length) {
var pair = pairs[index];
result[pair[0]] = pair[1];
}
return result;
}
|
The inverse of `_.toPairs`; this method returns an object composed
from key-value `pairs`.
@static
@memberOf _
@since 4.0.0
@category Array
@param {Array} pairs The key-value pairs.
@returns {Object} Returns the new object.
@example
_.fromPairs([['a', 1], ['b', 2]]);
// => { 'a': 1, 'b': 2 }
|
javascript
|
lodash.js
| 7,496
|
[
"pairs"
] | false
| 3
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
compute
|
@Override
public O compute(final I arg) throws InterruptedException {
while (true) {
final Future<O> future = cache.computeIfAbsent(arg, mappingFunction);
try {
return future.get();
} catch (final CancellationException e) {
cache.remove(arg, future);
} catch (final ExecutionException e) {
if (recalculate) {
cache.remove(arg, future);
}
throw launderException(e.getCause());
}
}
}
|
This method will return the result of the calculation and cache it, if it has not previously been calculated.
<p>
This cache will also cache exceptions that occur during the computation if the {@code recalculate} parameter in the
constructor was set to {@code false}, or not set. Otherwise, if an exception happened on the previous calculation,
the method will attempt again to generate a value.
</p>
@param arg the argument for the calculation
@return the result of the calculation
@throws InterruptedException thrown if the calculation is interrupted
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/Memoizer.java
| 122
|
[
"arg"
] |
O
| true
| 5
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
strip
|
public static String strip(final String str) {
return strip(str, null);
}
|
Strips whitespace from the start and end of a String.
<p>
This is similar to {@link #trim(String)} but removes whitespace. Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<p>
A {@code null} input String returns {@code null}.
</p>
<pre>
StringUtils.strip(null) = null
StringUtils.strip("") = ""
StringUtils.strip(" ") = ""
StringUtils.strip("abc") = "abc"
StringUtils.strip(" abc") = "abc"
StringUtils.strip("abc ") = "abc"
StringUtils.strip(" abc ") = "abc"
StringUtils.strip(" ab c ") = "ab c"
</pre>
@param str the String to remove whitespace from, may be null.
@return the stripped String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,800
|
[
"str"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
check_parent_directory
|
def check_parent_directory(path: Path | str) -> None:
"""
Check if parent directory of a file exists, raise OSError if it does not
Parameters
----------
path: Path or str
Path to check parent directory of
"""
parent = Path(path).parent
if not parent.is_dir():
raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")
|
Check if parent directory of a file exists, raise OSError if it does not
Parameters
----------
path: Path or str
Path to check parent directory of
|
python
|
pandas/io/common.py
| 609
|
[
"path"
] |
None
| true
| 2
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
configurePropertySources
|
protected void configurePropertySources(ConfigurableEnvironment environment, String[] args) {
MutablePropertySources sources = environment.getPropertySources();
if (!CollectionUtils.isEmpty(this.defaultProperties)) {
DefaultPropertiesPropertySource.addOrMerge(this.defaultProperties, sources);
}
if (this.addCommandLineProperties && args.length > 0) {
String name = CommandLinePropertySource.COMMAND_LINE_PROPERTY_SOURCE_NAME;
PropertySource<?> source = sources.get(name);
if (source != null) {
CompositePropertySource composite = new CompositePropertySource(name);
composite
.addPropertySource(new SimpleCommandLinePropertySource("springApplicationCommandLineArgs", args));
composite.addPropertySource(source);
sources.replace(name, composite);
}
else {
sources.addFirst(new SimpleCommandLinePropertySource(args));
}
}
environment.getPropertySources().addLast(new ApplicationInfoPropertySource(this.mainApplicationClass));
}
|
Add, remove or re-order any {@link PropertySource}s in this application's
environment.
@param environment this application's environment
@param args arguments passed to the {@code run} method
@see #configureEnvironment(ConfigurableEnvironment, String[])
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 513
|
[
"environment",
"args"
] |
void
| true
| 5
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
splittingIterator
|
private Iterator<String> splittingIterator(CharSequence sequence) {
return strategy.iterator(this, sequence);
}
|
Splits {@code sequence} into string components and makes them available through an {@link
Iterator}, which may be lazily evaluated. If you want an eagerly computed {@link List}, use
{@link #splitToList(CharSequence)}.
@param sequence the sequence of characters to split
@return an iteration over the segments split from the parameter
|
java
|
android/guava/src/com/google/common/base/Splitter.java
| 387
|
[
"sequence"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
parseOptions
|
public OptionSet parseOptions(String[] args) {
return parser.parse(args);
}
|
Parse command line arguments for this command.
@param args The string arguments passed to the command
@return A set of parsed options
|
java
|
libs/cli/src/main/java/org/elasticsearch/cli/Command.java
| 109
|
[
"args"
] |
OptionSet
| true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
stream
|
@Deprecated
public static <E> FailableStream<E> stream(final Collection<E> collection) {
return failableStream(collection);
}
|
Converts the given {@link Collection} into a {@link FailableStream}. This is basically a simplified, reduced version
of the {@link Stream} class, with the same underlying element stream, except that failable objects, like
{@link FailablePredicate}, {@link FailableFunction}, or {@link FailableConsumer} may be applied, instead of
{@link Predicate}, {@link Function}, or {@link Consumer}. The idea is to rewrite a code snippet like this:
<pre>
{@code
final List<O> list;
final Method m;
final Function<O, String> mapper = (o) -> {
try {
return (String) m.invoke(o);
} catch (Throwable t) {
throw Failable.rethrow(t);
}
};
final List<String> strList = list.stream().map(mapper).collect(Collectors.toList());
}
</pre>
as follows:
<pre>
{@code
final List<O> list;
final Method m;
final List<String> strList = Failable.stream(list.stream()).map((o) -> (String) m.invoke(o)).collect(Collectors.toList());
}
</pre>
While the second version may not be <em>quite</em> as efficient (because it depends on the creation of additional,
intermediate objects, of type FailableStream), it is much more concise, and readable, and meets the spirit of Lambdas
better than the first version.
@param <E> The streams element type.
@param collection The stream, which is being converted.
@return The {@link FailableStream}, which has been created by converting the stream.
@deprecated Use {@link #failableStream(Collection)}.
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 780
|
[
"collection"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
reorder_pre_hook_nodes_to_mimic_eager
|
def reorder_pre_hook_nodes_to_mimic_eager(self) -> None:
"""
Usage of AOTAutograd causes all the pre_hook nodes to get pushed to the
end of the graph. This differs from eager mode, which schedules them
right before their registered node execution. This pass attempts to
reorder the graph to mimic eager behavior.
"""
pre_hooks = []
for node in self.fx_tracer.graph.find_nodes(
op="call_function", target=call_hook
):
if node.kwargs.get("hook_type", None) != "pre_hook":
continue
pre_hooks.append(node)
for node in reversed(pre_hooks):
hook_getitem_node = node.args[0]
users = list(node.users.keys())
if len(users) == 0:
continue
# users are all getitem ops and they are used by same registered node
assert all(
user.op == "call_function" and user.target is operator.getitem
for user in users
)
registered_node = next(iter(users[0].users.keys()))
if registered_node is not node.next:
registered_node.prepend(hook_getitem_node)
registered_node.prepend(node)
for getitem in users:
registered_node.prepend(getitem)
|
Usage of AOTAutograd causes all the pre_hook nodes to get pushed to the
end of the graph. This differs from eager mode, which schedules them
right before their registered node execution. This pass attempts to
reorder the graph to mimic eager behavior.
|
python
|
torch/_dynamo/compiled_autograd.py
| 1,258
|
[
"self"
] |
None
| true
| 8
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
buildRequest
|
Collection<RequestAndKeys<K>> buildRequest(int brokerId, Set<K> keys);
|
Build the requests necessary for the given keys. The set of keys is derived by
{@link AdminApiDriver} during the lookup stage as the set of keys which all map
to the same destination broker. Handlers can choose to issue a single request for
all of the provided keys (see {@link Batched}), issue one request per key (see
{@link Unbatched}), or implement their own custom grouping logic if necessary.
@param brokerId the target brokerId for the request
@param keys the set of keys that should be handled by this request
@return a collection of {@link RequestAndKeys} for the requests containing the given keys
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiHandler.java
| 50
|
[
"brokerId",
"keys"
] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
register
|
static void register(BeanDefinitionRegistry registry) {
Assert.notNull(registry, "'registry' must not be null");
if (!registry.containsBeanDefinition(BEAN_NAME)) {
BeanDefinition definition = BeanDefinitionBuilder.rootBeanDefinition(BoundConfigurationProperties.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.getBeanDefinition();
registry.registerBeanDefinition(BEAN_NAME, definition);
}
}
|
Return the {@link BoundConfigurationProperties} from the given
{@link ApplicationContext} if it is available.
@param context the context to search
@return a {@link BoundConfigurationProperties} or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/BoundConfigurationProperties.java
| 81
|
[
"registry"
] |
void
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
andThen
|
default FailableLongConsumer<E> andThen(final FailableLongConsumer<E> after) {
Objects.requireNonNull(after);
return (final long t) -> {
accept(t);
after.accept(t);
};
}
|
Returns a composed {@link FailableLongConsumer} like {@link LongConsumer#andThen(LongConsumer)}.
@param after the operation to perform after this one.
@return a composed {@link FailableLongConsumer} like {@link LongConsumer#andThen(LongConsumer)}.
@throws NullPointerException if {@code after} is null
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongConsumer.java
| 62
|
[
"after"
] | true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
compare_or_regex_search
|
def compare_or_regex_search(
a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
) -> ArrayLike:
"""
Compare two array-like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array-like
b : scalar or regex pattern
regex : bool
mask : np.ndarray[bool]
Returns
-------
mask : array-like of bool
"""
if isna(b):
return ~mask
def _check_comparison_types(
result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
) -> None:
"""
Raises an error if the two arrays (a,b) cannot be compared.
Otherwise, returns the comparison result as expected.
"""
if is_bool(result) and isinstance(a, np.ndarray):
type_names = [type(a).__name__, type(b).__name__]
type_names[0] = f"ndarray(dtype={a.dtype})"
raise TypeError(
f"Cannot compare types {type_names[0]!r} and {type_names[1]!r}"
)
if not regex or not should_use_regex(regex, b):
# TODO: should use missing.mask_missing?
op = lambda x: operator.eq(x, b)
else:
op = np.vectorize(
lambda x: bool(re.search(b, x))
if isinstance(x, str) and isinstance(b, (str, Pattern))
else False,
otypes=[bool],
)
# GH#32621 use mask to avoid comparing to NAs
if isinstance(a, np.ndarray) and mask is not None:
a = a[mask]
result = op(a)
if isinstance(result, np.ndarray):
# The shape of the mask can differ to that of the result
# since we may compare only a subset of a's or b's elements
tmp = np.zeros(mask.shape, dtype=np.bool_)
np.place(tmp, mask, result)
result = tmp
else:
result = op(a)
_check_comparison_types(result, a, b)
return result
|
Compare two array-like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array-like
b : scalar or regex pattern
regex : bool
mask : np.ndarray[bool]
Returns
-------
mask : array-like of bool
|
python
|
pandas/core/array_algos/replace.py
| 46
|
[
"a",
"b",
"regex",
"mask"
] |
ArrayLike
| true
| 13
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
remove_code_blocks
|
def remove_code_blocks(text: str) -> str:
"""Remove content within code blocks (```...``` and `...`) from text."""
text = re.sub(r"```.*?```", "", text, flags=re.DOTALL)
text = re.sub(r"`[^`]+`", "", text)
return text
|
Remove content within code blocks (```...``` and `...`) from text.
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 241
|
[
"text"
] |
str
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
unknown
| false
|
containsAny
|
public static boolean containsAny(final CharSequence cs, final CharSequence searchChars) {
if (searchChars == null) {
return false;
}
return containsAny(cs, CharSequenceUtils.toCharArray(searchChars));
}
|
Tests if the CharSequence contains any character in the given set of characters.
<p>
A {@code null} CharSequence will return {@code false}. A {@code null} search CharSequence will return {@code false}.
</p>
<pre>
StringUtils.containsAny(null, *) = false
StringUtils.containsAny("", *) = false
StringUtils.containsAny(*, null) = false
StringUtils.containsAny(*, "") = false
StringUtils.containsAny("zzabyycdxx", "za") = true
StringUtils.containsAny("zzabyycdxx", "by") = true
StringUtils.containsAny("zzabyycdxx", "zy") = true
StringUtils.containsAny("zzabyycdxx", "\tx") = true
StringUtils.containsAny("zzabyycdxx", "$.#yF") = true
StringUtils.containsAny("aba", "z") = false
</pre>
@param cs the CharSequence to check, may be null.
@param searchChars the chars to search for, may be null.
@return the {@code true} if any of the chars are found, {@code false} if no match or null input.
@since 2.4
@since 3.0 Changed signature from containsAny(String, String) to containsAny(CharSequence, CharSequence)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,093
|
[
"cs",
"searchChars"
] | true
| 2
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readFully
|
public static void readFully(InputStream in, byte[] b, int off, int len) throws IOException {
int read = read(in, b, off, len);
if (read != len) {
throw new EOFException(
"reached end of stream after reading " + read + " bytes; " + len + " bytes expected");
}
}
|
Attempts to read {@code len} bytes from the stream into the given array starting at {@code
off}, with the same behavior as {@link DataInput#readFully(byte[], int, int)}. Does not close
the stream.
@param in the input stream to read from.
@param b the buffer into which the data is read.
@param off an int specifying the offset into the data.
@param len an int specifying the number of bytes to read.
@throws EOFException if this stream reaches the end before reading all the bytes.
@throws IOException if an I/O error occurs.
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 813
|
[
"in",
"b",
"off",
"len"
] |
void
| true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
getBaseUrl
|
URL getBaseUrl() throws IOException {
// Find URL pointing to Finalizer.class file.
String finalizerPath = FINALIZER_CLASS_NAME.replace('.', '/') + ".class";
URL finalizerUrl = getClass().getClassLoader().getResource(finalizerPath);
if (finalizerUrl == null) {
throw new FileNotFoundException(finalizerPath);
}
// Find URL pointing to base of class path.
String urlString = finalizerUrl.toString();
if (!urlString.endsWith(finalizerPath)) {
throw new IOException("Unsupported path style: " + urlString);
}
urlString = urlString.substring(0, urlString.length() - finalizerPath.length());
return new URL(finalizerUrl, urlString);
}
|
Gets URL for base of path containing Finalizer.class.
|
java
|
android/guava/src/com/google/common/base/FinalizableReferenceQueue.java
| 350
|
[] |
URL
| true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
register
|
public static void register() {
String packages = System.getProperty(PROTOCOL_HANDLER_PACKAGES, "");
packages = (!packages.isEmpty() && !packages.contains(PACKAGE)) ? packages + "|" + PACKAGE : PACKAGE;
System.setProperty(PROTOCOL_HANDLER_PACKAGES, packages);
resetCachedUrlHandlers();
}
|
Register a {@literal 'java.protocol.handler.pkgs'} property so that a
{@link URLStreamHandler} will be located to deal with jar URLs.
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/Handlers.java
| 42
|
[] |
void
| true
| 3
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
betweenExclusive
|
public boolean betweenExclusive(final A b, final A c) {
return betweenOrderedExclusive(b, c) || betweenOrderedExclusive(c, b);
}
|
Tests if {@code (b < a < c)} or {@code (b > a > c)} where the {@code a} is object passed to {@link #is}.
@param b the object to compare to the base object
@param c the object to compare to the base object
@return true if the base object is between b and c and not equal to those
|
java
|
src/main/java/org/apache/commons/lang3/compare/ComparableUtils.java
| 65
|
[
"b",
"c"
] | true
| 2
| 8.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_nanquantile
|
def _nanquantile(
values: np.ndarray,
qs: npt.NDArray[np.float64],
*,
na_value,
mask: npt.NDArray[np.bool_],
interpolation: str,
):
"""
Wrapper for np.quantile that skips missing values.
Parameters
----------
values : np.ndarray[ndim=2] over which to find quantiles
qs : np.ndarray[float64] of quantile indices to find
na_value : scalar
value to return for empty or all-null values
mask : np.ndarray[bool]
locations in values that should be considered missing
interpolation : str
Returns
-------
quantiles : scalar or array
"""
if values.dtype.kind in "mM":
# need to cast to integer to avoid rounding errors in numpy
result = _nanquantile(
values.view("i8"),
qs=qs,
na_value=na_value.view("i8"),
mask=mask,
interpolation=interpolation,
)
# Note: we have to do `astype` and not view because in general we
# have float result at this point, not i8
return result.astype(values.dtype)
if mask.any():
# Caller is responsible for ensuring mask shape match
assert mask.shape == values.shape
result = [
_nanquantile_1d(val, m, qs, na_value, interpolation=interpolation)
for (val, m) in zip(list(values), list(mask), strict=True)
]
if values.dtype.kind == "f":
# preserve itemsize
result = np.asarray(result, dtype=values.dtype).T
else:
result = np.asarray(result).T
if (
result.dtype != values.dtype
and not mask.all()
and (result == result.astype(values.dtype, copy=False)).all()
):
# mask.all() will never get cast back to int
# e.g. values id integer dtype and result is floating dtype,
# only cast back to integer dtype if result values are all-integer.
result = result.astype(values.dtype, copy=False)
return result
else:
return np.quantile(
values,
qs,
axis=1,
# error: No overload variant of "percentile" matches argument types
# "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",
# "int", "Dict[str, str]" [call-overload]
method=interpolation, # type: ignore[call-overload]
)
|
Wrapper for np.quantile that skips missing values.
Parameters
----------
values : np.ndarray[ndim=2] over which to find quantiles
qs : np.ndarray[float64] of quantile indices to find
na_value : scalar
value to return for empty or all-null values
mask : np.ndarray[bool]
locations in values that should be considered missing
interpolation : str
Returns
-------
quantiles : scalar or array
|
python
|
pandas/core/array_algos/quantile.py
| 155
|
[
"values",
"qs",
"na_value",
"mask",
"interpolation"
] | true
| 9
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
createInstance
|
@Override
@SuppressWarnings("unchecked")
protected List<Object> createInstance() {
if (this.sourceList == null) {
throw new IllegalArgumentException("'sourceList' is required");
}
List<Object> result = null;
if (this.targetListClass != null) {
result = BeanUtils.instantiateClass(this.targetListClass);
}
else {
result = new ArrayList<>(this.sourceList.size());
}
Class<?> valueType = null;
if (this.targetListClass != null) {
valueType = ResolvableType.forClass(this.targetListClass).asCollection().resolveGeneric();
}
if (valueType != null) {
TypeConverter converter = getBeanTypeConverter();
for (Object elem : this.sourceList) {
result.add(converter.convertIfNecessary(elem, valueType));
}
}
else {
result.addAll(this.sourceList);
}
return result;
}
|
Set the class to use for the target List. Can be populated with a fully
qualified class name when defined in a Spring application context.
<p>Default is a {@code java.util.ArrayList}.
@see java.util.ArrayList
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/ListFactoryBean.java
| 76
|
[] | true
| 5
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
normalizedValue
|
public double normalizedValue() {
return normalizationFactor > 0 ? (value() / normalizationFactor) : value();
}
|
Returns the normalized value. If no normalised factor has been specified
this method will return {@link #value()}
@return the normalized value
|
java
|
modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/Derivative.java
| 56
|
[] | true
| 2
| 7.68
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
getInnerMostModuleDeclarationFromDottedModule
|
function getInnerMostModuleDeclarationFromDottedModule(moduleDeclaration: ModuleDeclaration): ModuleDeclaration | undefined {
if (moduleDeclaration.body!.kind === SyntaxKind.ModuleDeclaration) {
const recursiveInnerModule = getInnerMostModuleDeclarationFromDottedModule(moduleDeclaration.body as ModuleDeclaration);
return recursiveInnerModule || moduleDeclaration.body as ModuleDeclaration;
}
}
|
Transforms the body of a module declaration.
@param node The module declaration node.
|
typescript
|
src/compiler/transformers/ts.ts
| 2,243
|
[
"moduleDeclaration"
] | true
| 3
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
parseAddress
|
private InternetAddress parseAddress(String address) throws MessagingException {
InternetAddress[] parsed = InternetAddress.parse(address);
if (parsed.length != 1) {
throw new AddressException("Illegal address", address);
}
InternetAddress raw = parsed[0];
try {
return (getEncoding() != null ?
new InternetAddress(raw.getAddress(), raw.getPersonal(), getEncoding()) : raw);
}
catch (UnsupportedEncodingException ex) {
throw new MessagingException("Failed to parse embedded personal name to correct encoding", ex);
}
}
|
Validate all given mail addresses.
<p>The default implementation simply delegates to {@link #validateAddress}
for each address.
@param addresses the addresses to validate
@throws AddressException if validation failed
@see #validateAddress(InternetAddress)
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 730
|
[
"address"
] |
InternetAddress
| true
| 4
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
skipFully
|
public static void skipFully(InputStream in, long n) throws IOException {
long skipped = skipUpTo(in, n);
if (skipped < n) {
throw new EOFException(
"reached end of stream after skipping " + skipped + " bytes; " + n + " bytes expected");
}
}
|
Discards {@code n} bytes of data from the input stream. This method will block until the full
amount has been skipped. Does not close the stream.
@param in the input stream to read from
@param n the number of bytes to skip
@throws EOFException if this stream reaches the end before skipping all the bytes
@throws IOException if an I/O error occurs, or the stream does not support skipping
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 830
|
[
"in",
"n"
] |
void
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
get_loc
|
def get_loc(self, key):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
The key to check its location if it is present in the index.
Returns
-------
int if unique index, slice if monotonic index, else mask
Integer location, slice or boolean mask.
See Also
--------
Index.get_slice_bound : Calculate slice bound that corresponds to
given label.
Index.get_indexer : Computes indexer and mask for new index given
the current index.
Index.get_non_unique : Returns indexer and masks for new index given
the current index.
Index.get_indexer_for : Returns an indexer even when non-unique.
Examples
--------
>>> unique_index = pd.Index(list("abc"))
>>> unique_index.get_loc("b")
1
>>> monotonic_index = pd.Index(list("abbc"))
>>> monotonic_index.get_loc("b")
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list("abcb"))
>>> non_monotonic_index.get_loc("b")
array([False, True, False, True])
"""
casted_key = self._maybe_cast_indexer(key)
try:
return self._engine.get_loc(casted_key)
except KeyError as err:
if isinstance(casted_key, slice) or (
isinstance(casted_key, abc.Iterable)
and any(isinstance(x, slice) for x in casted_key)
):
raise InvalidIndexError(key) from err
raise KeyError(key) from err
except TypeError:
# If we have a listlike key, _check_indexing_error will raise
# InvalidIndexError. Otherwise we fall through and re-raise
# the TypeError.
self._check_indexing_error(key)
raise
|
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
The key to check its location if it is present in the index.
Returns
-------
int if unique index, slice if monotonic index, else mask
Integer location, slice or boolean mask.
See Also
--------
Index.get_slice_bound : Calculate slice bound that corresponds to
given label.
Index.get_indexer : Computes indexer and mask for new index given
the current index.
Index.get_non_unique : Returns indexer and masks for new index given
the current index.
Index.get_indexer_for : Returns an indexer even when non-unique.
Examples
--------
>>> unique_index = pd.Index(list("abc"))
>>> unique_index.get_loc("b")
1
>>> monotonic_index = pd.Index(list("abbc"))
>>> monotonic_index.get_loc("b")
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list("abcb"))
>>> non_monotonic_index.get_loc("b")
array([False, True, False, True])
|
python
|
pandas/core/indexes/base.py
| 3,603
|
[
"self",
"key"
] | false
| 4
| 7.36
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
strict
|
public boolean strict() {
return this.strict;
}
|
@return whether the filter is strict, i.e. only includes specified components
|
java
|
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaFilter.java
| 80
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
should_realize_on_reuse
|
def should_realize_on_reuse(self, users: int) -> bool:
"""
A heuristic to decide if we should realize a tensor
that is used multiple times.
"""
if users > 1 and isinstance(self.data, (Pointwise, Reduction)):
if is_cpu(self.data):
# Heuristic for realizing reused result of heavy ops on cpu
opcount = self.data.inner_fn_opcount()
heavy_ops = ["exp", "sigmoid"] # a list of heavy ops
if any(x in opcount.used_ops for x in heavy_ops):
return True
return (
self.num_reads() > config.realize_reads_threshold
or self.has_large_inner_fn()
)
return False
|
A heuristic to decide if we should realize a tensor
that is used multiple times.
|
python
|
torch/_inductor/ir.py
| 8,594
|
[
"self",
"users"
] |
bool
| true
| 6
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
clearAssignment
|
private void clearAssignment() {
if (subscriptions.hasAutoAssignedPartitions()) {
subscriptions.assignFromSubscribed(Collections.emptySet());
notifyAssignmentChange(Collections.emptySet());
}
currentAssignment = LocalAssignment.NONE;
clearPendingAssignmentsAndLocalNamesCache();
}
|
Clear the assigned partitions in the member subscription, pending assignments and metadata cache.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 499
|
[] |
void
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
field
|
public XContentBuilder field(String name, String value) throws IOException {
if (value == null) {
return nullField(name);
}
ensureNameNotNull(name);
generator.writeStringField(name, value);
return this;
}
|
@return the value of the "human readable" flag. When the value is equal to true,
some types of values are written in a format easier to read for a human.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 735
|
[
"name",
"value"
] |
XContentBuilder
| true
| 2
| 7.04
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
selectorProviderChecks
|
private static Stream<InstrumentationService.InstrumentationInfo> selectorProviderChecks() {
var selectorProviderClass = SelectorProvider.provider().getClass();
var instrumentation = new InstrumentationInfoFactory() {
@Override
public InstrumentationService.InstrumentationInfo of(String methodName, Class<?>... parameterTypes)
throws ClassNotFoundException, NoSuchMethodException {
return INSTRUMENTATION_SERVICE.lookupImplementationMethod(
SelectorProvider.class,
methodName,
selectorProviderClass,
EntitlementChecker.class,
"checkSelectorProvider" + Character.toUpperCase(methodName.charAt(0)) + methodName.substring(1),
parameterTypes
);
}
};
try {
return Stream.of(
instrumentation.of("inheritedChannel"),
instrumentation.of("openDatagramChannel"),
instrumentation.of("openDatagramChannel", java.net.ProtocolFamily.class),
instrumentation.of("openServerSocketChannel"),
instrumentation.of("openServerSocketChannel", java.net.ProtocolFamily.class),
instrumentation.of("openSocketChannel"),
instrumentation.of("openSocketChannel", java.net.ProtocolFamily.class)
);
} catch (NoSuchMethodException | ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
|
Initializes the dynamic (agent-based) instrumentation:
<ol>
<li>
Finds the version-specific subclass of {@link EntitlementChecker} to use
</li>
<li>
Builds the set of methods to instrument using {@link InstrumentationService#lookupMethods}
</li>
<li>
Augment this set “dynamically” using {@link InstrumentationService#lookupImplementationMethod}
</li>
<li>
Creates an {@link Instrumenter} via {@link InstrumentationService#newInstrumenter}, and adds a new {@link Transformer} (derived from
{@link java.lang.instrument.ClassFileTransformer}) that uses it. Transformers are invoked when a class is about to load, after its
bytes have been deserialized to memory but before the class is initialized.
</li>
<li>
Re-transforms all already loaded classes: we force the {@link Instrumenter} to run on classes that might have been already loaded
before entitlement initialization by calling the {@link java.lang.instrument.Instrumentation#retransformClasses} method on all
classes that were already loaded.
</li>
</ol>
<p>
The third step is needed as the JDK exposes some API through interfaces that have different (internal) implementations
depending on the JVM host platform. As we cannot instrument an interfaces, we find its concrete implementation.
A prime example is {@link FileSystemProvider}, which has different implementations (e.g. {@code UnixFileSystemProvider} or
{@code WindowsFileSystemProvider}). At runtime, we find the implementation class which is currently used by the JVM, and add
its methods to the set of methods to instrument. See e.g. {@link DynamicInstrumentation#fileSystemProviderChecks}.
</p>
@param inst the JVM instrumentation class instance
@param checkerInterface the interface to use to find methods to instrument and to use in the injected instrumentation code
@param verifyBytecode whether we should perform bytecode verification before and after instrumenting each method
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/DynamicInstrumentation.java
| 251
|
[] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
repeat
|
public static String repeat(final String repeat, final String separator, final int count) {
if (repeat == null || separator == null) {
return repeat(repeat, count);
}
// given that repeat(String, int) is quite optimized, better to rely on it than try and splice this into it
final String result = repeat(repeat + separator, count);
return Strings.CS.removeEnd(result, separator);
}
|
Repeats a String {@code repeat} times to form a new String, with a String separator injected each time.
<pre>
StringUtils.repeat(null, null, 2) = null
StringUtils.repeat(null, "x", 2) = null
StringUtils.repeat("", null, 0) = ""
StringUtils.repeat("", "", 2) = ""
StringUtils.repeat("", "x", 3) = "xx"
StringUtils.repeat("?", ", ", 3) = "?, ?, ?"
</pre>
@param repeat the String to repeat, may be null.
@param separator the String to inject, may be null.
@param count number of times to repeat str, negative treated as zero.
@return a new String consisting of the original String repeated, {@code null} if null String input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,119
|
[
"repeat",
"separator",
"count"
] |
String
| true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
decode
|
def decode(
self, encoding, errors: str = "strict", dtype: str | DtypeObj | None = None
):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
Specifies the encoding to be used.
errors : str, optional
Specifies the error handling scheme.
Possible values are those supported by :meth:`bytes.decode`.
dtype : str or dtype, optional
The dtype of the result. When not ``None``, must be either a string or
object dtype. When ``None``, the dtype of the result is determined by
``pd.options.future.infer_string``.
.. versionadded:: 2.3.0
Returns
-------
Series or Index
A Series or Index with decoded strings.
See Also
--------
Series.str.encode : Encodes strings into bytes in a Series/Index.
Examples
--------
For Series:
>>> ser = pd.Series([b"cow", b"123", b"()"])
>>> ser.str.decode("ascii")
0 cow
1 123
2 ()
dtype: str
"""
if dtype is not None and not is_string_dtype(dtype):
raise ValueError(f"dtype must be string or object, got {dtype=}")
if dtype is None and using_string_dtype():
dtype = "str"
# TODO: Add a similar _bytes interface.
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
arr = self._data.array
result = arr._str_map(f)
return self._wrap_result(result, dtype=dtype)
|
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
Specifies the encoding to be used.
errors : str, optional
Specifies the error handling scheme.
Possible values are those supported by :meth:`bytes.decode`.
dtype : str or dtype, optional
The dtype of the result. When not ``None``, must be either a string or
object dtype. When ``None``, the dtype of the result is determined by
``pd.options.future.infer_string``.
.. versionadded:: 2.3.0
Returns
-------
Series or Index
A Series or Index with decoded strings.
See Also
--------
Series.str.encode : Encodes strings into bytes in a Series/Index.
Examples
--------
For Series:
>>> ser = pd.Series([b"cow", b"123", b"()"])
>>> ser.str.decode("ascii")
0 cow
1 123
2 ()
dtype: str
|
python
|
pandas/core/strings/accessor.py
| 2,120
|
[
"self",
"encoding",
"errors",
"dtype"
] | true
| 7
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
_summary
|
def _summary(self, name=None) -> str:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
result = super()._summary(name=name)
if self.freq:
result += f"\nFreq: {self.freqstr}"
return result
|
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
|
python
|
pandas/core/indexes/datetimelike.py
| 351
|
[
"self",
"name"
] |
str
| true
| 2
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.