function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
equals
|
@Override
public boolean equals(@Nullable Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
InjectionPoint otherPoint = (InjectionPoint) other;
return (ObjectUtils.nullSafeEquals(this.field, otherPoint.field) &&
ObjectUtils.nullSafeEquals(this.methodParameter, otherPoint.methodParameter));
}
|
Return the wrapped annotated element.
<p>Note: In case of a method/constructor parameter, this exposes
the annotations declared on the method or constructor itself
(i.e. at the method/constructor level, not at the parameter level).
Use {@link #getAnnotations()} to obtain parameter-level annotations in
such a scenario, transparently with corresponding field annotations.
@return the Field / Method / Constructor as AnnotatedElement
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/InjectionPoint.java
| 176
|
[
"other"
] | true
| 5
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
get
|
@Override
public ConfigData get(String path, Set<String> keys) {
if (path != null && !path.isEmpty()) {
log.error("Path is not supported for EnvVarConfigProvider, invalid value '{}'", path);
throw new ConfigException("Path is not supported for EnvVarConfigProvider, invalid value '" + path + "'");
}
if (keys == null) {
return new ConfigData(filteredEnvVarMap);
}
Map<String, String> filteredData = new HashMap<>(filteredEnvVarMap);
filteredData.keySet().retainAll(keys);
return new ConfigData(filteredData);
}
|
@param path path, not used for environment variables
@param keys the keys whose values will be retrieved.
@return the configuration data.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/provider/EnvVarConfigProvider.java
| 93
|
[
"path",
"keys"
] |
ConfigData
| true
| 4
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
benchmark_utilization
|
def benchmark_utilization(
f,
input,
trace_folder,
optimize_ctx=None,
trace_file_name="tmp_chrome_trace",
num_runs=1,
):
"""
Benchmark the GPU Utilization and percent of time spent on matmul and convolution operations of
running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times.
It will produce a chrome trace file in trace_folder/trace_file_name.json
Example:
```
def f(a):
return a.sum()
a = torch.rand(2**20, device="cuda")
utilization, mm_conv_utilization = benchmark_utilization(
f, a, "tmp", trace_file_name="tmp_chrome_trace"
)
```
Args:
f: function to benchmark
input: input to :attr:`f`
trace_folder: name of the folder to store the chrome trace
optimize_ctx: the context in which f will run
trace_file_name: name of the dumped chrome trace file, default to "tmp_chrome_trace"
num_runs: number of times to run f, excluding the warm-up runs, default to 1.
Return:
tuple: (GPU Utilization, percent of time spent on matmul and convolution)
"""
isExist = os.path.exists(trace_folder)
if not isExist:
os.makedirs(trace_folder)
print("create folder " + trace_folder)
if optimize_ctx is None:
optimize_ctx = contextlib.nullcontext()
chrome_trace_file_name = os.path.join(trace_folder, trace_file_name + ".json")
total_length = dump_chrome_trace(
f,
input,
chrome_trace_file_name,
optimize_ctx,
[ProfilerActivity.CUDA],
num_runs=num_runs,
devices=["cuda"],
)
utilization, mm_conv_utilization = compute_utilization(
chrome_trace_file_name, total_length
)
return utilization, mm_conv_utilization
|
Benchmark the GPU Utilization and percent of time spent on matmul and convolution operations of
running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times.
It will produce a chrome trace file in trace_folder/trace_file_name.json
Example:
```
def f(a):
return a.sum()
a = torch.rand(2**20, device="cuda")
utilization, mm_conv_utilization = benchmark_utilization(
f, a, "tmp", trace_file_name="tmp_chrome_trace"
)
```
Args:
f: function to benchmark
input: input to :attr:`f`
trace_folder: name of the folder to store the chrome trace
optimize_ctx: the context in which f will run
trace_file_name: name of the dumped chrome trace file, default to "tmp_chrome_trace"
num_runs: number of times to run f, excluding the warm-up runs, default to 1.
Return:
tuple: (GPU Utilization, percent of time spent on matmul and convolution)
|
python
|
torch/_functorch/benchmark_utils.py
| 170
|
[
"f",
"input",
"trace_folder",
"optimize_ctx",
"trace_file_name",
"num_runs"
] | false
| 3
| 8.16
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
iscontiguous
|
def iscontiguous(self):
"""
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
"""
return self.flags['CONTIGUOUS']
|
Return a boolean indicating whether the data is contiguous.
Parameters
----------
None
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([1, 2, 3])
>>> x.iscontiguous()
True
`iscontiguous` returns one of the flags of the masked array:
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : True
OWNDATA : False
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
|
python
|
numpy/ma/core.py
| 4,948
|
[
"self"
] | false
| 1
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
writeSignatureFileIfNecessary
|
@Override
protected void writeSignatureFileIfNecessary(Map<String, Library> writtenLibraries, AbstractJarWriter writer)
throws IOException {
String sourceName = getSource().getName().toLowerCase(Locale.ROOT);
if ((sourceName.endsWith(".jar") || sourceName.endsWith(".war")) && hasSignedLibrary(writtenLibraries)) {
writer.writeEntry("META-INF/BOOT.SF", (entryWriter) -> {
});
}
}
|
Create a new {@link Repackager} instance.
@param source the source archive file to package
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Repackager.java
| 53
|
[
"writtenLibraries",
"writer"
] |
void
| true
| 4
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
ordered
|
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
See Also
--------
categories : An Index containing the unique categories allowed.
Examples
--------
>>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=True)
>>> cat_type.ordered
True
>>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=False)
>>> cat_type.ordered
False
"""
return self._ordered
|
Whether the categories have an ordered relationship.
See Also
--------
categories : An Index containing the unique categories allowed.
Examples
--------
>>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=True)
>>> cat_type.ordered
True
>>> cat_type = pd.CategoricalDtype(categories=["a", "b"], ordered=False)
>>> cat_type.ordered
False
|
python
|
pandas/core/dtypes/dtypes.py
| 655
|
[
"self"
] |
Ordered
| true
| 1
| 6.48
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
edgeCount
|
protected long edgeCount() {
long degreeSum = 0L;
for (N node : nodes()) {
degreeSum += degree(node);
}
// According to the degree sum formula, this is equal to twice the number of edges.
checkState((degreeSum & 1) == 0);
return degreeSum >>> 1;
}
|
Returns the number of edges in this graph; used to calculate the size of {@link Graph#edges()}.
This implementation requires O(|N|) time. Classes extending this one may manually keep track of
the number of edges as the graph is updated, and override this method for better performance.
|
java
|
android/guava/src/com/google/common/graph/AbstractBaseGraph.java
| 52
|
[] | true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
getSubtype
|
public final TypeToken<? extends T> getSubtype(Class<?> subclass) {
checkArgument(
!(runtimeType instanceof TypeVariable), "Cannot get subtype of type variable <%s>", this);
if (runtimeType instanceof WildcardType) {
return getSubtypeFromLowerBounds(subclass, ((WildcardType) runtimeType).getLowerBounds());
}
// unwrap array type if necessary
if (isArray()) {
return getArraySubtype(subclass);
}
// At this point, it's either a raw class or parameterized type.
checkArgument(
getRawType().isAssignableFrom(subclass), "%s isn't a subclass of %s", subclass, this);
Type resolvedTypeArgs = resolveTypeArgsForSubclass(subclass);
@SuppressWarnings("unchecked") // guarded by the isAssignableFrom() statement above
TypeToken<? extends T> subtype = (TypeToken<? extends T>) of(resolvedTypeArgs);
checkArgument(
subtype.isSubtypeOf(this), "%s does not appear to be a subtype of %s", subtype, this);
return subtype;
}
|
Returns subtype of {@code this} with {@code subclass} as the raw class. For example, if this is
{@code Iterable<String>} and {@code subclass} is {@code List}, {@code List<String>} is
returned.
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 430
|
[
"subclass"
] | true
| 3
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
add
|
public Member<T> add() {
return from((value) -> value);
}
|
Add a new member with access to the instance being written. The member is added
without a name, so one of the {@code Member.using(...)} methods must be used to
complete the configuration.
@return the added {@link Member} which may be configured further
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 249
|
[] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
handleCoordinatorReady
|
void handleCoordinatorReady() {
NodeApiVersions nodeApiVersions = transactionCoordinator != null ?
apiVersions.get(transactionCoordinator.idString()) :
null;
ApiVersion initProducerIdVersion = nodeApiVersions != null ?
nodeApiVersions.apiVersion(ApiKeys.INIT_PRODUCER_ID) :
null;
this.coordinatorSupportsBumpingEpoch = initProducerIdVersion != null &&
initProducerIdVersion.maxVersion() >= 3;
}
|
Check if the transaction is in the prepared state.
@return true if the current state is PREPARED_TRANSACTION
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,105
|
[] |
void
| true
| 4
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
to_series
|
def to_series(self, index=None, name: Hashable | None = None) -> Series:
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
See Also
--------
Index.to_frame : Convert an Index to a DataFrame.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(["Ant", "Bear", "Cow"], name="animal")
By default, the original index and original name is reused.
>>> idx.to_series()
animal
Ant Ant
Bear Bear
Cow Cow
Name: animal, dtype: object
To enforce a new index, specify new labels to ``index``:
>>> idx.to_series(index=[0, 1, 2])
0 Ant
1 Bear
2 Cow
Name: animal, dtype: object
To override the name of the resulting column, specify ``name``:
>>> idx.to_series(name="zoo")
animal
Ant Ant
Bear Bear
Cow Cow
Name: zoo, dtype: object
"""
from pandas import Series
if index is None:
index = self._view()
if name is None:
name = self.name
return Series(self._values.copy(), index=index, name=name)
|
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
See Also
--------
Index.to_frame : Convert an Index to a DataFrame.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(["Ant", "Bear", "Cow"], name="animal")
By default, the original index and original name is reused.
>>> idx.to_series()
animal
Ant Ant
Bear Bear
Cow Cow
Name: animal, dtype: object
To enforce a new index, specify new labels to ``index``:
>>> idx.to_series(index=[0, 1, 2])
0 Ant
1 Bear
2 Cow
Name: animal, dtype: object
To override the name of the resulting column, specify ``name``:
>>> idx.to_series(name="zoo")
animal
Ant Ant
Bear Bear
Cow Cow
Name: zoo, dtype: object
|
python
|
pandas/core/indexes/base.py
| 1,650
|
[
"self",
"index",
"name"
] |
Series
| true
| 3
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
asBiPredicate
|
public static <O1, O2> BiPredicate<O1, O2> asBiPredicate(final FailableBiPredicate<O1, O2, ?> predicate) {
return (input1, input2) -> test(predicate, input1, input2);
}
|
Converts the given {@link FailableBiPredicate} into a standard {@link BiPredicate}.
@param <O1> the type of the first argument used by the predicates
@param <O2> the type of the second argument used by the predicates
@param predicate a {@link FailableBiPredicate}
@return a standard {@link BiPredicate}
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 379
|
[
"predicate"
] | true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
adviceIncluded
|
public boolean adviceIncluded(@Nullable Advice advice) {
if (advice != null) {
for (Advisor advisor : this.advisors) {
if (advisor.getAdvice() == advice) {
return true;
}
}
}
return false;
}
|
Is the given advice included in any advisor within this proxy configuration?
@param advice the advice to check inclusion of
@return whether this advice instance is included
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 480
|
[
"advice"
] | true
| 3
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
maybeBindPrimitiveArgsFromPointcutExpression
|
private void maybeBindPrimitiveArgsFromPointcutExpression() {
int numUnboundPrimitives = countNumberOfUnboundPrimitiveArguments();
if (numUnboundPrimitives > 1) {
throw new AmbiguousBindingException("Found " + numUnboundPrimitives +
" unbound primitive arguments with no way to distinguish between them.");
}
if (numUnboundPrimitives == 1) {
// Look for arg variable and bind it if we find exactly one...
List<String> varNames = new ArrayList<>();
String[] tokens = StringUtils.tokenizeToStringArray(this.pointcutExpression, " ");
for (int i = 0; i < tokens.length; i++) {
if (tokens[i].equals("args") || tokens[i].startsWith("args(")) {
PointcutBody body = getPointcutBody(tokens, i);
i += body.numTokensConsumed;
maybeExtractVariableNamesFromArgs(body.text, varNames);
}
}
if (varNames.size() > 1) {
throw new AmbiguousBindingException("Found " + varNames.size() +
" candidate variable names but only one candidate binding slot when matching primitive args");
}
else if (varNames.size() == 1) {
// 1 primitive arg, and one candidate...
for (int i = 0; i < this.argumentTypes.length; i++) {
if (isUnbound(i) && this.argumentTypes[i].isPrimitive()) {
bindParameterName(i, varNames.get(0));
break;
}
}
}
}
}
|
Match up args against unbound arguments of primitive types.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAdviceParameterNameDiscoverer.java
| 641
|
[] |
void
| true
| 11
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
to_numpy
|
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value: object = lib.no_default,
) -> np.ndarray:
"""
Convert to a NumPy ndarray.
This is similar to :meth:`numpy.asarray`, but may provide additional control
over how the conversion is done.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
Returns
-------
numpy.ndarray
"""
result = np.asarray(self, dtype=dtype)
if copy or na_value is not lib.no_default:
result = result.copy()
elif self._readonly and astype_is_view(self.dtype, result.dtype):
# If the ExtensionArray is readonly, make the numpy array readonly too
result = result.view()
result.flags.writeable = False
if na_value is not lib.no_default:
result[self.isna()] = na_value # type: ignore[index]
return result
|
Convert to a NumPy ndarray.
This is similar to :meth:`numpy.asarray`, but may provide additional control
over how the conversion is done.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
Returns
-------
numpy.ndarray
|
python
|
pandas/core/arrays/base.py
| 607
|
[
"self",
"dtype",
"copy",
"na_value"
] |
np.ndarray
| true
| 6
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
resolve
|
@Override
public Object resolve(EvaluationContext context, String beanName) throws AccessException {
try {
return this.beanFactory.getBean(beanName);
}
catch (BeansException ex) {
throw new AccessException("Could not resolve bean reference against BeanFactory", ex);
}
}
|
Create a new {@code BeanFactoryResolver} for the given factory.
@param beanFactory the {@code BeanFactory} to resolve bean names against
|
java
|
spring-context/src/main/java/org/springframework/context/expression/BeanFactoryResolver.java
| 47
|
[
"context",
"beanName"
] |
Object
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fit
|
def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
Returns an instance of self.
"""
if copy_X is None:
copy_X = self.copy_X
X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True)
X, y, Xmean, ymean, _, _ = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=copy_X
)
Gram = self.precompute
alphas_, _, coef_path_, self.n_iter_ = lars_path(
X,
y,
Gram=Gram,
copy_X=copy_X,
copy_Gram=True,
alpha_min=0.0,
method="lasso",
verbose=self.verbose,
max_iter=self.max_iter,
eps=self.eps,
return_n_iter=True,
positive=self.positive,
)
n_samples = X.shape[0]
if self.criterion == "aic":
criterion_factor = 2
elif self.criterion == "bic":
criterion_factor = log(n_samples)
else:
raise ValueError(
f"criterion should be either bic or aic, got {self.criterion!r}"
)
residuals = y[:, np.newaxis] - np.dot(X, coef_path_)
residuals_sum_squares = np.sum(residuals**2, axis=0)
degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int)
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
degrees_of_freedom[k] = np.sum(mask)
self.alphas_ = alphas_
if self.noise_variance is None:
self.noise_variance_ = self._estimate_noise_variance(
X, y, positive=self.positive
)
else:
self.noise_variance_ = self.noise_variance
self.criterion_ = (
n_samples * np.log(2 * np.pi * self.noise_variance_)
+ residuals_sum_squares / self.noise_variance_
+ criterion_factor * degrees_of_freedom
)
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean)
return self
|
Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
Returns an instance of self.
|
python
|
sklearn/linear_model/_least_angle.py
| 2,225
|
[
"self",
"X",
"y",
"copy_X"
] | false
| 9
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
toString
|
@Override
public String toString() {
return "Generation{" +
"generationId=" + generationId +
", memberId='" + memberId + '\'' +
", protocol='" + protocolName + '\'' +
'}';
}
|
@return true if this generation has a valid member id, false otherwise. A member might have an id before
it becomes part of a group generation.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 1,625
|
[] |
String
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
generateFixItHint
|
static FixItHint generateFixItHint(const FunctionDecl *Decl) {
// A fixit can be generated for functions of static storage class but
// otherwise the check cannot determine the appropriate function name prefix
// to use.
if (Decl->getStorageClass() != SC_Static)
return {};
const StringRef Name = Decl->getName();
std::string NewName = Decl->getName().str();
size_t Index = 0;
bool AtWordBoundary = true;
while (Index < NewName.size()) {
const char Ch = NewName[Index];
if (isalnum(Ch)) {
// Capitalize the first letter after every word boundary.
if (AtWordBoundary) {
NewName[Index] = toupper(NewName[Index]);
AtWordBoundary = false;
}
// Advance the index after every alphanumeric character.
Index++;
} else {
// Strip out any characters other than alphanumeric characters.
NewName.erase(Index, 1);
AtWordBoundary = true;
}
}
// Generate a fixit hint if the new name is different.
if (NewName != Name)
return FixItHint::CreateReplacement(
CharSourceRange::getTokenRange(SourceRange(Decl->getLocation())),
llvm::StringRef(NewName));
return {};
}
|
other cases the user must determine an appropriate name on their own.
|
cpp
|
clang-tools-extra/clang-tidy/google/FunctionNamingCheck.cpp
| 44
|
[] | true
| 7
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
createRound
|
function createRound(methodName) {
var func = Math[methodName];
return function(number, precision) {
number = toNumber(number);
precision = precision == null ? 0 : nativeMin(toInteger(precision), 292);
if (precision && nativeIsFinite(number)) {
// Shift with exponential notation to avoid floating-point issues.
// See [MDN](https://mdn.io/round#Examples) for more details.
var pair = (toString(number) + 'e').split('e'),
value = func(pair[0] + 'e' + (+pair[1] + precision));
pair = (toString(value) + 'e').split('e');
return +(pair[0] + 'e' + (+pair[1] - precision));
}
return func(number);
};
}
|
Creates a function like `_.round`.
@private
@param {string} methodName The name of the `Math` method to use when rounding.
@returns {Function} Returns the new round function.
|
javascript
|
lodash.js
| 5,515
|
[
"methodName"
] | false
| 4
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
visitVariableDeclarationWorker
|
function visitVariableDeclarationWorker(node: VariableDeclaration, exportedVariableStatement: boolean): VisitResult<VariableDeclaration> {
// If we are here it is because the name contains a binding pattern with a rest somewhere in it.
if (isBindingPattern(node.name) && node.name.transformFlags & TransformFlags.ContainsObjectRestOrSpread) {
return flattenDestructuringBinding(
node,
visitor,
context,
FlattenLevel.ObjectRest,
/*rval*/ undefined,
exportedVariableStatement,
);
}
return visitEachChild(node, visitor, context);
}
|
Visits a VariableDeclaration node with a binding pattern.
@param node A VariableDeclaration node.
|
typescript
|
src/compiler/transformers/es2018.ts
| 695
|
[
"node",
"exportedVariableStatement"
] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
center
|
public static String center(final String str, final int size) {
return center(str, size, ' ');
}
|
Centers a String in a larger String of size {@code size} using the space character (' ').
<p>
If the size is less than the String length, the original String is returned. A {@code null} String returns {@code null}. A negative size is treated as
zero.
</p>
<p>
Equivalent to {@code center(str, size, " ")}.
</p>
<pre>
StringUtils.center(null, *) = null
StringUtils.center("", 4) = " "
StringUtils.center("ab", -1) = "ab"
StringUtils.center("ab", 4) = " ab "
StringUtils.center("abcd", 2) = "abcd"
StringUtils.center("a", 4) = " a "
</pre>
@param str the String to center, may be null.
@param size the int size of new String, negative treated as zero.
@return centered String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 568
|
[
"str",
"size"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
rank
|
def rank(
values: ArrayLike,
axis: AxisInt = 0,
method: str = "average",
na_option: str = "keep",
ascending: bool = True,
pct: bool = False,
) -> npt.NDArray[np.float64]:
"""
Rank the values along a given axis.
Parameters
----------
values : np.ndarray or ExtensionArray
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
- ``top``: replace each NaN with either +/- inf so that they
there are ranked at the top
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to the display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
is_datetimelike = needs_i8_conversion(values.dtype)
values = _ensure_data(values)
if values.ndim == 1:
ranks = algos.rank_1d(
values,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
elif values.ndim == 2:
ranks = algos.rank_2d(
values,
axis=axis,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
|
Rank the values along a given axis.
Parameters
----------
values : np.ndarray or ExtensionArray
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
- ``top``: replace each NaN with either +/- inf so that they
there are ranked at the top
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to the display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
|
python
|
pandas/core/algorithms.py
| 1,061
|
[
"values",
"axis",
"method",
"na_option",
"ascending",
"pct"
] |
npt.NDArray[np.float64]
| true
| 4
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
charBufferOrNull
|
@Override
public CharBuffer charBufferOrNull() throws IOException {
if (currentToken() == Token.VALUE_NULL) {
return null;
}
return charBuffer();
}
|
Return the long that {@code stringValue} stores or throws an exception if the
stored value cannot be converted to a long that stores the exact same
value and {@code coerce} is false.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/support/AbstractXContentParser.java
| 286
|
[] |
CharBuffer
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
triu
|
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of an array with the elements below the `k`-th diagonal
zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
final two axes.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> import numpy as np
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
>>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
array([[[ 0, 1, 2, 3, 4],
[ 0, 6, 7, 8, 9],
[ 0, 0, 12, 13, 14],
[ 0, 0, 0, 18, 19]],
[[20, 21, 22, 23, 24],
[ 0, 26, 27, 28, 29],
[ 0, 0, 32, 33, 34],
[ 0, 0, 0, 38, 39]],
[[40, 41, 42, 43, 44],
[ 0, 46, 47, 48, 49],
[ 0, 0, 52, 53, 54],
[ 0, 0, 0, 58, 59]]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k - 1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
|
Upper triangle of an array.
Return a copy of an array with the elements below the `k`-th diagonal
zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
final two axes.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> import numpy as np
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
>>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
array([[[ 0, 1, 2, 3, 4],
[ 0, 6, 7, 8, 9],
[ 0, 0, 12, 13, 14],
[ 0, 0, 0, 18, 19]],
[[20, 21, 22, 23, 24],
[ 0, 26, 27, 28, 29],
[ 0, 0, 32, 33, 34],
[ 0, 0, 0, 38, 39]],
[[40, 41, 42, 43, 44],
[ 0, 46, 47, 48, 49],
[ 0, 0, 52, 53, 54],
[ 0, 0, 0, 58, 59]]])
|
python
|
numpy/lib/_twodim_base_impl.py
| 511
|
[
"m",
"k"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
argsort
|
def argsort(
    self,
    axis: Axis = 0,
    kind: SortKind = "quicksort",
    order: None = None,
    stable: None = None,
) -> Series:
    """
    Return the integer indices that would sort the Series values.

    Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,
    and places the result in the same locations as the non-NA values.

    Parameters
    ----------
    axis : {0 or 'index'}
        Unused. Parameter needed for compatibility with DataFrame.
    kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
        Choice of sorting algorithm. See :func:`numpy.sort` for more
        information. 'mergesort' and 'stable' are the only stable algorithms.
    order : None
        Has no effect but is accepted for compatibility with numpy.
    stable : None
        Has no effect but is accepted for compatibility with numpy.

    Returns
    -------
    Series[np.intp]
        Positions of values within the sort order with -1 indicating
        nan values.
    """
    # GH#54257: np.argsort(series) passes axis=-1, which we tolerate without
    # validation; every other value must resolve to a valid Series axis.
    if axis != -1:
        self._get_axis_number(axis)
    positions = self.array.argsort(kind=kind)
    out = self._constructor(
        positions,
        index=self.index,
        name=self.name,
        dtype=np.intp,
        copy=False,
    )
    return out.__finalize__(self, method="argsort")
|
Return the integer indices that would sort the Series values.
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable algorithms.
order : None
Has no effect but is accepted for compatibility with numpy.
stable : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series[np.intp]
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort : Returns the indices that would sort this array.
Examples
--------
>>> s = pd.Series([3, 2, 1])
>>> s.argsort()
0 2
1 1
2 0
dtype: int64
|
python
|
pandas/core/series.py
| 3,864
|
[
"self",
"axis",
"kind",
"order",
"stable"
] |
Series
| true
| 2
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
transitionToJoining
|
/**
 * Transition the member to {@link MemberState#JOINING} so that it joins the
 * group on the next call to poll. This is a no-op when the member is in
 * {@link MemberState#FATAL}.
 */
public void transitionToJoining() {
    // A member that hit a fatal error cannot rejoin; leave the state untouched.
    if (state == MemberState.FATAL) {
        log.warn("No action taken to join the group with the updated subscription because " +
            "the member is in FATAL state");
        return;
    }
    // Record that a rejoin was requested while a reconciliation was still running.
    if (reconciliationInProgress) {
        rejoinedWhileReconciliationInProgress = true;
    }
    // Reset the epoch and move to JOINING; the next poll sends the join request.
    resetEpoch();
    transitionTo(MemberState.JOINING);
    log.debug("Member {} will join the group on the next call to poll.", memberId);
    clearPendingAssignmentsAndLocalNamesCache();
}
|
Transition to the {@link MemberState#JOINING} state, indicating that the member will
try to join the group on the next heartbeat request. This is expected to be invoked when
the user calls the subscribe API, or when the member wants to rejoin after getting fenced.
Visible for testing.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 528
|
[] |
void
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
getProxy
|
/**
 * Create a new JDK dynamic proxy for the configured interfaces, using this
 * instance itself as the {@link java.lang.reflect.InvocationHandler}.
 */
@Override
public Object getProxy(@Nullable ClassLoader classLoader) {
    if (logger.isTraceEnabled()) {
        logger.trace("Creating JDK dynamic proxy: " + this.advised.getTargetSource());
    }
    // 'this' serves as the InvocationHandler for the generated proxy instance.
    return Proxy.newProxyInstance(determineClassLoader(classLoader), this.cache.proxiedInterfaces, this);
}
|
Construct a new JdkDynamicAopProxy for the given AOP configuration.
@param config the AOP configuration as AdvisedSupport object
@throws AopConfigException if the config is invalid. We try to throw an informative
exception in this case, rather than let a mysterious failure happen later.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/JdkDynamicAopProxy.java
| 119
|
[
"classLoader"
] |
Object
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
containsKey
|
/**
 * Returns whether the backing map contains the given key, delegating directly
 * to the underlying {@code map}.
 */
@Override
public boolean containsKey(@Nullable Object key) {
    return map.containsKey(key);
}
|
Creates the collection of values for an explicitly provided key. By default, it simply calls
{@link #createCollection()}, which is the correct behavior for most implementations. The {@link
LinkedHashMultimap} class overrides it.
@param key key to associate with values in the collection
@return an empty collection of values
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 180
|
[
"key"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
shouldNotWaitForHeartbeatInterval
|
/**
 * Whether the next heartbeat should be sent right away rather than after the
 * regular heartbeat interval, i.e. while the member is joining, acknowledging
 * an assignment, or leaving the group.
 */
public boolean shouldNotWaitForHeartbeatInterval() {
    boolean inTransientState =
            state == MemberState.JOINING
                    || state == MemberState.ACKNOWLEDGING
                    || state == MemberState.LEAVING;
    return inTransientState;
}
|
@return True if the member should send heartbeat to the coordinator without waiting for
the interval.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 607
|
[] | true
| 3
| 8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_set_wrap_both
|
def _set_wrap_both(padded, axis, width_pair, original_period):
    """
    Pad `axis` of `arr` with wrapped values.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    original_period : int
        Original length of data on `axis` of `arr`.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    # Length of the currently valid (already filled) region on this axis.
    period = padded.shape[axis] - right_pad - left_pad
    # Avoid wrapping with only a subset of the original area by ensuring period
    # can only be a multiple of the original area's length.
    period = period // original_period * original_period
    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated with
    # these variables.
    new_left_pad = 0
    new_right_pad = 0
    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area.
        slice_end = left_pad + period
        slice_start = slice_end - min(period, left_pad)
        right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        right_chunk = padded[right_slice]
        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk
    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area.  Negative indices count from the end of the axis.
        slice_start = -right_pad - period
        slice_end = slice_start + min(period, right_pad)
        left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        left_chunk = padded[left_slice]
        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis)
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk
    return new_left_pad, new_right_pad
|
Pad `axis` of `arr` with wrapped values.
Parameters
----------
padded : ndarray
Input array of arbitrary shape.
axis : int
Axis along which to pad `arr`.
width_pair : (int, int)
Pair of widths that mark the pad area on both sides in the given
dimension.
original_period : int
Original length of data on `axis` of `arr`.
Returns
-------
pad_amt : tuple of ints, length 2
New index positions of padding to do along the `axis`. If these are
both 0, padding is done in this dimension.
|
python
|
numpy/lib/_arraypad_impl.py
| 394
|
[
"padded",
"axis",
"width_pair",
"original_period"
] | false
| 7
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
efficient_conv_bn_eval
|
def efficient_conv_bn_eval(
    bn: nn.modules.batchnorm._BatchNorm, conv: nn.modules.conv._ConvNd, x: torch.Tensor
):
    """
    Implementation based on https://arxiv.org/abs/2305.11624
    "Efficient ConvBN Blocks for Transfer Learning and Beyond"

    Exploits the associative law between convolution and affine transform:
    normalize (weight conv feature) = (normalize weight) conv feature.
    Valid for ConvBN blocks in eval mode (and for training when
    ``bn.training=False``). Reduces memory footprint and compute at the cost
    of slightly reduced numerical stability.

    Args:
        bn (nn.modules.batchnorm._BatchNorm): a BatchNorm module.
        conv (nn.modules.conv._ConvNd): a conv module
        x (torch.Tensor): Input feature map.
    """
    assert bn.running_var is not None
    assert bn.running_mean is not None

    # Substitute neutral values when the conv has no bias or the BN has no
    # affine parameters.
    conv_bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_var)
    gamma = bn.weight if bn.weight is not None else torch.ones_like(bn.running_var)
    beta = bn.bias if bn.bias is not None else torch.zeros_like(bn.running_var)

    # Broadcastable shape [C_out, 1, 1, 1] for Conv2d weights; transposed
    # convs keep C_out at dim 1, so swap the first two entries.
    target_shape = [-1] + [1] * (conv.weight.ndim - 1)
    if isinstance(conv, nn.modules.conv._ConvTransposeNd):
        target_shape[:2] = [target_shape[1], target_shape[0]]

    inv_std = torch.rsqrt(bn.running_var + bn.eps).reshape(target_shape)
    # Per-output-channel scale folding the BN affine weight into the conv.
    scale = gamma.view_as(inv_std) * inv_std

    fused_weight = conv.weight * scale
    fused_bias = beta + scale.flatten() * (conv_bias - bn.running_mean)

    # Run the conv with the folded parameters substituted on the fly.
    return functional_call(conv, {"weight": fused_weight, "bias": fused_bias}, x)
|
Implementation based on https://arxiv.org/abs/2305.11624
"Efficient ConvBN Blocks for Transfer Learning and Beyond"
It leverages the associative law between convolution and affine transform,
i.e., normalize (weight conv feature) = (normalize weight) conv feature.
It works for Eval mode of ConvBN blocks during validation, and can be used
for **training** as well, but only if one sets `bn.training=False`. It
reduces memory footprint and computation cost, at the cost of slightly
reduced numerical stability.
Args:
bn (nn.modules.batchnorm._BatchNorm): a BatchNorm module.
conv (nn.modules.conv._ConvNd): a conv module
x (torch.Tensor): Input feature map.
|
python
|
torch/_inductor/fx_passes/efficient_conv_bn_eval.py
| 17
|
[
"bn",
"conv",
"x"
] | true
| 8
| 6.32
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
_check_set_output_transform_dataframe
|
def _check_set_output_transform_dataframe(
    name,
    transformer_orig,
    *,
    dataframe_lib,
    is_supported_dataframe,
    create_dataframe,
    assert_frame_equal,
    context,
):
    """Check that a transformer can output a DataFrame when requested.

    The DataFrame implementation is specified through the parameters of this function.

    Parameters
    ----------
    name : str
        The name of the transformer.
    transformer_orig : estimator
        The original transformer instance.
    dataframe_lib : str
        The name of the library implementing the DataFrame.
    is_supported_dataframe : callable
        A callable that takes a DataFrame instance as input and returns whether or
        not it is supported by the dataframe library.
        E.g. `lambda X: isinstance(X, pd.DataFrame)`.
    create_dataframe : callable
        A callable taking as parameters `data`, `columns`, and `index` and returns
        a callable. Be aware that `index` can be ignored. For example, polars dataframes
        will ignore the index.
    assert_frame_equal : callable
        A callable taking 2 dataframes to compare if they are equal.
    context : {"local", "global"}
        Whether to use a local context by setting `set_output(...)` on the transformer
        or a global context by using the `with config_context(...)`
    """
    # Check transformer.set_output configures the output of transform="pandas".
    tags = get_tags(transformer_orig)
    if not tags.input_tags.two_d_array or tags.no_validation:
        return
    rng = np.random.RandomState(0)
    transformer = clone(transformer_orig)
    X = rng.uniform(size=(20, 5))
    X = _enforce_estimator_tags_X(transformer_orig, X)
    y = rng.randint(0, 2, size=20)
    y = _enforce_estimator_tags_y(transformer_orig, y)
    set_random_state(transformer)
    feature_names_in = [f"col{i}" for i in range(X.shape[1])]
    index = [f"index{i}" for i in range(X.shape[0])]
    df = create_dataframe(X, columns=feature_names_in, index=index)
    # Baseline: the same transformer with the default (array) output.
    transformer_default = clone(transformer).set_output(transform="default")
    outputs_default = _output_from_fit_transform(transformer_default, name, X, df, y)
    if context == "local":
        transformer_df = clone(transformer).set_output(transform=dataframe_lib)
        context_to_use = nullcontext()
    else:  # global
        transformer_df = clone(transformer)
        context_to_use = config_context(transform_output=dataframe_lib)
    try:
        with context_to_use:
            outputs_df = _output_from_fit_transform(transformer_df, name, X, df, y)
    except ValueError as e:
        # transformer does not support sparse data
        capitalized_lib = dataframe_lib.capitalize()
        error_message = str(e)
        assert (
            f"{capitalized_lib} output does not support sparse data." in error_message
            or "The transformer outputs a scipy sparse matrix." in error_message
        ), e
        return
    # Every output produced with default settings must match the DataFrame one.
    for case in outputs_default:
        _check_generated_dataframe(
            name,
            case,
            index,
            outputs_default[case],
            outputs_df[case],
            is_supported_dataframe,
            create_dataframe,
            assert_frame_equal,
        )
|
Check that a transformer can output a DataFrame when requested.
The DataFrame implementation is specified through the parameters of this function.
Parameters
----------
name : str
The name of the transformer.
transformer_orig : estimator
The original transformer instance.
dataframe_lib : str
The name of the library implementing the DataFrame.
is_supported_dataframe : callable
A callable that takes a DataFrame instance as input and returns whether or
not it is supported by the dataframe library.
        E.g. `lambda X: isinstance(X, pd.DataFrame)`.
create_dataframe : callable
A callable taking as parameters `data`, `columns`, and `index` and returns
a callable. Be aware that `index` can be ignored. For example, polars dataframes
will ignore the index.
assert_frame_equal : callable
A callable taking 2 dataframes to compare if they are equal.
context : {"local", "global"}
Whether to use a local context by setting `set_output(...)` on the transformer
or a global context by using the `with config_context(...)`
|
python
|
sklearn/utils/estimator_checks.py
| 5,133
|
[
"name",
"transformer_orig",
"dataframe_lib",
"is_supported_dataframe",
"create_dataframe",
"assert_frame_equal",
"context"
] | false
| 7
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
getExitCodeFromMappedException
|
/**
 * Determine the exit code for the given exception by consulting the
 * {@link ExitCodeExceptionMapper} beans of the given application context.
 * @param context the application context (may be {@code null} or inactive)
 * @param exception the exception to map to an exit code
 * @return the mapped exit code, or {@code 0} when no usable context is available
 */
private int getExitCodeFromMappedException(@Nullable ConfigurableApplicationContext context, Throwable exception) {
    if (context == null || !context.isActive()) {
        // Without an active context there are no mapper beans to consult.
        return 0;
    }
    ExitCodeGenerators generators = new ExitCodeGenerators();
    Collection<ExitCodeExceptionMapper> beans = context.getBeansOfType(ExitCodeExceptionMapper.class).values();
    generators.addAll(exception, beans);
    return generators.getExitCode();
}
|
Register that the given exception has been logged. By default, if the running in
the main thread, this method will suppress additional printing of the stacktrace.
@param exception the exception that was logged
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 902
|
[
"context",
"exception"
] | true
| 3
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
inverse
|
/**
 * Returns the inverse view of this bimap, which maps each of this bimap's
 * values to its associated key. The two bimaps are backed by the same data;
 * any changes to one will appear in the other.
 *
 * <p><b>Note:</b> There is no guaranteed correspondence between the iteration
 * order of a bimap and that of its inverse.
 *
 * @return the inverse view of this bimap
 */
BiMap<V, K> inverse();
|
Returns the inverse view of this bimap, which maps each of this bimap's values to its
associated key. The two bimaps are backed by the same data; any changes to one will appear in
the other.
<p><b>Note:</b> There is no guaranteed correspondence between the iteration order of a bimap
and that of its inverse.
@return the inverse view of this bimap
|
java
|
android/guava/src/com/google/common/collect/BiMap.java
| 116
|
[] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
getMainPart
|
/**
 * Return the body part that holds the main content of the message: the last
 * part that carries no file name. If no such part exists yet, a fresh
 * {@link MimeBodyPart} is created, registered with the multipart and returned.
 */
private MimeBodyPart getMainPart() throws MessagingException {
    MimeMultipart multipart = getMimeMultipart();
    MimeBodyPart mainPart = null;
    // Parts with a file name are attachments; the main content is the last
    // part without one.
    int partCount = multipart.getCount();
    for (int index = 0; index < partCount; index++) {
        BodyPart candidate = multipart.getBodyPart(index);
        if (candidate.getFileName() == null) {
            mainPart = (MimeBodyPart) candidate;
        }
    }
    if (mainPart == null) {
        // No main content part yet: create one and attach it.
        mainPart = new MimeBodyPart();
        multipart.addBodyPart(mainPart);
    }
    return mainPart;
}
|
Set the given plain text and HTML text as alternatives, offering
both options to the email client. Requires multipart mode.
<p><b>NOTE:</b> Invoke {@link #addInline} <i>after</i> {@code setText};
else, mail readers might not be able to resolve inline references correctly.
@param plainText the plain text for the message
@param htmlText the HTML text for the message
@throws MessagingException in case of errors
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 850
|
[] |
MimeBodyPart
| true
| 4
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
addAotGeneratedEnvironmentPostProcessorIfNecessary
|
/**
 * When running from AOT-generated artifacts, register the generated
 * {@code EnvironmentPostProcessor} (named '&lt;MainClass&gt;__&lt;feature&gt;')
 * at the head of the given post-processor list, if that class is present.
 */
private void addAotGeneratedEnvironmentPostProcessorIfNecessary(List<EnvironmentPostProcessor> postProcessors,
        SpringApplication springApplication) {
    // Only applies when the application runs from AOT-generated artifacts.
    if (!AotDetector.useGeneratedArtifacts()) {
        return;
    }
    ClassLoader classLoader = (springApplication.getResourceLoader() != null)
            ? springApplication.getResourceLoader().getClassLoader() : null;
    Class<?> mainApplicationClass = springApplication.getMainApplicationClass();
    Assert.state(mainApplicationClass != null, "mainApplicationClass not found");
    String generatedClassName = mainApplicationClass.getName() + "__" + AOT_FEATURE_NAME;
    if (ClassUtils.isPresent(generatedClassName, classLoader)) {
        // Insert first so it runs before the regular post-processors.
        postProcessors.add(0, instantiateEnvironmentPostProcessor(generatedClassName, classLoader));
    }
}
|
Factory method that creates an {@link EnvironmentPostProcessorApplicationListener}
with a specific {@link EnvironmentPostProcessorsFactory}.
@param postProcessorsFactory the environment post processor factory
@return an {@link EnvironmentPostProcessorApplicationListener} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorApplicationListener.java
| 160
|
[
"postProcessors",
"springApplication"
] |
void
| true
| 4
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
baseIsMap
|
/**
 * The base implementation of `_.isMap` without Node.js optimizations.
 * A value qualifies when it is object-like and its tag equals `mapTag`.
 *
 * @private
 * @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is a map, else `false`.
 */
function baseIsMap(value) {
  return isObjectLike(value) && getTag(value) == mapTag;
}
|
The base implementation of `_.isMap` without Node.js optimizations.
@private
@param {*} value The value to check.
@returns {boolean} Returns `true` if `value` is a map, else `false`.
|
javascript
|
lodash.js
| 3,385
|
[
"value"
] | false
| 2
| 6
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_validate_tz_from_dtype
|
def _validate_tz_from_dtype(
    dtype, tz: tzinfo | None, explicit_tz_none: bool = False
) -> tzinfo | None:
    """
    If the given dtype is a DatetimeTZDtype, extract the implied
    tzinfo object from it and check that it does not conflict with the given
    tz.

    Parameters
    ----------
    dtype : dtype, str
    tz : None, tzinfo
    explicit_tz_none : bool, default False
        Whether tz=None was passed explicitly, as opposed to lib.no_default.

    Returns
    -------
    tz : consensus tzinfo

    Raises
    ------
    ValueError : on tzinfo mismatch
    """
    if dtype is not None:
        if isinstance(dtype, str):
            try:
                dtype = DatetimeTZDtype.construct_from_string(dtype)
            except TypeError:
                # Things like `datetime64[ns]`, which is OK for the
                # constructors, but also nonsense, which should be validated
                # but not by us. We *do* allow non-existent tz errors to
                # go through
                pass
        # dtz is the timezone implied by the dtype, if any.
        dtz = getattr(dtype, "tz", None)
        if dtz is not None:
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a dtype with a tz")
            if explicit_tz_none:
                raise ValueError("Cannot pass both a timezone-aware dtype and tz=None")
            # The dtype's timezone becomes the consensus timezone.
            tz = dtz
        if tz is not None and lib.is_np_dtype(dtype, "M"):
            # We also need to check for the case where the user passed a
            # tz-naive dtype (i.e. datetime64[ns])
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError(
                    "cannot supply both a tz and a "
                    "timezone-naive dtype (i.e. datetime64[ns])"
                )
    return tz
|
If the given dtype is a DatetimeTZDtype, extract the implied
tzinfo object from it and check that it does not conflict with the given
tz.
Parameters
----------
dtype : dtype, str
tz : None, tzinfo
explicit_tz_none : bool, default False
Whether tz=None was passed explicitly, as opposed to lib.no_default.
Returns
-------
tz : consensus tzinfo
Raises
------
ValueError : on tzinfo mismatch
|
python
|
pandas/core/arrays/datetimes.py
| 2,794
|
[
"dtype",
"tz",
"explicit_tz_none"
] |
tzinfo | None
| true
| 11
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
waiter
|
def waiter(
    get_state_callable: Callable,
    get_state_args: dict,
    parse_response: list,
    desired_state: set,
    failure_states: set,
    object_type: str,
    action: str,
    countdown: int | float | None = 25 * 60,
    check_interval_seconds: int = 60,
) -> None:
    """
    Call get_state_callable until it reaches the desired_state or the failure_states.

    PLEASE NOTE: While not yet deprecated, we are moving away from this method
    and encourage using the custom boto waiters as explained in
    https://github.com/apache/airflow/tree/main/airflow/providers/amazon/aws/waiters

    :param get_state_callable: A callable to run until it returns True
    :param get_state_args: Arguments to pass to get_state_callable
    :param parse_response: Dictionary keys to extract state from response of get_state_callable
    :param desired_state: Wait until the getter returns this value
    :param failure_states: A set of states which indicate failure and should throw an
        exception if any are reached before the desired_state
    :param object_type: Used for the reporting string. What are you waiting for? (application, job, etc.)
    :param action: Used for the reporting string. What action are you waiting for? (created, deleted, etc.)
    :param countdown: Number of seconds the waiter should wait for the desired state before timing out.
        Defaults to 25 * 60 seconds. None = infinite.
    :param check_interval_seconds: Number of seconds waiter should wait before attempting
        to retry get_state_callable. Defaults to 60 seconds.
    """
    while True:
        state = get_state(get_state_callable(**get_state_args), parse_response)
        if state in desired_state:
            break
        if state in failure_states:
            raise AirflowException(f"{object_type.title()} reached failure state {state}.")
        # countdown=None means "wait forever"; model it as infinity so the
        # subtraction below never exhausts it.
        if countdown is None:
            countdown = float("inf")
        if countdown > check_interval_seconds:
            countdown -= check_interval_seconds
            log.info("Waiting for %s to be %s.", object_type.lower(), action.lower())
            time.sleep(check_interval_seconds)
        else:
            # Out of budget: report and abort rather than polling again.
            message = f"{object_type.title()} still not {action.lower()} after the allocated time limit."
            log.error(message)
            raise RuntimeError(message)
|
Call get_state_callable until it reaches the desired_state or the failure_states.
PLEASE NOTE: While not yet deprecated, we are moving away from this method
and encourage using the custom boto waiters as explained in
https://github.com/apache/airflow/tree/main/airflow/providers/amazon/aws/waiters
:param get_state_callable: A callable to run until it returns True
:param get_state_args: Arguments to pass to get_state_callable
:param parse_response: Dictionary keys to extract state from response of get_state_callable
:param desired_state: Wait until the getter returns this value
:param failure_states: A set of states which indicate failure and should throw an
exception if any are reached before the desired_state
:param object_type: Used for the reporting string. What are you waiting for? (application, job, etc.)
:param action: Used for the reporting string. What action are you waiting for? (created, deleted, etc.)
:param countdown: Number of seconds the waiter should wait for the desired state before timing out.
Defaults to 25 * 60 seconds. None = infinite.
:param check_interval_seconds: Number of seconds waiter should wait before attempting
to retry get_state_callable. Defaults to 60 seconds.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/utils/waiter.py
| 30
|
[
"get_state_callable",
"get_state_args",
"parse_response",
"desired_state",
"failure_states",
"object_type",
"action",
"countdown",
"check_interval_seconds"
] |
None
| true
| 7
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
addBean
|
/**
 * Register the given bean with the registry, using the adapter-based registrar
 * when the bean's generics cannot be resolved against the target type and an
 * adapter registrar is available; otherwise use the standard registrar.
 */
private static <B, T> void addBean(FormatterRegistry registry, B bean, @Nullable ResolvableType beanType,
        Class<T> type, Consumer<B> standardRegistrar, @Nullable Runnable beanAdapterRegistrar) {
    boolean useAdapter = beanType != null && beanAdapterRegistrar != null
            && ResolvableType.forInstance(bean).as(type).hasUnresolvableGenerics();
    if (useAdapter) {
        beanAdapterRegistrar.run();
    }
    else {
        standardRegistrar.accept(bean);
    }
}
|
Add {@link Printer}, {@link Parser}, {@link Formatter}, {@link Converter},
{@link ConverterFactory}, {@link GenericConverter}, and beans from the specified
bean factory.
@param registry the service to register beans with
@param beanFactory the bean factory to get the beans from
@param qualifier the qualifier required on the beans or {@code null}
@return the beans that were added
@since 3.5.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/convert/ApplicationConversionService.java
| 389
|
[
"registry",
"bean",
"beanType",
"type",
"standardRegistrar",
"beanAdapterRegistrar"
] |
void
| true
| 4
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
autowireByName
|
/**
 * Fill in any missing property values with references to other beans in this
 * factory if autowire is set to "byName".
 * @param beanName the name of the bean we're wiring up (used for logging and
 * dependency registration)
 * @param mbd bean definition to update through autowiring
 * @param bw the BeanWrapper from which we can obtain information about the bean
 * @param pvs the PropertyValues to register wired objects with
 */
protected void autowireByName(
        String beanName, AbstractBeanDefinition mbd, BeanWrapper bw, MutablePropertyValues pvs) {

    for (String propertyName : unsatisfiedNonSimpleProperties(mbd, bw)) {
        if (!containsBean(propertyName)) {
            if (logger.isTraceEnabled()) {
                logger.trace("Not autowiring property '" + propertyName + "' of bean '" + beanName +
                        "' by name: no matching bean found");
            }
            continue;
        }
        Object bean = getBean(propertyName);
        pvs.add(propertyName, bean);
        registerDependentBean(propertyName, beanName);
        if (logger.isTraceEnabled()) {
            logger.trace("Added autowiring by name from bean name '" + beanName +
                    "' via property '" + propertyName + "' to bean named '" + propertyName + "'");
        }
    }
}
|
Fill in any missing property values with references to
other beans in this factory if autowire is set to "byName".
@param beanName the name of the bean we're wiring up.
Useful for debugging messages; not used functionally.
@param mbd bean definition to update through autowiring
@param bw the BeanWrapper from which we can obtain information about the bean
@param pvs the PropertyValues to register wired objects with
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 1,474
|
[
"beanName",
"mbd",
"bw",
"pvs"
] |
void
| true
| 4
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
countOrNull
|
/**
 * Returns the record count of this batch, delegating to {@link #count()}.
 */
@Override
public Integer countOrNull() {
    return count();
}
|
Returns the record count of this batch, delegating to {@code count()}.
@return The record count
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 230
|
[] |
Integer
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
_check_engine
|
def _check_engine(engine: str | None) -> str:
    """
    Validate the requested computation engine and resolve the default.

    Parameters
    ----------
    engine : str or None
        Engine name to validate; ``None`` selects the default engine.

    Returns
    -------
    str
        The validated engine name.

    Raises
    ------
    KeyError
        If an invalid engine is passed.
    ImportError
        If numexpr was requested but doesn't exist.
    """
    from pandas.core.computation.check import NUMEXPR_INSTALLED
    from pandas.core.computation.expressions import USE_NUMEXPR

    # Resolve the default engine: numexpr when available/enabled, else python.
    if engine is None:
        engine = "numexpr" if USE_NUMEXPR else "python"

    if engine not in ENGINES:
        valid_engines = list(ENGINES)
        raise KeyError(
            f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
        )

    # TODO: validate this in a more general way (thinking of future engines
    # that won't necessarily be import-able)
    # Could potentially be done on engine instantiation
    if engine == "numexpr" and not NUMEXPR_INSTALLED:
        raise ImportError(
            "'numexpr' is not installed or an unsupported version. Cannot use "
            "engine='numexpr' for query/eval if 'numexpr' is not installed"
        )

    return engine
|
Make sure a valid engine is passed.
Parameters
----------
engine : str
String to validate.
Raises
------
KeyError
* If an invalid engine is passed.
ImportError
* If numexpr was requested but doesn't exist.
Returns
-------
str
Engine name.
|
python
|
pandas/core/computation/eval.py
| 38
|
[
"engine"
] |
str
| true
| 6
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
addInline
|
/**
 * Add an inline element to the MimeMessage, taking the content from the given
 * {@code jakarta.activation.DataSource} and assigning the provided file name
 * (if any) to the element.
 * @param contentId the content ID; rendered as "Content-ID" header surrounded
 * by angle brackets
 * @param inlineFilename the file name for the inline part, or {@code null} for none
 * @param dataSource the {@code jakarta.activation.DataSource} providing the content
 * @throws MessagingException in case of errors, including a failure to encode
 * the given file name
 */
public void addInline(String contentId, @Nullable String inlineFilename, DataSource dataSource)
        throws MessagingException {
    Assert.notNull(contentId, "Content ID must not be null");
    Assert.notNull(dataSource, "DataSource must not be null");
    MimeBodyPart mimeBodyPart = new MimeBodyPart();
    mimeBodyPart.setDisposition(Part.INLINE);
    // Content-ID values are conventionally wrapped in angle brackets.
    mimeBodyPart.setContentID("<" + contentId + ">");
    mimeBodyPart.setDataHandler(new DataHandler(dataSource));
    if (inlineFilename != null) {
        try {
            // Optionally MIME-encode the file name for non-ASCII characters.
            mimeBodyPart.setFileName(isEncodeFilenames() ?
                    MimeUtility.encodeText(inlineFilename) : inlineFilename);
        }
        catch (UnsupportedEncodingException ex) {
            throw new MessagingException("Failed to encode inline filename", ex);
        }
    }
    getMimeMultipart().addBodyPart(mimeBodyPart);
}
|
Add an inline element to the MimeMessage, taking the content from a
{@code jakarta.activation.DataSource} and assigning the provided
{@code inlineFileName} to the element.
<p>Note that the InputStream returned by the DataSource implementation
needs to be a <i>fresh one on each call</i>, as JavaMail will invoke
{@code getInputStream()} multiple times.
<p><b>NOTE:</b> Invoke {@code addInline} <i>after</i> {@link #setText};
else, mail readers might not be able to resolve inline references correctly.
@param contentId the content ID to use. Will end up as "Content-ID" header
in the body part, surrounded by angle brackets: for example, "myId" → "<myId>".
Can be referenced in HTML source via src="cid:myId" expressions.
@param inlineFilename the fileName to use for the inline element's part
@param dataSource the {@code jakarta.activation.DataSource} to take
the content from, determining the InputStream and the content type
@throws MessagingException in case of errors
@since 6.2
@see #addInline(String, java.io.File)
@see #addInline(String, org.springframework.core.io.Resource)
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 927
|
[
"contentId",
"inlineFilename",
"dataSource"
] |
void
| true
| 4
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
lazyValue
|
/**
 * Extracts the unwrapped value from its lazy wrapper.
 *
 * @private
 * @name value
 * @memberOf LazyWrapper
 * @returns {*} Returns the unwrapped value.
 */
function lazyValue() {
  var array = this.__wrapped__.value(),
      dir = this.__dir__,
      isArr = isArray(array),
      isRight = dir < 0,
      arrLength = isArr ? array.length : 0,
      view = getView(0, arrLength, this.__views__),
      start = view.start,
      end = view.end,
      length = end - start,
      index = isRight ? end : (start - 1),
      iteratees = this.__iteratees__,
      iterLength = iteratees.length,
      resIndex = 0,
      takeCount = nativeMin(length, this.__takeCount__);

  // Fall back to eager evaluation when the source isn't an array or no lazy
  // shortcut (view narrowing / take limit) applies.
  if (!isArr || (!isRight && arrLength == length && takeCount == length)) {
    return baseWrapperValue(array, this.__actions__);
  }
  var result = [];

  outer:
  while (length-- && resIndex < takeCount) {
    index += dir;

    var iterIndex = -1,
        value = array[index];

    // Run each queued iteratee over the element; map replaces the value,
    // a falsey filter result skips the element, anything else stops the scan.
    while (++iterIndex < iterLength) {
      var data = iteratees[iterIndex],
          iteratee = data.iteratee,
          type = data.type,
          computed = iteratee(value);

      if (type == LAZY_MAP_FLAG) {
        value = computed;
      } else if (!computed) {
        if (type == LAZY_FILTER_FLAG) {
          continue outer;
        } else {
          break outer;
        }
      }
    }
    result[resIndex++] = value;
  }
  return result;
}
|
Extracts the unwrapped value from its lazy wrapper.
@private
@name value
@memberOf LazyWrapper
@returns {*} Returns the unwrapped value.
|
javascript
|
lodash.js
| 1,884
|
[] | false
| 15
| 6.64
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
parseModifiersForConstructorType
|
function parseModifiersForConstructorType(): NodeArray<Modifier> | undefined {
// Constructor types accept at most a single leading `abstract` modifier.
if (token() !== SyntaxKind.AbstractKeyword) {
return undefined;
}
const pos = getNodePos();
nextToken();
const abstractModifier = finishNode(factoryCreateToken(SyntaxKind.AbstractKeyword), pos);
return createNodeArray<Modifier>([abstractModifier], pos);
}
|
Parses the optional modifier list of a constructor type. Currently only a
single leading `abstract` modifier is recognized.
@returns the parsed modifiers, or undefined when no modifier is present.
|
typescript
|
src/compiler/parser.ts
| 4,497
|
[] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
fchmodSync
|
function fchmodSync(fd, mode) {
// Reject the call outright when the Permission Model is active.
if (permission.isEnabled()) {
throw new ERR_ACCESS_DENIED('fchmod API is disabled when Permission Model is enabled.');
}
// Normalize/validate the mode before handing off to the native binding.
const parsedMode = parseFileMode(mode, 'mode');
binding.fchmod(fd, parsedMode);
}
|
Synchronously sets the permissions on the file.
@param {number} fd
@param {string | number} mode
@returns {void}
|
javascript
|
lib/fs.js
| 1,949
|
[
"fd",
"mode"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
initiateJoinGroup
|
/**
 * Kicks off a JoinGroup request, or returns the already in-flight one.
 * Synchronized so concurrent callers share a single pending join future.
 */
private synchronized RequestFuture<ByteBuffer> initiateJoinGroup() {
// we store the join future in case we are woken up by the user after beginning the
// rebalance in the call to poll below. This ensures that we do not mistakenly attempt
// to rejoin before the pending rebalance has completed.
if (joinFuture == null) {
state = MemberState.PREPARING_REBALANCE;
// a rebalance can be triggered consecutively if the previous one failed,
// in this case we would not update the start time.
if (lastRebalanceStartMs == -1L)
lastRebalanceStartMs = time.milliseconds();
joinFuture = sendJoinGroupRequest();
joinFuture.addListener(new RequestFutureListener<>() {
@Override
public void onSuccess(ByteBuffer value) {
// do nothing since all the handler logic are in SyncGroupResponseHandler already
}
@Override
public void onFailure(RuntimeException e) {
// we handle failures below after the request finishes. if the join completes
// after having been woken up, the exception is ignored and we will rejoin;
// this can be triggered when either join or sync request failed
synchronized (AbstractCoordinator.this) {
// record the failed attempt for the rebalance metrics
sensors.failedRebalanceSensor.record();
}
}
});
}
return joinFuture;
}
|
Joins the group without starting the heartbeat thread.
If this function returns true, the state must always be in STABLE and heartbeat enabled.
If this function returns false, the state can be in one of the following:
* UNJOINED: got error response but times out before being able to re-join, heartbeat disabled
* PREPARING_REBALANCE: not yet received join-group response before timeout, heartbeat disabled
* COMPLETING_REBALANCE: not yet received sync-group response before timeout, heartbeat enabled
Visible for testing.
@param timer Timer bounding how long this method can block
@throws KafkaException if the callback throws exception
@return true iff the operation succeeded
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 566
|
[] | true
| 3
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
resolvePattern
|
/**
 * Resolves a location pattern into concrete config resources.
 * Missing resources are logged and skipped when the reference allows it.
 */
private List<StandardConfigDataResource> resolvePattern(StandardConfigDataReference reference) {
List<StandardConfigDataResource> resolved = new ArrayList<>();
for (Resource resource : this.resourceLoader.getResources(reference.getResourceLocation(), ResourceType.FILE)) {
boolean skip = !resource.exists() && reference.isSkippable();
if (skip) {
logSkippingResource(reference);
continue;
}
resolved.add(createConfigResourceLocation(reference, resource));
}
return resolved;
}
|
Create a new {@link StandardConfigDataLocationResolver} instance.
@param logFactory the factory for loggers to use
@param binder a binder backed by the initial {@link Environment}
@param resourceLoader a {@link ResourceLoader} used to load resources
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataLocationResolver.java
| 327
|
[
"reference"
] | true
| 3
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
sendOffsetsForLeaderEpochRequestsAndValidatePositions
|
/**
 * Builds OffsetsForLeaderEpoch requests (grouped per leader node) to validate
 * the given fetch positions, registering async completion handlers, and queues
 * the resulting unsent requests.
 *
 * @param partitionsToValidate topic-partition fetch positions to validate
 */
private void sendOffsetsForLeaderEpochRequestsAndValidatePositions(
Map<TopicPartition, SubscriptionState.FetchPosition> partitionsToValidate) {
final Map<Node, Map<TopicPartition, SubscriptionState.FetchPosition>> regrouped =
regroupFetchPositionsByLeader(partitionsToValidate);
long nextResetTimeMs = time.milliseconds() + requestTimeoutMs;
final List<NetworkClientDelegate.UnsentRequest> unsentRequests = new ArrayList<>();
regrouped.forEach((node, fetchPositions) -> {
// Unknown leader: trigger a metadata refresh and try again later.
if (node.isEmpty()) {
metadata.requestUpdate(true);
return;
}
// No known API versions for this node yet: initiate a connection instead.
NodeApiVersions nodeApiVersions = apiVersions.get(node.idString());
if (nodeApiVersions == null) {
networkClientDelegate.tryConnect(node);
return;
}
// Broker too old for epoch validation: mark the positions validated as-is.
if (!hasUsableOffsetForLeaderEpochVersion(nodeApiVersions)) {
log.debug("Skipping validation of fetch offsets for partitions {} since the broker does not " +
"support the required protocol version (introduced in Kafka 2.3)",
fetchPositions.keySet());
for (TopicPartition partition : fetchPositions.keySet()) {
subscriptionState.completeValidation(partition);
}
return;
}
subscriptionState.setNextAllowedRetry(fetchPositions.keySet(), nextResetTimeMs);
CompletableFuture<OffsetsForLeaderEpochUtils.OffsetForEpochResult> partialResult =
buildOffsetsForLeaderEpochRequestToNode(node, fetchPositions, unsentRequests);
partialResult.whenComplete((offsetsResult, error) -> {
if (error == null) {
offsetFetcherUtils.onSuccessfulResponseForValidatingPositions(fetchPositions,
offsetsResult);
} else {
// Normalize any checked throwable into a RuntimeException, preserving the cause.
RuntimeException e;
if (error instanceof RuntimeException) {
e = (RuntimeException) error;
} else {
e = new RuntimeException("Unexpected failure in OffsetsForLeaderEpoch " +
"request for validating positions", error);
}
offsetFetcherUtils.onFailedResponseForValidatingPositions(fetchPositions, e);
}
});
});
requestsToSend.addAll(unsentRequests);
}
|
For each partition that needs validation, make an asynchronous request to get the end-offsets
for the partition with the epoch less than or equal to the epoch the partition last saw.
<p/>
Requests are grouped by Node for efficiency.
This also adds the request to the list of unsentRequests.
@param partitionsToValidate a map of topic-partition positions to validate
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 722
|
[
"partitionsToValidate"
] |
void
| true
| 6
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
/**
 * Returns a human-readable representation containing the offset, the leader
 * epoch (or null when unknown) and the metadata string.
 */
@Override
public String toString() {
StringBuilder text = new StringBuilder("OffsetAndMetadata{");
text.append("offset=").append(offset);
text.append(", leaderEpoch=").append(leaderEpoch().orElse(null));
text.append(", metadata='").append(metadata).append('\'');
return text.append('}').toString();
}
|
Returns a string representation of this {@code OffsetAndMetadata}, including
the offset, the leader epoch (rendered as {@code null} when unknown) and the
metadata string.
@return the string representation
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java
| 119
|
[] |
String
| true
| 1
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
appendAll
|
/**
 * Appends every remaining item of the iterator, with no separators.
 * A null iterator is treated as "nothing to append".
 *
 * @param it the iterator to drain, may be null
 * @return {@code this} instance
 */
public StrBuilder appendAll(final Iterator<?> it) {
if (it == null) {
return this;
}
while (it.hasNext()) {
append(it.next());
}
return this;
}
|
Appends each item in an iterator to the builder without any separators.
Appending a null iterator will have no effect.
Each object is appended using {@link #append(Object)}.
@param it the iterator to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 804
|
[
"it"
] |
StrBuilder
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
memory_usage
|
def memory_usage(self, index: bool = True, deep: bool = False) -> int:
    """
    Return the memory usage of the Series in bytes.

    Parameters
    ----------
    index : bool, default True
        Include the memory usage of the Series index.
    deep : bool, default False
        If True, introspect ``object`` dtypes for system-level memory
        consumption and include it in the returned value.

    Returns
    -------
    int
        Bytes of memory consumed.

    See Also
    --------
    numpy.ndarray.nbytes : Total bytes consumed by the elements of the array.
    DataFrame.memory_usage : Bytes consumed by a DataFrame.
    """
    values_bytes = self._memory_usage(deep=deep)
    index_bytes = self.index.memory_usage(deep=deep) if index else 0
    return values_bytes + index_bytes
|
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
152
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
144
>>> s.memory_usage(deep=True)
244
|
python
|
pandas/core/series.py
| 5,829
|
[
"self",
"index",
"deep"
] |
int
| true
| 2
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
buildDefaultToString
|
// Builds the default string form of the name: elements joined by '.',
// indexed elements rendered as "[...]" with their original text, and
// non-indexed elements rendered in dashed form.
private String buildDefaultToString() {
// Fast path: reuse the source string when it already matches the target form.
if (this.elements.canShortcutWithSource(ElementType.UNIFORM, ElementType.DASHED)) {
return this.elements.getSource().toString();
}
int elements = getNumberOfElements();
StringBuilder result = new StringBuilder(elements * 8);
for (int i = 0; i < elements; i++) {
boolean indexed = isIndexed(i);
// '.' separates non-indexed elements; indexed ones attach via brackets.
if (!result.isEmpty() && !indexed) {
result.append('.');
}
if (indexed) {
result.append('[');
result.append(getElement(i, Form.ORIGINAL));
result.append(']');
}
else {
result.append(getElement(i, Form.DASHED));
}
}
return result.toString();
}
|
Builds the default string representation of this configuration property name:
elements are joined with {@code '.'}, indexed elements are rendered in
brackets using their original form, and non-indexed elements use the dashed
form. Shortcuts to the source string when it already matches that form.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 573
|
[] |
String
| true
| 6
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
computeToString
|
// Renders "type/subtype" followed by "; key=value" parameters in the
// RFC 2045 media-type format.
private String computeToString() {
StringBuilder builder = new StringBuilder().append(type).append('/').append(subtype);
if (!parameters.isEmpty()) {
builder.append("; ");
// Quote/escape any parameter value that is empty or not a plain token.
Multimap<String, String> quotedParameters =
Multimaps.transformValues(
parameters,
(String value) ->
(TOKEN_MATCHER.matchesAllOf(value) && !value.isEmpty())
? value
: escapeAndQuote(value));
PARAMETER_JOINER.appendTo(builder, quotedParameters.entries());
}
return builder.toString();
}
|
Returns the string representation of this media type in the format described in <a
href="http://www.ietf.org/rfc/rfc2045.txt">RFC 2045</a>.
|
java
|
android/guava/src/com/google/common/net/MediaType.java
| 1,238
|
[] |
String
| true
| 4
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
_find_manylinux_interpreters
|
def _find_manylinux_interpreters() -> list[str]:
    """Find Python interpreters in manylinux format (/opt/python/).

    Scans /opt/python/*/bin/python3, skips PyPy builds, and keeps only
    interpreters whose reported major.minor version is in the supported set.

    Returns:
        Absolute paths (as strings) of the matching interpreters; empty if
        /opt/python does not exist.
    """
    supported_versions = get_supported_python_versions()
    interpreters = []
    python_root = Path("/opt/python")
    if not python_root.exists():
        logger.warning("Path /opt/python does not exist, no interpreters found")
        return []
    # Find all python3 binaries in /opt/python/
    python_binaries = list(python_root.glob("*/bin/python3"))
    for python_path in python_binaries:
        try:
            # Check if it's PyPy (skip it)
            version_output = run_cmd(
                [str(python_path), "--version"], capture_output=True
            )
            version_string = version_output.stdout.decode("utf-8").strip()
            if "PyPy" in version_string:
                logger.debug("Skipping PyPy interpreter: %s", python_path)
                continue
            # Extract Python version (e.g., "Python 3.9.1" -> "3.9")
            match = re.search(r"Python (\d+\.\d+)", version_string)
            if not match:
                logger.debug("Could not parse version from: %s", version_string)
                continue
            python_version = match.group(1)
            # Check if this version is supported
            if python_version in supported_versions:
                interpreters.append(str(python_path))
                logger.debug(
                    "Found supported Python %s at %s", python_version, python_path
                )
            else:
                logger.debug(
                    "Python %s not in supported versions: %s",
                    python_version,
                    supported_versions,
                )
        except subprocess.CalledProcessError as e:
            # Binary exists but failed to report a version; skip it quietly.
            logger.debug("Failed to get version for %s: %s", python_path, e)  # noqa:G200
            continue
    return interpreters
|
Find Python interpreters in manylinux format (/opt/python/).
|
python
|
tools/packaging/build_wheel.py
| 70
|
[] |
list[str]
| true
| 7
| 6.56
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
equals
|
/**
 * Two version ranges are equal when their min/max values and their
 * min/max key labels all match.
 */
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
final BaseVersionRange that = (BaseVersionRange) other;
// Compare the cheap primitive bounds first, then the labels.
if (this.minValue != that.minValue || this.maxValue != that.maxValue) {
return false;
}
return Objects.equals(this.minKeyLabel, that.minKeyLabel)
&& Objects.equals(this.maxKeyLabel, that.maxKeyLabel);
}
|
Raises an exception unless the following condition is met:
minValue >= 0 and maxValue >= 0 and maxValue >= minValue.
@param minKeyLabel Label for the min version key, that's used only to convert to/from a map.
@param minValue The minimum version value.
@param maxKeyLabel Label for the max version key, that's used only to convert to/from a map.
@param maxValue The maximum version value.
@throws IllegalArgumentException If any of the following conditions are true:
- (minValue < 0) OR (maxValue < 0) OR (maxValue < minValue).
- minKeyLabel is empty, OR, minKeyLabel is empty.
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/BaseVersionRange.java
| 109
|
[
"other"
] | true
| 7
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
parseLiteralLikeNode
|
// Parses the current token into the matching literal-like AST node
// (template literal, numeric, string, or other literal kind), consuming it.
function parseLiteralLikeNode(kind: SyntaxKind): LiteralLikeNode {
const pos = getNodePos();
const node = isTemplateLiteralKind(kind) ? factory.createTemplateLiteralLikeNode(kind, scanner.getTokenValue(), getTemplateLiteralRawText(kind), scanner.getTokenFlags() & TokenFlags.TemplateLiteralLikeFlags) :
// Note that theoretically the following condition would hold true literals like 009,
// which is not octal. But because of how the scanner separates the tokens, we would
// never get a token like this. Instead, we would get 00 and 9 as two separate tokens.
// We also do not need to check for negatives because any prefix operator would be part of a
// parent unary expression.
kind === SyntaxKind.NumericLiteral ? factoryCreateNumericLiteral(scanner.getTokenValue(), scanner.getNumericLiteralFlags()) :
kind === SyntaxKind.StringLiteral ? factoryCreateStringLiteral(scanner.getTokenValue(), /*isSingleQuote*/ undefined, scanner.hasExtendedUnicodeEscape()) :
isLiteralKind(kind) ? factoryCreateLiteralLikeNode(kind, scanner.getTokenValue()) :
Debug.fail();
// Propagate scanner state (escapes / unterminated literal) onto the node.
if (scanner.hasExtendedUnicodeEscape()) {
node.hasExtendedUnicodeEscape = true;
}
if (scanner.isUnterminated()) {
node.isUnterminated = true;
}
nextToken();
return finishNode(node, pos);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,760
|
[
"kind"
] | true
| 7
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
update
|
// Records the raw observation into the sample's underlying histogram;
// config and timeMs are unused here.
@Override
protected void update(Sample sample, MetricConfig config, double value, long timeMs) {
HistogramSample hist = (HistogramSample) sample;
hist.histogram.record(value);
}
|
Return the computed frequency describing the number of occurrences of the values in the bucket for the given
center point, relative to the total number of occurrences in the samples.
@param config the metric configuration
@param now the current time in milliseconds
@param centerValue the value corresponding to the center point of the bucket
@return the frequency of the values in the bucket relative to the total number of samples
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java
| 166
|
[
"sample",
"config",
"value",
"timeMs"
] |
void
| true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
getTsconfigPath
|
// Resolves a tsconfig path value (relative, absolute, or module-style) against
// a base directory. Returns undefined only when node_modules resolution fails.
async function getTsconfigPath(baseDirUri: vscode.Uri, pathValue: string, linkType: TsConfigLinkType): Promise<vscode.Uri | undefined> {
// Resolve a candidate absolute path, appending '/tsconfig.json' (for project
// references) or '.json' when the path is not already a .json file on disk.
async function resolve(absolutePath: vscode.Uri): Promise<vscode.Uri> {
if (absolutePath.path.endsWith('.json') || await exists(absolutePath)) {
return absolutePath;
}
return absolutePath.with({
path: `${absolutePath.path}${linkType === TsConfigLinkType.References ? '/tsconfig.json' : '.json'}`
});
}
const isRelativePath = ['./', '../'].some(str => pathValue.startsWith(str));
if (isRelativePath) {
return resolve(vscode.Uri.joinPath(baseDirUri, pathValue));
}
if (pathValue.startsWith('/') || looksLikeAbsoluteWindowsPath(pathValue)) {
return resolve(vscode.Uri.file(pathValue));
}
// Otherwise resolve like a module
return resolveNodeModulesPath(baseDirUri, [
pathValue,
...pathValue.endsWith('.json') ? [] : [
`${pathValue}.json`,
`${pathValue}/tsconfig.json`,
]
]);
}
|
@returns Returns undefined in case of lack of result while trying to resolve from node_modules
|
typescript
|
extensions/typescript-language-features/src/languageFeatures/tsconfig.ts
| 164
|
[
"baseDirUri",
"pathValue",
"linkType"
] | true
| 8
| 6.56
|
microsoft/vscode
| 179,840
|
jsdoc
| true
|
|
save_to_buffer
|
def save_to_buffer(
    string: str,
    buf: FilePath | WriteBuffer[str] | None = None,
    encoding: str | None = None,
) -> str | None:
    """
    Serialize ``string``: write it to ``buf``, or return it when ``buf`` is None.
    """
    with _get_buffer(buf, encoding=encoding) as fd:
        fd.write(string)
        if buf is not None:
            return None
        # error: "WriteBuffer[str]" has no attribute "getvalue"
        return fd.getvalue()  # type: ignore[attr-defined]
|
Perform serialization. Write to buf or return as string if buf is None.
|
python
|
pandas/io/formats/format.py
| 1,036
|
[
"string",
"buf",
"encoding"
] |
str | None
| true
| 2
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
_update_range_helper
|
def _update_range_helper(
    self, node: int, start: int, end: int, left: int, right: int, value: T
) -> None:
    """
    Recursively apply ``value`` to the range [left, right] of the segment
    tree, where ``node`` covers the segment [start, end].

    Args:
        node: Current node index.
        start: Start index of the current segment.
        end: End index of the current segment.
        left: Start index of the range to update.
        right: End index of the range to update.
        value: Value to apply to the range.
    """
    # Flush any pending lazy update at this node before inspecting it.
    self._push_lazy(node, start, end)
    if start > right or end < left:
        # Disjoint segment: nothing to do.
        return
    if left <= start and end <= right:
        # Fully covered: stash the update in the lazy slot, then flush it.
        self.lazy[node] = value
        self._push_lazy(node, start, end)
        return
    # Partial overlap: recurse into both halves, then recombine this node.
    mid = (start + end) // 2
    self._update_range_helper(2 * node, start, mid, left, right, value)
    self._update_range_helper(2 * node + 1, mid + 1, end, left, right, value)
    self.tree[node] = self.summary_op(self.tree[2 * node], self.tree[2 * node + 1])
|
Helper method to update a range of values in the segment tree.
Args:
node: Current node index
start: Start index of the current segment
end: End index of the current segment
left: Start index of the range to update
right: End index of the range to update
value: Value to apply to the range
|
python
|
torch/_inductor/codegen/segmented_tree.py
| 119
|
[
"self",
"node",
"start",
"end",
"left",
"right",
"value"
] |
None
| true
| 5
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
onClose
|
// Callback invoked when a JarFile is closed: evict it from the cache so a
// stale entry cannot be handed out to later connections.
private void onClose(JarFile jarFile) {
this.cache.remove(jarFile);
}
|
Reconnect to the {@link JarFile}, returning a replacement {@link URLConnection}.
@param jarFile the jar file
@param existingConnection the existing connection
@return a newly opened connection inhering the same {@code useCaches} value as the
existing connection
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
| 134
|
[
"jarFile"
] |
void
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
claimArg
|
/**
 * Claims this option's value from the remaining arguments, if any.
 *
 * @param args the remaining command-line arguments
 * @return the claimed value, or {@code null} when the option takes no value
 * or its optional value is absent
 * @throws MissingValueException when a required value is missing
 */
private @Nullable String claimArg(Deque<String> args) {
// Options without a value description consume nothing.
if (this.valueDescription == null) {
return null;
}
if (this.optionalValue) {
// Only claim the next token when it does not look like another option.
String next = args.peek();
if (next == null || next.startsWith("--")) {
return null;
}
return args.removeFirst();
}
// A required value must be present.
if (args.isEmpty()) {
throw new MissingValueException(this.name);
}
return args.removeFirst();
}
|
Return a description of the option.
@return the option description
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 315
|
[
"args"
] |
String
| true
| 6
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
build
|
// Builds a manual (non-loading) cache from the current builder state after
// sanity-checking the weigher/weight and loader-related configuration.
public <K1 extends K, V1 extends V> Cache<K1, V1> build() {
checkWeightWithWeigher();
checkNonLoadingCache();
return new LocalCache.LocalManualCache<>(this);
}
|
Builds a cache which does not automatically load values when keys are requested.
<p>Consider {@link #build(CacheLoader)} instead, if it is feasible to implement a {@code
CacheLoader}.
<p>This method does not alter the state of this {@code CacheBuilder} instance, so it can be
invoked again to create multiple independent caches.
@return a cache having the requested features
@since 11.0
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 1,054
|
[] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
add_references
|
def add_references(self, mgr: BaseBlockManager) -> None:
    """
    Copy block references from ``mgr`` onto this manager, assuming both
    managers share the same block structure.
    """
    if len(self.blocks) != len(mgr.blocks):
        # Differing block counts mean a copy was made; nothing to share.
        return
    for blk, source_blk in zip(self.blocks, mgr.blocks):
        blk.refs = source_blk.refs
        blk.refs.add_reference(blk)
|
Adds the references from one manager to another. We assume that both
managers have the same block structure.
|
python
|
pandas/core/internals/managers.py
| 318
|
[
"self",
"mgr"
] |
None
| true
| 3
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
fn
|
/**
 * Combines the current and new value; subclasses supply the accumulation
 * function (e.g. sum or max).
 *
 * @param currentValue the current value (of either base or a cell)
 * @param newValue the argument from a user update call
 * @return the result of applying the function
 */
abstract long fn(long currentValue, long newValue);
|
Computes the function of current and new value. Subclasses should open-code this update
function for most uses, but the virtualized form is needed within retryUpdate.
@param currentValue the current value (of either base or a cell)
@param newValue the argument from a user update call
@return result of the update function
|
java
|
android/guava/src/com/google/common/cache/Striped64.java
| 175
|
[
"currentValue",
"newValue"
] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
compareTo
|
/**
 * Compares this mutable to another in ascending order, with false ordered
 * before true.
 *
 * @param other the other mutable to compare to, not null
 * @return negative if this is less, zero if equal, positive if greater
 */
@Override
public int compareTo(final MutableBoolean other) {
// Boolean.compare has the same contract as BooleanUtils.compare.
return Boolean.compare(this.value, other.value);
}
|
Compares this mutable to another in ascending order.
@param other the other mutable to compare to, not null
@return negative if this is less, zero if equal, positive if greater
where false is less than true
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableBoolean.java
| 91
|
[
"other"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
lexx
|
/**
 * Tokenizes a classic duration format string into field, literal and
 * optional-block tokens. Repeated field letters are collapsed into a single
 * token with an incremented count.
 *
 * @param format the format to parse, not null
 * @return the parsed tokens
 * @throws IllegalArgumentException on nested/unmatched optional blocks or an
 * unmatched quote
 */
static Token[] lexx(final String format) {
final ArrayList<Token> list = new ArrayList<>(format.length());
boolean inLiteral = false;
// Although the buffer is stored in a Token, the Tokens are only
// used internally, so cannot be accessed by other threads
StringBuilder buffer = null;
Token previous = null;
boolean inOptional = false;
int optionalIndex = -1;
for (int i = 0; i < format.length(); i++) {
final char ch = format.charAt(i);
// Inside a quoted literal, everything except the closing quote is text.
if (inLiteral && ch != '\'') {
buffer.append(ch); // buffer can't be null if inLiteral is true
continue;
}
String value = null;
switch (ch) {
// TODO: Need to handle escaping of '
case '[':
if (inOptional) {
throw new IllegalArgumentException("Nested optional block at index: " + i);
}
optionalIndex++;
inOptional = true;
break;
case ']':
if (!inOptional) {
throw new IllegalArgumentException("Attempting to close unopened optional block at index: " + i);
}
inOptional = false;
break;
case '\'':
// Toggle literal mode; opening a literal starts a fresh text buffer token.
if (inLiteral) {
buffer = null;
inLiteral = false;
} else {
buffer = new StringBuilder();
list.add(new Token(buffer, inOptional, optionalIndex));
inLiteral = true;
}
break;
case 'y':
value = y;
break;
case 'M':
value = M;
break;
case 'd':
value = d;
break;
case 'H':
value = H;
break;
case 'm':
value = m;
break;
case 's':
value = s;
break;
case 'S':
value = S;
break;
default:
// Unquoted plain character: accumulate it into a text buffer token.
if (buffer == null) {
buffer = new StringBuilder();
list.add(new Token(buffer, inOptional, optionalIndex));
}
buffer.append(ch);
}
if (value != null) {
// Collapse runs of the same field letter (e.g. "mm") into one token.
if (previous != null && previous.getValue().equals(value)) {
previous.increment();
} else {
final Token token = new Token(value, inOptional, optionalIndex);
list.add(token);
previous = token;
}
buffer = null;
}
}
if (inLiteral) { // i.e. we have not found the end of the literal
throw new IllegalArgumentException("Unmatched quote in format: " + format);
}
if (inOptional) { // i.e. we have not found the end of the literal
throw new IllegalArgumentException("Unmatched optional in format: " + format);
}
return list.toArray(Token.EMPTY_ARRAY);
}
|
Parses a classic date format string into Tokens
@param format the format to parse, not null
@return array of Token[]
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 680
|
[
"format"
] | true
| 13
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
from_codes
|
def from_codes(
    cls,
    codes,
    categories=None,
    ordered=None,
    dtype: Dtype | None = None,
    validate: bool = True,
) -> Self:
    """
    Construct a Categorical directly from integer codes plus
    categories/dtype, skipping the (computation intensive) factorization
    step performed by the normal constructor.

    Parameters
    ----------
    codes : array-like of int
        Integer pointers into the categories; -1 encodes NaN.
    categories : index-like, optional
        Unique categories; required here if not carried by `dtype`.
    ordered : bool, optional
        Whether the categorical is ordered; defaults to unordered when
        given neither here nor in `dtype`.
    dtype : CategoricalDtype or "category", optional
        If a :class:`CategoricalDtype`, cannot be combined with
        `categories` or `ordered`.
    validate : bool, default True
        Validate the codes against the dtype. Skipping validation with
        invalid codes can cause severe problems, such as segfaults.

        .. versionadded:: 2.1.0

    Returns
    -------
    Categorical

    See Also
    --------
    codes : The category codes of the categorical.
    CategoricalIndex : An Index with an underlying ``Categorical``.

    Examples
    --------
    >>> dtype = pd.CategoricalDtype(["a", "b"], ordered=True)
    >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
    ['a', 'b', 'a', 'b']
    Categories (2, str): ['a' < 'b']
    """
    dtype = CategoricalDtype._from_values_or_dtype(
        categories=categories, ordered=ordered, dtype=dtype
    )
    if dtype.categories is None:
        raise ValueError(
            "The categories must be provided in 'categories' or "
            "'dtype'. Both were None."
        )
    if validate:
        # beware: non-valid codes may segfault
        codes = cls._validate_codes_for_dtype(codes, dtype=dtype)
    return cls._simple_new(codes, dtype=dtype)
|
Make a Categorical type from codes and categories or dtype.
This constructor is useful if you already have codes and
categories/dtype and so do not need the (computation intensive)
factorization step, which is usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like of int
An integer array, where each integer points to a category in
categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here, then they must be provided
in `dtype`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
validate : bool, default True
If True, validate that the codes are valid for the dtype.
If False, don't validate that the codes are valid. Be careful about skipping
validation, as invalid codes can lead to severe problems, such as segfaults.
.. versionadded:: 2.1.0
Returns
-------
Categorical
See Also
--------
codes : The category codes of the categorical.
CategoricalIndex : An Index with an underlying ``Categorical``.
Examples
--------
>>> dtype = pd.CategoricalDtype(["a", "b"], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
['a', 'b', 'a', 'b']
Categories (2, str): ['a' < 'b']
|
python
|
pandas/core/arrays/categorical.py
| 716
|
[
"cls",
"codes",
"categories",
"ordered",
"dtype",
"validate"
] |
Self
| true
| 3
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
asBinderOptionsSet
|
// Converts the varargs options into an EnumSet, treating a null/empty array
// as "no options".
private Set<BinderOption> asBinderOptionsSet(BinderOption... options) {
if (ObjectUtils.isEmpty(options)) {
return EnumSet.noneOf(BinderOption.class);
}
return EnumSet.copyOf(Arrays.asList(options));
}
|
Return a {@link Binder} backed by the contributors.
@param activationContext the activation context
@param filter a filter used to limit the contributors
@param options binder options to apply
@return a binder instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributors.java
| 223
|
[] | true
| 2
| 7.52
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
compareIgnoreCase
|
@Deprecated
public static int compareIgnoreCase(final String str1, final String str2) {
return Strings.CI.compare(str1, str2);
}
|
Compares two Strings lexicographically, ignoring case differences, as per {@link String#compareToIgnoreCase(String)}, returning :
<ul>
<li>{@code int = 0}, if {@code str1} is equal to {@code str2} (or both {@code null})</li>
<li>{@code int < 0}, if {@code str1} is less than {@code str2}</li>
<li>{@code int > 0}, if {@code str1} is greater than {@code str2}</li>
</ul>
<p>
This is a {@code null} safe version of:
</p>
<pre>
str1.compareToIgnoreCase(str2)
</pre>
<p>
{@code null} value is considered less than non-{@code null} value. Two {@code null} references are considered equal. Comparison is case insensitive.
</p>
<pre>{@code
StringUtils.compareIgnoreCase(null, null) = 0
StringUtils.compareIgnoreCase(null , "a") < 0
StringUtils.compareIgnoreCase("a", null) > 0
StringUtils.compareIgnoreCase("abc", "abc") = 0
StringUtils.compareIgnoreCase("abc", "ABC") = 0
StringUtils.compareIgnoreCase("a", "b") < 0
StringUtils.compareIgnoreCase("b", "a") > 0
StringUtils.compareIgnoreCase("a", "B") < 0
StringUtils.compareIgnoreCase("A", "b") < 0
StringUtils.compareIgnoreCase("ab", "ABC") < 0
}</pre>
@param str1 the String to compare from.
@param str2 the String to compare to.
@return < 0, 0, > 0, if {@code str1} is respectively less, equal ou greater than {@code str2}, ignoring case differences.
@see #compareIgnoreCase(String, String, boolean)
@see String#compareToIgnoreCase(String)
@since 3.5
@deprecated Use {@link Strings#compare(String, String) Strings.CI.compare(String, String)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 907
|
[
"str1",
"str2"
] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
anom
|
def anom(self, axis=None, dtype=None):
"""
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> import numpy as np
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data=[-1., 0., 1.],
mask=False,
fill_value=1e+20)
"""
m = self.mean(axis, dtype)
if not axis:
return self - m
else:
return self - expand_dims(m, axis)
|
Compute the anomalies (deviations from the arithmetic mean)
along the given axis.
Returns an array of anomalies, with the same shape as the input and
where the arithmetic mean is computed along the given axis.
Parameters
----------
axis : int, optional
Axis over which the anomalies are taken.
The default is to use the mean of the flattened array as reference.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
the default is float32; for arrays of float types it is the same as
the array type.
See Also
--------
mean : Compute the mean of the array.
Examples
--------
>>> import numpy as np
>>> a = np.ma.array([1,2,3])
>>> a.anom()
masked_array(data=[-1., 0., 1.],
mask=False,
fill_value=1e+20)
|
python
|
numpy/ma/core.py
| 5,430
|
[
"self",
"axis",
"dtype"
] | false
| 3
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
bindModuleDeclaration
|
function bindModuleDeclaration(node: ModuleDeclaration) {
setExportContextFlag(node);
if (isAmbientModule(node)) {
if (hasSyntacticModifier(node, ModifierFlags.Export)) {
errorOnFirstToken(node, Diagnostics.export_modifier_cannot_be_applied_to_ambient_modules_and_module_augmentations_since_they_are_always_visible);
}
if (isModuleAugmentationExternal(node)) {
declareModuleSymbol(node);
}
else {
let pattern: string | Pattern | undefined;
if (node.name.kind === SyntaxKind.StringLiteral) {
const { text } = node.name;
pattern = tryParsePattern(text);
if (pattern === undefined) {
errorOnFirstToken(node.name, Diagnostics.Pattern_0_can_have_at_most_one_Asterisk_character, text);
}
}
const symbol = declareSymbolAndAddToSymbolTable(node, SymbolFlags.ValueModule, SymbolFlags.ValueModuleExcludes)!;
file.patternAmbientModules = append<PatternAmbientModule>(file.patternAmbientModules, pattern && !isString(pattern) ? { pattern, symbol } : undefined);
}
}
else {
const state = declareModuleSymbol(node);
if (state !== ModuleInstanceState.NonInstantiated) {
const { symbol } = node;
// if module was already merged with some function, class or non-const enum, treat it as non-const-enum-only
symbol.constEnumOnlyModule = (!(symbol.flags & (SymbolFlags.Function | SymbolFlags.Class | SymbolFlags.RegularEnum)))
// Current must be `const enum` only
&& state === ModuleInstanceState.ConstEnumOnly
// Can't have been set to 'false' in a previous merged symbol. ('undefined' OK)
&& symbol.constEnumOnlyModule !== false;
}
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 2,351
|
[
"node"
] | false
| 13
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
requiresDestruction
|
default boolean requiresDestruction(Object bean) {
return true;
}
|
Determine whether the given bean instance requires destruction by this
post-processor.
<p>The default implementation returns {@code true}. If a pre-5 implementation
of {@code DestructionAwareBeanPostProcessor} does not provide a concrete
implementation of this method, Spring silently assumes {@code true} as well.
@param bean the bean instance to check
@return {@code true} if {@link #postProcessBeforeDestruction} is supposed to
be called for this bean instance eventually, or {@code false} if not needed
@since 4.3
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/DestructionAwareBeanPostProcessor.java
| 57
|
[
"bean"
] | true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
asStoreDetails
|
private static JksSslStoreDetails asStoreDetails(JksSslBundleProperties.Store properties) {
return new JksSslStoreDetails(properties.getType(), properties.getProvider(), properties.getLocation(),
properties.getPassword());
}
|
Get an {@link SslBundle} for the given {@link JksSslBundleProperties}.
@param properties the source properties
@param resourceLoader the resource loader used to load content
@return an {@link SslBundle} instance
@since 3.3.5
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/PropertiesSslBundle.java
| 178
|
[
"properties"
] |
JksSslStoreDetails
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_get_relative_fileloc
|
def _get_relative_fileloc(self, filepath: str) -> str:
"""
Get the relative file location for a given filepath.
:param filepath: Absolute path to the file
:return: Relative path from bundle_path, or original filepath if no bundle_path
"""
if self.bundle_path:
return str(Path(filepath).relative_to(self.bundle_path))
return filepath
|
Get the relative file location for a given filepath.
:param filepath: Absolute path to the file
:return: Relative path from bundle_path, or original filepath if no bundle_path
|
python
|
airflow-core/src/airflow/dag_processing/dagbag.py
| 391
|
[
"self",
"filepath"
] |
str
| true
| 2
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
handleDirents
|
function handleDirents({ result, currentPath, context }) {
const { 0: names, 1: types } = result;
const { length } = names;
for (let i = 0; i < length; i++) {
// Avoid excluding symlinks, as they are not directories.
// Refs: https://github.com/nodejs/node/issues/52663
const fullPath = pathModule.join(currentPath, names[i]);
const dirent = getDirent(currentPath, names[i], types[i]);
ArrayPrototypePush(context.readdirResults, dirent);
if (dirent.isDirectory() || binding.internalModuleStat(fullPath) === 1) {
ArrayPrototypePush(context.pathsQueue, fullPath);
}
}
}
|
Synchronously creates a directory.
@param {string | Buffer | URL} path
@param {{
recursive?: boolean;
mode?: string | number;
} | number} [options]
@returns {string | void}
|
javascript
|
lib/fs.js
| 1,403
|
[] | false
| 4
| 7.28
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
toArray
|
@GwtIncompatible // reflection
public @Nullable V[][] toArray(Class<V> valueClass) {
@SuppressWarnings("unchecked") // TODO: safe?
@Nullable V[][] copy =
(@Nullable V[][]) Array.newInstance(valueClass, rowList.size(), columnList.size());
for (int i = 0; i < rowList.size(); i++) {
arraycopy(array[i], 0, copy[i], 0, array[i].length);
}
return copy;
}
|
Returns a two-dimensional array with the table contents. The row and column indices correspond
to the positions of the row and column in the iterables provided during table construction. If
the table lacks a mapping for a given row and column, the corresponding array element is null.
<p>Subsequent table changes will not modify the array, and vice versa.
@param valueClass class of values stored in the returned array
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 362
|
[
"valueClass"
] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
_unpack_nested_dtype
|
def _unpack_nested_dtype(other: Index) -> DtypeObj:
"""
When checking if our dtype is comparable with another, we need
to unpack CategoricalDtype to look at its categories.dtype.
Parameters
----------
other : Index
Returns
-------
np.dtype or ExtensionDtype
"""
dtype = other.dtype
if isinstance(dtype, CategoricalDtype):
# If there is ever a SparseIndex, this could get dispatched
# here too.
return dtype.categories.dtype
elif isinstance(dtype, ArrowDtype):
# GH 53617
import pyarrow as pa
if pa.types.is_dictionary(dtype.pyarrow_dtype):
other = other[:0].astype(ArrowDtype(dtype.pyarrow_dtype.value_type))
return other.dtype
|
When checking if our dtype is comparable with another, we need
to unpack CategoricalDtype to look at its categories.dtype.
Parameters
----------
other : Index
Returns
-------
np.dtype or ExtensionDtype
|
python
|
pandas/core/indexes/base.py
| 7,856
|
[
"other"
] |
DtypeObj
| true
| 4
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
mergeNewValues
|
private void mergeNewValues(double compression) {
if (totalWeight == 0 && unmergedWeight == 0) {
// seriously nothing to do
return;
}
if (unmergedWeight > 0) {
// note that we run the merge in reverse every other merge to avoid left-to-right bias in merging
merge(tempMean, tempWeight, tempUsed, order, unmergedWeight, useAlternatingSort & mergeCount % 2 == 1, compression);
mergeCount++;
tempUsed = 0;
unmergedWeight = 0;
}
}
|
Fully specified constructor. Normally only used for deserializing a buffer t-digest.
@param compression Compression factor
@param bufferSize Number of temporary centroids
@param size Size of main buffer
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
| 290
|
[
"compression"
] |
void
| true
| 4
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
close
|
@Override
public void close() {
List<String> connections = new ArrayList<>(channels.keySet());
AtomicReference<Throwable> firstException = new AtomicReference<>();
Utils.closeAllQuietly(firstException, "release connections",
connections.stream().map(id -> (AutoCloseable) () -> close(id)).toArray(AutoCloseable[]::new));
// If there is any exception thrown in close(id), we should still be able
// to close the remaining objects, especially the sensors because keeping
// the sensors may lead to failure to start up the ReplicaFetcherThread if
// the old sensors with the same names has not yet been cleaned up.
Utils.closeQuietly(nioSelector, "nioSelector", firstException);
Utils.closeQuietly(sensors, "sensors", firstException);
Utils.closeQuietly(channelBuilder, "channelBuilder", firstException);
Throwable exception = firstException.get();
if (exception instanceof RuntimeException && !(exception instanceof SecurityException)) {
throw (RuntimeException) exception;
}
}
|
Close this selector and all associated connections
|
java
|
clients/src/main/java/org/apache/kafka/common/network/Selector.java
| 368
|
[] |
void
| true
| 3
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
public String toString() {
if (magic() > 0)
return String.format("Record(magic=%d, attributes=%d, compression=%s, crc=%d, %s=%d, key=%d bytes, value=%d bytes)",
magic(),
attributes(),
compressionType(),
checksum(),
timestampType(),
timestamp(),
key() == null ? 0 : key().limit(),
value() == null ? 0 : value().limit());
else
return String.format("Record(magic=%d, attributes=%d, compression=%s, crc=%d, key=%d bytes, value=%d bytes)",
magic(),
attributes(),
compressionType(),
checksum(),
key() == null ? 0 : key().limit(),
value() == null ? 0 : value().limit());
}
|
Get the underlying buffer backing this record instance.
@return the buffer
|
java
|
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
| 274
|
[] |
String
| true
| 6
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
initializeCjsConditions
|
function initializeCjsConditions() {
const userConditions = getOptionValue('--conditions');
const noAddons = getOptionValue('--no-addons');
const addonConditions = noAddons ? [] : ['node-addons'];
// TODO: Use this set when resolving pkg#exports conditions in loader.js.
cjsConditionsArray = [
'require',
'node',
...addonConditions,
...userConditions,
];
if (getOptionValue('--require-module')) {
cjsConditionsArray.push('module-sync');
}
ObjectFreeze(cjsConditionsArray);
cjsConditions = new SafeSet(cjsConditionsArray);
}
|
Define the conditions that apply to the CommonJS loader.
@returns {void}
|
javascript
|
lib/internal/modules/helpers.js
| 76
|
[] | false
| 3
| 7.12
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
send_callback
|
def send_callback(self, request: CallbackRequest) -> None:
"""
Send callback for execution.
Provides a default implementation which sends the callback to the `callback_sink` object.
:param request: Callback request to be executed.
"""
if not self.callback_sink:
raise ValueError("Callback sink is not ready.")
self.callback_sink.send(request)
|
Send callback for execution.
Provides a default implementation which sends the callback to the `callback_sink` object.
:param request: Callback request to be executed.
|
python
|
airflow-core/src/airflow/executors/base_executor.py
| 586
|
[
"self",
"request"
] |
None
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
handleCachedTransactionRequestResult
|
private TransactionalRequestResult handleCachedTransactionRequestResult(
Supplier<TransactionalRequestResult> transactionalRequestResultSupplier,
State nextState,
String operation
) {
ensureTransactional();
if (pendingTransition != null) {
if (pendingTransition.result.isAcked()) {
pendingTransition = null;
} else if (nextState != pendingTransition.state) {
throw new IllegalStateException("Cannot attempt operation `" + operation + "` "
+ "because the previous call to `" + pendingTransition.operation + "` "
+ "timed out and must be retried");
} else {
return pendingTransition.result;
}
}
TransactionalRequestResult result = transactionalRequestResultSupplier.get();
pendingTransition = new PendingStateTransition(result, nextState, operation);
return result;
}
|
Check if the transaction is in the prepared state.
@return true if the current state is PREPARED_TRANSACTION
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,263
|
[
"transactionalRequestResultSupplier",
"nextState",
"operation"
] |
TransactionalRequestResult
| true
| 4
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
getSwitchCaseDefaultOccurrences
|
function getSwitchCaseDefaultOccurrences(switchStatement: SwitchStatement): Node[] {
const keywords: Node[] = [];
pushKeywordIf(keywords, switchStatement.getFirstToken(), SyntaxKind.SwitchKeyword);
// Go through each clause in the switch statement, collecting the 'case'/'default' keywords.
forEach(switchStatement.caseBlock.clauses, clause => {
pushKeywordIf(keywords, clause.getFirstToken(), SyntaxKind.CaseKeyword, SyntaxKind.DefaultKeyword);
forEach(aggregateAllBreakAndContinueStatements(clause), statement => {
if (ownsBreakOrContinueStatement(switchStatement, statement)) {
pushKeywordIf(keywords, statement.getFirstToken(), SyntaxKind.BreakKeyword);
}
});
});
return keywords;
}
|
For lack of a better name, this function takes a throw statement and returns the
nearest ancestor that is a try-block (whose try statement has a catch clause),
function-block, or source file.
|
typescript
|
src/services/documentHighlights.ts
| 392
|
[
"switchStatement"
] | true
| 2
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
add
|
@Override
public void add(double x, long w) {
checkValue(x);
needsCompression = true;
if (x < min) {
min = x;
}
if (x > max) {
max = x;
}
int start = summary.floor(x);
if (start == NIL) {
start = summary.first();
}
if (start == NIL) { // empty summary
assert summary.isEmpty();
summary.add(x, w);
count = w;
} else {
double minDistance = Double.MAX_VALUE;
int lastNeighbor = NIL;
for (int neighbor = start; neighbor != NIL; neighbor = summary.next(neighbor)) {
double z = Math.abs(summary.mean(neighbor) - x);
if (z < minDistance) {
start = neighbor;
minDistance = z;
} else if (z > minDistance) {
// as soon as z increases, we have passed the nearest neighbor and can quit
lastNeighbor = neighbor;
break;
}
}
int closest = NIL;
double n = 0;
long sum = summary.headSum(start);
for (int neighbor = start; neighbor != lastNeighbor; neighbor = summary.next(neighbor)) {
assert minDistance == Math.abs(summary.mean(neighbor) - x);
double q = count == 1 ? 0.5 : (sum + (summary.count(neighbor) - 1) / 2.0) / (count - 1);
double k = 4 * count * q * (1 - q) / compression;
// this slightly clever selection method improves accuracy with lots of repeated points
// what it does is sample uniformly from all clusters that have room
if (summary.count(neighbor) + w <= k) {
n++;
if (gen.nextDouble() < 1 / n) {
closest = neighbor;
}
}
sum += summary.count(neighbor);
}
if (closest == NIL) {
summary.add(x, w);
} else {
// if the nearest point was not unique, then we may not be modifying the first copy
// which means that ordering can change
double centroid = summary.mean(closest);
long count = summary.count(closest);
centroid = weightedAverage(centroid, count, x, w);
count += w;
summary.update(closest, centroid, count);
}
count += w;
if (summary.size() > 20 * compression) {
// may happen in case of sequential points
compress();
}
}
}
|
Sets the seed for the RNG.
In cases where a predictable tree should be created, this function may be used to make the
randomness in this AVLTree become more deterministic.
@param seed The random seed to use for RNG purposes
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLTreeDigest.java
| 95
|
[
"x",
"w"
] |
void
| true
| 14
| 7.12
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
common_fill_value
|
def common_fill_value(a, b):
"""
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
"""
t1 = get_fill_value(a)
t2 = get_fill_value(b)
if t1 == t2:
return t1
return None
|
Return the common filling value of two masked arrays, if any.
If ``a.fill_value == b.fill_value``, return the fill value,
otherwise return None.
Parameters
----------
a, b : MaskedArray
The masked arrays for which to compare fill values.
Returns
-------
fill_value : scalar or None
The common fill value, or None.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([0, 1.], fill_value=3)
>>> y = np.ma.array([0, 1.], fill_value=3)
>>> np.ma.common_fill_value(x, y)
3.0
|
python
|
numpy/ma/core.py
| 586
|
[
"a",
"b"
] | false
| 2
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
endsWithElementsEqualTo
|
private boolean endsWithElementsEqualTo(ConfigurationPropertyName name) {
for (int i = this.elements.getSize() - 1; i >= 0; i--) {
if (elementDiffers(this.elements, name.elements, i)) {
return false;
}
}
return true;
}
|
Returns {@code true} if this element is an ancestor (immediate or nested parent) of
the specified name.
@param name the name to check
@return {@code true} if this name is an ancestor
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 385
|
[
"name"
] | true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
storeToXML
|
@Override
public void storeToXML(OutputStream out, String comments) throws IOException {
super.storeToXML(out, (this.omitComments ? null : comments));
}
|
Construct a new {@code SortedProperties} instance with properties populated
from the supplied {@link Properties} object and honoring the supplied
{@code omitComments} flag.
<p>Default properties from the supplied {@code Properties} object will
not be copied.
@param properties the {@code Properties} object from which to copy the
initial properties
@param omitComments {@code true} if comments should be omitted when
storing properties in a file
|
java
|
spring-context-indexer/src/main/java/org/springframework/context/index/processor/SortedProperties.java
| 111
|
[
"out",
"comments"
] |
void
| true
| 2
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_validate_scalar
|
def _validate_scalar(
self,
value,
*,
allow_listlike: bool = False,
unbox: bool = True,
):
"""
Validate that the input value can be cast to our scalar_type.
Parameters
----------
value : object
allow_listlike: bool, default False
When raising an exception, whether the message should say
listlike inputs are allowed.
unbox : bool, default True
Whether to unbox the result before returning. Note: unbox=False
skips the setitem compatibility check.
Returns
-------
self._scalar_type or NaT
"""
if isinstance(value, self._scalar_type):
pass
elif isinstance(value, str):
# NB: Careful about tzawareness
try:
value = self._scalar_from_string(value)
except ValueError as err:
msg = self._validation_error_message(value, allow_listlike)
raise TypeError(msg) from err
elif is_valid_na_for_dtype(value, self.dtype):
# GH#18295
value = NaT
elif isna(value):
# if we are dt64tz and value is dt64("NaT"), dont cast to NaT,
# or else we'll fail to raise in _unbox_scalar
msg = self._validation_error_message(value, allow_listlike)
raise TypeError(msg)
elif isinstance(value, self._recognized_scalars):
# error: Argument 1 to "Timestamp" has incompatible type "object"; expected
# "integer[Any] | float | str | date | datetime | datetime64"
value = self._scalar_type(value) # type: ignore[arg-type]
else:
msg = self._validation_error_message(value, allow_listlike)
raise TypeError(msg)
if not unbox:
# NB: In general NDArrayBackedExtensionArray will unbox here;
# this option exists to prevent a performance hit in
# TimedeltaIndex.get_loc
return value
return self._unbox_scalar(value)
|
Validate that the input value can be cast to our scalar_type.
Parameters
----------
value : object
allow_listlike: bool, default False
When raising an exception, whether the message should say
listlike inputs are allowed.
unbox : bool, default True
Whether to unbox the result before returning. Note: unbox=False
skips the setitem compatibility check.
Returns
-------
self._scalar_type or NaT
|
python
|
pandas/core/arrays/datetimelike.py
| 579
|
[
"self",
"value",
"allow_listlike",
"unbox"
] | true
| 8
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
get_dag_dependencies
|
def get_dag_dependencies(cls, session: Session = NEW_SESSION) -> dict[str, list[DagDependency]]:
"""
Get the dependencies between DAGs.
:param session: ORM Session
"""
load_json: Callable
data_col_to_select: ColumnElement[Any] | InstrumentedAttribute[bytes | None]
if COMPRESS_SERIALIZED_DAGS is False:
dialect = get_dialect_name(session)
if dialect in ["sqlite", "mysql"]:
data_col_to_select = func.json_extract(cls._data, "$.dag.dag_dependencies")
def load_json(deps_data):
return json.loads(deps_data) if deps_data else []
elif dialect == "postgresql":
# Use #> operator which works for both JSON and JSONB types
# Returns the JSON sub-object at the specified path
data_col_to_select = cls._data.op("#>")(literal('{"dag","dag_dependencies"}'))
load_json = lambda x: x
else:
data_col_to_select = func.json_extract_path(cls._data, "dag", "dag_dependencies")
load_json = lambda x: x
else:
data_col_to_select = cls._data_compressed
def load_json(deps_data):
return json.loads(zlib.decompress(deps_data))["dag"]["dag_dependencies"] if deps_data else []
latest_sdag_subquery = (
select(cls.dag_id, func.max(cls.created_at).label("max_created")).group_by(cls.dag_id).subquery()
)
query = session.execute(
select(cls.dag_id, data_col_to_select)
.join(
latest_sdag_subquery,
(cls.dag_id == latest_sdag_subquery.c.dag_id)
& (cls.created_at == latest_sdag_subquery.c.max_created),
)
.join(cls.dag_model)
.where(~DagModel.is_stale)
)
dag_depdendencies = [(str(dag_id), load_json(deps_data)) for dag_id, deps_data in query]
resolver = _DagDependenciesResolver(dag_id_dependencies=dag_depdendencies, session=session)
dag_depdendencies_by_dag = resolver.resolve()
return dag_depdendencies_by_dag
|
Get the dependencies between DAGs.
:param session: ORM Session
|
python
|
airflow-core/src/airflow/models/serialized_dag.py
| 612
|
[
"cls",
"session"
] |
dict[str, list[DagDependency]]
| true
| 8
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
getClassOrFunctionName
|
function getClassOrFunctionName(fn: Function, defaultName?: string) {
const isArrow = !fn.hasOwnProperty('prototype');
const isEmptyName = fn.name === '';
if ((isArrow && isEmptyName) || isEmptyName) {
return '[Function]';
}
const hasDefaultName = fn.name === defaultName;
if (hasDefaultName) {
return '[Function]';
}
return fn.name;
}
|
Get the display name for a function or class.
@param fn - The function or class to get the name from
@param defaultName - Optional name to check against. If the function name matches this value,
'[Function]' is returned instead
@returns The formatted name: class name, function name with '()', or '[Function]' for anonymous/arrow functions
|
typescript
|
devtools/projects/ng-devtools-backend/src/lib/router-tree.ts
| 191
|
[
"fn",
"defaultName?"
] | false
| 5
| 7.12
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
getDouble
|
public double getDouble(int index) throws JSONException {
Object object = get(index);
Double result = JSON.toDouble(object);
if (result == null) {
throw JSON.typeMismatch(index, object, "double");
}
return result;
}
|
Returns the value at {@code index} if it exists and is a double or can be coerced
to a double.
@param index the index to get the value from
@return the {@code value}
@throws JSONException if the value at {@code index} doesn't exist or cannot be
coerced to a double.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 366
|
[
"index"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getVersionsMap
|
private Map<String, @Nullable Object> getVersionsMap(Environment environment, @Nullable String defaultValue) {
String appVersion = getApplicationVersion(environment);
String bootVersion = getBootVersion();
Map<String, @Nullable Object> versions = new HashMap<>();
versions.put("application.version", getVersionString(appVersion, false, defaultValue));
versions.put("spring-boot.version", getVersionString(bootVersion, false, defaultValue));
versions.put("application.formatted-version", getVersionString(appVersion, true, defaultValue));
versions.put("spring-boot.formatted-version", getVersionString(bootVersion, true, defaultValue));
return versions;
}
|
Return the application title that should be used for the source class. By default
will use {@link Package#getImplementationTitle()}.
@param sourceClass the source class
@return the application title
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ResourceBanner.java
| 143
|
[
"environment",
"defaultValue"
] | true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
set_names
|
def set_names(self, names, *, level=None, inplace: bool = False) -> Self | None:
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : Hashable or a sequence of the previous or dict-like for MultiIndex
Name(s) to set.
level : int, Hashable or a sequence of the previous, optional
If the index is a MultiIndex and names is not dict-like, level(s) to set
(None for all levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names("quarter")
Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx = idx.set_names(["kind", "year"])
>>> idx.set_names("species", level=0)
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
When renaming levels with a dict, levels can not be passed.
>>> idx.set_names({"kind": "snake"})
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['snake', 'year'])
"""
if level is not None and not isinstance(self, ABCMultiIndex):
raise ValueError("Level must be None for non-MultiIndex")
if level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string when a single level is provided.")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if is_dict_like(names) and not isinstance(self, ABCMultiIndex):
raise TypeError("Can only pass dict-like as `names` for MultiIndex.")
if is_dict_like(names) and level is not None:
raise TypeError("Can not pass level for dictlike `names`.")
if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None:
# Transform dict to list of new names and corresponding levels
level, names_adjusted = [], []
for i, name in enumerate(self.names):
if name in names.keys():
level.append(i)
names_adjusted.append(names[name])
names = names_adjusted
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._view()
idx._set_names(names, level=level)
if not inplace:
return idx
return None
|
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : Hashable or a sequence of the previous or dict-like for MultiIndex
Name(s) to set.
level : int, Hashable or a sequence of the previous, optional
If the index is a MultiIndex and names is not dict-like, level(s) to set
(None for all levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names("quarter")
Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx = idx.set_names(["kind", "year"])
>>> idx.set_names("species", level=0)
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
When renaming levels with a dict, levels can not be passed.
>>> idx.set_names({"kind": "snake"})
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['snake', 'year'])
|
python
|
pandas/core/indexes/base.py
| 1,958
|
[
"self",
"names",
"level",
"inplace"
] |
Self | None
| true
| 24
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
appendSeparator
|
public StrBuilder appendSeparator(final String separator, final int loopIndex) {
    // No separator before the first element (loopIndex == 0) and a null
    // separator is silently ignored; both cases are a no-op.
    if (loopIndex <= 0 || separator == null) {
        return this;
    }
    append(separator);
    return this;
}
|
Appends a separator to the builder if the loop index is greater than zero.
Appending a null separator will have no effect.
The separator is appended using {@link #append(String)}.
<p>
This method is useful for adding a separator each time around the
loop except the first.
</p>
<pre>{@code
for (int i = 0; i < list.size(); i++) {
appendSeparator(",", i);
append(list.get(i));
}
}</pre>
Note that for this simple example, you should use
{@link #appendWithSeparators(Iterable, String)}.
@param separator the separator to use, null means no separator
@param loopIndex the loop index
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,324
|
[
"separator",
"loopIndex"
] |
StrBuilder
| true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
offsetsForInitializingPartitions
|
private Map<TopicPartition, OffsetAndMetadata> offsetsForInitializingPartitions(Map<TopicPartition, OffsetAndMetadata> offsets) {
    // Partitions that still need a fetch position; offsets for any other
    // partition are no longer relevant and are dropped from the result.
    Set<TopicPartition> initializing = subscriptionState.initializingPartitions();
    Map<TopicPartition, OffsetAndMetadata> filtered = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
        if (initializing.contains(entry.getKey())) {
            filtered.put(entry.getKey(), entry.getValue());
        }
    }
    return filtered;
}
|
Get the offsets, from the given collection, that belong to partitions that still require a position (partitions
that are initializing). This is expected to be used to filter out offsets that were retrieved for partitions
that do not need a position anymore.
@param offsets Offsets per partition
@return Subset of the offsets associated to partitions that are still initializing
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 437
|
[
"offsets"
] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.