Each record in this preview has the following columns: function_name (string), function_code (string), documentation (string), language (string, 5 values), file_path (string), line_number (int32), parameters (list), return_type (string), has_type_hints (bool), complexity (int32), quality_score (float32), repo_name (string, 34 values), repo_stars (int32), docstring_style (string, 7 values), is_async (bool).

| function_name | function_code | documentation | language | file_path | line_number | parameters | return_type | has_type_hints | complexity | quality_score | repo_name | repo_stars | docstring_style | is_async |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
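A record set with this schema is typically consumed with the `datasets` library. A minimal sketch, assuming a hypothetical dataset id `your-org/code-docs` (the real id is not shown in this preview):

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the real one for this preview.
ds = load_dataset("your-org/code-docs", split="train")

# Keep well-documented, typed, low-complexity functions.
subset = ds.filter(
    lambda r: r["quality_score"] >= 7.0
    and r["has_type_hints"]
    and r["complexity"] <= 5
)
print(subset[0]["function_name"], subset[0]["repo_name"])
```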
greatCircleMaxLatitude
|
public double greatCircleMaxLatitude(LatLng latLng) {
if (isNumericallyIdentical(latLng)) {
return latLng.lat;
}
return latLng.lat > this.lat ? greatCircleMaxLatitude(latLng, this) : greatCircleMaxLatitude(this, latLng);
}
|
Determines the maximum latitude of the great circle defined by this LatLng and the provided LatLng.
@param latLng The LatLng.
@return The maximum latitude of the great circle in radians.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java
| 134
|
[
"latLng"
] | true
| 3
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
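The two-argument overload that does the real work is not shown above. For the underlying geometry, the standard route to a great circle's extreme latitude is Clairaut's relation; the following Python sketch is offered as background on that math, not as the Elasticsearch implementation:

```python
import math

def great_circle_max_latitude(lat1, lon1, lat2, lon2):
    """Extreme latitude (radians) of the great circle through two points."""
    dlon = lon2 - lon1
    # Initial bearing from point 1 toward point 2.
    az = math.atan2(
        math.sin(dlon) * math.cos(lat2),
        math.cos(lat1) * math.sin(lat2)
        - math.sin(lat1) * math.cos(lat2) * math.cos(dlon),
    )
    # Clairaut: sin(azimuth) * cos(latitude) is constant along a great
    # circle, and the extreme is reached where the track runs due east.
    return math.acos(abs(math.sin(az) * math.cos(lat1)))

# The equator is its own great circle: it never leaves latitude 0.
print(great_circle_max_latitude(0.0, 0.0, 0.0, math.radians(90)))  # 0.0
```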
positionToLineOffset
|
function positionToLineOffset(info: ScriptInfoOrConfig, position: number): protocol.Location {
return isConfigFile(info) ? locationFromLineAndCharacter(info.getLineAndCharacterOfPosition(position)) : info.positionToLineOffset(position);
}
|
@param projects Projects initially known to contain {@link initialLocation}
@param defaultProject The default project containing {@link initialLocation}
@param initialLocation Where the search operation was triggered
@param getResultsForPosition This is where you plug in `findReferences`, `renameLocation`, etc
@param forPositionInResult Given an item returned by {@link getResultsForPosition} enumerate the positions referred to by that result
@returns In the common case where there's only one project, returns an array of results from {@link getResultsForPosition}.
If multiple projects were searched - even if they didn't return results - the result will be a map from project to per-project results.
|
typescript
|
src/server/session.ts
| 4,002
|
[
"info",
"position"
] | true
| 2
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
getMetadataStream
|
private InputStream getMetadataStream(FileObject fileObject) {
try {
return fileObject.openInputStream();
}
catch (IOException ex) {
return null;
}
}
|
Read additional {@link ConfigurationMetadata} for the {@link TypeElement} or
{@code null}.
@param typeElement the type to get additional metadata for
@return additional metadata for the given type or {@code null} if none is present
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 198
|
[
"fileObject"
] |
InputStream
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
backingMap
|
Map<K, Collection<V>> backingMap() {
return map;
}
|
Creates the collection of values for an explicitly provided key. By default, it simply calls
{@link #createCollection()}, which is the correct behavior for most implementations. The {@link
LinkedHashMultimap} class overrides it.
@param key key to associate with values in the collection
@return an empty collection of values
|
java
|
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
| 169
|
[] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
SettingsContextController
|
function SettingsContextController({
browserTheme,
children,
componentsPortalContainer,
profilerPortalContainer,
suspensePortalContainer,
}: Props): React.Node {
const bridge = useContext(BridgeContext);
const [displayDensity, setDisplayDensity] =
useLocalStorageWithLog<DisplayDensity>(
'React::DevTools::displayDensity',
'compact',
);
const [theme, setTheme] = useLocalStorageWithLog<Theme>(
LOCAL_STORAGE_BROWSER_THEME,
'auto',
);
const [parseHookNames, setParseHookNames] = useLocalStorageWithLog<boolean>(
LOCAL_STORAGE_PARSE_HOOK_NAMES_KEY,
false,
);
const [traceUpdatesEnabled, setTraceUpdatesEnabled] =
useLocalStorageWithLog<boolean>(
LOCAL_STORAGE_TRACE_UPDATES_ENABLED_KEY,
false,
);
const documentElements = useMemo<DocumentElements>(() => {
const array: Array<HTMLElement> = [
((document.documentElement: any): HTMLElement),
];
if (componentsPortalContainer != null) {
array.push(
((componentsPortalContainer.ownerDocument
.documentElement: any): HTMLElement),
);
}
if (profilerPortalContainer != null) {
array.push(
((profilerPortalContainer.ownerDocument
.documentElement: any): HTMLElement),
);
}
if (suspensePortalContainer != null) {
array.push(
((suspensePortalContainer.ownerDocument
.documentElement: any): HTMLElement),
);
}
return array;
}, [
componentsPortalContainer,
profilerPortalContainer,
suspensePortalContainer,
]);
useLayoutEffect(() => {
switch (displayDensity) {
case 'comfortable':
updateDisplayDensity('comfortable', documentElements);
break;
case 'compact':
updateDisplayDensity('compact', documentElements);
break;
default:
throw Error(`Unsupported displayDensity value "${displayDensity}"`);
}
}, [displayDensity, documentElements]);
useLayoutEffect(() => {
switch (theme) {
case 'light':
updateThemeVariables('light', documentElements);
break;
case 'dark':
updateThemeVariables('dark', documentElements);
break;
case 'auto':
updateThemeVariables(browserTheme, documentElements);
break;
default:
throw Error(`Unsupported theme value "${theme}"`);
}
}, [browserTheme, theme, documentElements]);
useEffect(() => {
bridge.send('setTraceUpdatesEnabled', traceUpdatesEnabled);
}, [bridge, traceUpdatesEnabled]);
const value: Context = useMemo(
() => ({
displayDensity,
lineHeight:
displayDensity === 'compact'
? COMPACT_LINE_HEIGHT
: COMFORTABLE_LINE_HEIGHT,
parseHookNames,
setDisplayDensity,
setParseHookNames,
setTheme,
setTraceUpdatesEnabled,
theme,
browserTheme,
traceUpdatesEnabled,
}),
[
displayDensity,
parseHookNames,
setDisplayDensity,
setParseHookNames,
setTheme,
setTraceUpdatesEnabled,
theme,
browserTheme,
traceUpdatesEnabled,
],
);
return (
<SettingsContext.Provider value={value}>
{children}
</SettingsContext.Provider>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/Settings/SettingsContext.js
| 89
|
[] | false
| 2
| 6
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
chi2
|
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the `n_features` features with the
highest values for the test chi-squared statistic from X, which must
contain only **non-negative integer feature values** such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
If some of your features are continuous, you need to bin them, for
example by using :class:`~sklearn.preprocessing.KBinsDiscretizer`.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample vectors.
y : array-like of shape (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : ndarray of shape (n_features,)
Chi2 statistics for each feature.
p_values : ndarray of shape (n_features,)
P-values for each feature.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
f_regression : F-value between label/feature for regression tasks.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_selection import chi2
>>> X = np.array([[1, 1, 3],
... [0, 1, 5],
... [5, 4, 1],
... [6, 6, 2],
... [1, 4, 0],
... [0, 0, 0]])
>>> y = np.array([1, 1, 0, 0, 2, 2])
>>> chi2_stats, p_values = chi2(X, y)
>>> chi2_stats
array([15.3, 6.5 , 8.9])
>>> p_values
array([0.000456, 0.0387, 0.0116 ])
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
# Converting X to float allows getting better performance for the
# safe_sparse_dot call made below.
X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32))
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
# Use a sparse representation for Y by default to reduce memory usage when
# y has many unique classes.
Y = LabelBinarizer(sparse_output=True).fit_transform(y)
if Y.shape[1] == 1:
Y = Y.toarray()
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
if issparse(observed):
# convert back to a dense array before calling _chisquare
# XXX: could _chisquare be reimplement to accept sparse matrices for
# cases where both n_classes and n_features are large (and X is
# sparse)?
observed = observed.toarray()
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
|
Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the `n_features` features with the
highest values for the test chi-squared statistic from X, which must
contain only **non-negative integer feature values** such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
If some of your features are continuous, you need to bin them, for
example by using :class:`~sklearn.preprocessing.KBinsDiscretizer`.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample vectors.
y : array-like of shape (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : ndarray of shape (n_features,)
Chi2 statistics for each feature.
p_values : ndarray of shape (n_features,)
P-values for each feature.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
f_regression : F-value between label/feature for regression tasks.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_selection import chi2
>>> X = np.array([[1, 1, 3],
... [0, 1, 5],
... [5, 4, 1],
... [6, 6, 2],
... [1, 4, 0],
... [0, 0, 0]])
>>> y = np.array([1, 1, 0, 0, 2, 2])
>>> chi2_stats, p_values = chi2(X, y)
>>> chi2_stats
array([15.3, 6.5 , 8.9])
>>> p_values
array([0.000456, 0.0387, 0.0116 ])
|
python
|
sklearn/feature_selection/_univariate_selection.py
| 200
|
[
"X",
"y"
] | false
| 5
| 7.6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
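The heart of the routine above is a class-by-feature contingency computation: observed totals versus the totals expected under independence. A minimal numpy re-derivation on the docstring's example data (illustrative only; the library version also handles sparse input and returns p-values):

```python
import numpy as np

X = np.array([[1, 1, 3], [0, 1, 5], [5, 4, 1],
              [6, 6, 2], [1, 4, 0], [0, 0, 0]], dtype=float)
y = np.array([1, 1, 0, 0, 2, 2])

# One-hot encode y, as LabelBinarizer does internally.
classes = np.unique(y)
Y = (y[:, None] == classes[None, :]).astype(float)

observed = Y.T @ X  # n_classes x n_features totals
expected = Y.mean(0)[:, None] * X.sum(0)[None, :]  # independence model
chi2_stats = ((observed - expected) ** 2 / expected).sum(axis=0)
print(chi2_stats)  # close to the docstring values [15.3, 6.5, 8.9]
```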
partitionsForTopic
|
public List<PartitionInfo> partitionsForTopic(String topic) {
return partitionsByTopic.getOrDefault(topic, Collections.emptyList());
}
|
Get the list of partitions for this topic
@param topic The topic name
@return A list of partitions
|
java
|
clients/src/main/java/org/apache/kafka/common/Cluster.java
| 294
|
[
"topic"
] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
replaceFrom
|
public String replaceFrom(CharSequence sequence, char replacement) {
String string = sequence.toString();
int pos = indexIn(string);
if (pos == -1) {
return string;
}
char[] chars = string.toCharArray();
chars[pos] = replacement;
for (int i = pos + 1; i < chars.length; i++) {
if (matches(chars[i])) {
chars[i] = replacement;
}
}
return new String(chars);
}
|
Returns a string copy of the input character sequence, with each matching BMP character
replaced by a given replacement character. For example:
{@snippet :
CharMatcher.is('a').replaceFrom("radar", 'o')
}
... returns {@code "rodor"}.
<p>The default implementation uses {@link #indexIn(CharSequence)} to find the first matching
character, then iterates the remainder of the sequence calling {@link #matches(char)} for each
character.
@param sequence the character sequence to replace matching characters in
@param replacement the character to append to the result string in place of each matching
character in {@code sequence}
@return the new string
|
java
|
android/guava/src/com/google/common/base/CharMatcher.java
| 680
|
[
"sequence",
"replacement"
] |
String
| true
| 4
| 7.6
|
google/guava
| 51,352
|
javadoc
| false
|
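The doc comment above spells out the scan strategy: locate the first match, then test each remaining character. A direct Python transliteration of that strategy, where `matches` stands in for the CharMatcher predicate (illustrative, not the Guava code):

```python
def replace_from(sequence: str, replacement: str, matches) -> str:
    # Find the first matching character; bail out early if none.
    pos = next((i for i, ch in enumerate(sequence) if matches(ch)), -1)
    if pos == -1:
        return sequence
    chars = list(sequence)
    chars[pos] = replacement
    # Only the remainder of the string needs per-character tests.
    for i in range(pos + 1, len(chars)):
        if matches(chars[i]):
            chars[i] = replacement
    return "".join(chars)

print(replace_from("radar", "o", lambda c: c == "a"))  # rodor
```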
getAvailableParameter
|
private @Nullable Function<Class<?>, Object> getAvailableParameter(Class<?> parameterType) {
for (Map.Entry<Class<?>, Function<Class<?>, Object>> entry : this.availableParameters.entrySet()) {
if (entry.getKey().isAssignableFrom(parameterType)) {
return entry.getValue();
}
}
return null;
}
|
Get an injectable argument instance for the given type. This method can be used
when manually instantiating an object without reflection.
@param <A> the argument type
@param type the argument type
@return the argument to inject or {@code null}
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/Instantiator.java
| 233
|
[
"parameterType"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
fuzz_tensor_simple
|
def fuzz_tensor_simple(
size: tuple[int, ...] | None = None,
stride: tuple[int, ...] | None = None,
dtype: torch.dtype | None = None,
seed: int | None = None,
) -> torch.Tensor:
"""
Convenience function that returns just the tensor without the seed.
Args:
size: Tensor shape. If None, will be randomly generated.
stride: Tensor stride. If None, will be randomly generated based on size.
dtype: Tensor data type. If None, will be randomly generated.
seed: Random seed for reproducibility. If None, uses current random state.
Returns:
torch.Tensor: A tensor with the specified or randomly generated properties.
"""
tensor, _ = fuzz_tensor(size, stride, dtype, seed)
return tensor
|
Convenience function that returns just the tensor without the seed.
Args:
size: Tensor shape. If None, will be randomly generated.
stride: Tensor stride. If None, will be randomly generated based on size.
dtype: Tensor data type. If None, will be randomly generated.
seed: Random seed for reproducibility. If None, uses current random state.
Returns:
torch.Tensor: A tensor with the specified or randomly generated properties.
|
python
|
tools/experimental/torchfuzz/tensor_fuzzer.py
| 425
|
[
"size",
"stride",
"dtype",
"seed"
] |
torch.Tensor
| true
| 1
| 6.72
|
pytorch/pytorch
| 96,034
|
google
| false
|
divide
|
public static long divide(long dividend, long divisor) {
if (divisor < 0) { // i.e., divisor >= 2^63:
if (compare(dividend, divisor) < 0) {
return 0; // dividend < divisor
} else {
return 1; // dividend >= divisor
}
}
// Optimization - use signed division if dividend < 2^63
if (dividend >= 0) {
return dividend / divisor;
}
/*
* Otherwise, approximate the quotient, check, and correct if necessary. Our approximation is
* guaranteed to be either exact or one less than the correct value. This follows from fact that
* floor(floor(x)/i) == floor(x/i) for any real x and integer i != 0. The proof is not quite
* trivial.
*/
long quotient = ((dividend >>> 1) / divisor) << 1;
long rem = dividend - quotient * divisor;
return quotient + (compare(rem, divisor) >= 0 ? 1 : 0);
}
|
Returns dividend / divisor, where the dividend and divisor are treated as unsigned 64-bit
quantities.
<p><b>Java 8+ users:</b> use {@link Long#divideUnsigned(long, long)} instead.
@param dividend the dividend (numerator)
@param divisor the divisor (denominator)
@throws ArithmeticException if divisor is 0
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLongs.java
| 250
|
[
"dividend",
"divisor"
] | true
| 5
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
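The approximation step above can be sanity-checked outside Java by emulating 64-bit unsigned arithmetic. A Python sketch of the same algorithm, verified against exact unsigned division (illustrative; Java 8+ callers should simply use Long.divideUnsigned):

```python
MASK = (1 << 64) - 1

def unsigned_divide(dividend: int, divisor: int) -> int:
    """Emulate the Java routine on 64-bit unsigned values."""
    d, v = dividend & MASK, divisor & MASK
    if v >> 63:              # divisor >= 2**63: quotient is 0 or 1
        return 1 if d >= v else 0
    if not (d >> 63):        # dividend < 2**63: plain division is safe
        return d // v
    # Approximate via a halved dividend, then correct by at most one.
    quotient = ((d >> 1) // v) << 1
    rem = d - quotient * v
    return quotient + (1 if rem >= v else 0)

for a, b in [(2**64 - 1, 3), (2**63 + 5, 2**63 + 1), (7, 2)]:
    assert unsigned_divide(a, b) == (a & MASK) // (b & MASK)
```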
fileStoreChecks
|
private static Stream<InstrumentationService.InstrumentationInfo> fileStoreChecks() {
var fileStoreClasses = StreamSupport.stream(FileSystems.getDefault().getFileStores().spliterator(), false)
.map(FileStore::getClass)
.distinct();
return fileStoreClasses.flatMap(fileStoreClass -> {
var instrumentation = new InstrumentationInfoFactory() {
@Override
public InstrumentationService.InstrumentationInfo of(String methodName, Class<?>... parameterTypes)
throws ClassNotFoundException, NoSuchMethodException {
return INSTRUMENTATION_SERVICE.lookupImplementationMethod(
FileStore.class,
methodName,
fileStoreClass,
EntitlementChecker.class,
"check" + Character.toUpperCase(methodName.charAt(0)) + methodName.substring(1),
parameterTypes
);
}
};
try {
return Stream.of(
instrumentation.of("getFileStoreAttributeView", Class.class),
instrumentation.of("getAttribute", String.class),
instrumentation.of("getBlockSize"),
instrumentation.of("getTotalSpace"),
instrumentation.of("getUnallocatedSpace"),
instrumentation.of("getUsableSpace"),
instrumentation.of("isReadOnly"),
instrumentation.of("name"),
instrumentation.of("type")
);
} catch (NoSuchMethodException | ClassNotFoundException e) {
throw new RuntimeException(e);
}
});
}
|
Initializes the dynamic (agent-based) instrumentation:
<ol>
<li>
Finds the version-specific subclass of {@link EntitlementChecker} to use
</li>
<li>
Builds the set of methods to instrument using {@link InstrumentationService#lookupMethods}
</li>
<li>
Augments this set “dynamically” using {@link InstrumentationService#lookupImplementationMethod}
</li>
<li>
Creates an {@link Instrumenter} via {@link InstrumentationService#newInstrumenter}, and adds a new {@link Transformer} (derived from
{@link java.lang.instrument.ClassFileTransformer}) that uses it. Transformers are invoked when a class is about to load, after its
bytes have been deserialized to memory but before the class is initialized.
</li>
<li>
Re-transforms all already loaded classes: we force the {@link Instrumenter} to run on classes that might have been already loaded
before entitlement initialization by calling the {@link java.lang.instrument.Instrumentation#retransformClasses} method on all
classes that were already loaded.
</li>
</ol>
<p>
The third step is needed as the JDK exposes some API through interfaces that have different (internal) implementations
depending on the JVM host platform. As we cannot instrument an interface, we find its concrete implementation.
A prime example is {@link FileSystemProvider}, which has different implementations (e.g. {@code UnixFileSystemProvider} or
{@code WindowsFileSystemProvider}). At runtime, we find the implementation class which is currently used by the JVM, and add
its methods to the set of methods to instrument. See e.g. {@link DynamicInstrumentation#fileSystemProviderChecks}.
</p>
@param inst the JVM instrumentation class instance
@param checkerInterface the interface to use to find methods to instrument and to use in the injected instrumentation code
@param verifyBytecode whether we should perform bytecode verification before and after instrumenting each method
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/DynamicInstrumentation.java
| 185
|
[] | true
| 2
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
resolve
|
static List<InetAddress> resolve(String host, HostResolver hostResolver) throws UnknownHostException {
InetAddress[] addresses = hostResolver.resolve(host);
List<InetAddress> result = filterPreferredAddresses(addresses);
if (log.isDebugEnabled())
log.debug("Resolved host {} as {}", host, result.stream().map(InetAddress::getHostAddress).collect(Collectors.joining(",")));
return result;
}
|
Create a new channel builder from the provided configuration.
@param config client configs
@param time the time implementation
@param logContext the logging context
@return configured ChannelBuilder based on the configs.
|
java
|
clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
| 124
|
[
"host",
"hostResolver"
] | true
| 2
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
assert_allclose
|
def assert_allclose(
actual, desired, rtol=None, atol=0.0, equal_nan=True, err_msg="", verbose=True
):
"""dtype-aware variant of numpy.testing.assert_allclose
This variant introspects the least precise floating point dtype
in the input arguments and automatically sets the relative tolerance
parameter to 1e-4 for float32 inputs and 1e-7 otherwise (typically float64
in scikit-learn).
`atol` is left at 0.0 by default. It should be adjusted manually
to an assertion-specific value in case there are null values expected
in `desired`.
The aggregate tolerance is `atol + rtol * abs(desired)`.
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional, default=None
Relative tolerance.
If None, it is set based on the provided arrays' dtypes.
atol : float, optional, default=0.
Absolute tolerance.
equal_nan : bool, optional, default=True
If True, NaNs will compare equal.
err_msg : str, optional, default=''
The error message to be printed in case of failure.
verbose : bool, optional, default=True
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
numpy.testing.assert_allclose
Examples
--------
>>> import numpy as np
>>> from sklearn.utils._testing import assert_allclose
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
>>> assert_allclose(x, y, rtol=1e-5, atol=0)
>>> a = np.full(shape=10, fill_value=1e-5, dtype=np.float32)
>>> assert_allclose(a, 1e-5)
"""
dtypes = []
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
dtypes = [actual.dtype, desired.dtype]
if rtol is None:
rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
rtol = max(rtols)
np_assert_allclose(
actual,
desired,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
err_msg=err_msg,
verbose=verbose,
)
|
dtype-aware variant of numpy.testing.assert_allclose
This variant introspects the least precise floating point dtype
in the input arguments and automatically sets the relative tolerance
parameter to 1e-4 for float32 inputs and 1e-7 otherwise (typically float64
in scikit-learn).
`atol` is left at 0.0 by default. It should be adjusted manually
to an assertion-specific value in case there are null values expected
in `desired`.
The aggregate tolerance is `atol + rtol * abs(desired)`.
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional, default=None
Relative tolerance.
If None, it is set based on the provided arrays' dtypes.
atol : float, optional, default=0.
Absolute tolerance.
equal_nan : bool, optional, default=True
If True, NaNs will compare equal.
err_msg : str, optional, default=''
The error message to be printed in case of failure.
verbose : bool, optional, default=True
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
numpy.testing.assert_allclose
Examples
--------
>>> import numpy as np
>>> from sklearn.utils._testing import assert_allclose
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
>>> assert_allclose(x, y, rtol=1e-5, atol=0)
>>> a = np.full(shape=10, fill_value=1e-5, dtype=np.float32)
>>> assert_allclose(a, 1e-5)
|
python
|
sklearn/utils/_testing.py
| 171
|
[
"actual",
"desired",
"rtol",
"atol",
"equal_nan",
"err_msg",
"verbose"
] | false
| 3
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
convertSingleIdentifierImport
|
function convertSingleIdentifierImport(name: Identifier, moduleSpecifier: StringLiteralLike, checker: TypeChecker, identifiers: Identifiers, quotePreference: QuotePreference): ConvertedImports {
const nameSymbol = checker.getSymbolAtLocation(name);
// Maps from module property name to name actually used. (The same if there isn't shadowing.)
const namedBindingsNames = new Map<string, string>();
// True if there is some non-property use like `x()` or `f(x)`.
let needDefaultImport = false;
let useSitesToUnqualify: Map<Node, Node> | undefined;
for (const use of identifiers.original.get(name.text)!) {
if (checker.getSymbolAtLocation(use) !== nameSymbol || use === name) {
// This was a use of a different symbol with the same name, due to shadowing. Ignore.
continue;
}
const { parent } = use;
if (isPropertyAccessExpression(parent)) {
const { name: { text: propertyName } } = parent;
if (propertyName === "default") {
needDefaultImport = true;
const importDefaultName = use.getText();
(useSitesToUnqualify ??= new Map()).set(parent, factory.createIdentifier(importDefaultName));
}
else {
Debug.assert(parent.expression === use, "Didn't expect expression === use"); // Else shouldn't have been in `collectIdentifiers`
let idName = namedBindingsNames.get(propertyName);
if (idName === undefined) {
idName = makeUniqueName(propertyName, identifiers);
namedBindingsNames.set(propertyName, idName);
}
(useSitesToUnqualify ??= new Map()).set(parent, factory.createIdentifier(idName));
}
}
else {
needDefaultImport = true;
}
}
const namedBindings = namedBindingsNames.size === 0 ? undefined : arrayFrom(mapIterator(namedBindingsNames.entries(), ([propertyName, idName]) => factory.createImportSpecifier(/*isTypeOnly*/ false, propertyName === idName ? undefined : factory.createIdentifier(propertyName), factory.createIdentifier(idName))));
if (!namedBindings) {
// If it was unused, ensure that we at least import *something*.
needDefaultImport = true;
}
return convertedImports(
[makeImport(needDefaultImport ? getSynthesizedDeepClone(name) : undefined, namedBindings, moduleSpecifier, quotePreference)],
useSitesToUnqualify,
);
}
|
Convert `import x = require("x").`
Also:
- Convert `x.default()` to `x()` to handle ES6 default export
- Converts uses like `x.y()` to `y()` and uses a named import.
|
typescript
|
src/services/codefixes/convertToEsModule.ts
| 528
|
[
"name",
"moduleSpecifier",
"checker",
"identifiers",
"quotePreference"
] | true
| 12
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
ensure_finished_tis
|
def ensure_finished_tis(self, dag_run: DagRun, session: Session) -> list[TaskInstance]:
"""
Ensure finished_tis is populated if it's currently None, which allows running tasks without dag_run.
:param dag_run: The DagRun for which to find finished tasks
:return: A list of all the finished tasks of this DAG and logical_date
"""
if self.finished_tis is None:
finished_tis = dag_run.get_task_instances(state=State.finished, session=session)
for ti in finished_tis:
if getattr(ti, "task", None) is not None or (dag := dag_run.dag) is None:
continue
with contextlib.suppress(TaskNotFound):
ti.task = dag.get_task(ti.task_id)
self.finished_tis = finished_tis
else:
finished_tis = self.finished_tis
return finished_tis
|
Ensure finished_tis is populated if it's currently None, which allows running tasks without dag_run.
:param dag_run: The DagRun for which to find finished tasks
:return: A list of all the finished tasks of this DAG and logical_date
|
python
|
airflow-core/src/airflow/ti_deps/dep_context.py
| 88
|
[
"self",
"dag_run",
"session"
] |
list[TaskInstance]
| true
| 6
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
configure
|
public <T extends SimpleAsyncTaskExecutor> T configure(T taskExecutor) {
PropertyMapper map = PropertyMapper.get();
map.from(this.virtualThreads).to(taskExecutor::setVirtualThreads);
map.from(this.threadNamePrefix).whenHasText().to(taskExecutor::setThreadNamePrefix);
map.from(this.cancelRemainingTasksOnClose).to(taskExecutor::setCancelRemainingTasksOnClose);
map.from(this.rejectTasksWhenLimitReached).to(taskExecutor::setRejectTasksWhenLimitReached);
map.from(this.concurrencyLimit).to(taskExecutor::setConcurrencyLimit);
map.from(this.taskDecorator).to(taskExecutor::setTaskDecorator);
map.from(this.taskTerminationTimeout).as(Duration::toMillis).to(taskExecutor::setTaskTerminationTimeout);
if (!CollectionUtils.isEmpty(this.customizers)) {
this.customizers.forEach((customizer) -> customizer.customize(taskExecutor));
}
return taskExecutor;
}
|
Configure the provided {@link SimpleAsyncTaskExecutor} instance using this builder.
@param <T> the type of task executor
@param taskExecutor the {@link SimpleAsyncTaskExecutor} to configure
@return the task executor instance
@see #build()
@see #build(Class)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskExecutorBuilder.java
| 266
|
[
"taskExecutor"
] |
T
| true
| 2
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
computeReplacement
|
public static @Nullable String computeReplacement(CharEscaper escaper, char c) {
return stringOrNull(escaper.escape(c));
}
|
Returns a string that would replace the given character in the specified escaper, or {@code
null} if no replacement should be made. This method is intended for use in tests through the
{@code EscaperAsserts} class; production users of {@link CharEscaper} should limit themselves
to its public interface.
@param c the character to escape if necessary
@return the replacement string, or {@code null} if no escaping was needed
|
java
|
android/guava/src/com/google/common/escape/Escapers.java
| 171
|
[
"escaper",
"c"
] |
String
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
where
|
def where(self, cond, other=None) -> Index:
"""
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
pandas.Index
A copy of self with values replaced from other
where the condition is False.
See Also
--------
Series.where : Same method for Series.
DataFrame.where : Same method for DataFrame.
Examples
--------
>>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.where(idx.isin(["car", "train"]), "other")
Index(['car', 'other', 'train', 'other'], dtype='object')
"""
if isinstance(self, ABCMultiIndex):
raise NotImplementedError(
".where is not supported for MultiIndex operations"
)
cond = np.asarray(cond, dtype=bool)
return self.putmask(~cond, other)
|
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
pandas.Index
A copy of self with values replaced from other
where the condition is False.
See Also
--------
Series.where : Same method for Series.
DataFrame.where : Same method for DataFrame.
Examples
--------
>>> idx = pd.Index(["car", "bike", "train", "tractor"])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.where(idx.isin(["car", "train"]), "other")
Index(['car', 'other', 'train', 'other'], dtype='object')
|
python
|
pandas/core/indexes/base.py
| 5,144
|
[
"self",
"cond",
"other"
] |
Index
| true
| 2
| 8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isLaziable
|
function isLaziable(func) {
var funcName = getFuncName(func),
other = lodash[funcName];
if (typeof other != 'function' || !(funcName in LazyWrapper.prototype)) {
return false;
}
if (func === other) {
return true;
}
var data = getData(other);
return !!data && func === data[0];
}
|
Checks if `func` has a lazy counterpart.
@private
@param {Function} func The function to check.
@returns {boolean} Returns `true` if `func` has a lazy counterpart,
else `false`.
|
javascript
|
lodash.js
| 6,440
|
[
"func"
] | false
| 5
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
load
|
static @Nullable PemSslStore load(@Nullable PemSslStoreDetails details, ResourceLoader resourceLoader) {
if (details == null || details.isEmpty()) {
return null;
}
return new LoadedPemSslStore(details, resourceLoader);
}
|
Return a {@link PemSslStore} instance loaded using the given
{@link PemSslStoreDetails}.
@param details the PEM store details
@param resourceLoader the resource loader used to load content
@return a loaded {@link PemSslStore} or {@code null}.
@since 3.3.5
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
| 115
|
[
"details",
"resourceLoader"
] |
PemSslStore
| true
| 3
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
readBlock
|
private void readBlock() throws IOException {
if (in.remaining() < 4) {
throw new IOException(PREMATURE_EOS);
}
int blockSize = in.getInt();
boolean compressed = (blockSize & LZ4_FRAME_INCOMPRESSIBLE_MASK) == 0;
blockSize &= ~LZ4_FRAME_INCOMPRESSIBLE_MASK;
// Check for EndMark
if (blockSize == 0) {
finished = true;
if (flg.isContentChecksumSet())
in.getInt(); // TODO: verify this content checksum
return;
} else if (blockSize > maxBlockSize) {
throw new IOException(String.format("Block size %d exceeded max: %d", blockSize, maxBlockSize));
}
if (in.remaining() < blockSize) {
throw new IOException(PREMATURE_EOS);
}
if (compressed) {
try {
final int bufferSize = DECOMPRESSOR.decompress(in, in.position(), blockSize, decompressionBuffer, 0,
maxBlockSize);
decompressionBuffer.position(0);
decompressionBuffer.limit(bufferSize);
decompressedBuffer = decompressionBuffer;
} catch (LZ4Exception e) {
throw new IOException(e);
}
} else {
decompressedBuffer = in.slice();
decompressedBuffer.limit(blockSize);
}
// verify checksum
if (flg.isBlockChecksumSet()) {
int hash = CHECKSUM.hash(in, in.position(), blockSize, 0);
in.position(in.position() + blockSize);
if (hash != in.getInt()) {
throw new IOException(BLOCK_HASH_MISMATCH);
}
} else {
in.position(in.position() + blockSize);
}
}
|
Decompresses (if necessary) buffered data, optionally computes and validates a XXHash32 checksum, and writes the
result to a buffer.
@throws IOException
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java
| 160
|
[] |
void
| true
| 10
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
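The block layout being parsed above is the standard LZ4 frame one: a 4-byte little-endian size whose high bit flags an uncompressed block, with a zero size marking the EndMark. A minimal Python parser for just that header logic (a sketch of the framing, not of Kafka's buffer management; the 0x80000000 mask corresponds to LZ4_FRAME_INCOMPRESSIBLE_MASK):

```python
import struct

INCOMPRESSIBLE_MASK = 0x80000000  # high bit set: block stored uncompressed

def read_block_header(buf: bytes, offset: int):
    """Return (block_size, compressed, end_mark) for one LZ4 frame block."""
    if len(buf) - offset < 4:
        raise IOError("premature end of stream")
    (raw,) = struct.unpack_from("<I", buf, offset)
    compressed = (raw & INCOMPRESSIBLE_MASK) == 0
    size = raw & 0x7FFFFFFF  # lower 31 bits hold the block size
    return size, compressed, size == 0

# A 5-byte uncompressed block followed by the EndMark.
frame = struct.pack("<I", 5 | INCOMPRESSIBLE_MASK) + b"hello" + struct.pack("<I", 0)
print(read_block_header(frame, 0))  # (5, False, False)
print(read_block_header(frame, 9))  # (0, True, True)
```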
replace
|
public static <V> String replace(final Object source, final Map<String, V> valueMap) {
return new StrSubstitutor(valueMap).replace(source);
}
|
Replaces all the occurrences of variables in the given source object with
their matching values from the map.
@param <V> the type of the values in the map.
@param source the source text containing the variables to substitute, null returns null.
@param valueMap the map with the values, may be null.
@return the result of the replace operation.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 178
|
[
"source",
"valueMap"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toIntValue
|
public static int toIntValue(final Character ch, final int defaultValue) {
return ch != null ? toIntValue(ch.charValue(), defaultValue) : defaultValue;
}
|
Converts the character to the Integer it represents, returning a
default value if the character is not numeric.
<p>This method converts the char '1' to the int 1 and so on.</p>
<pre>
CharUtils.toIntValue(null, -1) = -1
CharUtils.toIntValue('3', -1) = 3
CharUtils.toIntValue('A', -1) = -1
</pre>
@param ch the character to convert
@param defaultValue the default value to use if the character is not numeric
@return the int value of the character
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 451
|
[
"ch",
"defaultValue"
] | true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readConfig
|
async function readConfig(): Promise<NpsConfig | undefined> {
const data = await fs.promises
.readFile(getConfigPath(), 'utf-8')
.catch((err) => (err.code === 'ENOENT' ? Promise.resolve(undefined) : Promise.reject(err)))
if (data === undefined) {
return undefined
}
const obj = JSON.parse(data)
if (
obj.acknowledgedTimeframe &&
typeof obj.acknowledgedTimeframe.start === 'string' &&
typeof obj.acknowledgedTimeframe.end === 'string'
) {
return obj
} else {
throw new Error('Invalid NPS config schema')
}
}
|
Creates a proxy that aborts the readline interface when the underlying stream closes.
|
typescript
|
packages/cli/src/utils/nps/survey.ts
| 164
|
[] | true
| 7
| 6.88
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
transform
|
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group.
Return a Series with the transformed values.
Parameters
----------
arg : function
To apply to each group. Should return a Series with the same index.
*args, **kwargs
Additional arguments and keywords.
Returns
-------
Series
A Series with the transformed values, maintaining the same index as
the original object.
See Also
--------
core.resample.Resampler.apply : Apply a function along each group.
core.resample.Resampler.aggregate : Aggregate using one or more operations
over the specified axis.
Examples
--------
>>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h"))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: h, dtype: int64
>>> resampled = s.resample("15min")
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
2018-01-01 00:00:00 NaN
2018-01-01 01:00:00 NaN
Freq: h, dtype: float64
"""
return self._selected_obj.groupby(self._timegrouper).transform(
arg, *args, **kwargs
)
|
Call function producing a like-indexed Series on each group.
Return a Series with the transformed values.
Parameters
----------
arg : function
To apply to each group. Should return a Series with the same index.
*args, **kwargs
Additional arguments and keywords.
Returns
-------
Series
A Series with the transformed values, maintaining the same index as
the original object.
See Also
--------
core.resample.Resampler.apply : Apply a function along each group.
core.resample.Resampler.aggregate : Aggregate using one or more operations
over the specified axis.
Examples
--------
>>> s = pd.Series([1, 2], index=pd.date_range("20180101", periods=2, freq="1h"))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
Freq: h, dtype: int64
>>> resampled = s.resample("15min")
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
2018-01-01 00:00:00 NaN
2018-01-01 01:00:00 NaN
Freq: h, dtype: float64
|
python
|
pandas/core/resample.py
| 453
|
[
"self",
"arg"
] | false
| 1
| 6
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
registerContainedBean
|
public void registerContainedBean(String containedBeanName, String containingBeanName) {
synchronized (this.containedBeanMap) {
Set<String> containedBeans =
this.containedBeanMap.computeIfAbsent(containingBeanName, key -> new LinkedHashSet<>(8));
if (!containedBeans.add(containedBeanName)) {
return;
}
}
registerDependentBean(containedBeanName, containingBeanName);
}
|
Register a containment relationship between two beans,
for example, between an inner bean and its containing outer bean.
<p>Also registers the containing bean as dependent on the contained bean
in terms of destruction order.
@param containedBeanName the name of the contained (inner) bean
@param containingBeanName the name of the containing (outer) bean
@see #registerDependentBean
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 582
|
[
"containedBeanName",
"containingBeanName"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
escapeEcmaScript
|
public static final String escapeEcmaScript(final String input) {
return ESCAPE_ECMASCRIPT.translate(input);
}
|
Escapes the characters in a {@link String} using EcmaScript String rules.
<p>Escapes any values it finds into their EcmaScript String form.
Deals correctly with quotes and control-chars (tab, backslash, cr, ff, etc.) </p>
<p>So a tab becomes the characters {@code '\\'} and
{@code 't'}.</p>
<p>The only difference between Java strings and EcmaScript strings
is that in EcmaScript, a single quote and forward-slash (/) are escaped.</p>
<p>Note that EcmaScript is best known by the JavaScript and ActionScript dialects.</p>
<p>Example:</p>
<pre>
input string: He didn't say, "Stop!"
output string: He didn\'t say, \"Stop!\"
</pre>
@param input String to escape values in, may be null
@return String with escaped values, {@code null} if null string input
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 458
|
[
"input"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
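The rule difference called out above is small enough to show directly. A sketch of EcmaScript-style escaping in Python covering quotes, backslash, slash, and the common control characters (an approximation of the translator table, not the full commons-lang implementation):

```python
_ECMA_ESCAPES = {
    "\\": "\\\\", "'": "\\'", '"': '\\"', "/": "\\/",
    "\t": "\\t", "\n": "\\n", "\r": "\\r", "\f": "\\f", "\b": "\\b",
}

def escape_ecmascript(text):
    if text is None:
        return None  # null in, null out, as the javadoc specifies
    return "".join(_ECMA_ESCAPES.get(ch, ch) for ch in text)

print(escape_ecmascript('He didn\'t say, "Stop!"'))
# He didn\'t say, \"Stop!\"
```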
info
|
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
A String containing the python pandas class name, filepath to the HDF5
file and all the object keys along with their respective dataframe shapes.
See Also
--------
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["C", "D"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data1", df1) # doctest: +SKIP
>>> store.put("data2", df2) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
File path: store.h5
/data1 frame (shape->[2,2])
/data2 frame (shape->[2,2])
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if lkeys:
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
|
Print detailed information on the store.
Returns
-------
str
A String containing the python pandas class name, filepath to the HDF5
file and all the object keys along with their respective dataframe shapes.
See Also
--------
HDFStore.get_storer : Returns the storer object for a key.
Examples
--------
>>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=["C", "D"])
>>> store = pd.HDFStore("store.h5", "w") # doctest: +SKIP
>>> store.put("data1", df1) # doctest: +SKIP
>>> store.put("data2", df2) # doctest: +SKIP
>>> print(store.info()) # doctest: +SKIP
>>> store.close() # doctest: +SKIP
<class 'pandas.io.pytables.HDFStore'>
File path: store.h5
/data1 frame (shape->[2,2])
/data2 frame (shape->[2,2])
|
python
|
pandas/io/pytables.py
| 1,747
|
[
"self"
] |
str
| true
| 9
| 7.04
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
toString
|
@Override
public String toString() {
// consistent with the original implementation
return " at character " + this.pos + " of " + this.in;
}
|
Returns the current position and the entire input string.
@return the current position and the entire input string.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
| 463
|
[] |
String
| true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
internalMatch
|
function internalMatch(string, regexp, message, fn) {
if (!isRegExp(regexp)) {
throw new ERR_INVALID_ARG_TYPE(
'regexp', 'RegExp', regexp,
);
}
const match = fn === Assert.prototype.match;
if (typeof string !== 'string' ||
RegExpPrototypeExec(regexp, string) !== null !== match) {
const generatedMessage = message.length === 0;
// 'The input was expected to not match the regular expression ' +
message[0] ||= (typeof string !== 'string' ?
'The "string" argument must be of type string. Received type ' +
`${typeof string} (${inspect(string)})` :
(match ?
'The input did not match the regular expression ' :
'The input was expected to not match the regular expression ') +
`${inspect(regexp)}. Input:\n\n${inspect(string)}\n`);
innerFail({
actual: string,
expected: regexp,
message,
operator: fn.name,
stackStartFn: fn,
diff: this?.[kOptions]?.diff,
generatedMessage: generatedMessage,
});
}
}
|
Throws `AssertionError` if the value is not `null` or `undefined`.
@param {any} err
@returns {void}
|
javascript
|
lib/assert.js
| 822
|
[
"string",
"regexp",
"message",
"fn"
] | false
| 6
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return this.element + (this.targetClass != null ? " on " + this.targetClass : "");
}
|
Create a new instance with the specified {@link AnnotatedElement} and
optional target {@link Class}.
|
java
|
spring-context/src/main/java/org/springframework/context/expression/AnnotatedElementKey.java
| 65
|
[] |
String
| true
| 2
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
createMetadataResource
|
private FileObject createMetadataResource(String location) throws IOException {
return this.environment.getFiler().createResource(StandardLocation.CLASS_OUTPUT, "", location);
}
|
Read additional {@link ConfigurationMetadata} for the {@link TypeElement} or
{@code null}.
@param typeElement the type to get additional metadata for
@return additional metadata for the given type or {@code null} if none is present
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 177
|
[
"location"
] |
FileObject
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
to_string
|
def to_string(
self,
buf: FilePath | WriteBuffer[str] | None = None,
*,
columns: Axes | None = None,
col_space: int | list[int] | dict[Hashable, int] | None = None,
header: bool | SequenceNotStr[str] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: fmt.FormattersType | None = None,
float_format: fmt.FloatFormatType | None = None,
sparsify: bool | None = None,
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: int | None = None,
min_rows: int | None = None,
max_colwidth: int | None = None,
encoding: str | None = None,
) -> str | None:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
encoding : str, default "utf-8"
Set character encoding.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {"col1": [1, 2, 3], "col2": [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
|
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
encoding : str, default "utf-8"
Set character encoding.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {"col1": [1, 2, 3], "col2": [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
|
python
|
pandas/core/frame.py
| 1,316
|
[
"self",
"buf",
"columns",
"col_space",
"header",
"index",
"na_rep",
"formatters",
"float_format",
"sparsify",
"index_names",
"justify",
"max_rows",
"max_cols",
"show_dimensions",
"decimal",
"line_width",
"min_rows",
"max_colwidth",
"encoding"
] |
str | None
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
create
|
public static URL create(File file) {
return create(file, (String) null);
}
|
Create a new jar URL.
@param file the jar file
@return a jar file URL
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrl.java
| 40
|
[
"file"
] |
URL
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_hc_cut
|
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
Cluster labels for each point.
"""
if n_clusters > n_leaves:
raise ValueError(
"Cannot extract more clusters than samples: "
f"{n_clusters} clusters were given for a tree with {n_leaves} leaves."
)
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for _ in range(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
|
Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
Cluster labels for each point.
|
python
|
sklearn/cluster/_agglomerative.py
| 731
|
[
"n_clusters",
"children",
"n_leaves"
] | false
| 4
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
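The heap comment above deserves a standalone illustration: `heapq` is a min-heap, so pushing negated node ids keeps the largest (most recently merged) node at index 0. A small sketch of that pattern, independent of the scikit-learn tree encoding:

```python
import heapq

# heapq is a min-heap; store negated ids so the largest id sits at index 0.
nodes = []
for node_id in [3, 11, 7, 42, 5]:
    heapq.heappush(nodes, -node_id)
print(-nodes[0])  # 42: the largest id, found without re-scanning

# "Expand" node 42 into children 20 and 30, as _hc_cut does each
# iteration: push one child, then push the other while popping the
# current maximum (the expanded node) in one O(log n) heappushpop.
heapq.heappush(nodes, -20)
heapq.heappushpop(nodes, -30)
print(sorted(-n for n in nodes))  # [3, 5, 7, 11, 20, 30]
```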
fit
|
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : ndarray of shape (n_samples,) or (n_samples, n_classes)
Training data.
This should be the output of `decision_function` or `predict_proba`.
If the input appears to be probabilities (i.e., values between 0 and 1
that sum to 1 across classes), it will be converted to logits using
`np.log(p + eps)`.
Binary decision function outputs (1D) will be converted to two-class
logits of the form (-x, x). For shapes of the form (n_samples, 1), the
same process applies.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
xp, _, xp_device = get_namespace_and_device(X, y)
X, y = indexable(X, y)
check_consistent_length(X, y)
logits = _convert_to_logits(X) # guarantees xp.float64 or xp.float32
dtype_ = logits.dtype
labels = column_or_1d(y, dtype=dtype_)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, labels, dtype=dtype_)
if _is_numpy_namespace(xp):
multinomial_loss = HalfMultinomialLoss(n_classes=logits.shape[1])
else:
multinomial_loss = partial(_half_multinomial_loss, xp=xp)
def log_loss(log_beta=0.0):
"""Compute the log loss as a parameter of the inverse temperature
(beta).
Parameters
----------
log_beta : float
The current logarithm of the inverse temperature value during
optimisation.
Returns
-------
negative_log_likelihood_loss : float
The negative log likelihood loss.
"""
# TODO: numpy 2.0
# Ensure raw_prediction has the same dtype as labels using .astype().
# Without this, dtype promotion rules differ across NumPy versions:
#
# beta = np.float64(0)
# logits = np.array([1, 2], dtype=np.float32)
#
# result = beta * logits
# - NumPy < 2: result.dtype is float32
# - NumPy 2+: result.dtype is float64
#
# This can cause dtype mismatch errors downstream (e.g., buffer dtype).
log_beta = xp.asarray(log_beta, dtype=dtype_, device=xp_device)
raw_prediction = xp.exp(log_beta) * logits
return multinomial_loss(labels, raw_prediction, sample_weight)
xatol = 64 * xp.finfo(dtype_).eps
log_beta_minimizer = minimize_scalar(
log_loss,
bounds=(-10.0, 10.0),
options={
"xatol": xatol,
},
)
if not log_beta_minimizer.success: # pragma: no cover
raise RuntimeError(
"Temperature scaling fails to optimize during calibration. "
"Reason from `scipy.optimize.minimize_scalar`: "
f"{log_beta_minimizer.message}"
)
self.beta_ = xp.exp(
xp.asarray(log_beta_minimizer.x, dtype=dtype_, device=xp_device)
)
return self
|
Fit the model using X, y as training data.
Parameters
----------
X : ndarray of shape (n_samples,) or (n_samples, n_classes)
Training data.
This should be the output of `decision_function` or `predict_proba`.
If the input appears to be probabilities (i.e., values between 0 and 1
that sum to 1 across classes), it will be converted to logits using
`np.log(p + eps)`.
Binary decision function outputs (1D) will be converted to two-class
logits of the form (-x, x). For shapes of the form (n_samples, 1), the
same process applies.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
|
python
|
sklearn/calibration.py
| 1,076
|
[
"self",
"X",
"y",
"sample_weight"
] | false
| 5
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
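The docstring's conversion rules can be made concrete: probability rows become logs, and a 1-D binary decision function becomes symmetric two-class logits. A sketch of that preprocessing (the real `_convert_to_logits` is private to scikit-learn; this only mirrors the behavior the docstring describes):

```python
import numpy as np

def to_logits(X, eps=1e-12):
    X = np.asarray(X, dtype=float)
    if X.ndim == 1 or X.shape[1] == 1:
        # Binary decision scores x -> two-class logits (-x, x).
        x = X.reshape(-1)
        return np.column_stack((-x, x))
    if np.all((X >= 0) & (X <= 1)) and np.allclose(X.sum(axis=1), 1.0):
        return np.log(X + eps)  # probabilities -> logits
    return X  # already decision-function output

print(to_logits(np.array([2.0, -1.0])))
print(to_logits(np.array([[0.9, 0.1], [0.2, 0.8]])))
```

Temperature scaling then fits a single scalar beta and scores with `beta * logits`, which is what the `log_loss` closure in the body optimizes over `log_beta`.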
_do_scheduling
|
def _do_scheduling(self, session: Session) -> int:
"""
Make the main scheduling decisions.
It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
We don't select all dagruns at once, because the rows are selected with row locks, meaning
that only one scheduler can "process them", even if it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks) DAGs.
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_enqueue_task_instances for more.
:return: Number of TIs enqueued in this iteration
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
self._start_queued_dagruns(session)
guard.commit()
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
dag_runs = DagRun.get_running_dag_runs_to_examine(session=session)
callback_tuples = self._schedule_all_dag_runs(guard, dag_runs, session)
# Send the callbacks after we commit to ensure the context is up to date when it gets run
# cache saves time during scheduling of many dag_runs for same dag
cached_get_dag: Callable[[DagRun], SerializedDAG | None] = lru_cache()(
partial(self.scheduler_dag_bag.get_dag_for_run, session=session)
)
for dag_run, callback_to_run in callback_tuples:
dag = cached_get_dag(dag_run)
if dag:
# Sending callbacks to the database, so it must be done outside of prohibit_commit.
self._send_dag_callbacks_to_processor(dag, callback_to_run)
else:
self.log.error("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
with prohibit_commit(session) as guard:
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
# Attempt to schedule even if some executors are full but not all.
total_free_executor_slots = sum([executor.slots_available for executor in self.job.executors])
if total_free_executor_slots <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("All executors are full, skipping critical section")
num_queued_tis = 0
else:
try:
timer = Stats.timer("scheduler.critical_section_duration")
timer.start()
# Find any TIs in state SCHEDULED, try to QUEUE them (send it to the executors)
num_queued_tis = self._critical_section_enqueue_task_instances(session=session)
# Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
# metric, way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr("scheduler.critical_section_busy")
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
|
Make the main scheduling decisions.
It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
We don't select all dagruns at once, because the rows are selected with row locks, meaning
that only one scheduler can "process them", even if it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks) DAGs.
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_enqueue_task_instances for more.
:return: Number of TIs enqueued in this iteration
|
python
|
airflow-core/src/airflow/jobs/scheduler_job_runner.py
| 1,583
|
[
"self",
"session"
] |
int
| true
| 8
| 6.8
|
apache/airflow
| 43,597
|
unknown
| false
|
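The "critical section" described above boils down to taking row locks with SKIP LOCKED so a second scheduler moves on instead of blocking. A hedged SQLAlchemy sketch, where TaskInstance is a stand-in mapped class rather than Airflow's real model:

from sqlalchemy import select
from sqlalchemy.orm import Session

def enqueue_batch(session: Session, TaskInstance, limit: int = 20) -> int:
    # SKIP LOCKED: rows locked by another scheduler are simply skipped
    rows = session.execute(
        select(TaskInstance)
        .where(TaskInstance.state == "scheduled")
        .limit(limit)
        .with_for_update(skip_locked=True)
    ).scalars().all()
    for ti in rows:
        ti.state = "queued"
    return len(rows)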
mergeValuesToHistogram
|
private void mergeValuesToHistogram() {
if (valueCount == 0) {
return;
}
Arrays.sort(rawValueBuffer, 0, valueCount);
int negativeValuesCount = 0;
while (negativeValuesCount < valueCount && rawValueBuffer[negativeValuesCount] < 0) {
negativeValuesCount++;
}
valueBuffer.reset();
Aggregates aggregates = rawValuesAggregates();
valueBuffer.setSum(aggregates.sum());
valueBuffer.setMin(aggregates.min());
valueBuffer.setMax(aggregates.max());
int scale = valueBuffer.scale();
// Buckets must be provided with their indices in ascending order.
// For the negative range, higher bucket indices correspond to bucket boundaries closer to -INF
// and smaller bucket indices correspond to bucket boundaries closer to zero.
// therefore we have to iterate the negative values in the sorted rawValueBuffer in reverse order,
// from the value closest to -INF to the value closest to zero.
// note that i here is the index of the value in the rawValueBuffer array
// and is unrelated to the histogram bucket index for the value.
for (int i = negativeValuesCount - 1; i >= 0; i--) {
long count = 1;
long index = computeIndex(rawValueBuffer[i], scale);
while ((i - 1) >= 0 && computeIndex(rawValueBuffer[i - 1], scale) == index) {
i--;
count++;
}
valueBuffer.tryAddBucket(index, count, false);
}
int zeroCount = 0;
while ((negativeValuesCount + zeroCount) < valueCount && rawValueBuffer[negativeValuesCount + zeroCount] == 0) {
zeroCount++;
}
valueBuffer.setZeroBucket(ZeroBucket.minimalWithCount(zeroCount));
for (int i = negativeValuesCount + zeroCount; i < valueCount; i++) {
long count = 1;
long index = computeIndex(rawValueBuffer[i], scale);
while ((i + 1) < valueCount && computeIndex(rawValueBuffer[i + 1], scale) == index) {
i++;
count++;
}
valueBuffer.tryAddBucket(index, count, true);
}
resultMerger.add(valueBuffer);
valueCount = 0;
}
|
Merges the buffered raw values into the histogram: sorts them, collapses runs that map to the same
bucket index across the negative, zero and positive ranges, and resets the value buffer.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramGenerator.java
| 115
|
[] |
void
| true
| 12
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
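The core trick in the record above is run-length collapsing of sorted values that share a bucket index. A hedged Python sketch with an illustrative base-2 index function standing in for computeIndex:

import math
from itertools import groupby

def bucket_index(value: float, scale: int = 0) -> int:
    # toy exponential-histogram index; not the production mapping
    return math.floor(math.log2(value) * (1 << scale))

values = sorted(v for v in [0.5, 0.6, 1.1, 2.3, 2.4, 9.0] if v > 0)
buckets = [(idx, sum(1 for _ in run))
           for idx, run in groupby(values, key=bucket_index)]
print(buckets)  # [(-1, 2), (0, 1), (1, 2), (3, 1)]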
setAsText
|
@Override
public void setAsText(String text) throws IllegalArgumentException {
boolean nioPathCandidate = !text.startsWith(ResourceUtils.CLASSPATH_URL_PREFIX);
if (nioPathCandidate && !text.startsWith("/")) {
try {
URI uri = ResourceUtils.toURI(text);
String scheme = uri.getScheme();
if (scheme != null) {
// No NIO candidate except for "C:" style drive letters
nioPathCandidate = (scheme.length() == 1);
// Let's try NIO file system providers via Paths.get(URI)
setValue(Paths.get(uri).normalize());
return;
}
}
catch (URISyntaxException ex) {
// Not a valid URI; potentially a Windows-style path after
// a file prefix (let's try as Spring resource location)
nioPathCandidate = !text.startsWith(ResourceUtils.FILE_URL_PREFIX);
}
catch (FileSystemNotFoundException | IllegalArgumentException ex) {
// URI scheme not registered for NIO or not meeting Paths requirements:
// let's try URL protocol handlers via Spring's resource mechanism.
}
}
this.resourceEditor.setAsText(text);
Resource resource = (Resource) this.resourceEditor.getValue();
if (resource == null) {
setValue(null);
}
else if (nioPathCandidate && (!resource.isFile() || !resource.exists())) {
setValue(Paths.get(text).normalize());
}
else {
try {
setValue(resource.getFilePath());
}
catch (IOException ex) {
String msg = "Could not resolve \"" + text + "\" to 'java.nio.file.Path' for " + resource + ": " +
ex.getMessage();
if (nioPathCandidate) {
msg += " - In case of ambiguity, consider adding the 'file:' prefix for an explicit reference " +
"to a file system resource of the same name: \"file:" + text + "\"";
}
throw new IllegalArgumentException(msg);
}
}
}
|
Parse the given text into a {@code java.nio.file.Path}, trying NIO file system providers first
and falling back to Spring's resource abstraction.
@param text the path or resource location to parse
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/PathEditor.java
| 75
|
[
"text"
] |
void
| true
| 12
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
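The parsing order above (URI first, plain path as fallback) translates naturally to Python's standard library. A hedged sketch; it handles only the file: scheme and skips Spring's resource abstraction entirely:

from pathlib import Path
from urllib.parse import urlparse
from urllib.request import url2pathname

def to_path(text: str) -> Path:
    parsed = urlparse(text)
    # note: a one-letter "scheme" is really a Windows drive letter ("C:")
    if parsed.scheme == "file":
        return Path(url2pathname(parsed.path))
    return Path(text)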
containsNone
|
public static boolean containsNone(final CharSequence cs, final char... searchChars) {
if (cs == null || searchChars == null) {
return true;
}
final int csLen = cs.length();
final int csLast = csLen - 1;
final int searchLen = searchChars.length;
final int searchLast = searchLen - 1;
for (int i = 0; i < csLen; i++) {
final char ch = cs.charAt(i);
for (int j = 0; j < searchLen; j++) {
if (searchChars[j] == ch) {
if (!Character.isHighSurrogate(ch) || j == searchLast || i < csLast && searchChars[j + 1] == cs.charAt(i + 1)) {
return false;
}
}
}
}
return true;
}
|
Tests that the CharSequence does not contain certain characters.
<p>
A {@code null} CharSequence will return {@code true}. A {@code null} invalid character array will return {@code true}. An empty CharSequence (length()=0)
always returns true.
</p>
<pre>
StringUtils.containsNone(null, *) = true
StringUtils.containsNone(*, null) = true
StringUtils.containsNone("", *) = true
StringUtils.containsNone("ab", '') = true
StringUtils.containsNone("abab", 'xyz') = true
StringUtils.containsNone("ab1", 'xyz') = true
StringUtils.containsNone("abz", 'xyz') = false
</pre>
@param cs the CharSequence to check, may be null.
@param searchChars an array of invalid chars, may be null.
@return true if it contains none of the invalid chars, or is null.
@since 2.0
@since 3.0 Changed signature from containsNone(String, char[]) to containsNone(CharSequence, char...)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,216
|
[
"cs"
] | true
| 10
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
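Without Java's surrogate-pair handling, the containment check above reduces to a few lines of Python; a hedged equivalent for quick experimentation:

def contains_none(text, search):
    if not text or not search:
        return True
    return not any(ch in search for ch in text)

assert contains_none("abab", "xyz") and not contains_none("abz", "xyz")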
getSpringInitializationConfig
|
@Override
protected @Nullable String getSpringInitializationConfig() {
ConfigurationFactory configurationFactory = ConfigurationFactory.getInstance();
try {
Configuration springConfiguration = configurationFactory.getConfiguration(getLoggerContext(), "-spring",
null, getClassLoader());
String configLocation = getConfigLocation(springConfiguration);
return (configLocation != null && configLocation.contains("-spring")) ? configLocation : null;
}
catch (ConfigurationException ex) {
statusLogger.warn("Could not load Spring-specific Log4j Core configuration", ex);
return null;
}
}
|
Return the location of a Spring-specific ("-spring") Log4j Core configuration,
or {@code null} if none could be loaded.
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 142
|
[] |
String
| true
| 4
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
build
|
@Override
public ImmutableSortedSet<E> build() {
sortAndDedup();
if (n == 0) {
return emptySet(comparator);
} else {
forceCopy = true;
return new RegularImmutableSortedSet<>(asImmutableList(elements, n), comparator);
}
}
|
Returns a newly-created {@code ImmutableSortedSet} based on the contents of the {@code
Builder} and its comparator.
|
java
|
guava/src/com/google/common/collect/ImmutableSortedSet.java
| 586
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
deleteTopics
|
default DeleteTopicsResult deleteTopics(Collection<String> topics) {
return deleteTopics(TopicCollection.ofTopicNames(topics), new DeleteTopicsOptions());
}
|
This is a convenience method for {@link #deleteTopics(TopicCollection, DeleteTopicsOptions)}
with default options. See the overload for more details.
<p>
This operation is supported by brokers with version 0.10.1.0 or higher.
@param topics The topic names to delete.
@return The DeleteTopicsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 212
|
[
"topics"
] |
DeleteTopicsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
getBean
|
public <T> T getBean(String name, @Nullable Class<T> requiredType, @Nullable Object @Nullable ... args)
throws BeansException {
return doGetBean(name, requiredType, args, false);
}
|
Return an instance, which may be shared or independent, of the specified bean.
@param name the name of the bean to retrieve
@param requiredType the required type of the bean to retrieve
@param args arguments to use when creating a bean instance using explicit arguments
(only applied when creating a new instance as opposed to retrieving an existing one)
@return an instance of the bean
@throws BeansException if the bean could not be created
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 218
|
[
"name",
"requiredType"
] |
T
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
clearListeners
|
private @Nullable Listener clearListeners(@Nullable Listener onto) {
// We need to
// 1. atomically swap the listeners with TOMBSTONE, this is because addListener uses that
// to synchronize with us
// 2. reverse the linked list, because despite our rather clear contract, people depend on us
// executing listeners in the order they were added
// 3. push all the items onto 'onto' and return the new head of the stack
Listener head = gasListeners(Listener.TOMBSTONE);
Listener reversedList = onto;
while (head != null) {
Listener tmp = head;
head = head.next;
tmp.next = reversedList;
reversedList = tmp;
}
return reversedList;
}
|
Clears the {@link #listeners} list and prepends its contents to {@code onto}, least recently
added first.
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
| 840
|
[
"onto"
] |
Listener
| true
| 2
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
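The reversal step above is an ordinary in-place singly-linked-list reversal onto an existing head. A hedged Python sketch of just that step (the atomic swap with TOMBSTONE is omitted):

from dataclasses import dataclass
from typing import Optional

@dataclass
class Listener:
    name: str
    next: "Optional[Listener]" = None

def clear_listeners(head, onto):
    reversed_list = onto
    while head is not None:
        # relink the node onto the reversed list, then advance
        head.next, reversed_list, head = reversed_list, head, head.next
    return reversed_list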
dotAllMatcher
|
public static Matcher dotAllMatcher(final String regex, final CharSequence text) {
return dotAll(regex).matcher(text);
}
|
Compiles the given regular expression into a pattern with the {@link Pattern#DOTALL} flag, then creates a matcher that will match the given text against
this pattern.
@param regex The expression to be compiled.
@param text The character sequence to be matched.
@return A new matcher for this pattern.
@since 3.18.0
|
java
|
src/main/java/org/apache/commons/lang3/RegExUtils.java
| 56
|
[
"regex",
"text"
] |
Matcher
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
columnKeySet
|
@Override
public Set<C> columnKeySet() {
Set<C> result = columnKeySet;
return (result == null) ? columnKeySet = new ColumnKeySet() : result;
}
|
{@inheritDoc}
<p>The returned set has an iterator that does not support {@code remove()}.
<p>The set's iterator traverses the columns of the first row, the columns of the second row,
etc., skipping any columns that have appeared previously.
|
java
|
android/guava/src/com/google/common/collect/StandardTable.java
| 656
|
[] | true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
putmask
|
def putmask(self, mask, value) -> Index:
"""
Return a new Index of the values set with the mask.
Parameters
----------
mask : array-like of bool
Array of booleans denoting where values should be replaced.
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
Returns
-------
Index
A new Index of the values set with the mask.
See Also
--------
numpy.putmask : Changes elements of an array
based on conditional and input values.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([5, 6, 7])
>>> idx1.putmask([True, False, False], idx2)
Index([5, 2, 3], dtype='int64')
"""
mask, noop = validate_putmask(self._values, mask)
if noop:
return self.copy()
if self.dtype != object and is_valid_na_for_dtype(value, self.dtype):
# e.g. None -> np.nan, see also Block._standardize_fill_value
value = self._na_value
try:
converted = self._validate_fill_value(value)
except (LossySetitemError, ValueError, TypeError) as err:
if is_object_dtype(self.dtype): # pragma: no cover
raise err
# See also: Block.coerce_to_target_dtype
dtype = self._find_common_type_compat(value)
if dtype == self.dtype:
# GH#56376 avoid RecursionError
raise AssertionError(
"Something has gone wrong. Please report a bug at "
"github.com/pandas-dev/pandas"
) from err
return self.astype(dtype).putmask(mask, value)
values = self._values.copy()
if isinstance(values, np.ndarray):
converted = setitem_datetimelike_compat(values, mask.sum(), converted)
np.putmask(values, mask, converted)
else:
# Note: we use the original value here, not converted, as
# _validate_fill_value is not idempotent
values._putmask(mask, value)
return self._shallow_copy(values)
|
Return a new Index of the values set with the mask.
Parameters
----------
mask : array-like of bool
Array of booleans denoting where values should be replaced.
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
Returns
-------
Index
A new Index of the values set with the mask.
See Also
--------
numpy.putmask : Changes elements of an array
based on conditional and input values.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3])
>>> idx2 = pd.Index([5, 6, 7])
>>> idx1.putmask([True, False, False], idx2)
Index([5, 2, 3], dtype='int64')
|
python
|
pandas/core/indexes/base.py
| 5,427
|
[
"self",
"mask",
"value"
] |
Index
| true
| 8
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
forceMergeIndex
|
private void forceMergeIndex(ProjectId projectId, ForceMergeRequest forceMergeRequest, ActionListener<Void> listener) {
assert forceMergeRequest.indices() != null && forceMergeRequest.indices().length == 1
: "Data stream lifecycle force merges one index at a time";
final String targetIndex = forceMergeRequest.indices()[0];
logger.info("Data stream lifecycle is issuing a request to force merge index [{}]", targetIndex);
client.projectClient(projectId).admin().indices().forceMerge(forceMergeRequest, new ActionListener<>() {
@Override
public void onResponse(BroadcastResponse forceMergeResponse) {
if (forceMergeResponse.getFailedShards() > 0) {
DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures();
String message = Strings.format(
"Data stream lifecycle failed to forcemerge %d shards for index [%s] due to failures [%s]",
forceMergeResponse.getFailedShards(),
targetIndex,
failures == null
? "unknown"
: Arrays.stream(failures).map(DefaultShardOperationFailedException::toString).collect(Collectors.joining(","))
);
onFailure(new ElasticsearchException(message));
} else if (forceMergeResponse.getTotalShards() != forceMergeResponse.getSuccessfulShards()) {
String message = Strings.format(
"Force merge request only had %d successful shards out of a total of %d",
forceMergeResponse.getSuccessfulShards(),
forceMergeResponse.getTotalShards()
);
onFailure(new ElasticsearchException(message));
} else {
logger.info("Data stream lifecycle successfully force merged index [{}]", targetIndex);
setForceMergeCompletedTimestamp(projectId, targetIndex, listener);
}
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
|
Issues a force-merge request for a single index on behalf of data stream lifecycle management,
reporting shard-level failures and recording the force-merge completion timestamp on success.
@param projectId The project the index belongs to
@param forceMergeRequest A force-merge request targeting exactly one index
@param listener Listener notified when the force merge completes or fails
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,332
|
[
"projectId",
"forceMergeRequest",
"listener"
] |
void
| true
| 5
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
newCopyOnWriteArrayList
|
@J2ktIncompatible
@GwtIncompatible // CopyOnWriteArrayList
@InlineMe(
replacement = "new CopyOnWriteArrayList<>()",
imports = {"java.util.concurrent.CopyOnWriteArrayList"})
public static <E extends @Nullable Object> CopyOnWriteArrayList<E> newCopyOnWriteArrayList() {
return new CopyOnWriteArrayList<>();
}
|
Creates an empty {@code CopyOnWriteArrayList} instance.
<p><b>Note:</b> if you need an immutable empty {@link List}, use {@link Collections#emptyList}
instead.
@return a new, empty {@code CopyOnWriteArrayList}
@since 12.0
|
java
|
android/guava/src/com/google/common/collect/Lists.java
| 264
|
[] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
readToHeapBuffer
|
private static int readToHeapBuffer(InputStream input, ByteBuffer buffer, int count) throws IOException {
final int pos = buffer.position();
int read = readFully(input, buffer.array(), buffer.arrayOffset() + pos, count);
if (read > 0) {
buffer.position(pos + read);
}
return read;
}
|
Read up to {@code count} bytes from {@code input} and store them into {@code buffer}.
The buffer's position will be incremented by the number of bytes read from the stream.
@param input stream to read from
@param buffer buffer to read into
@param count maximum number of bytes to read
@return number of bytes read from the stream
@throws IOException in case of I/O errors
|
java
|
libs/core/src/main/java/org/elasticsearch/core/Streams.java
| 99
|
[
"input",
"buffer",
"count"
] | true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
bind
|
@Contract("_, _, _, true -> !null")
private <T> @Nullable T bind(ConfigurationPropertyName name, Bindable<T> target, @Nullable BindHandler handler,
boolean create) {
Assert.notNull(name, "'name' must not be null");
Assert.notNull(target, "'target' must not be null");
handler = (handler != null) ? handler : this.defaultBindHandler;
Context context = new Context();
return bind(name, target, handler, context, false, create);
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 353
|
[
"name",
"target",
"handler",
"create"
] |
T
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
open
|
private static ZipContent open(Source source) throws IOException {
ZipContent zipContent = cache.get(source);
if (zipContent != null) {
debug.log("Opening existing cached zip content for %s", zipContent);
zipContent.data.open();
return zipContent;
}
debug.log("Loading zip content from %s", source);
zipContent = Loader.load(source);
ZipContent previouslyCached = cache.putIfAbsent(source, zipContent);
if (previouslyCached != null) {
debug.log("Closing zip content from %s since cache was populated from another thread", source);
zipContent.close();
previouslyCached.data.open();
return previouslyCached;
}
return zipContent;
}
|
Open {@link ZipContent} for the given source, reusing a cached instance when possible. The
resulting {@link ZipContent} <em>must</em> be {@link #close() closed} by the caller.
@param source the zip source to open
@return a {@link ZipContent} instance
@throws IOException on I/O error
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 376
|
[
"source"
] |
ZipContent
| true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
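The cache race handled above (load outside the lock, then let putIfAbsent pick a winner) is a general pattern. A hedged Python sketch where load returns any closeable handle:

import threading

_cache: dict = {}
_lock = threading.Lock()

def open_cached(source, load):
    with _lock:
        existing = _cache.get(source)
    if existing is not None:
        return existing
    fresh = load(source)               # expensive work outside the lock
    with _lock:
        winner = _cache.setdefault(source, fresh)
    if winner is not fresh:
        fresh.close()                  # lost the race; discard our copy
    return winner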
timeoutCallsToSend
|
private int timeoutCallsToSend(TimeoutProcessor processor) {
int numTimedOut = 0;
for (List<Call> callList : callsToSend.values()) {
numTimedOut += processor.handleTimeouts(callList,
"Timed out waiting to send the call.");
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) with assigned nodes.", numTimedOut);
return numTimedOut;
}
|
Time out calls which have been assigned to nodes.
@param processor The timeout processor.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 1,139
|
[
"processor"
] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
ensureNotClosed
|
private void ensureNotClosed() {
if (this.closed)
throw new IllegalStateException("This consumer has already been closed.");
}
|
Throws an {@link IllegalStateException} if this consumer has already been closed.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java
| 617
|
[] |
void
| true
| 2
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
validate_host
|
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lowercased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
return any(
pattern == "*" or is_same_domain(host, pattern) for pattern in allowed_hosts
)
|
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lowercased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
|
python
|
django/http/request.py
| 826
|
[
"host",
"allowed_hosts"
] | false
| 2
| 6.24
|
django/django
| 86,204
|
unknown
| false
|
|
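validate_host leans on a subdomain-matching helper; a hedged reimplementation of the leading-dot rule (Django's own is_same_domain is a private helper, so this is illustrative):

def is_same_domain(host, pattern):
    pattern = pattern.lower()
    if pattern.startswith("."):
        return host == pattern[1:] or host.endswith(pattern)
    return host == pattern

def validate_host(host, allowed_hosts):
    return any(p == "*" or is_same_domain(host, p) for p in allowed_hosts)

assert validate_host("api.example.com", [".example.com"])
assert not validate_host("evil.com", [".example.com"])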
_get_contiguous_fusible_spans
|
def _get_contiguous_fusible_spans(gm: fx.GraphModule) -> list[list[fx.Node]]:
"""Get contiguous spans of fusible nodes from the graph.
Walks the graph in topological order and groups consecutive fusible
nodes into spans. Non-fusible nodes act as span boundaries.
"""
spans: list[list[fx.Node]] = []
current_span: list[fx.Node] = []
for node in gm.graph.nodes:
if is_fusible_node(node):
current_span.append(node)
else:
# Non-fusible node ends the current span
if current_span:
spans.append(current_span)
current_span = []
if current_span:
spans.append(current_span)
return spans
|
Get contiguous spans of fusible nodes from the graph.
Walks the graph in topological order and groups consecutive fusible
nodes into spans. Non-fusible nodes act as span boundaries.
|
python
|
torch/_inductor/fx_passes/fusion_regions.py
| 106
|
[
"gm"
] |
list[list[fx.Node]]
| true
| 6
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
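The span-grouping loop above can also be expressed with itertools.groupby, which groups consecutive items by key; a hedged generic version:

from itertools import groupby

def contiguous_spans(nodes, is_fusible):
    return [list(run)
            for fusible, run in groupby(nodes, key=is_fusible)
            if fusible]

assert contiguous_spans([1, 2, 0, 3, 0, 4, 5], bool) == [[1, 2], [3], [4, 5]]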
python_type
|
def python_type(self) -> type:
"""
Abstract method to be implemented by subclasses of VariableTracker.
This method should return the type represented by the instance of the subclass.
The purpose is to provide a standardized way to retrieve the Python type information
of the variable being tracked.
Returns:
type: The Python type (such as int, str, list, etc.) of the variable tracked by
the subclass. If the type cannot be determined or is not relevant,
leaving it undefined or invoking super() is always sound.
Note:
This is an abstract method and may be overridden in subclasses.
Example:
class SetVariable(VariableTracker):
def python_type(self):
return set
Raises:
NotImplementedError: If the method is not implemented in a subclass.
"""
try:
return type(self.as_python_constant())
except NotImplementedError:
raise NotImplementedError(f"{self} has no type") from None
|
Abstract method to be implemented by subclasses of VariableTracker.
This method should return the type represented by the instance of the subclass.
The purpose is to provide a standardized way to retrieve the Python type information
of the variable being tracked.
Returns:
type: The Python type (such as int, str, list, etc.) of the variable tracked by
the subclass. If the type cannot be determined or is not relevant,
leaving it undefined or invoking super() is always sound.
Note:
This is an abstract method and may be overridden in subclasses.
Example:
class SetVariable(VariableTracker):
def python_type(self):
return set
Raises:
NotImplementedError: If the method is not implemented in a subclass.
|
python
|
torch/_dynamo/variables/base.py
| 328
|
[
"self"
] |
type
| true
| 1
| 8.32
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
add
|
private void add(@Nullable Object[] elements, int n) {
ensureRoomFor(n);
/*
* The following call is not statically checked, since arraycopy accepts plain Object for its
* parameters. If it were statically checked, the checker would still be OK with it, since
* we're copying into a `contents` array whose type allows it to contain nulls. Still, it's
* worth noting that we promise not to put nulls into the array in the first `size` elements.
* We uphold that promise here because our callers promise that `elements` will not contain
* nulls in its first `n` elements.
*/
System.arraycopy(elements, 0, contents, size, n);
size += n;
}
|
Adds the first {@code n} elements of {@code elements} to the builder's contents array.
@param elements the array holding the elements to add
@param n the number of leading elements to copy; callers guarantee these are non-null
|
java
|
guava/src/com/google/common/collect/ImmutableList.java
| 866
|
[
"elements",
"n"
] |
void
| true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
getUmdImportKind
|
function getUmdImportKind(importingFile: SourceFile | FutureSourceFile, program: Program, forceImportKeyword: boolean): ImportKind {
// Import a synthetic `default` if enabled.
if (getAllowSyntheticDefaultImports(program.getCompilerOptions())) {
return ImportKind.Default;
}
// When a synthetic `default` is unavailable, use `import..require` if the module kind supports it.
const moduleKind = getEmitModuleKind(program.getCompilerOptions());
switch (moduleKind) {
case ModuleKind.AMD:
case ModuleKind.CommonJS:
case ModuleKind.UMD:
if (hasJSFileExtension(importingFile.fileName)) {
return importingFile.externalModuleIndicator || forceImportKeyword ? ImportKind.Namespace : ImportKind.CommonJS;
}
return ImportKind.CommonJS;
case ModuleKind.System:
case ModuleKind.ES2015:
case ModuleKind.ES2020:
case ModuleKind.ES2022:
case ModuleKind.ESNext:
case ModuleKind.None:
case ModuleKind.Preserve:
// Fall back to the `import * as ns` style import.
return ImportKind.Namespace;
case ModuleKind.Node16:
case ModuleKind.Node18:
case ModuleKind.Node20:
case ModuleKind.NodeNext:
return getImpliedNodeFormatForEmit(importingFile, program) === ModuleKind.ESNext ? ImportKind.Namespace : ImportKind.CommonJS;
default:
return Debug.assertNever(moduleKind, `Unexpected moduleKind ${moduleKind}`);
}
}
|
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`.
(In other words, do not allow `const x = require("...")` for JS files.)
@internal
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,535
|
[
"importingFile",
"program",
"forceImportKeyword"
] | true
| 6
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
scratch
|
private ByteBuffer scratch() {
if (scratch == null) {
scratch = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
}
return scratch;
}
|
Returns the lazily-allocated little-endian scratch buffer used for encoding primitive values.
|
java
|
android/guava/src/com/google/common/hash/AbstractByteHasher.java
| 139
|
[] |
ByteBuffer
| true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
dtypes
|
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
Series.dtypes : Return the dtype object of the underlying data.
Examples
--------
>>> df = pd.DataFrame(
... {
... "float": [1.0],
... "int": [1],
... "datetime": [pd.Timestamp("20180310")],
... "string": ["foo"],
... }
... )
>>> df.dtypes
float float64
int int64
datetime datetime64[us]
string str
dtype: object
"""
data = self._mgr.get_dtypes()
return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_)
|
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
Series.dtypes : Return the dtype object of the underlying data.
Examples
--------
>>> df = pd.DataFrame(
... {
... "float": [1.0],
... "int": [1],
... "datetime": [pd.Timestamp("20180310")],
... "string": ["foo"],
... }
... )
>>> df.dtypes
float float64
int int64
datetime datetime64[us]
string str
dtype: object
|
python
|
pandas/core/generic.py
| 6,279
|
[
"self"
] | false
| 1
| 6.48
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
jit_user_function
|
def jit_user_function(func: Callable) -> Callable:
"""
If user function is not jitted already, mark the user's function
as jitable.
Parameters
----------
func : function
user defined function
Returns
-------
function
Numba JITed function, or function marked as JITable by numba
"""
if TYPE_CHECKING:
import numba
else:
numba = import_optional_dependency("numba")
if numba.extending.is_jitted(func):
# Don't jit a user passed jitted function
numba_func = func
elif getattr(np, func.__name__, False) is func or isinstance(
func, types.BuiltinFunctionType
):
# Not necessary to jit builtins or np functions
# This will mess up register_jitable
numba_func = func
else:
numba_func = numba.extending.register_jitable(func)
return numba_func
|
If user function is not jitted already, mark the user's function
as jitable.
Parameters
----------
func : function
user defined function
Returns
-------
function
Numba JITed function, or function marked as JITable by numba
|
python
|
pandas/core/util/numba_.py
| 59
|
[
"func"
] |
Callable
| true
| 7
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
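A hedged usage sketch for the record above: register_jitable marks a plain function as callable from nopython-mode code without compiling it eagerly (requires numba to be installed):

import numba
import numpy as np

@numba.extending.register_jitable
def scale(x):
    return 2.0 * x

@numba.njit
def apply_scale(arr):
    out = np.empty_like(arr)
    for i in range(arr.shape[0]):
        out[i] = scale(arr[i])   # the jitable helper compiles in here
    return out

print(apply_scale(np.arange(3.0)))  # [0. 2. 4.]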
find_duplicates
|
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
|
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
|
python
|
numpy/lib/recfunctions.py
| 1,417
|
[
"a",
"key",
"ignoremask",
"return_index"
] | false
| 6
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
make_low_rank_matrix
|
def make_low_rank_matrix(
n_samples=100,
n_features=100,
*,
effective_rank=10,
tail_strength=0.5,
random_state=None,
):
"""Generate a mostly low rank matrix with bell-shaped singular values.
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
effective_rank : int, default=10
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile. The value should be between 0 and 1.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The matrix.
Examples
--------
>>> from numpy.linalg import svd
>>> from sklearn.datasets import make_low_rank_matrix
>>> X = make_low_rank_matrix(
... n_samples=50,
... n_features=25,
... effective_rank=5,
... tail_strength=0.01,
... random_state=0,
... )
>>> X.shape
(50, 25)
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(
generator.standard_normal(size=(n_samples, n)),
mode="economic",
check_finite=False,
)
v, _ = linalg.qr(
generator.standard_normal(size=(n_features, n)),
mode="economic",
check_finite=False,
)
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = (1 - tail_strength) * np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
|
Generate a mostly low rank matrix with bell-shaped singular values.
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, default=100
The number of samples.
n_features : int, default=100
The number of features.
effective_rank : int, default=10
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float, default=0.5
The relative importance of the fat noisy tail of the singular values
profile. The value should be between 0 and 1.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The matrix.
Examples
--------
>>> from numpy.linalg import svd
>>> from sklearn.datasets import make_low_rank_matrix
>>> X = make_low_rank_matrix(
... n_samples=50,
... n_features=25,
... effective_rank=5,
... tail_strength=0.01,
... random_state=0,
... )
>>> X.shape
(50, 25)
|
python
|
sklearn/datasets/_samples_generator.py
| 1,423
|
[
"n_samples",
"n_features",
"effective_rank",
"tail_strength",
"random_state"
] | false
| 1
| 6.4
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
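A short follow-up to the record's own example: inspecting the singular-value decay makes the bell-shaped profile visible (the docstring imports svd but never uses it):

import numpy as np
from sklearn.datasets import make_low_rank_matrix

X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
                         tail_strength=0.01, random_state=0)
s = np.linalg.svd(X, compute_uv=False)
print(s[:6] / s[0])  # the first ~effective_rank values dominate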
batchIterator
|
private AbstractIterator<FileChannelRecordBatch> batchIterator(int start) {
final int end;
if (isSlice)
end = this.end;
else
end = this.sizeInBytes();
FileLogInputStream inputStream = new FileLogInputStream(this, start, end);
return new RecordBatchIterator<>(inputStream);
}
|
Get an iterator over the record batches in the file, starting at a specific position. This is similar to
{@link #batches()} except that callers specify a particular position to start reading the batches from. This
method must be used with caution: the start position passed in must be a known start of a batch.
@param start The position to start record iteration from; must be a known position for start of a batch
@return An iterator over batches starting from {@code start}
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 434
|
[
"start"
] | true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
groupState
|
public Optional<GroupState> groupState() {
return groupState;
}
|
The group state.
<p>
If the broker returns a group state which is not recognised, as might
happen when talking to a broker with a later version, the state will be
<code>Optional.of(GroupState.UNKNOWN)</code>.
@return An Optional containing the state, if available.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/GroupListing.java
| 90
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
visitLabeledStatement
|
function visitLabeledStatement(node: LabeledStatement): VisitResult<Statement | undefined> {
if (convertedLoopState && !convertedLoopState.labels) {
convertedLoopState.labels = new Map<string, boolean>();
}
const statement = unwrapInnermostStatementOfLabel(node, convertedLoopState && recordLabel);
return isIterationStatement(statement, /*lookInLabeledStatements*/ false)
? visitIterationStatement(statement, /*outermostLabeledStatement*/ node)
: factory.restoreEnclosingLabel(visitNode(statement, visitor, isStatement, factory.liftToBlock) ?? setTextRange(factory.createEmptyStatement(), statement), node, convertedLoopState && resetLabel);
}
|
Visits a LabeledStatement node, recording its label and delegating to the iteration-statement
visitor when the labeled statement is a loop.
@param node A LabeledStatement node.
|
typescript
|
src/compiler/transformers/es2015.ts
| 2,943
|
[
"node"
] | true
| 6
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
nunique
|
def nunique(self, dropna: bool = True) -> Series | DataFrame:
"""
Return number of unique elements in the group.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
Number of unique values within each group.
See Also
--------
core.resample.Resampler.nunique : Method nunique for Resampler.
Examples
--------
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 3], index=lst)
>>> ser
a 1
a 2
b 3
b 3
dtype: int64
>>> ser.groupby(level=0).nunique()
a 2
b 1
dtype: int64
"""
ids = self._grouper.ids
ngroups = self._grouper.ngroups
val = self.obj._values
codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False)
if self._grouper.has_dropped_na:
mask = ids >= 0
ids = ids[mask]
codes = codes[mask]
group_index = get_group_index(
labels=[ids, codes],
shape=(ngroups, len(uniques)),
sort=False,
xnull=dropna,
)
if dropna:
mask = group_index >= 0
if (~mask).any():
ids = ids[mask]
group_index = group_index[mask]
mask = duplicated(group_index, "first")
res = np.bincount(ids[~mask], minlength=ngroups)
res = ensure_int64(res)
ri = self._grouper.result_index
result: Series | DataFrame = self.obj._constructor(
res, index=ri, name=self.obj.name
)
if not self.as_index:
result = self._insert_inaxis_grouper(result)
result.index = default_index(len(result))
return result
|
Return number of unique elements in the group.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
Number of unique values within each group.
See Also
--------
core.resample.Resampler.nunique : Method nunique for Resampler.
Examples
--------
>>> lst = ["a", "a", "b", "b"]
>>> ser = pd.Series([1, 2, 3, 3], index=lst)
>>> ser
a 1
a 2
b 3
b 3
dtype: int64
>>> ser.groupby(level=0).nunique()
a 2
b 1
dtype: int64
|
python
|
pandas/core/groupby/generic.py
| 962
|
[
"self",
"dropna"
] |
Series | DataFrame
| true
| 5
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_freeze_unroll
|
def _freeze_unroll(self, new_tasks, group_id, chord, root_id, parent_id):
"""Generator for the frozen flattened group tasks.
Creates a flattened list of the tasks in the group, and freezes
each task in the group. Nested groups will be recursively flattened.
Exhausting the generator will create a new list of the flattened
tasks in the group and will return it in the new_tasks argument.
Arguments:
new_tasks (list): The list to append the flattened tasks to.
group_id (str): The group_id to use for the tasks.
chord (Chord): The chord to use for the tasks.
root_id (str): The root_id to use for the tasks.
parent_id (str): The parent_id to use for the tasks.
Yields:
AsyncResult: The frozen task.
"""
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
stack = deque(self.tasks)
group_index = 0
while stack:
task = maybe_signature(stack.popleft(), app=self._app).clone()
# if this is a group, flatten it by adding all of the group's tasks to the stack
if isinstance(task, group):
stack.extendleft(task.tasks)
else:
new_tasks.append(task)
yield task.freeze(group_id=group_id,
chord=chord, root_id=root_id,
parent_id=parent_id,
group_index=group_index)
group_index += 1
|
Generator for the frozen flattened group tasks.
Creates a flattened list of the tasks in the group, and freezes
each task in the group. Nested groups will be recursively flattened.
Exhausting the generator will create a new list of the flattened
tasks in the group and will return it in the new_tasks argument.
Arguments:
new_tasks (list): The list to append the flattened tasks to.
group_id (str): The group_id to use for the tasks.
chord (Chord): The chord to use for the tasks.
root_id (str): The root_id to use for the tasks.
parent_id (str): The parent_id to use for the tasks.
Yields:
AsyncResult: The frozen task.
|
python
|
celery/canvas.py
| 1,892
|
[
"self",
"new_tasks",
"group_id",
"chord",
"root_id",
"parent_id"
] | false
| 4
| 6.08
|
celery/celery
| 27,741
|
google
| false
|
|
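The deque-based flattening above generalizes beyond Celery. A hedged sketch using nested lists as stand-ins for groups; note that deque.extendleft reverses its input, hence the reversed() call to preserve order:

from collections import deque

def flatten(tasks):
    stack = deque(tasks)
    while stack:
        task = stack.popleft()
        if isinstance(task, list):            # stand-in for a nested group
            stack.extendleft(reversed(task))
        else:
            yield task

assert list(flatten([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]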
listGroups
|
ListGroupsResult listGroups(ListGroupsOptions options);
|
List the groups available in the cluster.
@param options The options to use when listing the groups.
@return The ListGroupsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,080
|
[
"options"
] |
ListGroupsResult
| true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
entryIterator
|
@Override
UnmodifiableIterator<Entry<K, V>> entryIterator() {
return new UnmodifiableIterator<Entry<K, V>>() {
final Iterator<? extends Entry<K, ? extends ImmutableCollection<V>>> asMapItr =
map.entrySet().iterator();
@Nullable K currentKey = null;
Iterator<V> valueItr = emptyIterator();
@Override
public boolean hasNext() {
return valueItr.hasNext() || asMapItr.hasNext();
}
@Override
public Entry<K, V> next() {
if (!valueItr.hasNext()) {
Entry<K, ? extends ImmutableCollection<V>> entry = asMapItr.next();
currentKey = entry.getKey();
valueItr = entry.getValue().iterator();
}
/*
* requireNonNull is safe: The first call to this method always enters the !hasNext() case
* and populates currentKey, after which it's never cleared.
*/
return immutableEntry(requireNonNull(currentKey), valueItr.next());
}
};
}
|
Returns an unmodifiable iterator over all key-value pairs in the multimap.
|
java
|
android/guava/src/com/google/common/collect/ImmutableMultimap.java
| 649
|
[] | true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
clearShort
|
public short clearShort(final short holder) {
return (short) clear(holder);
}
|
Clears the bits.
@param holder the short data containing the bits we're
interested in
@return the value of holder with the specified bits cleared
(set to {@code 0})
|
java
|
src/main/java/org/apache/commons/lang3/BitField.java
| 123
|
[
"holder"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
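The clear operation above is simply AND with the complement of the field mask. A hedged Python analogue with an illustrative mask value:

MASK = 0b0011_1000

def clear_bits(holder: int, mask: int = MASK) -> int:
    return holder & ~mask

assert clear_bits(0b1111_1111) == 0b1100_0111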
withPassword
|
public PemSslStoreDetails withPassword(@Nullable String password) {
return new PemSslStoreDetails(this.type, this.alias, password, this.certificates, this.privateKey,
this.privateKeyPassword);
}
|
Return a new {@link PemSslStoreDetails} instance with a new password.
@param password the new password
@return a new {@link PemSslStoreDetails} instance
@since 3.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStoreDetails.java
| 112
|
[
"password"
] |
PemSslStoreDetails
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
initializeBean
|
Object initializeBean(Object existingBean, String beanName) throws BeansException;
|
Initialize the given raw bean, applying factory callbacks
such as {@code setBeanName} and {@code setBeanFactory},
also applying all bean post processors (including ones which
might wrap the given raw bean).
<p>Note that no bean definition of the given name has to exist
in the bean factory. The passed-in bean name will simply be used
for callbacks but not checked against the registered bean definitions.
@param existingBean the existing bean instance
@param beanName the name of the bean, to be passed to it if necessary
(only passed to {@link BeanPostProcessor BeanPostProcessors};
can follow the {@link #ORIGINAL_INSTANCE_SUFFIX} convention in order to
enforce the given instance to be returned, i.e. no proxies etc)
@return the bean instance to use, either the original or a wrapped one
@throws BeansException if the initialization failed
@see #ORIGINAL_INSTANCE_SUFFIX
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/AutowireCapableBeanFactory.java
| 286
|
[
"existingBean",
"beanName"
] |
Object
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_get_provider_version_from_package_name
|
def _get_provider_version_from_package_name(provider_package_name: str) -> str | None:
"""
Get the current version of a provider from its pyproject.toml.
Args:
provider_package_name: The full package name (e.g., "apache-airflow-providers-common-compat")
Returns:
The version string if found, None otherwise
"""
# Convert package name to provider path
# apache-airflow-providers-common-compat -> common/compat
provider_id = provider_package_name.replace("apache-airflow-providers-", "").replace("-", "/")
provider_pyproject = AIRFLOW_PROVIDERS_ROOT_PATH / provider_id / "pyproject.toml"
if not provider_pyproject.exists():
get_console().print(f"[warning]Provider pyproject.toml not found: {provider_pyproject}")
return None
provider_toml = load_pyproject_toml(provider_pyproject)
provider_version = provider_toml.get("project", {}).get("version")
if not provider_version:
get_console().print(
f"[warning]Could not find version for {provider_package_name} in {provider_pyproject}"
)
return None
return provider_version
|
Get the current version of a provider from its pyproject.toml.
Args:
provider_package_name: The full package name (e.g., "apache-airflow-providers-common-compat")
Returns:
The version string if found, None otherwise
|
python
|
dev/breeze/src/airflow_breeze/utils/packages.py
| 1,187
|
[
"provider_package_name"
] |
str | None
| true
| 3
| 7.76
|
apache/airflow
| 43,597
|
google
| false
|
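The lookup above needs nothing beyond the standard library on Python 3.11+; a hedged sketch mirroring the record's directory-layout assumption:

import tomllib
from pathlib import Path

def provider_version(package: str, providers_root: Path):
    provider_id = package.removeprefix("apache-airflow-providers-").replace("-", "/")
    pyproject = providers_root / provider_id / "pyproject.toml"
    if not pyproject.exists():
        return None
    data = tomllib.loads(pyproject.read_text())
    return data.get("project", {}).get("version")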
compile_fx_forward
|
def compile_fx_forward(
gm: GraphModule,
example_inputs: Sequence[InputType],
num_orig_model_outputs: int,
num_example_inputs: int,
compiler_config_extra: CompilerConfigExtra,
inner_compile: Callable[..., OutputCode] = compile_fx_inner,
is_inference: bool = False,
) -> OutputCode:
"""
Compile the forward graph of the given graph module.
Args:
gm: The graph module to compile.
example_inputs: The example inputs to use for compilation.
num_orig_model_outputs: The number of model outputs from the original dynamo graph.
num_example_inputs: The number of example inputs from the original dynamo graph.
compiler_config_extra: Extra configuration for the compiler.
inner_compile: The inner compile function to use.
is_inference: Whether this is an inference graph.
"""
if is_inference:
# partition_fn won't be called
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "before_joint_graph",
"encoding": "string",
},
payload_fn=lambda: gm.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
_recursive_joint_graph_passes(gm)
trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "after_joint_graph",
"encoding": "string",
},
payload_fn=lambda: gm.print_readable(
print_output=False, include_stride=True, include_device=True
),
)
fixed = torch._inductor.utils.num_fw_fixed_arguments(
num_example_inputs, len(example_inputs)
)
model_outputs_node = output_node(gm)
if config.keep_output_stride:
model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
num_model_outputs = len(model_outputs)
context = torch._guards.TracingContext.try_get()
# See Note [User Outputs in the inductor graph]
if context is not None and context.fw_metadata and not is_inference:
original_output_start_index = (
context.fw_metadata.num_mutated_inp_runtime_indices
)
else:
original_output_start_index = 0
assert num_orig_model_outputs <= num_model_outputs
# Note [User Outputs in the inductor graph]
# We make the following assumptions:
# For inference
# len(orig_model_outputs) == len(model_outputs)
# For training
# len(orig_model_outputs) <= len(model_outputs)
# During training, model_outputs usually starts with the original
# module's outputs followed by saved activations.
# But this may not hold if the model has in-place updated tensors:
# AOTAutograd will return those tensors before the original
# module's outputs.
# To make things safe, we'll use original_output_start_index field
# set by AOTAutograd to decide where the original module outputs start.
orig_output_end_idx = original_output_start_index + num_orig_model_outputs
# Sanity check: we are about to splice out the "user" outputs from the full set
# of "graph" outputs. Make sure we're within bounds.
assert orig_output_end_idx <= num_model_outputs
model_outputs_node.meta["user_visible_output_idxs"] = [
idx
for idx in range(original_output_start_index, orig_output_end_idx)
if isinstance(model_outputs[idx], torch.fx.Node)
]
else:
model_outputs_node.meta["user_visible_output_idxs"] = []
# We also mark the invoke_subgraph outputs as user_visible to
# force the outputs of invoke_subgraph subgraph to follow the
# original strides
_recursive_record_user_visible_output_idxs(gm)
return inner_compile(
gm,
example_inputs,
static_input_idxs=get_static_input_idxs(fixed),
cudagraphs=compiler_config_extra.cudagraphs,
graph_id=compiler_config_extra.graph_id,
is_inference=is_inference,
boxed_forward_device_index=compiler_config_extra.forward_device,
)
|
Compile the forward graph of the given graph module.
Args:
gm: The graph module to compile.
example_inputs: The example inputs to use for compilation.
num_orig_model_outputs: The number of model outputs from the original dynamo graph.
num_example_inputs: The number of example inputs from the original dynamo graph.
compiler_config_extra: Extra configuration for the compiler.
inner_compile: The inner compile function to use.
is_inference: Whether this is an inference graph.
|
python
|
torch/_inductor/compile_fx.py
| 2,258
|
[
"gm",
"example_inputs",
"num_orig_model_outputs",
"num_example_inputs",
"compiler_config_extra",
"inner_compile",
"is_inference"
] |
OutputCode
| true
| 8
| 6.32
|
pytorch/pytorch
| 96,034
|
google
| false
|
clearMergedBeanDefinition
|
protected void clearMergedBeanDefinition(String beanName) {
RootBeanDefinition bd = this.mergedBeanDefinitions.get(beanName);
if (bd != null) {
bd.stale = true;
}
}
|
Remove the merged bean definition for the specified bean,
recreating it on next access.
@param beanName the bean name to clear the merged definition for
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,523
|
[
"beanName"
] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
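The stale-flag idiom used here, marking a cached entry invalid so it is rebuilt lazily on next access rather than evicting it outright, is easy to sketch outside of Spring. A minimal Python analogue; build_definition is a hypothetical callable:
# Minimal sketch of stale-flag cache invalidation.
class DefinitionCache:
    def __init__(self, build_definition):
        self._build = build_definition  # callable: name -> definition
        self._cache = {}                # name -> [definition, stale]

    def get(self, name):
        entry = self._cache.get(name)
        if entry is None or entry[1]:   # missing or marked stale: rebuild
            entry = [self._build(name), False]
            self._cache[name] = entry
        return entry[0]

    def clear(self, name):
        entry = self._cache.get(name)
        if entry is not None:
            entry[1] = True             # recreated on next access

cache = DefinitionCache(lambda name: {"name": name})
first = cache.get("a")
cache.clear("a")
assert cache.get("a") is not first      # the stale entry was rebuilt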
typename
|
def typename(char):
"""
Return a description for the given data type code.
Parameters
----------
char : str
Data type code.
Returns
-------
out : str
Description of the input data type code.
See Also
--------
dtype
Examples
--------
>>> import numpy as np
>>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
>>> for typechar in typechars:
... print(typechar, ' : ', np.typename(typechar))
...
S1 : character
? : bool
B : unsigned char
D : complex double precision
G : complex long double precision
F : complex single precision
I : unsigned integer
H : unsigned short
L : unsigned long integer
O : object
Q : unsigned long long integer
S : string
U : unicode
V : void
b : signed char
d : double precision
g : long precision
f : single precision
i : integer
h : short
l : long integer
q : long long integer
"""
return _namefromtype[char]
|
Return a description for the given data type code.
Parameters
----------
char : str
Data type code.
Returns
-------
out : str
Description of the input data type code.
See Also
--------
dtype
Examples
--------
>>> import numpy as np
>>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
>>> for typechar in typechars:
... print(typechar, ' : ', np.typename(typechar))
...
S1 : character
? : bool
B : unsigned char
D : complex double precision
G : complex long double precision
F : complex single precision
I : unsigned integer
H : unsigned short
L : unsigned long integer
O : object
Q : unsigned long long integer
S : string
U : unicode
V : void
b : signed char
d : double precision
g : long precision
f : single precision
i : integer
h : short
l : long integer
q : long long integer
|
python
|
numpy/lib/_type_check_impl.py
| 586
|
[
"char"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getBytes
|
public ByteBuffer getBytes(String name) {
Object result = get(name);
if (result instanceof byte[])
return ByteBuffer.wrap((byte[]) result);
return (ByteBuffer) result;
}
|
Get the value of the given field as a {@link ByteBuffer}, wrapping a raw byte
array when necessary.
@param name the field name
@return the field value as a ByteBuffer
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/Struct.java
| 120
|
[
"name"
] |
ByteBuffer
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
_get_providers_class_registry
|
def _get_providers_class_registry(
class_extras: dict[str, Callable] | None = None,
) -> dict[str, dict[str, Any]]:
"""
Builds a registry of classes from YAML configuration files.
This function scans through YAML configuration files to build a registry of classes.
It parses each YAML file to get the provider's name and registers classes from Python
module files within the provider's directory, excluding '__init__.py'.
:return: A dictionary with provider names as keys and a dictionary of classes as values.
"""
class_registry = {}
for provider_yaml_content in load_package_data():
provider_pkg_root = Path(provider_yaml_content["package-dir"])
for root, _, file_names in os.walk(provider_pkg_root):
folder = Path(root)
for file_name in file_names:
if not file_name.endswith(".py") or file_name == "__init__.py":
continue
module_filepath = folder.joinpath(file_name)
module_registry = _get_module_class_registry(
module_filepath=module_filepath,
module_name=(
provider_yaml_content["python-module"]
+ "."
+ module_filepath.relative_to(provider_pkg_root)
.with_suffix("")
.as_posix()
.replace("/", ".")
),
class_extras={
"provider_name": lambda **kwargs: provider_yaml_content["package-name"],
"provider_version": lambda **kwargs: provider_yaml_content["versions"][0],
**(class_extras or {}),
},
)
class_registry.update(module_registry)
return class_registry
|
Builds a registry of classes from YAML configuration files.
This function scans through YAML configuration files to build a registry of classes.
It parses each YAML file to get the provider's name and registers classes from Python
module files within the provider's directory, excluding '__init__.py'.
:return: A dictionary with provider names as keys and a dictionary of classes as values.
|
python
|
devel-common/src/sphinx_exts/providers_extensions.py
| 231
|
[
"class_extras"
] |
dict[str, dict[str, Any]]
| true
| 7
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
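The dotted module name is derived purely from the file path relative to the provider package root. A standalone sketch of that derivation, using hypothetical paths:
from pathlib import Path

provider_pkg_root = Path("providers/amazon/src/airflow/providers/amazon")
module_filepath = provider_pkg_root / "aws" / "hooks" / "rds.py"
python_module = "airflow.providers.amazon"

module_name = (
    python_module
    + "."
    + module_filepath.relative_to(provider_pkg_root)
    .with_suffix("")
    .as_posix()
    .replace("/", ".")
)
print(module_name)  # airflow.providers.amazon.aws.hooks.rds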
_maybe_convert_i8
|
def _maybe_convert_i8(self, key):
"""
Maybe convert a given key to its equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
Parameters
----------
key : scalar or list-like
The key that should maybe be converted to i8.
Returns
-------
scalar or list-like
The original key if no conversion occurred, int if converted scalar,
Index with an int64 dtype if converted list-like.
"""
if is_list_like(key):
key = ensure_index(key)
key = maybe_upcast_numeric_to_64bit(key)
if not self._needs_i8_conversion(key):
return key
scalar = is_scalar(key)
key_dtype = getattr(key, "dtype", None)
if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):
# convert left/right and reconstruct
left = self._maybe_convert_i8(key.left)
right = self._maybe_convert_i8(key.right)
constructor = Interval if scalar else IntervalIndex.from_arrays
return constructor(left, right, closed=self.closed)
if scalar:
# Timestamp/Timedelta
key_dtype, key_i8 = infer_dtype_from_scalar(key)
if isinstance(key, Period):
key_i8 = key.ordinal
elif isinstance(key_i8, Timestamp):
key_i8 = key_i8._value
elif isinstance(key_i8, (np.datetime64, np.timedelta64)):
key_i8 = key_i8.view("i8")
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
# convert NaT from its i8 value to np.nan so it's not viewed
# as a valid value, maybe causing errors (e.g. is_overlapping)
key_i8 = key_i8.where(~key._isnan)
# ensure consistency with IntervalIndex subtype
# error: Item "ExtensionDtype"/"dtype[Any]" of "Union[dtype[Any],
# ExtensionDtype]" has no attribute "subtype"
subtype = self.dtype.subtype # type: ignore[union-attr]
if subtype != key_dtype:
raise ValueError(
f"Cannot index an IntervalIndex of subtype {subtype} with "
f"values of dtype {key_dtype}"
)
return key_i8
|
Maybe convert a given key to its equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
Parameters
----------
key : scalar or list-like
The key that should maybe be converted to i8.
Returns
-------
scalar or list-like
The original key if no conversion occurred, int if converted scalar,
Index with an int64 dtype if converted list-like.
|
python
|
pandas/core/indexes/interval.py
| 650
|
[
"self",
"key"
] | false
| 13
| 6
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
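The heart of the conversion is viewing datetime-like values as their int64 ("i8") representation so the IntervalTree engine only ever sees numbers. A small demonstration of the scalar and array branches, assuming pandas is installed (Timestamp._value is an internal attribute):
import numpy as np
import pandas as pd

ts = pd.Timestamp("2024-01-01")
print(ts._value)                               # nanoseconds since the epoch (internal attribute)
print(np.datetime64("2024-01-01").view("i8"))  # integer count in this unit (days here)

dti = pd.date_range("2024-01-01", periods=3)
print(pd.Index(dti.asi8))                      # int64 Index of epoch nanoseconds, as in the array branch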
chebcompanion
|
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0] / c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)] * (n - 1))
top = mat.reshape(-1)[1::n + 1]
bot = mat.reshape(-1)[n::n + 1]
top[0] = np.sqrt(.5)
top[1:] = 1 / 2
bot[...] = top
mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * .5
return mat
|
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
|
python
|
numpy/polynomial/chebyshev.py
| 1,627
|
[
"c"
] | false
| 3
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
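A quick way to see why the scaling matters: for a pure Chebyshev basis polynomial the companion matrix comes out symmetric, so its eigenvalues, which are the polynomial's roots, can be computed with numpy.linalg.eigvalsh. For T_2, whose roots are +/- sqrt(2)/2:
import numpy as np
from numpy.polynomial import chebyshev as C

mat = C.chebcompanion([0, 0, 1])  # coefficients of T_2, low to high degree
print(np.allclose(mat, mat.T))    # True -- symmetric for a basis polynomial
print(np.linalg.eigvalsh(mat))    # [-0.70710678  0.70710678], the roots of T_2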
getDefaultFileTypeMap
|
protected FileTypeMap getDefaultFileTypeMap(MimeMessage mimeMessage) {
if (mimeMessage instanceof SmartMimeMessage smartMimeMessage) {
FileTypeMap fileTypeMap = smartMimeMessage.getDefaultFileTypeMap();
if (fileTypeMap != null) {
return fileTypeMap;
}
}
ConfigurableMimeFileTypeMap fileTypeMap = new ConfigurableMimeFileTypeMap();
fileTypeMap.afterPropertiesSet();
return fileTypeMap;
}
|
Determine the default Java Activation FileTypeMap for the given MimeMessage.
@param mimeMessage the passed-in MimeMessage
@return the default FileTypeMap associated with the MimeMessage,
or a default ConfigurableMimeFileTypeMap if none found for the message
@see ConfigurableMimeFileTypeMap
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 447
|
[
"mimeMessage"
] |
FileTypeMap
| true
| 3
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
reset_modules
|
from contextlib import contextmanager  # implied by the with-statement usage below

@contextmanager
def reset_modules(*modules):
"""Remove modules from :data:`sys.modules` by name,
and reset back again when the test/context returns.
Example::
>>> with conftest.reset_modules('celery.result', 'celery.app.base'):
... pass
"""
prev = {
k: sys.modules.pop(k) for k in modules if k in sys.modules
}
try:
for k in modules:
reload(import_module(k))
yield
finally:
sys.modules.update(prev)
|
Remove modules from :data:`sys.modules` by name,
and reset back again when the test/context returns.
Example::
>>> with conftest.reset_modules('celery.result', 'celery.app.base'):
... pass
|
python
|
t/unit/conftest.py
| 654
|
[] | false
| 2
| 7.04
|
celery/celery
| 27,741
|
unknown
| false
|
|
contains
|
public boolean contains(final StrMatcher matcher) {
return indexOf(matcher, 0) >= 0;
}
|
Checks if the string builder contains a string matched using the
specified matcher.
<p>
Matchers can be used to perform advanced searching behavior.
For example you could write a matcher to search for the character
'a' followed by a number.
</p>
@param matcher the matcher to use, a null matcher returns false
@return true if the matcher finds a match in the builder
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,651
|
[
"matcher"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
ensureNotNull
|
public static void ensureNotNull(Object value, String message) {
if (value == null) {
throw new IllegalArgumentException(message);
}
}
|
Ensures that the given value is not {@code null}.
@param value the value to check
@param message the message of the {@link IllegalArgumentException} thrown when the value is null
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 1,303
|
[
"value",
"message"
] |
void
| true
| 2
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
timeout
|
public Optional<Duration> timeout() {
return timeout;
}
|
Returns the timeout to apply to the close operation, if one was configured.
@return an {@code Optional} containing the close timeout, or an empty {@code Optional} if none was set.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java
| 109
|
[] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
supportedFeatures
|
public static Features<SupportedVersionRange> supportedFeatures(Map<String, SupportedVersionRange> features) {
return new Features<>(features);
}
|
@param features Map of feature name to SupportedVersionRange.
@return Returns a new Features object representing supported features.
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/Features.java
| 55
|
[
"features"
] | true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
onHeartbeatRequestGenerated
|
public void onHeartbeatRequestGenerated() {
MemberState state = state();
if (state == MemberState.ACKNOWLEDGING) {
if (targetAssignmentReconciled()) {
transitionTo(MemberState.STABLE);
} else {
log.debug("Member {} with epoch {} transitioned to {} after a heartbeat was sent " +
"to ack a previous reconciliation. \n" +
"\t\tCurrent assignment: {} \n" +
"\t\tTarget assignment: {}\n",
memberId, memberEpoch, MemberState.RECONCILING, currentAssignment, currentTargetAssignment);
transitionTo(MemberState.RECONCILING);
}
} else if (state == MemberState.LEAVING) {
if (isPollTimerExpired) {
log.debug("Member {} with epoch {} generated the heartbeat to leave due to expired poll timer. It will " +
"remain stale (no heartbeat) until it rejoins the group on the next consumer " +
"poll.", memberId, memberEpoch);
transitionToStale();
} else {
log.debug("Member {} with epoch {} generated the heartbeat to leave the group.", memberId, memberEpoch);
transitionTo(MemberState.UNSUBSCRIBED);
}
}
}
|
Update state when a heartbeat is generated. This will transition out of the states that end
when a heartbeat request is sent, without waiting for a response (e.g.
{@link MemberState#ACKNOWLEDGING} and {@link MemberState#LEAVING}).
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 702
|
[] |
void
| true
| 5
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
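The transition logic reduces to a small decision table over the current state and two flags. A minimal Python sketch of the same decisions; the state strings mirror the MemberState enum and the boolean arguments are hypothetical stand-ins for targetAssignmentReconciled() and isPollTimerExpired:
def on_heartbeat_generated(state, target_reconciled, poll_timer_expired):
    # ACKNOWLEDGING and LEAVING are the only states that end when the
    # heartbeat request is generated, without waiting for a response.
    if state == "ACKNOWLEDGING":
        return "STABLE" if target_reconciled else "RECONCILING"
    if state == "LEAVING":
        return "STALE" if poll_timer_expired else "UNSUBSCRIBED"
    return state  # all other states are unaffected

assert on_heartbeat_generated("ACKNOWLEDGING", True, False) == "STABLE"
assert on_heartbeat_generated("LEAVING", False, True) == "STALE"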
validateResponse
|
private void validateResponse(ClassicHttpResponse httpResponse, String serviceUrl) {
if (httpResponse.getEntity() == null) {
throw new ReportableException("No content received from server '" + serviceUrl + "'");
}
if (httpResponse.getCode() != 200) {
throw createException(serviceUrl, httpResponse);
}
}
|
Validates the response received from the Initializr service.
@param httpResponse the HTTP response to validate
@param serviceUrl the url of the initializr service
@throws ReportableException if the response has no content or a non-200 status code
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrService.java
| 143
|
[
"httpResponse",
"serviceUrl"
] |
void
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
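The two checks, a non-empty body and a 200 status code, translate directly to any HTTP client. A minimal Python sketch with a stand-in response object (FakeResponse is hypothetical):
def validate_response(response, service_url):
    # Mirrors the checks above: a body must be present and the status must be 200.
    if not response.content:
        raise RuntimeError(f"No content received from server '{service_url}'")
    if response.status_code != 200:
        raise RuntimeError(f"Server '{service_url}' returned status {response.status_code}")

class FakeResponse:  # stand-in for a real HTTP client response
    def __init__(self, content, status_code):
        self.content, self.status_code = content, status_code

validate_response(FakeResponse(b"{}", 200), "https://example.invalid")  # passes silently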
reverse
|
public static String reverse(final String str) {
if (str == null) {
return null;
}
return new StringBuilder(str).reverse().toString();
}
|
Reverses a String as per {@link StringBuilder#reverse()}.
<p>
A {@code null} String returns {@code null}.
</p>
<pre>
StringUtils.reverse(null) = null
StringUtils.reverse("") = ""
StringUtils.reverse("bat") = "tab"
</pre>
@param str the String to reverse, may be null.
@return the reversed String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,792
|
[
"str"
] |
String
| true
| 2
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getAutoConfigurationReplacements
|
private AutoConfigurationReplacements getAutoConfigurationReplacements() {
AutoConfigurationReplacements autoConfigurationReplacements = this.autoConfigurationReplacements;
if (autoConfigurationReplacements == null) {
autoConfigurationReplacements = AutoConfigurationReplacements.load(this.autoConfigurationAnnotation,
this.beanClassLoader);
this.autoConfigurationReplacements = autoConfigurationReplacements;
}
return autoConfigurationReplacements;
}
|
Returns the auto-configuration replacements, loading them lazily on first access
and caching the result for subsequent calls.
@return the auto-configuration replacements
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
| 295
|
[] |
AutoConfigurationReplacements
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
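The shape of the method, read the field into a local, populate it on first use, write it back, is the classic lazy-initialization idiom. A Python sketch of the same effect with functools.cached_property; load_replacements is a hypothetical stand-in for AutoConfigurationReplacements.load:
from functools import cached_property

def load_replacements():
    # Hypothetical loader standing in for AutoConfigurationReplacements.load.
    return {"old.AutoConfig": "new.AutoConfig"}

class Selector:
    @cached_property
    def replacements(self):
        return load_replacements()  # computed once, then cached on the instance

s = Selector()
assert s.replacements is s.replacements  # same cached object on repeat access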
get_db_instance_state
|
def get_db_instance_state(self, db_instance_id: str) -> str:
"""
Get the current state of a DB instance.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_instances`
:param db_instance_id: The ID of the target DB instance.
:return: The status of the DB instance as a string (e.g. "available")
:raises AirflowNotFoundException: If the DB instance does not exist.
"""
try:
response = self.conn.describe_db_instances(DBInstanceIdentifier=db_instance_id)
except self.conn.exceptions.DBInstanceNotFoundFault as e:
raise AirflowNotFoundException(e)
return response["DBInstances"][0]["DBInstanceStatus"].lower()
|
Get the current state of a DB instance.
.. seealso::
- :external+boto3:py:meth:`RDS.Client.describe_db_instances`
:param db_instance_id: The ID of the target DB instance.
:return: The status of the DB instance as a string (e.g. "available")
:raises AirflowNotFoundException: If the DB instance does not exist.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/rds.py
| 227
|
[
"self",
"db_instance_id"
] |
str
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
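Typical usage pairs the call with the not-found exception. A sketch assuming a configured aws_default connection and a hypothetical instance id:
from airflow.exceptions import AirflowNotFoundException
from airflow.providers.amazon.aws.hooks.rds import RdsHook

hook = RdsHook(aws_conn_id="aws_default", region_name="us-east-1")
try:
    print(hook.get_db_instance_state("my-db-instance"))  # e.g. "available"
except AirflowNotFoundException:
    print("DB instance does not exist")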
scheduleOnIdle
|
function scheduleOnIdle(fn: () => void): void {
if (typeof requestIdleCallback !== 'undefined') {
requestIdleCallback(fn);
} else {
setTimeout(fn, 0);
}
}
|
Schedules a function to run when the browser is idle, falling back to a new
macrotask because the `requestIdleCallback` API is not available in all browsers.
@param fn the function to schedule
|
typescript
|
adev/src/app/features/references/api-reference-list/api-reference-list.component.ts
| 178
|
[
"fn"
] | true
| 3
| 6.72
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
semaphore
|
public static Striped<Semaphore> semaphore(int stripes, int permits) {
return custom(stripes, () -> new PaddedSemaphore(permits));
}
|
Creates a {@code Striped<Semaphore>} with eagerly initialized, strongly referenced semaphores,
with the specified number of permits.
@param stripes the minimum number of stripes (semaphores) required
@param permits the number of permits in each semaphore
@return a new {@code Striped<Semaphore>}
|
java
|
android/guava/src/com/google/common/util/concurrent/Striped.java
| 245
|
[
"stripes",
"permits"
] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
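Striping itself is a small amount of arithmetic: hash the key and map it to one of N eagerly created semaphores. A minimal Python sketch of the same idea; the class and method names are hypothetical:
import threading

class StripedSemaphores:
    def __init__(self, stripes, permits):
        # Eagerly create one semaphore per stripe, as Striped.semaphore does.
        self._stripes = [threading.Semaphore(permits) for _ in range(stripes)]

    def get(self, key):
        # The same key always maps to the same stripe; distinct keys may collide.
        return self._stripes[hash(key) % len(self._stripes)]

striped = StripedSemaphores(stripes=8, permits=2)
with striped.get("user:42"):
    pass  # at most 2 concurrent holders per stripe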
deleteAll
|
public StrBuilder deleteAll(final StrMatcher matcher) {
return replace(matcher, null, 0, size, -1);
}
|
Deletes all parts of the builder that the matcher matches.
<p>
Matchers can be used to perform advanced deletion behavior.
For example you could write a matcher to delete all occurrences
where the character 'a' is followed by a number.
</p>
@param matcher the matcher to use to find the deletion, null causes no action
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,724
|
[
"matcher"
] |
StrBuilder
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
resolve
|
private Stream<PropertyDescriptor> resolve(Bindable bindable, TypeElementMembers members) {
if (bindable.isConstructorBindingEnabled()) {
ExecutableElement bindConstructor = bindable.getBindConstructor();
return (bindConstructor != null)
? resolveConstructorBoundProperties(bindable.getType(), members, bindConstructor) : Stream.empty();
}
return resolveJavaBeanProperties(bindable.getType(), members, null);
}
|
Return the {@link PropertyDescriptor} instances that are valid candidates for the
specified {@link Bindable}, resolving them from its bind constructor when
constructor binding is enabled and from JavaBean conventions otherwise.
@param bindable the bindable to inspect
@param members the members of the bindable's type
@return the candidate properties for metadata generation
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptorResolver.java
| 69
|
[
"bindable",
"members"
] | true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
replace_regex
|
def replace_regex(
values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
) -> None:
"""
Parameters
----------
values : ArrayLike
Object dtype.
rx : re.Pattern
value : Any
mask : np.ndarray[bool], optional
Notes
-----
Alters values in-place.
"""
# deal with replacing values with objects (strings) that match but
# whose replacement is not a string (numeric, nan, object)
if isna(value) or not isinstance(value, str):
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return value if rx.search(s) is not None else s
else:
return s
else:
# value is guaranteed to be a string here; s can be either a string
# or null. If it is null it gets returned unchanged.
def re_replacer(s):
if is_re(rx) and isinstance(s, str):
return rx.sub(value, s)
else:
return s
f = np.vectorize(re_replacer, otypes=[np.object_])
if mask is None:
values[:] = f(values)
else:
if values.ndim != mask.ndim:
mask = np.broadcast_to(mask, values.shape)
values[mask] = f(values[mask])
|
Parameters
----------
values : ArrayLike
Object dtype.
rx : re.Pattern
value : Any
mask : np.ndarray[bool], optional
Notes
-----
Alters values in-place.
|
python
|
pandas/core/array_algos/replace.py
| 114
|
[
"values",
"rx",
"value",
"mask"
] |
None
| true
| 14
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
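A quick demonstration of the in-place behaviour on an object-dtype array, exercising the string-replacement branch (this imports a pandas-internal module, so the path may change between versions):
import re
import numpy as np
from pandas.core.array_algos.replace import replace_regex

values = np.array(["foo-1", "bar-2", None], dtype=object)
replace_regex(values, re.compile(r"-\d+"), "", mask=None)
print(values)  # ['foo' 'bar' None] -- altered in place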