function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
get
/**
 * Gets the value at `path` of `object`. If the resolved value is
 * `undefined`, `defaultValue` is returned in its place.
 *
 * @param {Object} object The object to query.
 * @param {Array|string} path The path of the property to get.
 * @param {*} [defaultValue] The value returned for `undefined` resolved values.
 * @returns {*} Returns the resolved value.
 */
function get(object, path, defaultValue) {
  if (object == null) {
    return defaultValue;
  }
  var resolved = baseGet(object, path);
  return resolved === undefined ? defaultValue : resolved;
}
Gets the value at `path` of `object`. If the resolved value is `undefined`, the `defaultValue` is returned in its place. @static @memberOf _ @since 3.7.0 @category Object @param {Object} object The object to query. @param {Array|string} path The path of the property to get. @param {*} [defaultValue] The value returned for `undefined` resolved values. @returns {*} Returns the resolved value. @example var object = { 'a': [{ 'b': { 'c': 3 } }] }; _.get(object, 'a[0].b.c'); // => 3 _.get(object, ['a', '0', 'b', 'c']); // => 3 _.get(object, 'a.b.c', 'default'); // => 'default'
javascript
lodash.js
13,233
[ "object", "path", "defaultValue" ]
false
3
7.6
lodash/lodash
61,490
jsdoc
false
lastIndexOf
/**
 * Searches the string builder backwards from the given index for the last
 * position at which the matcher matches.
 * <p>
 * Matchers can be used to perform advanced searching behavior, for example
 * matching the character 'a' followed by a number.
 * </p>
 *
 * @param matcher the matcher to use, null returns -1
 * @param startIndex the index to start at, invalid index rounded to edge
 * @return the last index matched, or -1 if not found
 */
public int lastIndexOf(final StrMatcher matcher, int startIndex) {
    // Clamp a too-large start index to the last valid position.
    if (startIndex >= size) {
        startIndex = size - 1;
    }
    if (matcher == null || startIndex < 0) {
        return -1;
    }
    final char[] chars = buffer;
    final int upperBound = startIndex + 1;
    int i = startIndex;
    while (i >= 0) {
        if (matcher.isMatch(chars, i, 0, upperBound) > 0) {
            return i;
        }
        i--;
    }
    return -1;
}
Searches the string builder using the matcher to find the last match searching from the given index. <p> Matchers can be used to perform advanced searching behavior. For example you could write a matcher to find the character 'a' followed by a number. </p> @param matcher the matcher to use, null returns -1 @param startIndex the index to start at, invalid index rounded to edge @return the last index matched, or -1 if not found
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
2,392
[ "matcher", "startIndex" ]
true
6
8.08
apache/commons-lang
2,896
javadoc
false
_sizeof_fmt
def _sizeof_fmt(num: float, size_qualifier: str) -> str: """ Return size in human readable format. Parameters ---------- num : int Size in bytes. size_qualifier : str Either empty, or '+' (if lower bound). Returns ------- str Size in human readable format. Examples -------- >>> _sizeof_fmt(23028, "") '22.5 KB' >>> _sizeof_fmt(23028, "+") '22.5+ KB' """ for x in ["bytes", "KB", "MB", "GB", "TB"]: if num < 1024.0: return f"{num:3.1f}{size_qualifier} {x}" num /= 1024.0 return f"{num:3.1f}{size_qualifier} PB"
Return size in human readable format. Parameters ---------- num : int Size in bytes. size_qualifier : str Either empty, or '+' (if lower bound). Returns ------- str Size in human readable format. Examples -------- >>> _sizeof_fmt(23028, "") '22.5 KB' >>> _sizeof_fmt(23028, "+") '22.5+ KB'
python
pandas/io/formats/info.py
324
[ "num", "size_qualifier" ]
str
true
3
8.32
pandas-dev/pandas
47,362
numpy
false
build
/**
 * Returns an {@code ImmutableRangeMap} containing the associations previously
 * added to this builder.
 *
 * @throws IllegalArgumentException if any two ranges inserted into this
 *     builder overlap
 */
public ImmutableRangeMap<K, V> build() {
    // Sort entries by the lexicographical order of their key ranges so that
    // any overlapping ranges become adjacent and can be detected in one pass.
    sort(entries, Range.<K>rangeLexOrdering().onKeys());
    ImmutableList.Builder<Range<K>> rangesBuilder = new ImmutableList.Builder<>(entries.size());
    ImmutableList.Builder<V> valuesBuilder = new ImmutableList.Builder<>(entries.size());
    for (int i = 0; i < entries.size(); i++) {
        Range<K> range = entries.get(i).getKey();
        if (i > 0) {
            // After sorting, an overlap can only occur with the immediate predecessor.
            Range<K> prevRange = entries.get(i - 1).getKey();
            if (range.isConnected(prevRange) && !range.intersection(prevRange).isEmpty()) {
                throw new IllegalArgumentException(
                    "Overlapping ranges: range " + prevRange + " overlaps with entry " + range);
            }
        }
        rangesBuilder.add(range);
        valuesBuilder.add(entries.get(i).getValue());
    }
    return new ImmutableRangeMap<>(rangesBuilder.build(), valuesBuilder.build());
}
Returns an {@code ImmutableRangeMap} containing the associations previously added to this builder. @throws IllegalArgumentException if any two ranges inserted into this builder overlap
java
android/guava/src/com/google/common/collect/ImmutableRangeMap.java
154
[]
true
5
6.08
google/guava
51,352
javadoc
false
equals
/**
 * Compares this batch with another object for equality. Two batches are
 * equal when they are of the same class and wrap equal underlying buffers.
 *
 * @param o the object to compare against, may be null
 * @return true if the other object is a batch wrapping an equal buffer
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    final ByteBufferLegacyRecordBatch other = (ByteBufferLegacyRecordBatch) o;
    return Objects.equals(buffer, other.buffer);
}
Compares this batch with another object for equality. Two batches are equal if they are of the same class and wrap equal underlying buffers. @param o the object to compare against, may be null @return true if the other object is a ByteBufferLegacyRecordBatch wrapping an equal buffer
java
clients/src/main/java/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java
525
[ "o" ]
true
4
6.4
apache/kafka
31,560
javadoc
false
getAutoConfigurationEntry
/**
 * Return the {@link AutoConfigurationEntry} based on the {@link AnnotationMetadata}
 * of the importing {@link Configuration @Configuration} class.
 *
 * @param annotationMetadata the annotation metadata of the configuration class
 * @return the auto-configurations that should be imported
 */
protected AutoConfigurationEntry getAutoConfigurationEntry(AnnotationMetadata annotationMetadata) {
    if (!isEnabled(annotationMetadata)) {
        return EMPTY_ENTRY;
    }
    AnnotationAttributes attributes = getAttributes(annotationMetadata);
    List<String> configurations = getCandidateConfigurations(annotationMetadata, attributes);
    configurations = removeDuplicates(configurations);
    Set<String> exclusions = getExclusions(annotationMetadata, attributes);
    // Fail fast on exclusions that do not name a known auto-configuration candidate.
    checkExcludedClasses(configurations, exclusions);
    configurations.removeAll(exclusions);
    // Apply the registered configuration class filters to trim the candidates.
    configurations = getConfigurationClassFilter().filter(configurations);
    fireAutoConfigurationImportEvents(configurations, exclusions);
    return new AutoConfigurationEntry(configurations, exclusions);
}
Return the {@link AutoConfigurationEntry} based on the {@link AnnotationMetadata} of the importing {@link Configuration @Configuration} class. @param annotationMetadata the annotation metadata of the configuration class @return the auto-configurations that should be imported
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
142
[ "annotationMetadata" ]
AutoConfigurationEntry
true
2
7.12
spring-projects/spring-boot
79,428
javadoc
false
maybe_convert_dtype
def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None):
    """
    Convert data based on dtype conventions, issuing errors where appropriate.

    Parameters
    ----------
    data : np.ndarray or pd.Index
    copy : bool
        Whether the caller still needs to copy ``data``; set to False here
        whenever this function already produced a new array.
    tz : tzinfo or None, default None

    Returns
    -------
    data : np.ndarray or pd.Index
    copy : bool

    Raises
    ------
    TypeError : PeriodDType data is passed
    """
    if not hasattr(data, "dtype"):
        # e.g. collections.deque
        return data, copy

    if is_float_dtype(data.dtype):
        # pre-2.0 we treated these as wall-times, inconsistent with ints
        # GH#23675, GH#45573 deprecated to treat symmetrically with integer dtypes.
        # Note: data.astype(np.int64) fails ARM tests, see
        # https://github.com/pandas-dev/pandas/issues/49468.
        data = data.astype(DT64NS_DTYPE).view("i8")
        copy = False
    elif lib.is_np_dtype(data.dtype, "m") or is_bool_dtype(data.dtype):
        # GH#29794 enforcing deprecation introduced in GH#23539
        raise TypeError(f"dtype {data.dtype} cannot be converted to datetime64[ns]")
    elif isinstance(data.dtype, PeriodDtype):
        # Note: without explicitly raising here, PeriodIndex
        # test_setops.test_join_does_not_recur fails
        raise TypeError(
            "Passing PeriodDtype data is invalid. Use `data.to_timestamp()` instead"
        )
    elif isinstance(data.dtype, ExtensionDtype) and not isinstance(
        data.dtype, DatetimeTZDtype
    ):
        # Other extension dtypes fall back to object ndarray for later parsing.
        # TODO: We have no tests for these
        data = np.array(data, dtype=np.object_)
        copy = False

    return data, copy
Convert data based on dtype conventions, issuing errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool tz : tzinfo or None, default None Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed
python
pandas/core/arrays/datetimes.py
2,653
[ "data", "copy", "tz" ]
true
8
6.72
pandas-dev/pandas
47,362
numpy
false
transformMethodBody
/**
 * Transforms the body of a method, accessor, or constructor declaration,
 * capturing any `super` property accesses made inside async code and, when
 * targeting ES2015+, emitting the `_super` access helpers required for them.
 *
 * @param node The declaration whose body should be transformed; its body must
 *     be defined.
 * @returns The transformed function body.
 */
function transformMethodBody(node: MethodDeclaration | AccessorDeclaration | ConstructorDeclaration): FunctionBody | undefined {
    Debug.assertIsDefined(node.body);

    // Save and reset per-function state; restored before returning so nested
    // transforms do not leak captured-super bookkeeping into each other.
    const savedCapturedSuperProperties = capturedSuperProperties;
    const savedHasSuperElementAccess = hasSuperElementAccess;
    capturedSuperProperties = new Set();
    hasSuperElementAccess = false;

    let updated = visitFunctionBody(node.body, visitor, context);

    // Minor optimization, emit `_super` helper to capture `super` access in an arrow.
    // This step isn't needed if we eventually transform this to ES5.
    const originalMethod = getOriginalNode(node, isFunctionLikeDeclaration);
    const emitSuperHelpers = languageVersion >= ScriptTarget.ES2015 &&
        (resolver.hasNodeCheckFlag(node, NodeCheckFlags.MethodWithSuperPropertyAssignmentInAsync) ||
            resolver.hasNodeCheckFlag(node, NodeCheckFlags.MethodWithSuperPropertyAccessInAsync)) &&
        (getFunctionFlags(originalMethod) & FunctionFlags.AsyncGenerator) !== FunctionFlags.AsyncGenerator;

    if (emitSuperHelpers) {
        enableSubstitutionForAsyncMethodsWithSuper();
        if (capturedSuperProperties.size) {
            // Declare a `_super` variable capturing the accessed properties and
            // insert it after the standard prologue of the updated body.
            const variableStatement = createSuperAccessVariableStatement(factory, resolver, node, capturedSuperProperties);
            substitutedSuperAccessors[getNodeId(variableStatement)] = true;
            const statements = updated.statements.slice();
            insertStatementsAfterStandardPrologue(statements, [variableStatement]);
            updated = factory.updateBlock(updated, statements);
        }

        if (hasSuperElementAccess) {
            // Emit helpers for super element access expressions (`super[x]`).
            if (resolver.hasNodeCheckFlag(node, NodeCheckFlags.MethodWithSuperPropertyAssignmentInAsync)) {
                addEmitHelper(updated, advancedAsyncSuperHelper);
            }
            else if (resolver.hasNodeCheckFlag(node, NodeCheckFlags.MethodWithSuperPropertyAccessInAsync)) {
                addEmitHelper(updated, asyncSuperHelper);
            }
        }
    }

    capturedSuperProperties = savedCapturedSuperProperties;
    hasSuperElementAccess = savedHasSuperElementAccess;
    return updated;
}
Transforms the body of a method, accessor, or constructor declaration, emitting `_super` access helpers when the async transform captured `super` property accesses and the target is ES2015 or later. @param node The declaration whose body to transform; its body must be defined.
typescript
src/compiler/transformers/es2017.ts
649
[ "node" ]
true
10
6.88
microsoft/TypeScript
107,154
jsdoc
false
List
function List() { const [items, setItems] = useState(['one', 'two', 'three']); const inputRef = useRef(null); const addItem = () => { const input = ((inputRef.current: any): HTMLInputElement); const text = input.value; input.value = ''; if (text) { setItems([...items, text]); } }; return ( <> <input ref={inputRef} data-testname="AddItemInput" /> <button data-testname="AddItemButton" onClick={addItem}> Add Item </button> <ul data-testname="List"> {items.map((label, index) => ( <ListItem key={index} label={label} /> ))} </ul> </> ); }
Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. @flow
javascript
packages/react-devtools-shell/src/e2e-apps/ListApp.js
17
[]
false
2
6.24
facebook/react
241,750
jsdoc
false
getMessageInterpolator
/**
 * Obtains the default provider's {@link MessageInterpolator}, falling back to
 * {@code getFallback()} if the default provider cannot supply one.
 *
 * @return the message interpolator to use
 * @throws ValidationException if the default interpolator is unavailable and
 *     no fallback exists
 */
private MessageInterpolator getMessageInterpolator() {
    try {
        return Validation.byDefaultProvider().configure().getDefaultMessageInterpolator();
    }
    catch (ValidationException ex) {
        MessageInterpolator fallback = getFallback();
        if (fallback == null) {
            // No fallback available: surface the original failure.
            throw ex;
        }
        return fallback;
    }
}
Obtains the default provider's {@link MessageInterpolator}, falling back to the configured fallback interpolator if the default provider cannot supply one. @return the message interpolator to use @throws ValidationException if no default interpolator is available and no fallback exists
java
core/spring-boot/src/main/java/org/springframework/boot/validation/MessageInterpolatorFactory.java
78
[]
MessageInterpolator
true
3
6.24
spring-projects/spring-boot
79,428
javadoc
false
getClassLoader
/**
 * Returns the ClassLoader to use: the resource loader's ClassLoader when a
 * resource loader is set, otherwise the default ClassLoader resolved by
 * {@link ClassUtils}.
 *
 * @return a ClassLoader (never null)
 * @throws IllegalStateException if no ClassLoader can be resolved
 */
public ClassLoader getClassLoader() {
    ClassLoader classLoader = (this.resourceLoader != null)
            ? this.resourceLoader.getClassLoader()
            : ClassUtils.getDefaultClassLoader();
    Assert.state(classLoader != null, "No classloader found");
    return classLoader;
}
Either the ClassLoader that will be used in the ApplicationContext (if {@link #setResourceLoader(ResourceLoader) resourceLoader} is set), or the context class loader (if not null), or the loader of the Spring {@link ClassUtils} class. @return a ClassLoader (never null)
java
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
715
[]
ClassLoader
true
2
7.44
spring-projects/spring-boot
79,428
javadoc
false
equalsIgnoreCase
/**
 * Checks the contents of this builder against another to see if they
 * contain the same character content ignoring case.
 *
 * @param other the object to check, null returns false
 * @return true if the builders contain the same characters in the same order
 */
public boolean equalsIgnoreCase(final StrBuilder other) {
    if (this == other) {
        return true;
    }
    // Honor the documented contract: a null argument is unequal, not an NPE.
    if (other == null) {
        return false;
    }
    if (this.size != other.size) {
        return false;
    }
    final char[] thisBuf = this.buffer;
    final char[] otherBuf = other.buffer;
    for (int i = size - 1; i >= 0; i--) {
        final char c1 = thisBuf[i];
        final char c2 = otherBuf[i];
        // Mirror String#equalsIgnoreCase: compare uppercase forms, then
        // lowercase forms, since some characters (e.g. in the Georgian
        // alphabet) only compare equal after lower-casing.
        if (c1 != c2
                && Character.toUpperCase(c1) != Character.toUpperCase(c2)
                && Character.toLowerCase(c1) != Character.toLowerCase(c2)) {
            return false;
        }
    }
    return true;
}
Checks the contents of this builder against another to see if they contain the same character content ignoring case. @param other the object to check, null returns false @return true if the builders contain the same characters in the same order
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
1,894
[ "other" ]
true
6
8.24
apache/commons-lang
2,896
javadoc
false
toArray
public static float[] toArray(Collection<? extends Number> collection) { if (collection instanceof FloatArrayAsList) { return ((FloatArrayAsList) collection).toFloatArray(); } Object[] boxedArray = collection.toArray(); int len = boxedArray.length; float[] array = new float[len]; for (int i = 0; i < len; i++) { // checkNotNull for GWT (do not optimize) array[i] = ((Number) checkNotNull(boxedArray[i])).floatValue(); } return array; }
Returns an array containing each value of {@code collection}, converted to a {@code float} value in the manner of {@link Number#floatValue}. <p>Elements are copied from the argument collection as if by {@code collection.toArray()}. Calling this method is as thread-safe as calling that method. @param collection a collection of {@code Number} instances @return an array containing the same values as {@code collection}, in the same order, converted to primitives @throws NullPointerException if {@code collection} or any of its elements is null @since 1.0 (parameter was {@code Collection<Float>} before 12.0)
java
android/guava/src/com/google/common/primitives/Floats.java
537
[ "collection" ]
true
3
8.08
google/guava
51,352
javadoc
false
valueToPromise
/**
 * Normalizes a value into a {@link PrismaPromise}: thenables are passed
 * through unchanged, everything else is wrapped in a resolved promise.
 *
 * @param thing the value or thenable to normalize
 * @returns a PrismaPromise resolving to the value
 */
function valueToPromise<T>(thing: T): PrismaPromise<T> {
  // Guard against null/undefined before indexing: `thing['then']` on either
  // of those would throw a TypeError at runtime.
  if (thing != null && typeof (thing as any)['then'] === 'function') {
    return thing as PrismaPromise<T>
  }
  return Promise.resolve(thing) as PrismaPromise<T>
}
Normalizes a value into a PrismaPromise: thenables are passed through unchanged, and plain values are wrapped in a resolved promise. @param thing the value or thenable to normalize @returns a PrismaPromise resolving to the value
typescript
packages/client/src/runtime/core/request/createPrismaPromise.ts
76
[ "thing" ]
true
2
6.8
prisma/prisma
44,834
jsdoc
false
_backprop
def _backprop(
    self, X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
):
    """Compute the MLP loss function and its corresponding derivatives
    with respect to each parameter: weights and bias vectors.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input data.

    y : ndarray of shape (n_samples,)
        The target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    activations : list, length = n_layers - 1
        The ith element of the list holds the values of the ith layer.

    deltas : list, length = n_layers - 1
        The ith element of the list holds the difference between the
        activations of the i + 1 layer and the backpropagated error.
        More specifically, deltas are gradients of loss with respect to z
        in each layer, where z = wx + b is the value of a particular layer
        before passing through the activation function

    coef_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        coefficient parameters of the ith layer in an iteration.

    intercept_grads : list, length = n_layers - 1
        The ith element contains the amount of change used to update the
        intercept parameters of the ith layer in an iteration.

    Returns
    -------
    loss : float
    coef_grads : list, length = n_layers - 1
    intercept_grads : list, length = n_layers - 1
    """
    n_samples = X.shape[0]

    # Forward propagate
    activations = self._forward_pass(activations)

    # Get loss
    loss_func_name = self.loss
    if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
        loss_func_name = "binary_log_loss"
    loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1], sample_weight)
    # Add L2 regularization term to loss
    values = 0
    for s in self.coefs_:
        s = s.ravel()
        values += np.dot(s, s)
    # Normalize the penalty by the (weighted) sample count.
    if sample_weight is None:
        sw_sum = n_samples
    else:
        sw_sum = sample_weight.sum()
    loss += (0.5 * self.alpha) * values / sw_sum

    # Backward propagate
    last = self.n_layers_ - 2

    # The calculation of delta[last] is as follows:
    # delta[last] = d/dz loss(y, act(z)) = act(z) - y
    # with z=x@w + b being the output of the last layer before passing through the
    # output activation, act(z) = activations[-1].
    # The simple formula for delta[last] here works with following (canonical
    # loss-link) combinations of output activation and loss function:
    # sigmoid and binary cross entropy, softmax and categorical cross
    # entropy, and identity with squared loss
    deltas[last] = activations[-1] - y
    if sample_weight is not None:
        deltas[last] *= sample_weight.reshape(-1, 1)

    # Compute gradient for the last layer
    self._compute_loss_grad(
        last, sw_sum, activations, deltas, coef_grads, intercept_grads
    )

    inplace_derivative = DERIVATIVES[self.activation]
    # Iterate over the hidden layers
    for i in range(last, 0, -1):
        deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
        inplace_derivative(activations[i], deltas[i - 1])

        self._compute_loss_grad(
            i - 1, sw_sum, activations, deltas, coef_grads, intercept_grads
        )

    return loss, coef_grads, intercept_grads
Compute the MLP loss function and its corresponding derivatives with respect to each parameter: weights and bias vectors. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : ndarray of shape (n_samples,) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. activations : list, length = n_layers - 1 The ith element of the list holds the values of the ith layer. deltas : list, length = n_layers - 1 The ith element of the list holds the difference between the activations of the i + 1 layer and the backpropagated error. More specifically, deltas are gradients of loss with respect to z in each layer, where z = wx + b is the value of a particular layer before passing through the activation function coef_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the coefficient parameters of the ith layer in an iteration. intercept_grads : list, length = n_layers - 1 The ith element contains the amount of change used to update the intercept parameters of the ith layer in an iteration. Returns ------- loss : float coef_grads : list, length = n_layers - 1 intercept_grads : list, length = n_layers - 1
python
sklearn/neural_network/_multilayer_perceptron.py
301
[ "self", "X", "y", "sample_weight", "activations", "deltas", "coef_grads", "intercept_grads" ]
false
8
6
scikit-learn/scikit-learn
64,340
numpy
false
getConnectionDetails
public <S> Map<Class<?>, ConnectionDetails> getConnectionDetails(S source, boolean required) throws ConnectionDetailsFactoryNotFoundException, ConnectionDetailsNotFoundException { List<Registration<S, ?>> registrations = getRegistrations(source, required); Map<Class<?>, ConnectionDetails> result = new LinkedHashMap<>(); for (Registration<S, ?> registration : registrations) { ConnectionDetails connectionDetails = registration.factory().getConnectionDetails(source); if (connectionDetails != null) { Class<?> connectionDetailsType = registration.connectionDetailsType(); ConnectionDetails previous = result.put(connectionDetailsType, connectionDetails); Assert.state(previous == null, () -> "Duplicate connection details supplied for %s" .formatted(connectionDetailsType.getName())); } } if (required && result.isEmpty()) { throw new ConnectionDetailsNotFoundException(source); } return Map.copyOf(result); }
Return a {@link Map} of {@link ConnectionDetails} interface type to {@link ConnectionDetails} instance created from the factories associated with the given source. @param <S> the source type @param source the source @param required if a connection details result is required @return a map of {@link ConnectionDetails} instances @throws ConnectionDetailsFactoryNotFoundException if a result is required but no connection details factory is registered for the source @throws ConnectionDetailsNotFoundException if a result is required but no connection details instance was created from a registered factory
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/service/connection/ConnectionDetailsFactories.java
82
[ "source", "required" ]
true
4
7.28
spring-projects/spring-boot
79,428
javadoc
false
indexable
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that everything
    can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.

    Parameters
    ----------
    *iterables : {lists, dataframes, ndarrays, sparse matrices}
        List of objects to ensure sliceability.

    Returns
    -------
    result : list of {ndarray, sparse matrix, dataframe} or None
        Returns a list containing indexable arrays (i.e. NumPy array,
        sparse matrix, or dataframe) or `None`.

    Examples
    --------
    >>> from sklearn.utils import indexable
    >>> from scipy.sparse import csr_matrix
    >>> import numpy as np
    >>> iterables = [
    ...     [1, 2, 3], np.array([2, 3, 4]), None, csr_matrix([[5], [6], [7]])
    ... ]
    >>> indexable(*iterables)
    [[1, 2, 3], array([2, 3, 4]), None, <...Sparse...dtype 'int64'...shape (3, 1)>]
    """
    converted = []
    for item in iterables:
        converted.append(_make_indexable(item))
    # All non-None inputs must agree on their first-axis length.
    check_consistent_length(*converted)
    return converted
Make arrays indexable for cross-validation. Checks consistent length, passes through None, and ensures that everything can be indexed by converting sparse matrices to csr and converting non-iterable objects to arrays. Parameters ---------- *iterables : {lists, dataframes, ndarrays, sparse matrices} List of objects to ensure sliceability. Returns ------- result : list of {ndarray, sparse matrix, dataframe} or None Returns a list containing indexable arrays (i.e. NumPy array, sparse matrix, or dataframe) or `None`. Examples -------- >>> from sklearn.utils import indexable >>> from scipy.sparse import csr_matrix >>> import numpy as np >>> iterables = [ ... [1, 2, 3], np.array([2, 3, 4]), None, csr_matrix([[5], [6], [7]]) ... ] >>> indexable(*iterables) [[1, 2, 3], array([2, 3, 4]), None, <...Sparse...dtype 'int64'...shape (3, 1)>]
python
sklearn/utils/validation.py
490
[]
false
1
6
scikit-learn/scikit-learn
64,340
numpy
false
_asfreq_compat
def _asfreq_compat(index: FreqIndexT, freq) -> FreqIndexT:
    """
    Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex.

    Parameters
    ----------
    index : PeriodIndex, DatetimeIndex, or TimedeltaIndex
    freq : DateOffset

    Returns
    -------
    same type as index

    Raises
    ------
    ValueError
        If ``index`` is non-empty (only empty indexes may take an
        arbitrary freq).
    """
    if len(index) != 0:
        # This should never be reached, always checked by the caller
        raise ValueError(
            "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex"
        )
    if isinstance(index, PeriodIndex):
        new_index = index.asfreq(freq=freq)
    elif isinstance(index, DatetimeIndex):
        # Rebuild an empty index of the same dtype/name with the requested freq.
        new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name)
    elif isinstance(index, TimedeltaIndex):
        new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name)
    else:  # pragma: no cover
        raise TypeError(type(index))
    return new_index
Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex. Parameters ---------- index : PeriodIndex, DatetimeIndex, or TimedeltaIndex freq : DateOffset Returns ------- same type as index
python
pandas/core/resample.py
3,117
[ "index", "freq" ]
FreqIndexT
true
6
6.24
pandas-dev/pandas
47,362
numpy
false
chebvander
def chebvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = T_i(x),

    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
    `x` and the last index is the degree of the Chebyshev polynomial.

    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
    matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and ``chebval(x, c)``
    are the same up to roundoff, which is useful for least squares fitting and
    for evaluating many Chebyshev series of the same degree at once.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo Vandermonde matrix, of shape ``x.shape + (deg + 1,)``,
        with the same dtype as the converted `x`.
    """
    ideg = pu._as_int(deg, "deg")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # Force at least 1-D and promote to a floating/complex dtype.
    x = np.array(x, copy=None, ndmin=1) + 0.0
    v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)

    # Forward recursion: T_0 = 1, T_1 = x, T_i = 2x*T_{i-1} - T_{i-2}.
    v[0] = x * 0 + 1
    if ideg > 0:
        x2 = 2 * x
        v[1] = x
        for i in range(2, ideg + 1):
            v[i] = v[i - 1] * x2 - v[i - 2]
    return np.moveaxis(v, 0, -1)
Pseudo-Vandermonde matrix of given degree. Returns the pseudo-Vandermonde matrix of degree `deg` and sample points `x`. The pseudo-Vandermonde matrix is defined by .. math:: V[..., i] = T_i(x), where ``0 <= i <= deg``. The leading indices of `V` index the elements of `x` and the last index is the degree of the Chebyshev polynomial. If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and ``chebval(x, c)`` are the same up to roundoff. This equivalence is useful both for least squares fitting and for the evaluation of a large number of Chebyshev series of the same degree and sample points. Parameters ---------- x : array_like Array of points. The dtype is converted to float64 or complex128 depending on whether any of the elements are complex. If `x` is scalar it is converted to a 1-D array. deg : int Degree of the resulting matrix. Returns ------- vander : ndarray The pseudo Vandermonde matrix. The shape of the returned matrix is ``x.shape + (deg + 1,)``, where The last index is the degree of the corresponding Chebyshev polynomial. The dtype will be the same as the converted `x`.
python
numpy/polynomial/chebyshev.py
1,354
[ "x", "deg" ]
false
4
6.24
numpy/numpy
31,054
numpy
false
isOsMatch
/**
 * Tests whether the operating system matches both the expected name prefix
 * and the expected version prefix.
 * <p>
 * This method is package private instead of private to support unit test
 * invocation.
 * </p>
 *
 * @param osName the actual OS name.
 * @param osVersion the actual OS version.
 * @param osNamePrefix the prefix for the expected OS name.
 * @param osVersionPrefix the prefix for the expected OS version.
 * @return true if matches, or false if not or can't determine.
 */
static boolean isOsMatch(final String osName, final String osVersion, final String osNamePrefix, final String osVersionPrefix) {
    // Either value being unknown means we cannot determine a match.
    if (osName == null) {
        return false;
    }
    if (osVersion == null) {
        return false;
    }
    return isOsNameMatch(osName, osNamePrefix) && isOsVersionMatch(osVersion, osVersionPrefix);
}
Tests whether the operating system matches. <p> This method is package private instead of private to support unit test invocation. </p> @param osName the actual OS name. @param osVersion the actual OS version. @param osNamePrefix the prefix for the expected OS name. @param osVersionPrefix the prefix for the expected OS version. @return true if matches, or false if not or can't determine.
java
src/main/java/org/apache/commons/lang3/SystemUtils.java
2,416
[ "osName", "osVersion", "osNamePrefix", "osVersionPrefix" ]
true
4
7.92
apache/commons-lang
2,896
javadoc
false
_validate_names
def _validate_names(names: Sequence[Hashable] | None) -> None:
    """
    Raise ValueError if the `names` parameter contains duplicates or has an
    invalid data type.

    Parameters
    ----------
    names : array-like or None
        An array containing a list of the names used for the output DataFrame.

    Raises
    ------
    ValueError
        If names are not unique or are not ordered (e.g. set).
    """
    if names is None:
        return
    if len(set(names)) != len(names):
        raise ValueError("Duplicate names are not allowed.")
    # dict.keys() views are ordered but fail the list-like check, so allow
    # them explicitly.
    ordered = is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
    if not ordered:
        raise ValueError("Names should be an ordered collection.")
Raise ValueError if the `names` parameter contains duplicates or has an invalid data type. Parameters ---------- names : array-like or None An array containing a list of the names used for the output DataFrame. Raises ------ ValueError If names are not unique or are not ordered (e.g. set).
python
pandas/io/parsers/readers.py
234
[ "names" ]
None
true
5
6.88
pandas-dev/pandas
47,362
numpy
false
printStackTrace
/**
 * Prints this exception's stack trace to the given writer, followed by the
 * stack traces of any related causes.
 *
 * @param pw the writer to print to; printing is synchronized on it so the
 *     combined output is not interleaved with other writers
 */
@Override
public void printStackTrace(PrintWriter pw) {
    synchronized (pw) {
        super.printStackTrace(pw);
        if (this.relatedCauses == null) {
            return;
        }
        for (Throwable relatedCause : this.relatedCauses) {
            pw.println("Related cause:");
            relatedCause.printStackTrace(pw);
        }
    }
}
Prints this exception's stack trace to the given writer, followed by the stack traces of any related causes. Printing is synchronized on the writer so the combined output is not interleaved. @param pw the PrintWriter to print to
java
spring-beans/src/main/java/org/springframework/beans/factory/BeanCreationException.java
182
[ "pw" ]
void
true
2
7.04
spring-projects/spring-framework
59,386
javadoc
false
tz
def tz(self) -> tzinfo | None: """ Return the timezone. Returns ------- zoneinfo.ZoneInfo,, datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None Returns None when the array is tz-naive. See Also -------- DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from one time zone to another. Examples -------- For Series: >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) >>> s = pd.to_datetime(s) >>> s 0 2020-01-01 10:00:00+00:00 1 2020-02-01 11:00:00+00:00 dtype: datetime64[us, UTC] >>> s.dt.tz datetime.timezone.utc For DatetimeIndex: >>> idx = pd.DatetimeIndex( ... ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"] ... ) >>> idx.tz datetime.timezone.utc """ # noqa: E501 # GH 18595 return getattr(self.dtype, "tz", None)
Return the timezone. Returns ------- zoneinfo.ZoneInfo,, datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None Returns None when the array is tz-naive. See Also -------- DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from one time zone to another. Examples -------- For Series: >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"]) >>> s = pd.to_datetime(s) >>> s 0 2020-01-01 10:00:00+00:00 1 2020-02-01 11:00:00+00:00 dtype: datetime64[us, UTC] >>> s.dt.tz datetime.timezone.utc For DatetimeIndex: >>> idx = pd.DatetimeIndex( ... ["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"] ... ) >>> idx.tz datetime.timezone.utc
python
pandas/core/arrays/datetimes.py
594
[ "self" ]
tzinfo | None
true
1
6.96
pandas-dev/pandas
47,362
unknown
false
equals
/**
 * Compares this bean definition with another for equality, taking the
 * parent name and all inherited definition state into account.
 *
 * @param other the object to compare against, may be null
 * @return true if the other object is an equivalent ChildBeanDefinition
 */
@Override
public boolean equals(@Nullable Object other) {
    if (this == other) {
        return true;
    }
    return (other instanceof ChildBeanDefinition that
            && ObjectUtils.nullSafeEquals(this.parentName, that.parentName)
            && super.equals(other));
}
Compares this bean definition with another for equality, taking the parent name and all inherited definition state into account. @param other the object to compare against, may be null @return true if the other object is an equivalent ChildBeanDefinition
java
spring-beans/src/main/java/org/springframework/beans/factory/support/ChildBeanDefinition.java
157
[ "other" ]
true
4
6.4
spring-projects/spring-framework
59,386
javadoc
false
endsWithAny
@Deprecated public static boolean endsWithAny(final CharSequence sequence, final CharSequence... searchStrings) { return Strings.CS.endsWithAny(sequence, searchStrings); }
Tests if a CharSequence ends with any of the provided case-sensitive suffixes. <pre> StringUtils.endsWithAny(null, null) = false StringUtils.endsWithAny(null, new String[] {"abc"}) = false StringUtils.endsWithAny("abcxyz", null) = false StringUtils.endsWithAny("abcxyz", new String[] {""}) = true StringUtils.endsWithAny("abcxyz", new String[] {"xyz"}) = true StringUtils.endsWithAny("abcxyz", new String[] {null, "xyz", "abc"}) = true StringUtils.endsWithAny("abcXYZ", "def", "XYZ") = true StringUtils.endsWithAny("abcXYZ", "def", "xyz") = false </pre> @param sequence the CharSequence to check, may be null. @param searchStrings the case-sensitive CharSequences to find, may be empty or contain {@code null}. @return {@code true} if the input {@code sequence} is {@code null} AND no {@code searchStrings} are provided, or the input {@code sequence} ends in any of the provided case-sensitive {@code searchStrings}. @see StringUtils#endsWith(CharSequence, CharSequence) @since 3.0 @deprecated Use {@link Strings#endsWithAny(CharSequence, CharSequence...) Strings.CS.endsWithAny(CharSequence, CharSequence...)}.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
1,730
[ "sequence" ]
true
1
6.48
apache/commons-lang
2,896
javadoc
false
poll
public List<RequestSpec<K>> poll() { List<RequestSpec<K>> requests = new ArrayList<>(); collectLookupRequests(requests); collectFulfillmentRequests(requests); return requests; }
Check whether any requests need to be sent. This should be called immediately after the driver is constructed and then again after each request returns (i.e. after {@link #onFailure(long, RequestSpec, Throwable)} or {@link #onResponse(long, RequestSpec, AbstractResponse, Node)}). @return A list of requests that need to be sent
java
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiDriver.java
212
[]
true
1
6.4
apache/kafka
31,560
javadoc
false
create_replication_task
def create_replication_task( self, replication_task_id: str, source_endpoint_arn: str, target_endpoint_arn: str, replication_instance_arn: str, migration_type: str, table_mappings: dict, **kwargs, ) -> str: """ Create DMS replication task. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.create_replication_task` :param replication_task_id: Replication task id :param source_endpoint_arn: Source endpoint ARN :param target_endpoint_arn: Target endpoint ARN :param replication_instance_arn: Replication instance ARN :param table_mappings: Table mappings :param migration_type: Migration type ('full-load'|'cdc'|'full-load-and-cdc'), full-load by default. :return: Replication task ARN """ dms_client = self.get_conn() create_task_response = dms_client.create_replication_task( ReplicationTaskIdentifier=replication_task_id, SourceEndpointArn=source_endpoint_arn, TargetEndpointArn=target_endpoint_arn, ReplicationInstanceArn=replication_instance_arn, MigrationType=migration_type, TableMappings=json.dumps(table_mappings), **kwargs, ) replication_task_arn = create_task_response["ReplicationTask"]["ReplicationTaskArn"] self.wait_for_task_status(replication_task_arn, DmsTaskWaiterStatus.READY) return replication_task_arn
Create DMS replication task. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.create_replication_task` :param replication_task_id: Replication task id :param source_endpoint_arn: Source endpoint ARN :param target_endpoint_arn: Target endpoint ARN :param replication_instance_arn: Replication instance ARN :param table_mappings: Table mappings :param migration_type: Migration type ('full-load'|'cdc'|'full-load-and-cdc'), full-load by default. :return: Replication task ARN
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/dms.py
114
[ "self", "replication_task_id", "source_endpoint_arn", "target_endpoint_arn", "replication_instance_arn", "migration_type", "table_mappings" ]
str
true
1
6.08
apache/airflow
43,597
sphinx
false
getAnnotationValue
private Object getAnnotationValue(AnnotationValue annotationValue) { Object value = annotationValue.getValue(); if (value instanceof List) { List<Object> values = new ArrayList<>(); ((List<?>) value).forEach((v) -> values.add(((AnnotationValue) v).getValue())); return values; } return value; }
Collect the annotations that are annotated or meta-annotated with the specified {@link TypeElement annotation}. @param element the element to inspect @param annotationType the annotation to discover @return the annotations that are annotated or meta-annotated with this annotation
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
329
[ "annotationValue" ]
Object
true
2
7.28
spring-projects/spring-boot
79,428
javadoc
false
getPropertyHandler
protected @Nullable PropertyHandler getPropertyHandler(String propertyName) throws BeansException { Assert.notNull(propertyName, "Property name must not be null"); AbstractNestablePropertyAccessor nestedPa = getPropertyAccessorForPropertyPath(propertyName); return nestedPa.getLocalPropertyHandler(getFinalPath(nestedPa, propertyName)); }
Return the {@link PropertyHandler} for the specified {@code propertyName}, navigating if necessary. Return {@code null} if not found rather than throwing an exception. @param propertyName the property to obtain the descriptor for @return the property descriptor for the specified property, or {@code null} if not found @throws BeansException in case of introspection failure
java
spring-beans/src/main/java/org/springframework/beans/AbstractNestablePropertyAccessor.java
730
[ "propertyName" ]
PropertyHandler
true
1
6.08
spring-projects/spring-framework
59,386
javadoc
false
registerJobsAndTriggers
@SuppressWarnings("NullAway") // Dataflow analysis limitation protected void registerJobsAndTriggers() throws SchedulerException { TransactionStatus transactionStatus = null; if (this.transactionManager != null) { transactionStatus = this.transactionManager.getTransaction(TransactionDefinition.withDefaults()); } try { if (this.jobSchedulingDataLocations != null) { ClassLoadHelper clh = new ResourceLoaderClassLoadHelper(this.resourceLoader); clh.initialize(); XMLSchedulingDataProcessor dataProcessor = new XMLSchedulingDataProcessor(clh); for (String location : this.jobSchedulingDataLocations) { dataProcessor.processFileAndScheduleJobs(location, getScheduler()); } } // Register JobDetails. if (this.jobDetails != null) { for (JobDetail jobDetail : this.jobDetails) { addJobToScheduler(jobDetail); } } else { // Create empty list for easier checks when registering triggers. this.jobDetails = new ArrayList<>(); } // Register Calendars. if (this.calendars != null) { for (String calendarName : this.calendars.keySet()) { Calendar calendar = this.calendars.get(calendarName); getScheduler().addCalendar(calendarName, calendar, true, true); } } // Register Triggers. if (this.triggers != null) { for (Trigger trigger : this.triggers) { addTriggerToScheduler(trigger); } } } catch (Throwable ex) { if (transactionStatus != null) { try { this.transactionManager.rollback(transactionStatus); } catch (TransactionException tex) { logger.error("Job registration exception overridden by rollback exception", ex); throw tex; } } if (ex instanceof SchedulerException schedulerException) { throw schedulerException; } if (ex instanceof Exception) { throw new SchedulerException("Registration of jobs and triggers failed: " + ex.getMessage(), ex); } throw new SchedulerException("Registration of jobs and triggers failed: " + ex.getMessage()); } if (transactionStatus != null) { this.transactionManager.commit(transactionStatus); } }
Register jobs and triggers (within a transaction, if possible).
java
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerAccessor.java
197
[]
void
true
12
6.48
spring-projects/spring-framework
59,386
javadoc
false
asReader
public Reader asReader() { return new StrBuilderReader(); }
Gets the contents of this builder as a Reader. <p> This method allows the contents of the builder to be read using any standard method that expects a Reader. </p> <p> To use, simply create a {@link StrBuilder}, populate it with data, call {@code asReader}, and then read away. </p> <p> The internal character array is shared between the builder and the reader. This allows you to append to the builder after creating the reader, and the changes will be picked up. Note however, that no synchronization occurs, so you must perform all operations with the builder and the reader in one thread. </p> <p> The returned reader supports marking, and ignores the flush method. </p> @return a reader that reads from this builder
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
1,485
[]
Reader
true
1
6.8
apache/commons-lang
2,896
javadoc
false
array_str
def array_str(a, max_line_width=None, precision=None, suppress_small=None): """ Return a string representation of the data in an array. The data in the array is returned as a single string. This function is similar to `array_repr`, the difference being that `array_repr` also returns information on the kind of array and its data type. Parameters ---------- a : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than `max_line_width`. Defaults to ``numpy.get_printoptions()['linewidth']``. precision : int, optional Floating point precision. Defaults to ``numpy.get_printoptions()['precision']``. suppress_small : bool, optional Represent numbers "very close" to zero as zero; default is False. Very close is defined by precision: if the precision is 8, e.g., numbers smaller (in absolute value) than 5e-9 are represented as zero. Defaults to ``numpy.get_printoptions()['suppress']``. See Also -------- array2string, array_repr, set_printoptions Examples -------- >>> import numpy as np >>> np.array_str(np.arange(3)) '[0 1 2]' """ return _array_str_implementation( a, max_line_width, precision, suppress_small)
Return a string representation of the data in an array. The data in the array is returned as a single string. This function is similar to `array_repr`, the difference being that `array_repr` also returns information on the kind of array and its data type. Parameters ---------- a : ndarray Input array. max_line_width : int, optional Inserts newlines if text is longer than `max_line_width`. Defaults to ``numpy.get_printoptions()['linewidth']``. precision : int, optional Floating point precision. Defaults to ``numpy.get_printoptions()['precision']``. suppress_small : bool, optional Represent numbers "very close" to zero as zero; default is False. Very close is defined by precision: if the precision is 8, e.g., numbers smaller (in absolute value) than 5e-9 are represented as zero. Defaults to ``numpy.get_printoptions()['suppress']``. See Also -------- array2string, array_repr, set_printoptions Examples -------- >>> import numpy as np >>> np.array_str(np.arange(3)) '[0 1 2]'
python
numpy/_core/arrayprint.py
1,734
[ "a", "max_line_width", "precision", "suppress_small" ]
false
1
6
numpy/numpy
31,054
numpy
false
extract
@SuppressWarnings("unchecked") private void extract(String name, Map<String, Object> result, Object value) { if (value instanceof Map<?, ?> map) { if (CollectionUtils.isEmpty(map)) { result.put(name, value); return; } flatten(name, result, (Map<String, Object>) value); } else if (value instanceof Collection<?> collection) { if (CollectionUtils.isEmpty(collection)) { result.put(name, value); return; } int index = 0; for (Object object : collection) { extract(name + "[" + index + "]", result, object); index++; } } else { result.put(name, value); } }
Flatten the map keys using period separator. @param map the map that should be flattened @return the flattened map
java
core/spring-boot/src/main/java/org/springframework/boot/support/SpringApplicationJsonEnvironmentPostProcessor.java
130
[ "name", "result", "value" ]
void
true
5
8.24
spring-projects/spring-boot
79,428
javadoc
false
notmasked_edges
def notmasked_edges(a, axis=None): """ Find the indices of the first and last unmasked values along an axis. If all values are masked, return None. Otherwise, return a list of two tuples, corresponding to the indices of the first and last unmasked values respectively. Parameters ---------- a : array_like The input array. axis : int, optional Axis along which to perform the operation. If None (default), applies to a flattened version of the array. Returns ------- edges : ndarray or list An array of start and end indexes if there are any masked data in the array. If there are no masked data in the array, `edges` is a list of the first and last index. See Also -------- flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous clump_masked, clump_unmasked Examples -------- >>> import numpy as np >>> a = np.arange(9).reshape((3, 3)) >>> m = np.zeros_like(a) >>> m[1:, 1:] = 1 >>> am = np.ma.array(a, mask=m) >>> np.array(am[~am.mask]) array([0, 1, 2, 3, 6]) >>> np.ma.notmasked_edges(am) array([0, 6]) """ a = asarray(a) if axis is None or a.ndim == 1: return flatnotmasked_edges(a) m = getmaskarray(a) idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) return [tuple(idx[i].min(axis).compressed() for i in range(a.ndim)), tuple(idx[i].max(axis).compressed() for i in range(a.ndim)), ]
Find the indices of the first and last unmasked values along an axis. If all values are masked, return None. Otherwise, return a list of two tuples, corresponding to the indices of the first and last unmasked values respectively. Parameters ---------- a : array_like The input array. axis : int, optional Axis along which to perform the operation. If None (default), applies to a flattened version of the array. Returns ------- edges : ndarray or list An array of start and end indexes if there are any masked data in the array. If there are no masked data in the array, `edges` is a list of the first and last index. See Also -------- flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous clump_masked, clump_unmasked Examples -------- >>> import numpy as np >>> a = np.arange(9).reshape((3, 3)) >>> m = np.zeros_like(a) >>> m[1:, 1:] = 1 >>> am = np.ma.array(a, mask=m) >>> np.array(am[~am.mask]) array([0, 1, 2, 3, 6]) >>> np.ma.notmasked_edges(am) array([0, 6])
python
numpy/ma/extras.py
1,925
[ "a", "axis" ]
false
3
7.52
numpy/numpy
31,054
numpy
false
get_indexer_dict
def get_indexer_dict( label_list: list[np.ndarray], keys: list[Index] ) -> dict[Hashable, npt.NDArray[np.intp]]: """ Returns ------- dict: Labels mapped to indexers. """ shape = tuple(len(x) for x in keys) group_index = get_group_index(label_list, shape, sort=True, xnull=True) if np.all(group_index == -1): # Short-circuit, lib.indices_fast will return the same return {} ngroups = ( ((group_index.size and group_index.max()) + 1) if is_int64_overflow_possible(shape) else np.prod(shape, dtype="i8") ) sorter = get_group_index_sorter(group_index, ngroups) sorted_labels = [lab.take(sorter) for lab in label_list] group_index = group_index.take(sorter) return lib.indices_fast(sorter, group_index, keys, sorted_labels)
Returns ------- dict: Labels mapped to indexers.
python
pandas/core/sorting.py
599
[ "label_list", "keys" ]
dict[Hashable, npt.NDArray[np.intp]]
true
4
6.4
pandas-dev/pandas
47,362
unknown
false
getCacheOperationMetadata
protected CacheOperationMetadata getCacheOperationMetadata( CacheOperation operation, Method method, Class<?> targetClass) { CacheOperationCacheKey cacheKey = new CacheOperationCacheKey(operation, method, targetClass); CacheOperationMetadata metadata = this.metadataCache.get(cacheKey); if (metadata == null) { KeyGenerator operationKeyGenerator; if (StringUtils.hasText(operation.getKeyGenerator())) { operationKeyGenerator = getBean(operation.getKeyGenerator(), KeyGenerator.class); } else { operationKeyGenerator = getKeyGenerator(); } CacheResolver operationCacheResolver; if (StringUtils.hasText(operation.getCacheResolver())) { operationCacheResolver = getBean(operation.getCacheResolver(), CacheResolver.class); } else if (StringUtils.hasText(operation.getCacheManager())) { CacheManager cacheManager = getBean(operation.getCacheManager(), CacheManager.class); operationCacheResolver = new SimpleCacheResolver(cacheManager); } else { operationCacheResolver = getCacheResolver(); Assert.state(operationCacheResolver != null, "No CacheResolver/CacheManager set"); } metadata = new CacheOperationMetadata(operation, method, targetClass, operationKeyGenerator, operationCacheResolver); this.metadataCache.put(cacheKey, metadata); } return metadata; }
Return the {@link CacheOperationMetadata} for the specified operation. <p>Resolve the {@link CacheResolver} and the {@link KeyGenerator} to be used for the operation. @param operation the operation @param method the method on which the operation is invoked @param targetClass the target type @return the resolved metadata for the operation
java
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
337
[ "operation", "method", "targetClass" ]
CacheOperationMetadata
true
5
7.28
spring-projects/spring-framework
59,386
javadoc
false
get_or_create_glue_job
def get_or_create_glue_job(self) -> str | None: """ Get (or creates) and returns the Job name. .. seealso:: - :external+boto3:py:meth:`Glue.Client.create_job` :return:Name of the Job """ if self.job_name is None: raise ValueError("job_name must be set to get or create a Glue job") if self.has_job(self.job_name): return self.job_name config = self.create_glue_job_config() self.log.info("Creating job: %s", self.job_name) self.conn.create_job(**config) return self.job_name
Get (or creates) and returns the Job name. .. seealso:: - :external+boto3:py:meth:`Glue.Client.create_job` :return:Name of the Job
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue.py
486
[ "self" ]
str | None
true
3
6.4
apache/airflow
43,597
unknown
false
maybeThrowSslAuthenticationException
private void maybeThrowSslAuthenticationException() { if (handshakeException != null) throw handshakeException; }
SSL exceptions are propagated as authentication failures so that clients can avoid retries and report the failure. If `flush` is true, exceptions are propagated after any pending outgoing bytes are flushed to ensure that the peer is notified of the failure.
java
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
940
[]
void
true
2
6.8
apache/kafka
31,560
javadoc
false
optionEquals
function optionEquals<K extends keyof FormatCodeSettings>(optionName: K, optionValue: FormatCodeSettings[K]): (context: FormattingContext) => boolean { return context => context.options && context.options[optionName] === optionValue; }
A rule takes a two tokens (left/right) and a particular context for which you're meant to look at them. You then declare what should the whitespace annotation be between these tokens via the action param. @param debugName Name to print @param left The left side of the comparison @param right The right side of the comparison @param context A set of filters to narrow down the space in which this formatter rule applies @param action a declaration of the expected whitespace @param flags whether the rule deletes a line or not, defaults to no-op
typescript
src/services/formatting/rules.ts
469
[ "optionName", "optionValue" ]
true
2
6.16
microsoft/TypeScript
107,154
jsdoc
false
advance
@CanIgnoreReturnValue public static int advance(Iterator<?> iterator, int numberToAdvance) { checkNotNull(iterator); checkArgument(numberToAdvance >= 0, "numberToAdvance must be nonnegative"); int i; for (i = 0; i < numberToAdvance && iterator.hasNext(); i++) { iterator.next(); } return i; }
Calls {@code next()} on {@code iterator}, either {@code numberToAdvance} times or until {@code hasNext()} returns {@code false}, whichever comes first. @return the number of elements the iterator was advanced @since 13.0 (since 3.0 as {@code Iterators.skip})
java
android/guava/src/com/google/common/collect/Iterators.java
934
[ "iterator", "numberToAdvance" ]
true
3
6.4
google/guava
51,352
javadoc
false
roll
def roll(a, shift, axis=None): """ Roll array elements along a given axis. Elements that roll beyond the last position are re-introduced at the first. Parameters ---------- a : array_like Input array. shift : int or tuple of ints The number of places by which elements are shifted. If a tuple, then `axis` must be a tuple of the same size, and each of the given axes is shifted by the corresponding number. If an int while `axis` is a tuple of ints, then the same value is used for all given axes. axis : int or tuple of ints, optional Axis or axes along which elements are shifted. By default, the array is flattened before shifting, after which the original shape is restored. Returns ------- res : ndarray Output array, with the same shape as `a`. See Also -------- rollaxis : Roll the specified axis backwards, until it lies in a given position. Notes ----- Supports rolling over multiple dimensions simultaneously. Examples -------- >>> import numpy as np >>> x = np.arange(10) >>> np.roll(x, 2) array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) >>> np.roll(x, -2) array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) >>> x2 = np.reshape(x, (2, 5)) >>> x2 array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> np.roll(x2, 1) array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]) >>> np.roll(x2, -1) array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 0]]) >>> np.roll(x2, 1, axis=0) array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]) >>> np.roll(x2, -1, axis=0) array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]) >>> np.roll(x2, 1, axis=1) array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]) >>> np.roll(x2, -1, axis=1) array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]) >>> np.roll(x2, (1, 1), axis=(1, 0)) array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]) >>> np.roll(x2, (2, 1), axis=(1, 0)) array([[8, 9, 5, 6, 7], [3, 4, 0, 1, 2]]) """ a = asanyarray(a) if axis is None: return roll(a.ravel(), shift, 0).reshape(a.shape) else: axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) broadcasted = broadcast(shift, axis) if broadcasted.ndim > 1: raise ValueError( "'shift' and 'axis' 
should be scalars or 1D sequences") shifts = dict.fromkeys(range(a.ndim), 0) for sh, ax in broadcasted: shifts[ax] += int(sh) rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. if offset: # (original, result), (original, result) rolls[ax] = ((slice(None, -offset), slice(offset, None)), (slice(-offset, None), slice(None, offset))) result = empty_like(a) for indices in itertools.product(*rolls): arr_index, res_index = zip(*indices) result[res_index] = a[arr_index] return result
Roll array elements along a given axis. Elements that roll beyond the last position are re-introduced at the first. Parameters ---------- a : array_like Input array. shift : int or tuple of ints The number of places by which elements are shifted. If a tuple, then `axis` must be a tuple of the same size, and each of the given axes is shifted by the corresponding number. If an int while `axis` is a tuple of ints, then the same value is used for all given axes. axis : int or tuple of ints, optional Axis or axes along which elements are shifted. By default, the array is flattened before shifting, after which the original shape is restored. Returns ------- res : ndarray Output array, with the same shape as `a`. See Also -------- rollaxis : Roll the specified axis backwards, until it lies in a given position. Notes ----- Supports rolling over multiple dimensions simultaneously. Examples -------- >>> import numpy as np >>> x = np.arange(10) >>> np.roll(x, 2) array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) >>> np.roll(x, -2) array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) >>> x2 = np.reshape(x, (2, 5)) >>> x2 array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) >>> np.roll(x2, 1) array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]) >>> np.roll(x2, -1) array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 0]]) >>> np.roll(x2, 1, axis=0) array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]) >>> np.roll(x2, -1, axis=0) array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]) >>> np.roll(x2, 1, axis=1) array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]) >>> np.roll(x2, -1, axis=1) array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]) >>> np.roll(x2, (1, 1), axis=(1, 0)) array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]) >>> np.roll(x2, (2, 1), axis=(1, 0)) array([[8, 9, 5, 6, 7], [3, 4, 0, 1, 2]])
python
numpy/_core/numeric.py
1,218
[ "a", "shift", "axis" ]
false
9
7.76
numpy/numpy
31,054
numpy
false
process
private void process(final CurrentLagEvent event) { try { final TopicPartition topicPartition = event.partition(); final IsolationLevel isolationLevel = event.isolationLevel(); final Long lag = subscriptions.partitionLag(topicPartition, isolationLevel); final OptionalLong lagOpt; if (lag == null) { if (subscriptions.partitionEndOffset(topicPartition, isolationLevel) == null && !subscriptions.partitionEndOffsetRequested(topicPartition)) { // If the log end offset is unknown and there isn't already an in-flight list offset // request, issue one with the goal that the lag will be available the next time the // user calls currentLag(). log.info("Requesting the log end offset for {} in order to compute lag", topicPartition); subscriptions.requestPartitionEndOffset(topicPartition); // Emulates the Consumer.endOffsets() logic... Map<TopicPartition, Long> timestampToSearch = Collections.singletonMap( topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP ); requestManagers.offsetsRequestManager.fetchOffsets(timestampToSearch, false); } lagOpt = OptionalLong.empty(); } else { lagOpt = OptionalLong.of(lag); } event.future().complete(lagOpt); } catch (Exception e) { event.future().completeExceptionally(e); } }
Process event indicating whether the AcknowledgeCommitCallbackHandler is configured by the user. @param event Event containing a boolean to indicate if the callback handler is configured or not.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
651
[ "event" ]
void
true
5
6.24
apache/kafka
31,560
javadoc
false
isEligible
protected boolean isEligible(Class<?> targetClass) { Boolean eligible = this.eligibleBeans.get(targetClass); if (eligible != null) { return eligible; } if (this.advisor == null) { return false; } eligible = AopUtils.canApply(this.advisor, targetClass); this.eligibleBeans.put(targetClass, eligible); return eligible; }
Check whether the given class is eligible for advising with this post-processor's {@link Advisor}. <p>Implements caching of {@code canApply} results per bean target class. @param targetClass the class to check against @see AopUtils#canApply(Advisor, Class)
java
spring-aop/src/main/java/org/springframework/aop/framework/AbstractAdvisingBeanPostProcessor.java
162
[ "targetClass" ]
true
3
6.24
spring-projects/spring-framework
59,386
javadoc
false
getlincoef
def getlincoef(e, xset): # e = a*x+b ; x in xset """ Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in xset. >>> getlincoef('2*x + 1', {'x'}) (2, 1, 'x') >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) (5, 3, 'x') >>> getlincoef('0', {'x'}) (0, 0, None) >>> getlincoef('0*x', {'x'}) (0, 0, 'x') >>> getlincoef('x*x', {'x'}) (None, None, None) This can be tricked by sufficiently complex expressions >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) (2.0, 3.0, 'x') """ try: c = int(myeval(e, {}, {})) return 0, c, None except Exception: pass if getlincoef_re_1.match(e): return 1, 0, e len_e = len(e) for x in xset: if len(x) > len_e: continue if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): # skip function calls having x as an argument, e.g max(1, x) continue re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I) m = re_1.match(e) if m: try: m1 = re_1.match(e) while m1: ee = f"{m1.group('before')}({0}){m1.group('after')}" m1 = re_1.match(ee) b = myeval(ee, {}, {}) m1 = re_1.match(e) while m1: ee = f"{m1.group('before')}({1}){m1.group('after')}" m1 = re_1.match(ee) a = myeval(ee, {}, {}) - b m1 = re_1.match(e) while m1: ee = f"{m1.group('before')}({0.5}){m1.group('after')}" m1 = re_1.match(ee) c = myeval(ee, {}, {}) # computing another point to be sure that expression is linear m1 = re_1.match(e) while m1: ee = f"{m1.group('before')}({1.5}){m1.group('after')}" m1 = re_1.match(ee) c2 = myeval(ee, {}, {}) if (a * 0.5 + b == c and a * 1.5 + b == c2): return a, b, x except Exception: pass break return None, None, None
Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in xset. >>> getlincoef('2*x + 1', {'x'}) (2, 1, 'x') >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) (5, 3, 'x') >>> getlincoef('0', {'x'}) (0, 0, None) >>> getlincoef('0*x', {'x'}) (0, 0, 'x') >>> getlincoef('x*x', {'x'}) (None, None, None) This can be tricked by sufficiently complex expressions >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) (2.0, 3.0, 'x')
python
numpy/f2py/crackfortran.py
2,280
[ "e", "xset" ]
false
12
6.48
numpy/numpy
31,054
unknown
false
rebootstrap
public synchronized void rebootstrap() { log.info("Rebootstrapping with {}", this.bootstrapAddresses); this.bootstrap(this.bootstrapAddresses); }
@return a mapping from topic names to topic IDs for all topics with valid IDs in the cache
java
clients/src/main/java/org/apache/kafka/clients/Metadata.java
313
[]
void
true
1
6.48
apache/kafka
31,560
javadoc
false
isAssignable
private static boolean isAssignable(final Type type, final TypeVariable<?> toTypeVariable, final Map<TypeVariable<?>, Type> typeVarAssigns) { if (type == null) { return true; } // only a null type can be assigned to null type which // would have cause the previous to return true if (toTypeVariable == null) { return false; } // all types are assignable to themselves if (toTypeVariable.equals(type)) { return true; } if (type instanceof TypeVariable<?>) { // a type variable is assignable to another type variable, if // and only if the former is the latter, extends the latter, or // is otherwise a descendant of the latter. final Type[] bounds = getImplicitBounds((TypeVariable<?>) type); for (final Type bound : bounds) { if (isAssignable(bound, toTypeVariable, typeVarAssigns)) { return true; } } } if (type instanceof Class<?> || type instanceof ParameterizedType || type instanceof GenericArrayType || type instanceof WildcardType) { return false; } throw new IllegalStateException("found an unhandled type: " + type); }
Tests if the subject type may be implicitly cast to the target type variable following the Java generics rules. @param type the subject type to be assigned to the target type. @param toTypeVariable the target type variable. @param typeVarAssigns a map with type variables. @return {@code true} if {@code type} is assignable to {@code toTypeVariable}.
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
1,148
[ "type", "toTypeVariable", "typeVarAssigns" ]
true
10
8.08
apache/commons-lang
2,896
javadoc
false
truncate
public static String truncate(CharSequence seq, int maxLength, String truncationIndicator) { checkNotNull(seq); // length to truncate the sequence to, not including the truncation indicator int truncationLength = maxLength - truncationIndicator.length(); // in this worst case, this allows a maxLength equal to the length of the truncationIndicator, // meaning that a string will be truncated to just the truncation indicator itself checkArgument( truncationLength >= 0, "maxLength (%s) must be >= length of the truncation indicator (%s)", maxLength, truncationIndicator.length()); if (seq.length() <= maxLength) { String string = seq.toString(); if (string.length() <= maxLength) { return string; } // if the length of the toString() result was > maxLength for some reason, truncate that seq = string; } return new StringBuilder(maxLength) .append(seq, 0, truncationLength) .append(truncationIndicator) .toString(); }
Truncates the given character sequence to the given maximum length. If the length of the sequence is greater than {@code maxLength}, the returned string will be exactly {@code maxLength} chars in length and will end with the given {@code truncationIndicator}. Otherwise, the sequence will be returned as a string with no changes to the content. <p>Examples: {@snippet : Ascii.truncate("foobar", 7, "..."); // returns "foobar" Ascii.truncate("foobar", 5, "..."); // returns "fo..." } <p><b>Note:</b> This method <i>may</i> work with certain non-ASCII text but is not safe for use with arbitrary Unicode text. It is mostly intended for use with text that is known to be safe for use with it (such as all-ASCII text) and for simple debugging text. When using this method, consider the following: <ul> <li>it may split surrogate pairs <li>it may split characters and combining characters <li>it does not consider word boundaries <li>if truncating for display to users, there are other considerations that must be taken into account <li>the appropriate truncation indicator may be locale-dependent <li>it is safe to use non-ASCII characters in the truncation indicator </ul> @throws IllegalArgumentException if {@code maxLength} is less than the length of {@code truncationIndicator} @since 16.0
java
android/guava/src/com/google/common/base/Ascii.java
550
[ "seq", "maxLength", "truncationIndicator" ]
String
true
3
7.28
google/guava
51,352
javadoc
false
getVersionString
private @Nullable String getVersionString(@Nullable String version, boolean format, @Nullable String fallback) { if (version == null) { return fallback; } return format ? " (v" + version + ")" : version; }
Return the application title that should be used for the source class. By default will use {@link Package#getImplementationTitle()}. @param sourceClass the source class @return the application title
java
core/spring-boot/src/main/java/org/springframework/boot/ResourceBanner.java
162
[ "version", "format", "fallback" ]
String
true
3
7.44
spring-projects/spring-boot
79,428
javadoc
false
toString
@Override public String toString() { return Long.toString(sum()); }
Returns the String representation of the {@link #sum}. @return the String representation of the {@link #sum}
java
android/guava/src/com/google/common/cache/LongAdder.java
151
[]
String
true
1
6.96
google/guava
51,352
javadoc
false
falsePredicate
@SuppressWarnings("unchecked") // method name cannot be "false". public static <T> Predicate<T> falsePredicate() { return (Predicate<T>) FALSE; }
Gets the Predicate singleton that always returns false. @param <T> the type of the input to the predicate. @return the Predicate singleton.
java
src/main/java/org/apache/commons/lang3/function/Predicates.java
38
[]
true
1
7.2
apache/commons-lang
2,896
javadoc
false
repeat
public static String repeat(String string, int count) { checkNotNull(string); // eager for GWT. if (count <= 1) { checkArgument(count >= 0, "invalid count: %s", count); return (count == 0) ? "" : string; } // IF YOU MODIFY THE CODE HERE, you must update StringsRepeatBenchmark int len = string.length(); long longSize = (long) len * (long) count; int size = (int) longSize; if (size != longSize) { throw new ArrayIndexOutOfBoundsException("Required array size too large: " + longSize); } char[] array = new char[size]; string.getChars(0, len, array, 0); int n; for (n = len; n < size - n; n <<= 1) { System.arraycopy(array, 0, array, n, n); } System.arraycopy(array, 0, array, n, size - n); return new String(array); }
Returns a string consisting of a specific number of concatenated copies of an input string. For example, {@code repeat("hey", 3)} returns the string {@code "heyheyhey"}. @param string any non-null string @param count the number of times to repeat it; a nonnegative integer @return a string containing {@code string} repeated {@code count} times (the empty string if {@code count} is zero) @throws IllegalArgumentException if {@code count} is negative
java
android/guava/src/com/google/common/base/Strings.java
144
[ "string", "count" ]
String
true
5
7.92
google/guava
51,352
javadoc
false
hideInternalStackFrames
function hideInternalStackFrames(error) { overrideStackTrace.set(error, (error, stackFrames) => { let frames = stackFrames; if (typeof stackFrames === 'object') { frames = ArrayPrototypeFilter( stackFrames, (frm) => !StringPrototypeStartsWith(frm.getFileName() || '', 'node:internal'), ); } ArrayPrototypeUnshift(frames, error); return ArrayPrototypeJoin(frames, '\n at '); }); }
Returns true if `err.name` and `err.message` are equal to engine-specific values indicating max call stack size has been exceeded. "Maximum call stack size exceeded" in V8. @param {Error} err @returns {boolean}
javascript
lib/internal/errors.js
961
[ "error" ]
false
3
6.24
nodejs/node
114,839
jsdoc
false
getParameterNames
@Override public String @Nullable [] getParameterNames(Constructor<?> ctor) { if (this.raiseExceptions) { throw new UnsupportedOperationException("An advice method can never be a constructor"); } else { // we return null rather than throw an exception so that we behave well // in a chain-of-responsibility. return null; } }
An advice method can never be a constructor in Spring. @return {@code null} @throws UnsupportedOperationException if {@link #setRaiseExceptions(boolean) raiseExceptions} has been set to {@code true}
java
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAdviceParameterNameDiscoverer.java
287
[ "ctor" ]
true
2
6.24
spring-projects/spring-framework
59,386
javadoc
false
findTemplateSource
@Override public @Nullable Object findTemplateSource(String name) throws IOException { if (logger.isDebugEnabled()) { logger.debug("Looking for FreeMarker template with name [" + name + "]"); } Resource resource = this.resourceLoader.getResource(this.templateLoaderPath + name); return (resource.exists() ? resource : null); }
Create a new {@code SpringTemplateLoader}. @param resourceLoader the Spring ResourceLoader to use @param templateLoaderPath the template loader path to use
java
spring-context-support/src/main/java/org/springframework/ui/freemarker/SpringTemplateLoader.java
70
[ "name" ]
Object
true
3
6.4
spring-projects/spring-framework
59,386
javadoc
false
isotonic_regression
def isotonic_regression( y, *, sample_weight=None, y_min=None, y_max=None, increasing=True ): """Solve the isotonic regression model. Read more in the :ref:`User Guide <isotonic>`. Parameters ---------- y : array-like of shape (n_samples,) The data. sample_weight : array-like of shape (n_samples,), default=None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : float, default=None Lower bound on the lowest predicted value (the minimum value may still be higher). If not set, defaults to -inf. y_max : float, default=None Upper bound on the highest predicted value (the maximum may still be lower). If not set, defaults to +inf. increasing : bool, default=True Whether to compute ``y_`` is increasing (if set to True) or decreasing (if set to False). Returns ------- y_ : ndarray of shape (n_samples,) Isotonic fit of y. References ---------- "Active set algorithms for isotonic regression; A unifying framework" by Michael J. Best and Nilotpal Chakravarti, section 3. Examples -------- >>> from sklearn.isotonic import isotonic_regression >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4]) array([2.75 , 2.75 , 2.75 , 2.75 , 7.33, 7.33, 7.33, 7.33, 7.33, 7.33]) """ y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32]) if sp_base_version >= parse_version("1.12.0"): res = optimize.isotonic_regression( y=y, weights=sample_weight, increasing=increasing ) y = np.asarray(res.x, dtype=y.dtype) else: # TODO: remove this branch when Scipy 1.12 is the minimum supported version # Also remove _inplace_contiguous_isotonic_regression. 
order = np.s_[:] if increasing else np.s_[::-1] y = np.array(y[order], dtype=y.dtype) sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True) sample_weight = np.ascontiguousarray(sample_weight[order]) _inplace_contiguous_isotonic_regression(y, sample_weight) y = y[order] if y_min is not None or y_max is not None: # Older versions of np.clip don't accept None as a bound, so use np.inf if y_min is None: y_min = -np.inf if y_max is None: y_max = np.inf np.clip(y, y_min, y_max, y) return y
Solve the isotonic regression model. Read more in the :ref:`User Guide <isotonic>`. Parameters ---------- y : array-like of shape (n_samples,) The data. sample_weight : array-like of shape (n_samples,), default=None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : float, default=None Lower bound on the lowest predicted value (the minimum value may still be higher). If not set, defaults to -inf. y_max : float, default=None Upper bound on the highest predicted value (the maximum may still be lower). If not set, defaults to +inf. increasing : bool, default=True Whether to compute ``y_`` is increasing (if set to True) or decreasing (if set to False). Returns ------- y_ : ndarray of shape (n_samples,) Isotonic fit of y. References ---------- "Active set algorithms for isotonic regression; A unifying framework" by Michael J. Best and Nilotpal Chakravarti, section 3. Examples -------- >>> from sklearn.isotonic import isotonic_regression >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4]) array([2.75 , 2.75 , 2.75 , 2.75 , 7.33, 7.33, 7.33, 7.33, 7.33, 7.33])
python
sklearn/isotonic.py
110
[ "y", "sample_weight", "y_min", "y_max", "increasing" ]
false
8
7.6
scikit-learn/scikit-learn
64,340
numpy
false
assignRoundRobin
private void assignRoundRobin(List<TopicPartition> unassignedPartitions) { Iterator<String> unfilledConsumerIter = unfilledMembersWithUnderMinQuotaPartitions.iterator(); // Round-Robin filling remaining members up to the expected numbers of maxQuota, otherwise, to minQuota for (TopicPartition unassignedPartition : unassignedPartitions) { String consumer; if (unfilledConsumerIter.hasNext()) { consumer = unfilledConsumerIter.next(); } else { if (unfilledMembersWithUnderMinQuotaPartitions.isEmpty() && unfilledMembersWithExactlyMinQuotaPartitions.isEmpty()) { // Should not enter here since we have calculated the exact number to assign to each consumer. // This indicates issues in the assignment algorithm int currentPartitionIndex = unassignedPartitions.indexOf(unassignedPartition); log.error("No more unfilled consumers to be assigned. The remaining unassigned partitions are: {}", unassignedPartitions.subList(currentPartitionIndex, unassignedPartitions.size())); throw new IllegalStateException("No more unfilled consumers to be assigned."); } else if (unfilledMembersWithUnderMinQuotaPartitions.isEmpty()) { consumer = unfilledMembersWithExactlyMinQuotaPartitions.poll(); } else { unfilledConsumerIter = unfilledMembersWithUnderMinQuotaPartitions.iterator(); consumer = unfilledConsumerIter.next(); } } int currentAssignedCount = assignNewPartition(unassignedPartition, consumer); if (currentAssignedCount == minQuota) { unfilledConsumerIter.remove(); unfilledMembersWithExactlyMinQuotaPartitions.add(consumer); } else if (currentAssignedCount == maxQuota) { currentNumMembersWithOverMinQuotaPartitions++; if (currentNumMembersWithOverMinQuotaPartitions == expectedNumMembersWithOverMinQuotaPartitions) { // We only start to iterate over the "potentially unfilled" members at minQuota after we've filled // all members up to at least minQuota, so once the last minQuota member reaches maxQuota, we // should be done. 
But in case of some algorithmic error, just log a warning and continue to // assign any remaining partitions within the assignment constraints if (unassignedPartitions.indexOf(unassignedPartition) != unassignedPartitions.size() - 1) { log.error("Filled the last member up to maxQuota but still had partitions remaining to assign, " + "will continue but this indicates a bug in the assignment."); } } } } }
Constructs a constrained assignment builder. @param partitionsPerTopic The partitions for each subscribed topic @param rackInfo Rack information for consumers and racks @param consumerToOwnedPartitions Each consumer's previously owned and still-subscribed partitions @param partitionsWithMultiplePreviousOwners The partitions being claimed in the previous assignment of multiple consumers
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
781
[ "unassignedPartitions" ]
void
true
9
6.08
apache/kafka
31,560
javadoc
false
generateBitVector
@SafeVarargs public static <E extends Enum<E>> long generateBitVector(final Class<E> enumClass, final E... values) { Validate.noNullElements(values); return generateBitVector(enumClass, Arrays.asList(values)); }
Creates a long bit vector representation of the given array of Enum values. <p>This generates a value that is usable by {@link EnumUtils#processBitVector}.</p> <p>Do not use this method if you have more than 64 values in your Enum, as this would create a value greater than a long can hold.</p> @param enumClass the class of the enum we are working with, not {@code null}. @param values the values we want to convert, not {@code null}. @param <E> the type of the enumeration. @return a long whose value provides a binary representation of the given set of enum values. @throws NullPointerException if {@code enumClass} or {@code values} is {@code null}. @throws IllegalArgumentException if {@code enumClass} is not an enum class or has more than 64 values. @since 3.0.1 @see #generateBitVectors(Class, Iterable)
java
src/main/java/org/apache/commons/lang3/EnumUtils.java
97
[ "enumClass" ]
true
1
6.88
apache/commons-lang
2,896
javadoc
false
getValue
@Deprecated @Override public T getValue() { return this.value; }
Gets the value. @return the value, may be null. @deprecated Use {@link #get()}.
java
src/main/java/org/apache/commons/lang3/mutable/MutableObject.java
92
[]
T
true
1
7.04
apache/commons-lang
2,896
javadoc
false
toString
@Override public String toString() { return String.format(FMT_TO_STRING, Integer.valueOf(System.identityHashCode(this)), getObject()); }
Returns a string representation for this object. This string also contains a string representation of the object managed by this initializer. @return a string for this object
java
src/main/java/org/apache/commons/lang3/concurrent/ConstantInitializer.java
134
[]
String
true
1
6.8
apache/commons-lang
2,896
javadoc
false
hasUpperCase
private boolean hasUpperCase() { for (char c : chars) { if (Ascii.isUpperCase(c)) { return true; } } return false; }
Returns an equivalent {@code Alphabet} except it ignores case.
java
android/guava/src/com/google/common/io/BaseEncoding.java
557
[]
true
2
6.4
google/guava
51,352
javadoc
false
main
def main( function: Callable[[IO[str]], Iterable[tuple[int, str]]], source_path: str, output_format: str, ) -> bool: """ Main entry point of the script. Parameters ---------- function : Callable Function to execute for the specified validation type. source_path : str Source path representing path to a file/directory. output_format : str Output format of the error message. file_extensions_to_check : str Comma separated values of what file extensions to check. excluded_file_paths : str Comma separated values of what file paths to exclude during the check. Returns ------- bool True if found any patterns are found related to the given function. Raises ------ ValueError If the `source_path` is not pointing to existing file/directory. """ is_failed: bool = False for file_path in source_path: with open(file_path, encoding="utf-8") as file_obj: for line_number, msg in function(file_obj): is_failed = True print( output_format.format( source_path=file_path, line_number=line_number, msg=msg ) ) return is_failed
Main entry point of the script. Parameters ---------- function : Callable Function to execute for the specified validation type. source_path : str Source path representing path to a file/directory. output_format : str Output format of the error message. file_extensions_to_check : str Comma separated values of what file extensions to check. excluded_file_paths : str Comma separated values of what file paths to exclude during the check. Returns ------- bool True if found any patterns are found related to the given function. Raises ------ ValueError If the `source_path` is not pointing to existing file/directory.
python
scripts/validate_unwanted_patterns.py
399
[ "function", "source_path", "output_format" ]
bool
true
3
6.72
pandas-dev/pandas
47,362
numpy
false
getNext
@ParametricNullness public static <T extends @Nullable Object> T getNext( Iterator<? extends T> iterator, @ParametricNullness T defaultValue) { return iterator.hasNext() ? iterator.next() : defaultValue; }
Returns the next element in {@code iterator} or {@code defaultValue} if the iterator is empty. The {@link Iterables} analog to this method is {@link Iterables#getFirst}. @param defaultValue the default value to return if the iterator is empty @return the next element of {@code iterator} or the default value @since 7.0
java
android/guava/src/com/google/common/collect/Iterators.java
891
[ "iterator", "defaultValue" ]
T
true
2
7.76
google/guava
51,352
javadoc
false
computedPropertyLayer
function computedPropertyLayer(field: ComputedField, result: object): CompositeProxyLayer { return cacheProperties(addProperty(field.name, () => field.compute(result))) }
Given a part of a query result, it's model name and a list of extension, applies computed fields to the results. Fields are computed lazily on a first access, after that the result of computation is cached. In case `select` is used, all dependencies of the computed fields would be excluded from final result, unless they are also specified in the select. This function applies computed fields to a single object only: it does not traverse relationships. For full functionality, it is meant to be combined with `visitQueryResult`. @param params @returns
typescript
packages/client/src/runtime/core/extensions/applyResultExtensions.ts
80
[ "field", "result" ]
true
1
6.64
prisma/prisma
44,834
jsdoc
false
forcePut
@CanIgnoreReturnValue @Override public @Nullable V forcePut(@ParametricNullness K key, @ParametricNullness V value) { return put(key, value, true); }
Returns {@code true} if this BiMap contains an entry whose value is equal to {@code value} (or, equivalently, if this inverse view contains a key that is equal to {@code value}). <p>Due to the property that values in a BiMap are unique, this will tend to execute in faster-than-linear time. @param value the object to search for in the values of this BiMap @return true if a mapping exists from a key to the specified value
java
android/guava/src/com/google/common/collect/HashBiMap.java
324
[ "key", "value" ]
V
true
1
6.72
google/guava
51,352
javadoc
false
of
static LibraryCoordinates of(@Nullable String groupId, @Nullable String artifactId, @Nullable String version) { return new DefaultLibraryCoordinates(groupId, artifactId, version); }
Factory method to create {@link LibraryCoordinates} with the specified values. @param groupId the group ID @param artifactId the artifact ID @param version the version @return a new {@link LibraryCoordinates} instance
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/LibraryCoordinates.java
55
[ "groupId", "artifactId", "version" ]
LibraryCoordinates
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
autoLink
function autoLink($) { $('.doc-container code:not(:header code)').each(function() { const $code = $(this); const html = $code.html(); if (/^_\.\w+$/.test(html)) { const id = html.split('.')[1]; $code.replaceWith(`<a href="#${ id }"><code>_.${ id }</code></a>`); } }); }
Converts Lodash method references into documentation links. Searches for inline code references matching `_.word` (e.g., `_.map`, `_.filter`) in documentation body text and wraps them in anchor links. Excludes code within headers as those already have proper anchors. @private @param {Object} $ The Cheerio object. @example // Body text: <code>_.map</code> → <a href="#map"><code>_.map</code></a> // Headers: <h3><code>_.VERSION</code></h3> → unchanged (excluded)
javascript
lib/main/build-site.js
48
[ "$" ]
false
2
6.16
lodash/lodash
61,490
jsdoc
false
fromString
public static AclOperation fromString(String str) throws IllegalArgumentException { try { return AclOperation.valueOf(str.toUpperCase(Locale.ROOT)); } catch (IllegalArgumentException e) { return UNKNOWN; } }
Parse the given string as an ACL operation. @param str The string to parse. @return The AclOperation, or UNKNOWN if the string could not be matched.
java
clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
140
[ "str" ]
AclOperation
true
2
7.92
apache/kafka
31,560
javadoc
false
dstack
def dstack(tup): """ Stack arrays in sequence depth wise (along third axis). This is equivalent to concatenation along the third axis after 2-D arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by `dsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of arrays The arrays must have the same shape along all but the third axis. 1-D or 2-D arrays must have the same shape. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 3-D. See Also -------- concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. block : Assemble an nd-array from nested lists of blocks. vstack : Stack arrays in sequence vertically (row wise). hstack : Stack arrays in sequence horizontally (column wise). column_stack : Stack 1-D arrays as columns into a 2-D array. dsplit : Split array along third axis. Examples -------- >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) array([[[1, 4], [2, 5], [3, 6]]]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) array([[[1, 4]], [[2, 5]], [[3, 6]]]) """ arrs = atleast_3d(*tup) if not isinstance(arrs, tuple): arrs = (arrs,) return _nx.concatenate(arrs, 2)
Stack arrays in sequence depth wise (along third axis). This is equivalent to concatenation along the third axis after 2-D arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by `dsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of arrays The arrays must have the same shape along all but the third axis. 1-D or 2-D arrays must have the same shape. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 3-D. See Also -------- concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. block : Assemble an nd-array from nested lists of blocks. vstack : Stack arrays in sequence vertically (row wise). hstack : Stack arrays in sequence horizontally (column wise). column_stack : Stack 1-D arrays as columns into a 2-D array. dsplit : Split array along third axis. Examples -------- >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((4,5,6)) >>> np.dstack((a,b)) array([[[1, 4], [2, 5], [3, 6]]]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[4],[5],[6]]) >>> np.dstack((a,b)) array([[[1, 4]], [[2, 5]], [[3, 6]]])
python
numpy/lib/_shape_base_impl.py
656
[ "tup" ]
false
2
7.68
numpy/numpy
31,054
numpy
false
stat
function stat(filename) { // Guard against internal bugs where a non-string filename is passed in by mistake. assert(typeof filename === 'string'); filename = path.toNamespacedPath(filename); if (statCache !== null) { const result = statCache.get(filename); if (result !== undefined) { return result; } } const result = internalFsBinding.internalModuleStat(filename); if (statCache !== null && result >= 0) { // Only set cache when `internalModuleStat(filename)` succeeds. statCache.set(filename, result); } return result; }
Get a path's properties, using an in-memory cache to minimize lookups. @param {string} filename Absolute path to the file @returns {number}
javascript
lib/internal/modules/cjs/loader.js
260
[ "filename" ]
false
5
6.24
nodejs/node
114,839
jsdoc
false
handleBindError
private <T> @Nullable T handleBindError(ConfigurationPropertyName name, Bindable<T> target, BindHandler handler, Context context, Exception error) { try { Object result = handler.onFailure(name, target, context, error); return context.getConverter().convert(result, target); } catch (Exception ex) { if (ex instanceof BindException bindException) { throw bindException; } throw new BindException(name, target, context.getConfigurationProperty(), ex); } }
Bind the specified target {@link Bindable} using this binder's {@link ConfigurationPropertySource property sources} or create a new instance using the type of the {@link Bindable} if the result of the binding is {@code null}. @param name the configuration property name to bind @param target the target bindable @param handler the bind handler (may be {@code null}) @param <T> the bound or created type @return the bound or created object @since 2.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
408
[ "name", "target", "handler", "context", "error" ]
T
true
3
7.92
spring-projects/spring-boot
79,428
javadoc
false
isAtLeastOneCondVarChanged
static bool isAtLeastOneCondVarChanged(const Decl *Func, const Stmt *LoopStmt, const Stmt *Cond, ASTContext *Context) { if (isVarThatIsPossiblyChanged(Func, LoopStmt, Cond, Context)) return true; return llvm::any_of(Cond->children(), [&](const Stmt *Child) { return Child && isAtLeastOneCondVarChanged(Func, LoopStmt, Child, Context); }); }
Return whether at least one variable of `Cond` changed in `LoopStmt`.
cpp
clang-tools-extra/clang-tidy/bugprone/InfiniteLoopCheck.cpp
117
[]
true
3
6.72
llvm/llvm-project
36,021
doxygen
false
visitAwaitExpression
function visitAwaitExpression(node: AwaitExpression): Expression { // do not downlevel a top-level await as it is module syntax... if (inTopLevelContext()) { return visitEachChild(node, visitor, context); } return setOriginalNode( setTextRange( factory.createYieldExpression( /*asteriskToken*/ undefined, visitNode(node.expression, visitor, isExpression), ), node, ), node, ); }
Visits an AwaitExpression node. This function will be called any time a ES2017 await expression is encountered. @param node The node to visit.
typescript
src/compiler/transformers/es2017.ts
394
[ "node" ]
true
2
7.04
microsoft/TypeScript
107,154
jsdoc
false
resolve
private List<ConfigDataResolutionResult> resolve(ConfigDataLocationResolverContext locationResolverContext, @Nullable Profiles profiles, List<ConfigDataLocation> locations) { List<ConfigDataResolutionResult> resolved = new ArrayList<>(locations.size()); for (ConfigDataLocation location : locations) { resolved.addAll(resolve(locationResolverContext, profiles, location)); } return Collections.unmodifiableList(resolved); }
Resolve and load the given list of locations, filtering any that have been previously loaded. @param activationContext the activation context @param locationResolverContext the location resolver context @param loaderContext the loader context @param locations the locations to resolve @return a map of the loaded locations and data
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataImporter.java
95
[ "locationResolverContext", "profiles", "locations" ]
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
toLongArray
public long[] toLongArray() { return bitSet.toLongArray(); }
Returns a new byte array containing all the bits in this bit set. <p> More precisely, if: </p> <ol> <li>{@code long[] longs = s.toLongArray();}</li> <li>then {@code longs.length == (s.length()+63)/64} and</li> <li>{@code s.get(n) == ((longs[n/64] & (1L<<(n%64))) != 0)}</li> <li>for all {@code n < 64 * longs.length}.</li> </ol> @return a byte array containing a little-endian representation of all the bits in this bit set
java
src/main/java/org/apache/commons/lang3/util/FluentBitSet.java
564
[]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
extract_weights
def extract_weights(mod: nn.Module) -> tuple[tuple[Tensor, ...], list[str]]: """ This function removes all the Parameters from the model and return them as a tuple as well as their original attribute names. The weights must be re-loaded with `load_weights` before the model can be used again. Note that this function modifies the model in place and after this call, mod.parameters() will be empty. """ orig_params = tuple(mod.parameters()) # Remove all the parameters in the model names = [] for name, p in list(mod.named_parameters()): _del_nested_attr(mod, name.split(".")) names.append(name) # Make params regular Tensors instead of nn.Parameter params = tuple(p.detach().requires_grad_() for p in orig_params) return params, names
This function removes all the Parameters from the model and return them as a tuple as well as their original attribute names. The weights must be re-loaded with `load_weights` before the model can be used again. Note that this function modifies the model in place and after this call, mod.parameters() will be empty.
python
benchmarks/functional_autograd_benchmark/utils.py
48
[ "mod" ]
tuple[tuple[Tensor, ...], list[str]]
true
2
6
pytorch/pytorch
96,034
unknown
false
handleTimeoutFailure
private void handleTimeoutFailure(long now, Throwable cause) { if (log.isDebugEnabled()) { log.debug("{} timed out at {} after {} attempt(s)", this, now, tries, new Exception(prettyPrintException(cause))); } if (cause instanceof TimeoutException) { handleFailure(cause); } else { handleFailure(new TimeoutException(this + " timed out at " + now + " after " + tries + " attempt(s)", cause)); } }
Handle a failure. <p> Depending on what the exception is and how many times we have already tried, we may choose to fail the Call, or retry it. It is important to print the stack traces here in some cases, since they are not necessarily preserved in ApiVersionException objects. @param now The current time in milliseconds. @param throwable The failure exception.
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
953
[ "now", "cause" ]
void
true
3
6.88
apache/kafka
31,560
javadoc
false
munchIfFull
private void munchIfFull() { if (buffer.remaining() < 8) { // buffer is full; not enough room for a primitive. We have at least one full chunk. munch(); } }
Processes any buffered complete chunks via {@code munch()} when fewer than 8 bytes of room remain in the buffer, i.e. when the buffer cannot accept another primitive value.
java
android/guava/src/com/google/common/hash/AbstractStreamingHasher.java
205
[]
void
true
2
6.88
google/guava
51,352
javadoc
false
checkName
/**
 * Looks up the child initializer registered under the given name.
 *
 * @param name the name to check
 * @return the initializer registered under this name
 * @throws NoSuchElementException if no child initializer has this name
 */
private BackgroundInitializer<?> checkName(final String name) {
    final BackgroundInitializer<?> child = initializers.get(name);
    if (child != null) {
        return child;
    }
    throw new NoSuchElementException("No child initializer with name " + name);
}
Checks whether an initializer with the given name exists. If not, throws an exception. If it exists, the associated child initializer is returned. @param name the name to check. @return the initializer with this name. @throws NoSuchElementException if the name is unknown.
java
src/main/java/org/apache/commons/lang3/concurrent/MultiBackgroundInitializer.java
142
[ "name" ]
true
2
8.08
apache/commons-lang
2,896
javadoc
false
initialize
@Override protected MultiBackgroundInitializerResults initialize() throws Exception { final Map<String, BackgroundInitializer<?>> inits; synchronized (this) { // create a snapshot to operate on inits = new HashMap<>(childInitializers); } // start the child initializers final ExecutorService exec = getActiveExecutor(); inits.values().forEach(bi -> { if (bi.getExternalExecutor() == null) { // share the executor service if necessary bi.setExternalExecutor(exec); } bi.start(); }); // collect the results final Map<String, Object> results = new HashMap<>(); final Map<String, ConcurrentException> excepts = new HashMap<>(); inits.forEach((k, v) -> { try { results.put(k, v.get()); } catch (final ConcurrentException cex) { excepts.put(k, cex); } }); return new MultiBackgroundInitializerResults(inits, results, excepts); }
Creates the results object. This implementation starts all child {@code BackgroundInitializer} objects. Then it collects their results and creates a {@link MultiBackgroundInitializerResults} object with this data. If a child initializer throws a checked exceptions, it is added to the results object. Unchecked exceptions are propagated. @return the results object. @throws Exception if an error occurs.
java
src/main/java/org/apache/commons/lang3/concurrent/MultiBackgroundInitializer.java
319
[]
MultiBackgroundInitializerResults
true
3
7.76
apache/commons-lang
2,896
javadoc
false
initMacSandbox
/**
 * Installs a macOS seatbelt ({@code sandbox(7)}) profile for this process.
 * The rules in {@code SANDBOX_RULES} are written to a temporary file, passed
 * to {@code sandbox_init()}, and the file is removed afterwards.
 *
 * @throws UncheckedIOException if the temporary rules file cannot be written
 * @throws UnsupportedOperationException if {@code sandbox_init()} rejects the
 *         profile; the OS-provided error message is included
 */
private void initMacSandbox() {
    // write rules to a temporary file, which will be passed to sandbox_init()
    Path rules;
    try {
        rules = createTempRulesFile();
        Files.write(rules, Collections.singleton(SANDBOX_RULES));
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    try {
        var errorRef = macLibc.newErrorReference();
        int ret = macLibc.sandbox_init(rules.toAbsolutePath().toString(), SANDBOX_NAMED, errorRef);
        // if sandbox_init() fails, add the message from the OS (e.g. syntax error) and free the buffer
        if (ret != 0) {
            RuntimeException e = new UnsupportedOperationException("sandbox_init(): " + errorRef.toString());
            // free the native error buffer before propagating
            macLibc.sandbox_free_error(errorRef);
            throw e;
        }
        logger.debug("OS X seatbelt initialization successful");
    } finally {
        // the rules file is only needed during sandbox_init(); always clean it up
        IOUtils.deleteFilesIgnoringExceptions(rules);
    }
}
Installs exec system call filtering on MacOS. <p> Two different methods of filtering are used. Since MacOS is BSD based, process creation is first restricted with {@code setrlimit(RLIMIT_NPROC)}. <p> Additionally, on Mac OS X Leopard or above, a custom {@code sandbox(7)} ("Seatbelt") profile is installed that denies the following rules: <ul> <li>{@code process-fork}</li> <li>{@code process-exec}</li> </ul> @see <a href="https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf"> * https://reverse.put.as/wp-content/uploads/2011/06/The-Apple-Sandbox-BHDC2011-Paper.pdf</a>
java
libs/native/src/main/java/org/elasticsearch/nativeaccess/MacNativeAccess.java
105
[]
void
true
3
6.24
elastic/elasticsearch
75,680
javadoc
false
asList
/**
 * Returns a fixed-size list view backed by the given array, similar to
 * {@link java.util.Arrays#asList}. An empty array yields the shared empty list.
 *
 * @param backingArray the array to back the list
 * @return a list view of the array
 */
public static List<Long> asList(long... backingArray) {
    return (backingArray.length == 0)
        ? Collections.emptyList()
        : new LongArrayAsList(backingArray);
}
Returns a fixed-size list backed by the specified array, similar to {@link Arrays#asList(Object[])}. The list supports {@link List#set(int, Object)}, but any attempt to set a value to {@code null} will result in a {@link NullPointerException}. <p>The returned list maintains the values, but not the identities, of {@code Long} objects written to or read from it. For example, whether {@code list.get(0) == list.get(0)} is true for the returned list is unspecified. <p>The returned list is serializable. <p><b>Note:</b> when possible, you should represent your data as an {@link ImmutableLongArray} instead, which has an {@link ImmutableLongArray#asList asList} view. @param backingArray the array to back the list @return a list view of the array
java
android/guava/src/com/google/common/primitives/Longs.java
709
[]
true
2
7.92
google/guava
51,352
javadoc
false
is_array_like
def is_array_like(obj: object) -> bool:
    """
    Check if the object is array-like.

    An object is array-like when it is list-like *and* exposes a ``dtype``
    attribute (e.g. ``np.ndarray``, ``pd.Series``, ``pd.Index``).

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_array_like : bool
        Whether `obj` has array-like properties.

    Examples
    --------
    >>> is_array_like(np.array([1, 2, 3]))
    True
    >>> is_array_like([1, 2, 3])
    False
    """
    if not is_list_like(obj):
        return False
    return hasattr(obj, "dtype")
Check if the object is array-like. For an object to be considered array-like, it must be list-like and have a `dtype` attribute. Parameters ---------- obj : The object to check Returns ------- is_array_like : bool Whether `obj` has array-like properties. Examples -------- >>> is_array_like(np.array([1, 2, 3])) True >>> is_array_like(pd.Series(["a", "b"])) True >>> is_array_like(pd.Index(["2016-01-01"])) True >>> is_array_like([1, 2, 3]) False >>> is_array_like(("a", "b")) False
python
pandas/core/dtypes/inference.py
227
[ "obj" ]
bool
true
2
8
pandas-dev/pandas
47,362
numpy
false
toChar
/**
 * Converts the String to a char using its first character, throwing an
 * exception on null or empty Strings.
 *
 * @param str the String to convert
 * @return the first character of the String
 * @throws NullPointerException if the String is null
 * @throws IllegalArgumentException if the String is empty
 */
public static char toChar(final String str) {
    Validate.notEmpty(str, "The String must not be empty");
    return str.charAt(0);
}
Converts the String to a char using the first character, throwing an exception on empty Strings. <pre> CharUtils.toChar("A") = 'A' CharUtils.toChar("BA") = 'B' CharUtils.toChar(null) throws IllegalArgumentException CharUtils.toChar("") throws IllegalArgumentException </pre> @param str the character to convert @return the char value of the first letter of the String @throws NullPointerException if the string is null @throws IllegalArgumentException if the String is empty
java
src/main/java/org/apache/commons/lang3/CharUtils.java
316
[ "str" ]
true
1
6.32
apache/commons-lang
2,896
javadoc
false
close
/**
 * Closes the underlying generator, translating any {@link IOException}
 * into an unchecked {@link IllegalStateException}.
 */
@Override
public void close() {
    try {
        generator.close();
    } catch (final IOException ex) {
        throw new IllegalStateException("Failed to close the XContentBuilder", ex);
    }
}
Closes the underlying content generator, translating any {@code IOException} into an unchecked {@code IllegalStateException}.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
1,286
[]
void
true
2
7.92
elastic/elasticsearch
75,680
javadoc
false
_insert_update_blklocs_and_blknos
def _insert_update_blklocs_and_blknos(self, loc) -> None: """ When inserting a new Block at location 'loc', we update our _blklocs and _blknos. """ # Accessing public blklocs ensures the public versions are initialized if loc == self.blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: # As of numpy 1.26.4, np.concatenate faster than np.append self._blklocs = np.concatenate([[0], self._blklocs]) self._blknos = np.concatenate([[len(self.blocks)], self._blknos]) else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) ) self._blklocs = new_blklocs self._blknos = new_blknos
When inserting a new Block at location 'loc', we update our _blklocs and _blknos.
python
pandas/core/internals/managers.py
1,559
[ "self", "loc" ]
None
true
4
6
pandas-dev/pandas
47,362
unknown
false
adapt
/**
 * Creates a {@link ConfigurationPropertyName} by adapting the given source,
 * splitting it into elements around {@code separator}. More lenient than
 * {@code of}: mixed case and '{@code _}' are allowed and other invalid
 * characters are stripped during parsing.
 *
 * @param name the name to parse
 * @param separator the separator used to split the name
 * @param elementValueProcessor an optional function to process element values
 * @return a {@link ConfigurationPropertyName}
 */
static ConfigurationPropertyName adapt(CharSequence name, char separator,
        @Nullable Function<CharSequence, CharSequence> elementValueProcessor) {
    Assert.notNull(name, "Name must not be null");
    if (name.isEmpty()) {
        return EMPTY;
    }
    Elements parsed = new ElementsParser(name, separator).parse(elementValueProcessor);
    return (parsed.getSize() == 0) ? EMPTY : new ConfigurationPropertyName(parsed);
}
Create a {@link ConfigurationPropertyName} by adapting the given source. The name is split into elements around the given {@code separator}. This method is more lenient than {@link #of} in that it allows mixed case names and '{@code _}' characters. Other invalid characters are stripped out during parsing. <p> The {@code elementValueProcessor} function may be used if additional processing is required on the extracted element values. @param name the name to parse @param separator the separator used to split the name @param elementValueProcessor a function to process element values @return a {@link ConfigurationPropertyName}
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
736
[ "name", "separator", "elementValueProcessor" ]
ConfigurationPropertyName
true
3
7.76
spring-projects/spring-boot
79,428
javadoc
false
get_loc
def get_loc(self, key) -> int | slice | np.ndarray: """ Get integer location, slice or boolean mask for requested label. The `get_loc` method is used to retrieve the integer index, a slice for slicing objects, or a boolean mask indicating the presence of the label in the `IntervalIndex`. Parameters ---------- key : label The value or range to find in the IntervalIndex. Returns ------- int if unique index, slice if monotonic index, else mask The position or positions found. This could be a single number, a range, or an array of true/false values indicating the position(s) of the label. See Also -------- IntervalIndex.get_indexer_non_unique : Compute indexer and mask for new index given the current index. Index.get_loc : Similar method in the base Index class. Examples -------- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) >>> index = pd.IntervalIndex([i1, i2]) >>> index.get_loc(1) 0 You can also supply a point inside an interval. >>> index.get_loc(1.5) 1 If a label is in several intervals, you get the locations of all the relevant intervals. >>> i3 = pd.Interval(0, 2) >>> overlapping_index = pd.IntervalIndex([i1, i2, i3]) >>> overlapping_index.get_loc(0.5) array([ True, False, True]) Only exact matches will be returned if an interval is provided. 
>>> index.get_loc(pd.Interval(0, 1)) 0 """ self._check_indexing_error(key) if isinstance(key, Interval): if self.closed != key.closed: raise KeyError(key) mask = (self.left == key.left) & (self.right == key.right) elif is_valid_na_for_dtype(key, self.dtype): mask = self.isna() else: # assume scalar op_left = le if self.closed_left else lt op_right = le if self.closed_right else lt try: mask = op_left(self.left, key) & op_right(key, self.right) except TypeError as err: # scalar is not comparable to II subtype --> invalid label raise KeyError(key) from err matches = mask.sum() if matches == 0: raise KeyError(key) if matches == 1: return mask.argmax() res = lib.maybe_booleans_to_slice(mask.view("u1")) if isinstance(res, slice) and res.stop is None: # TODO: DO this in maybe_booleans_to_slice? res = slice(res.start, len(self), res.step) return res
Get integer location, slice or boolean mask for requested label. The `get_loc` method is used to retrieve the integer index, a slice for slicing objects, or a boolean mask indicating the presence of the label in the `IntervalIndex`. Parameters ---------- key : label The value or range to find in the IntervalIndex. Returns ------- int if unique index, slice if monotonic index, else mask The position or positions found. This could be a single number, a range, or an array of true/false values indicating the position(s) of the label. See Also -------- IntervalIndex.get_indexer_non_unique : Compute indexer and mask for new index given the current index. Index.get_loc : Similar method in the base Index class. Examples -------- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) >>> index = pd.IntervalIndex([i1, i2]) >>> index.get_loc(1) 0 You can also supply a point inside an interval. >>> index.get_loc(1.5) 1 If a label is in several intervals, you get the locations of all the relevant intervals. >>> i3 = pd.Interval(0, 2) >>> overlapping_index = pd.IntervalIndex([i1, i2, i3]) >>> overlapping_index.get_loc(0.5) array([ True, False, True]) Only exact matches will be returned if an interval is provided. >>> index.get_loc(pd.Interval(0, 1)) 0
python
pandas/core/indexes/interval.py
742
[ "self", "key" ]
int | slice | np.ndarray
true
11
8.4
pandas-dev/pandas
47,362
numpy
false
emit_state_change_metric
def emit_state_change_metric(self, new_state: TaskInstanceState) -> None:
    """
    Send a time metric representing how much time a given state transition took.

    The previous state and metric name is deduced from the state the task was put in.

    :param new_state: The state that has just been set for this task.
        We do not use `self.state`, because sometimes the state is updated directly in the DB
        and not in the local TaskInstance object.
        Supported states: QUEUED and RUNNING
    """
    if self.end_date:
        # if the task has an end date, it means that this is not its first round.
        # we send the state transition time metric only on the first try, otherwise it gets more complex.
        return

    # Deduce the metric and the reference timestamp from the target state.
    if new_state == TaskInstanceState.RUNNING:
        metric_name = "queued_duration"
        previous_dttm = self.queued_dttm
    elif new_state == TaskInstanceState.QUEUED:
        metric_name = "scheduled_duration"
        previous_dttm = self.scheduled_dttm
    else:
        # Bug fix: this previously passed ("... %s", new_state) as two constructor
        # arguments, so the state was never interpolated into the message.
        raise NotImplementedError(f"no metric emission setup for state {new_state}")

    if previous_dttm is None:
        # this should not really happen except in tests or rare cases,
        # but we don't want to create errors just for a metric, so we just skip it
        self.log.warning(
            "cannot record %s for task %s because previous state change time has not been saved",
            metric_name,
            self.task_id,
        )
        return
    timing = timezone.utcnow() - previous_dttm

    # send metric twice, once (legacy) with tags in the name and once with tags as tags
    Stats.timing(f"dag.{self.dag_id}.{self.task_id}.{metric_name}", timing)
    Stats.timing(
        f"task.{metric_name}",
        timing,
        tags={"task_id": self.task_id, "dag_id": self.dag_id, "queue": self.queue},
    )
Send a time metric representing how much time a given state transition took. The previous state and metric name is deduced from the state the task was put in. :param new_state: The state that has just been set for this task. We do not use `self.state`, because sometimes the state is updated directly in the DB and not in the local TaskInstance object. Supported states: QUEUED and RUNNING
python
airflow-core/src/airflow/models/taskinstance.py
1,201
[ "self", "new_state" ]
None
true
7
7.04
apache/airflow
43,597
sphinx
false
exists
def exists(self, path):
    """
    Test if path exists.

    Checks, in order: a local file, a previously downloaded copy in the
    `DataSource` directory, and finally a reachable remote URL.

    Parameters
    ----------
    path : str or pathlib.Path
        Can be a local file or a remote URL.

    Returns
    -------
    out : bool
        True if `path` exists.

    Notes
    -----
    For a URL, True means it is either cached locally or currently
    accessible remotely; the two cases are not distinguished.
    """
    # Local path wins outright.
    if os.path.exists(path):
        return True

    # urllib is imported lazily because it is slow to import and accounts
    # for a significant fraction of numpy's total import time.
    from urllib.error import URLError
    from urllib.request import urlopen

    # Cached (previously downloaded) copy?
    if os.path.exists(self.abspath(path)):
        return True

    # Otherwise only a valid, reachable remote URL counts.
    if not self._isurl(path):
        return False
    try:
        netfile = urlopen(path)
        netfile.close()
        del netfile
        return True
    except URLError:
        return False
Test if path exists. Test if `path` exists as (and in this order): - a local file. - a remote URL that has been downloaded and stored locally in the `DataSource` directory. - a remote URL that has not been downloaded, but is valid and accessible. Parameters ---------- path : str or pathlib.Path Can be a local file or a remote URL. Returns ------- out : bool True if `path` exists. Notes ----- When `path` is an URL, `exists` will return True if it's either stored locally in the `DataSource` directory, or is a valid remote URL. `DataSource` does not discriminate between the two, the file is accessible if it exists in either location.
python
numpy/lib/_datasource.py
427
[ "self", "path" ]
false
4
6.24
numpy/numpy
31,054
numpy
false
notAvailable
/**
 * Indicates something is not available. For example {@code notAvailable("time")}
 * results in the message "time is not available".
 *
 * @param item the item that is not available
 * @return a built {@link ConditionMessage}
 */
public ConditionMessage notAvailable(String item) {
    String reason = item + " is not available";
    return because(reason);
}
Indicates something is not available. For example {@code notAvailable("time")} results in the message "time is not available". @param item the item that is not available @return a built {@link ConditionMessage}
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
291
[ "item" ]
ConditionMessage
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
isServletEnvironment
/**
 * Determines whether the given conversion type is a configurable web
 * environment implementation visible to the given class loader.
 *
 * @param conversionType the candidate environment type
 * @param classLoader the class loader used to resolve the web environment class
 * @return {@code true} if the type is a servlet web environment; {@code false}
 *         if it is not, or if the web environment class cannot be loaded
 */
private boolean isServletEnvironment(Class<?> conversionType, ClassLoader classLoader) {
    try {
        return ClassUtils.forName(CONFIGURABLE_WEB_ENVIRONMENT_CLASS, classLoader)
                .isAssignableFrom(conversionType);
    } catch (Throwable ex) {
        return false;
    }
}
Determines whether the given conversion type is a configurable web environment implementation visible to the given class loader. @param conversionType the candidate environment type @param classLoader the class loader used to resolve the web environment class @return whether the type is a servlet web environment; {@code false} if the class cannot be loaded
java
core/spring-boot/src/main/java/org/springframework/boot/EnvironmentConverter.java
110
[ "conversionType", "classLoader" ]
true
2
7.6
spring-projects/spring-boot
79,428
javadoc
false
when
/**
 * Returns a filtered version of this source that won't map values failing
 * the given predicate (combined with any existing predicate).
 *
 * @param predicate the predicate used to filter values
 * @return a new filtered source instance
 */
public Source<T> when(Predicate<T> predicate) {
    Assert.notNull(predicate, "'predicate' must not be null");
    Predicate<T> combined = this.predicate.and(predicate);
    return new Source<>(this.supplier, combined);
}
Return a filtered version of the source that won't map values that don't match the given predicate. @param predicate the predicate used to filter values @return a new filtered source instance
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
277
[ "predicate" ]
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
offload_activation_fw
def offload_activation_fw(graph: fx.Graph) -> None:
    """
    Insert CPU offload operations in the forward pass graph.

    Offload operations are placed after the last effective use of each tensor
    marked for offloading. This ensures the tensor is no longer needed on the
    GPU before transferring it to CPU memory.

    NOTE: An alternative approach would offload tensors immediately after
    generation to maximize compute-communication overlap. However, this
    requires additional synchronization to ensure tensor deletion (which
    occurs on the default stream) waits for the asynchronous offload operation
    to complete. This would necessitate more complex tracking to separate
    operation scheduling from memory cleanup.

    Args:
        graph: The forward graph to modify
    """
    op_types: OpTypes = get_default_op_list()

    def find_all_effective_users(node: fx.Node) -> OrderedSet[fx.Node]:
        """
        Find all effective users of a node, where view ops extend the lifetime
        of the original node. If a user is a view op, recursively find users of
        the view.
        """
        effective_users: OrderedSet[fx.Node] = OrderedSet()
        for user in node.users:
            if user.op == "output":
                continue
            effective_users.add(user)
            if op_types.is_view(user):
                effective_users.update(find_all_effective_users(user))
        return effective_users

    output_node: fx.Node = graph.find_nodes(op="output")[0]
    # pyrefly: ignore [bad-assignment]
    fwd_outputs: tuple[fx.Node, ...] = output_node.args[0]

    node_to_offload: dict[fx.Node, fx.Node] = {}
    node_to_index: dict[fx.Node, int] = {
        node: idx for idx, node in enumerate(graph.nodes)
    }

    for node in fwd_outputs:
        if node.meta.get("saved_for_offloading", False) is False:
            continue

        # Find the insertion point: the last effective use of the tensor.
        # (Fix: the original computed find_all_effective_users(node) twice —
        # once in an annotated assignment and again in the walrus condition.)
        all_effective_users = find_all_effective_users(node)
        if all_effective_users:
            last_user = max(all_effective_users, key=lambda n: node_to_index[n])
        else:
            # No users besides the graph output: insert right after the node.
            last_user = node

        # Insert the CPU offload operation after the last user.
        with graph.inserting_after(last_user):
            cpu_node: fx.Node = graph.call_function(
                torch.ops.prims.device_put.default,
                args=(node, torch.device("cpu")),
                kwargs={"non_blocking": True},
                name=CPU_OFFLOAD_PREFIX + str(node.name),
            )
            cpu_node.meta["val"] = node.meta["val"].to(torch.device("cpu"))
            cpu_node.meta["tensor_meta"] = extract_tensor_metadata(cpu_node.meta["val"])
            node_to_offload[node] = cpu_node

    # Rewire the graph output to return the offloaded (CPU) nodes.
    output_node.update_arg(
        0, tuple(node_to_offload.get(node, node) for node in fwd_outputs)
    )
Insert CPU offload operations in the forward pass graph. Offload operations are placed after the last effective use of each tensor marked for offloading. This ensures the tensor is no longer needed on the GPU before transferring it to CPU memory. NOTE: An alternative approach would offload tensors immediately after generation to maximize compute-communication overlap. However, this requires additional synchronization to ensure tensor deletion (which occurs on the default stream) waits for the asynchronous offload operation to complete. This would necessitate more complex tracking to separate operation scheduling from memory cleanup. Args: graph: The forward graph to modify
python
torch/_functorch/_activation_offloading/activation_offloading.py
75
[ "graph" ]
None
true
8
6.64
pytorch/pytorch
96,034
google
false
parse
private static long parse(final String initialInput, final String normalized, final String suffix, String settingName) { final String s = normalized.substring(0, normalized.length() - suffix.length()).trim(); try { final long value = Long.parseLong(s); if (value < -1) { // -1 is magic, but reject any other negative values throw new IllegalArgumentException( "failed to parse setting [" + settingName + "] with value [" + initialInput + "] as a time value: negative durations are not supported" ); } return value; } catch (final NumberFormatException e) { try { @SuppressWarnings("unused") final double ignored = Double.parseDouble(s); throw new IllegalArgumentException("failed to parse [" + initialInput + "], fractional time values are not supported", e); } catch (final NumberFormatException ignored) { throw new IllegalArgumentException("failed to parse [" + initialInput + "]", e); } } }
Parses the numeric prefix of {@code normalized} (with the given {@code suffix} stripped) as a long duration value. {@code -1} is accepted as a sentinel; other negative values and fractional values are rejected. @param initialInput the original raw input, used in error messages. @param normalized the normalized input still carrying the suffix. @param suffix the unit suffix to strip before parsing. @param settingName the setting name, included in negative-duration error messages. @return the parsed long value. @throws IllegalArgumentException if the value is negative (below -1), fractional, or otherwise unparseable.
java
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
409
[ "initialInput", "normalized", "suffix", "settingName" ]
true
4
8.24
elastic/elasticsearch
75,680
javadoc
false
disconnected
public void disconnected(String id, long now) { NodeConnectionState nodeState = nodeState(id); nodeState.lastConnectAttemptMs = now; updateReconnectBackoff(nodeState); if (nodeState.state == ConnectionState.CONNECTING) { updateConnectionSetupTimeout(nodeState); connectingNodes.remove(id); } else { resetConnectionSetupTimeout(nodeState); if (nodeState.state.isConnected()) { // If a connection had previously been established, clear the addresses to trigger a new DNS resolution // because the node IPs may have changed nodeState.clearAddresses(); } } nodeState.state = ConnectionState.DISCONNECTED; }
Enter the disconnected state for the given node. @param id the connection we have disconnected @param now the current time in ms
java
clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
181
[ "id", "now" ]
void
true
3
7.2
apache/kafka
31,560
javadoc
false
dateTimeFormat
/**
 * Configures the date-time format using the given {@code pattern}, mapping
 * ISO / ISO-offset patterns to the corresponding predefined formatters.
 *
 * @param pattern the pattern for formatting date-times
 * @return {@code this} for chained method invocation
 */
public DateTimeFormatters dateTimeFormat(@Nullable String pattern) {
    if (isIso(pattern)) {
        this.dateTimeFormatter = DateTimeFormatter.ISO_LOCAL_DATE_TIME;
    }
    else if (isIsoOffset(pattern)) {
        this.dateTimeFormatter = DateTimeFormatter.ISO_OFFSET_DATE_TIME;
    }
    else {
        this.dateTimeFormatter = formatter(pattern);
    }
    return this;
}
Configures the date-time format using the given {@code pattern}. @param pattern the pattern for formatting date-times @return {@code this} for chained method invocation
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/format/DateTimeFormatters.java
76
[ "pattern" ]
DateTimeFormatters
true
3
7.76
spring-projects/spring-boot
79,428
javadoc
false
serializeParams
/**
 * Converts params into a URL-encoded query string (without URI-encoding
 * the values — delegates to toUrlParams with encoding disabled).
 *
 * @param params data to serialize
 * @returns A URL-encoded string representing the provided data.
 */
function serializeParams(params: unknown): string {
  return toUrlParams(params, false);
}
Converts params into a URL-encoded query string. @param params data to serialize @returns A URL-encoded string representing the provided data.
typescript
packages/grafana-data/src/utils/url.ts
106
[ "params" ]
true
1
6.32
grafana/grafana
71,362
jsdoc
false
create_pidlock
def create_pidlock(pidfile):
    """Create and verify pidfile.

    If the pidfile already exists the program exits with an error message,
    however if the process it refers to isn't running anymore, the pidfile
    is deleted and the program continues.

    This function automatically installs an :mod:`atexit` handler that
    releases the lock at exit; call :func:`_create_pidlock` directly to
    skip that.

    Returns:
        Pidfile: used to manage the lock.

    Example:
        >>> pidlock = create_pidlock('/var/run/app.pid')
    """
    lock = _create_pidlock(pidfile)
    atexit.register(lock.release)
    return lock
Create and verify pidfile. If the pidfile already exists the program exits with an error message, however if the process it refers to isn't running anymore, the pidfile is deleted and the program continues. This function will automatically install an :mod:`atexit` handler to release the lock at exit, you can skip this by calling :func:`_create_pidlock` instead. Returns: Pidfile: used to manage the lock. Example: >>> pidlock = create_pidlock('/var/run/app.pid')
python
celery/platforms.py
244
[ "pidfile" ]
false
1
7.52
celery/celery
27,741
unknown
false
isExcludedFromDependencyCheck
public static boolean isExcludedFromDependencyCheck(PropertyDescriptor pd) { Method wm = pd.getWriteMethod(); if (wm == null) { return false; } if (!wm.getDeclaringClass().getName().contains("$$")) { // Not a CGLIB method so it's OK. return false; } // It was declared by CGLIB, but we might still want to autowire it // if it was actually declared by the superclass. Class<?> superclass = wm.getDeclaringClass().getSuperclass(); return !ClassUtils.hasMethod(superclass, wm); }
Determine whether the given bean property is excluded from dependency checks. <p>This implementation excludes properties defined by CGLIB. @param pd the PropertyDescriptor of the bean property @return whether the bean property is excluded
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AutowireUtils.java
92
[ "pd" ]
true
3
7.2
spring-projects/spring-framework
59,386
javadoc
false
findIndex
/**
 * Like `_.find` except that it returns the index of the first element
 * `predicate` returns truthy for instead of the element itself.
 *
 * @param {Array} array The array to inspect.
 * @param {Function} [predicate=_.identity] The function invoked per iteration.
 * @param {number} [fromIndex=0] The index to search from (negative counts
 *  back from the end, clamped to 0).
 * @returns {number} Returns the index of the found element, else `-1`.
 */
function findIndex(array, predicate, fromIndex) {
  var size = array == null ? 0 : array.length;
  if (!size) {
    return -1;
  }
  var start = fromIndex == null ? 0 : toInteger(fromIndex);
  if (start < 0) {
    start = nativeMax(size + start, 0);
  }
  return baseFindIndex(array, getIteratee(predicate, 3), start);
}
This method is like `_.find` except that it returns the index of the first element `predicate` returns truthy for instead of the element itself. @static @memberOf _ @since 1.1.0 @category Array @param {Array} array The array to inspect. @param {Function} [predicate=_.identity] The function invoked per iteration. @param {number} [fromIndex=0] The index to search from. @returns {number} Returns the index of the found element, else `-1`. @example var users = [ { 'user': 'barney', 'active': false }, { 'user': 'fred', 'active': false }, { 'user': 'pebbles', 'active': true } ]; _.findIndex(users, function(o) { return o.user == 'barney'; }); // => 0 // The `_.matches` iteratee shorthand. _.findIndex(users, { 'user': 'fred', 'active': false }); // => 1 // The `_.matchesProperty` iteratee shorthand. _.findIndex(users, ['active', false]); // => 0 // The `_.property` iteratee shorthand. _.findIndex(users, 'active'); // => 2
javascript
lodash.js
7,352
[ "array", "predicate", "fromIndex" ]
false
5
7.52
lodash/lodash
61,490
jsdoc
false