function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
get_git_log_command
def get_git_log_command( verbose: bool, from_commit: str | None = None, to_commit: str | None = None ) -> list[str]: """ Get git command to run for the current repo from the current folder (which is the package folder). :param verbose: whether to print verbose info while getting the command :param from_commit: if present - base commit from which to start the log from :param to_commit: if present - final commit which should be the start of the log :return: git command to run """ git_cmd = [ "git", "log", "--pretty=format:%H %h %cd %s", "--date=short", ] if from_commit and to_commit: git_cmd.append(f"{from_commit}...{to_commit}") elif from_commit: git_cmd.append(from_commit) git_cmd.extend(["--", "."]) if verbose: console.print(f"Command to run: '{' '.join(git_cmd)}'") return git_cmd
Get git command to run for the current repo from the current folder (which is the package folder). :param verbose: whether to print verbose info while getting the command :param from_commit: if present - base commit from which to start the log from :param to_commit: if present - final commit which should be the start of the log :return: git command to run
python
dev/assign_cherry_picked_prs_with_milestone.py
170
[ "verbose", "from_commit", "to_commit" ]
list[str]
true
5
8.08
apache/airflow
43,597
sphinx
false
wait_for_crawler_completion
def wait_for_crawler_completion(self, crawler_name: str, poll_interval: int = 5) -> str: """ Wait until Glue crawler completes; returns the status of the latest crawl or raises AirflowException. :param crawler_name: unique crawler name per AWS account :param poll_interval: Time (in seconds) to wait between two consecutive calls to check crawler status :return: Crawler's status """ self.get_waiter("crawler_ready").wait(Name=crawler_name, WaiterConfig={"Delay": poll_interval}) # query one extra time to log some info crawler = self.get_crawler(crawler_name) self.log.info("crawler_config: %s", crawler) crawler_status = crawler["LastCrawl"]["Status"] metrics_response = self.glue_client.get_crawler_metrics(CrawlerNameList=[crawler_name]) metrics = metrics_response["CrawlerMetricsList"][0] self.log.info("Status: %s", crawler_status) self.log.info("Last Runtime Duration (seconds): %s", metrics["LastRuntimeSeconds"]) self.log.info("Median Runtime Duration (seconds): %s", metrics["MedianRuntimeSeconds"]) self.log.info("Tables Created: %s", metrics["TablesCreated"]) self.log.info("Tables Updated: %s", metrics["TablesUpdated"]) self.log.info("Tables Deleted: %s", metrics["TablesDeleted"]) return crawler_status
Wait until Glue crawler completes; returns the status of the latest crawl or raises AirflowException. :param crawler_name: unique crawler name per AWS account :param poll_interval: Time (in seconds) to wait between two consecutive calls to check crawler status :return: Crawler's status
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_crawler.py
173
[ "self", "crawler_name", "poll_interval" ]
str
true
1
6.4
apache/airflow
43,597
sphinx
false
newProxy
<T> T newProxy(T target, Class<T> interfaceType, long timeoutDuration, TimeUnit timeoutUnit);
Returns an instance of {@code interfaceType} that delegates all method calls to the {@code target} object, enforcing the specified time limit on each call. This time-limited delegation is also performed for calls to {@link Object#equals}, {@link Object#hashCode}, and {@link Object#toString}. <p>If the target method call finishes before the limit is reached, the return value or exception is propagated to the caller exactly as-is. If, on the other hand, the time limit is reached, the proxy will attempt to abort the call to the target, and will throw an {@link UncheckedTimeoutException} to the caller. <p>It is important to note that the primary purpose of the proxy object is to return control to the caller when the timeout elapses; aborting the target method call is of secondary concern. The particular nature and strength of the guarantees made by the proxy is implementation-dependent. However, it is important that each of the methods on the target object behaves appropriately when its thread is interrupted. <p>For example, to return the value of {@code target.someMethod()}, but substitute {@code DEFAULT_VALUE} if this method call takes over 50 ms, you can use this code: <pre> TimeLimiter limiter = . . .; TargetType proxy = limiter.newProxy( target, TargetType.class, 50, TimeUnit.MILLISECONDS); try { return proxy.someMethod(); } catch (UncheckedTimeoutException e) { return DEFAULT_VALUE; } </pre> @param target the object to proxy @param interfaceType the interface you wish the returned proxy to implement @param timeoutDuration with timeoutUnit, the maximum length of time that callers are willing to wait on each method call to the proxy @param timeoutUnit with timeoutDuration, the maximum length of time that callers are willing to wait on each method call to the proxy @return a time-limiting proxy @throws IllegalArgumentException if {@code interfaceType} is a regular class, enum, or annotation type, rather than an interface
java
android/guava/src/com/google/common/util/concurrent/TimeLimiter.java
81
[ "target", "interfaceType", "timeoutDuration", "timeoutUnit" ]
T
true
1
6.32
google/guava
51,352
javadoc
false
argmax
def argmax( self, axis: AxisInt | None = None, skipna: bool = True, *args, **kwargs ) -> int: """ Return int position of the {value} value in the Series. If the {op}imum is achieved in multiple locations, the first row position is returned. Parameters ---------- axis : {{None}} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` and there is an NA value, this method will raise a ``ValueError``. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- int Row position of the {op}imum value. See Also -------- Series.arg{op} : Return position of the {op}imum value. Series.arg{oppose} : Return position of the {oppose}imum value. numpy.ndarray.arg{op} : Equivalent method for numpy arrays. Series.idxmax : Return index label of the maximum values. Series.idxmin : Return index label of the minimum values. Examples -------- Consider dataset containing cereal calories >>> s = pd.Series( ... [100.0, 110.0, 120.0, 110.0], ... index=[ ... "Corn Flakes", ... "Almond Delight", ... "Cinnamon Toast Crunch", ... "Cocoa Puff", ... ], ... ) >>> s Corn Flakes 100.0 Almond Delight 110.0 Cinnamon Toast Crunch 120.0 Cocoa Puff 110.0 dtype: float64 >>> s.argmax() np.int64(2) >>> s.argmin() np.int64(0) The maximum cereal calories is the third element and the minimum cereal calories is the first element, since series is zero-indexed. """ delegate = self._values nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): return delegate.argmax(skipna=skipna) else: result = nanops.nanargmax(delegate, skipna=skipna) # error: Incompatible return value type (got "Union[int, ndarray]", expected # "int") return result # type: ignore[return-value]
Return int position of the {value} value in the Series. If the {op}imum is achieved in multiple locations, the first row position is returned. Parameters ---------- axis : {{None}} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, or if ``skipna=False`` and there is an NA value, this method will raise a ``ValueError``. *args, **kwargs Additional arguments and keywords for compatibility with NumPy. Returns ------- int Row position of the {op}imum value. See Also -------- Series.arg{op} : Return position of the {op}imum value. Series.arg{oppose} : Return position of the {oppose}imum value. numpy.ndarray.arg{op} : Equivalent method for numpy arrays. Series.idxmax : Return index label of the maximum values. Series.idxmin : Return index label of the minimum values. Examples -------- Consider dataset containing cereal calories >>> s = pd.Series( ... [100.0, 110.0, 120.0, 110.0], ... index=[ ... "Corn Flakes", ... "Almond Delight", ... "Cinnamon Toast Crunch", ... "Cocoa Puff", ... ], ... ) >>> s Corn Flakes 100.0 Almond Delight 110.0 Cinnamon Toast Crunch 120.0 Cocoa Puff 110.0 dtype: float64 >>> s.argmax() np.int64(2) >>> s.argmin() np.int64(0) The maximum cereal calories is the third element and the minimum cereal calories is the first element, since series is zero-indexed.
python
pandas/core/base.py
754
[ "self", "axis", "skipna" ]
int
true
3
8.4
pandas-dev/pandas
47,362
numpy
false
unifyCollisionChars
public static String unifyCollisionChars(String topic) { return topic.replace('.', '_'); }
Unify topic name with a period ('.') or underscore ('_'), this is only used to check collision and will not be used to really change topic name. @param topic A topic to unify @return A unified topic name
java
clients/src/main/java/org/apache/kafka/common/internals/Topic.java
95
[ "topic" ]
String
true
1
6.96
apache/kafka
31,560
javadoc
false
refreshCheckDelayElapsed
private boolean refreshCheckDelayElapsed() { if (this.refreshCheckDelay < 0) { return false; } long currentTimeMillis = System.currentTimeMillis(); if (this.lastRefreshCheck < 0 || currentTimeMillis - this.lastRefreshCheck > this.refreshCheckDelay) { // Going to perform a refresh check - update the timestamp. this.lastRefreshCheck = currentTimeMillis; logger.debug("Refresh check delay elapsed - checking whether refresh is required"); return true; } return false; }
Set the delay between refresh checks, in milliseconds. Default is -1, indicating no refresh checks at all. <p>Note that an actual refresh will only happen when {@link #requiresRefresh()} returns {@code true}.
java
spring-aop/src/main/java/org/springframework/aop/target/dynamic/AbstractRefreshableTargetSource.java
107
[]
true
4
6.88
spring-projects/spring-framework
59,386
javadoc
false
findCause
@SuppressWarnings("unchecked") protected final <E extends Throwable> @Nullable E findCause(@Nullable Throwable failure, Class<E> type) { while (failure != null) { if (type.isInstance(failure)) { return (E) failure; } failure = failure.getCause(); } return null; }
Return the cause type being handled by the analyzer. By default the class generic is used. @return the cause type
java
core/spring-boot/src/main/java/org/springframework/boot/diagnostics/AbstractFailureAnalyzer.java
63
[ "failure", "type" ]
E
true
3
7.04
spring-projects/spring-boot
79,428
javadoc
false
callHasExpired
boolean callHasExpired(Call call) { int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs); if (remainingMs < 0) return true; nextTimeoutMs = Math.min(nextTimeoutMs, remainingMs); return false; }
Check whether a call should be timed out. The remaining milliseconds until the next timeout will be updated. @param call The call. @return True if the call should be timed out.
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
1,068
[ "call" ]
true
2
8.24
apache/kafka
31,560
javadoc
false
equals
@Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } KafkaChannel that = (KafkaChannel) o; return id.equals(that.id); }
@return true if underlying transport has bytes remaining to be read from any underlying intermediate buffers.
java
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
479
[ "o" ]
true
4
6.72
apache/kafka
31,560
javadoc
false
contains
public static boolean contains(int[] array, int target) { for (int value : array) { if (value == target) { return true; } } return false; }
Returns {@code true} if {@code target} is present as an element anywhere in {@code array}. @param array an array of {@code int} values, possibly empty @param target a primitive {@code int} value @return {@code true} if {@code array[i] == target} for some value of {@code i}
java
android/guava/src/com/google/common/primitives/Ints.java
141
[ "array", "target" ]
true
2
8.08
google/guava
51,352
javadoc
false
contains
public static boolean contains(final short[] array, final short valueToFind) { return indexOf(array, valueToFind) != INDEX_NOT_FOUND; }
Checks if the value is in the given array. <p> The method returns {@code false} if a {@code null} array is passed in. </p> <p> If the {@code array} elements you are searching implement {@link Comparator}, consider whether it is worth using {@link Arrays#sort(short[])} and {@link Arrays#binarySearch(short[], short)}. </p> @param array the array to search. @param valueToFind the value to find. @return {@code true} if the array contains the object.
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
1,742
[ "array", "valueToFind" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
trimEnd
function trimEnd(string, chars, guard) { string = toString(string); if (string && (guard || chars === undefined)) { return string.slice(0, trimmedEndIndex(string) + 1); } if (!string || !(chars = baseToString(chars))) { return string; } var strSymbols = stringToArray(string), end = charsEndIndex(strSymbols, stringToArray(chars)) + 1; return castSlice(strSymbols, 0, end).join(''); }
Removes trailing whitespace or specified characters from `string`. @static @memberOf _ @since 4.0.0 @category String @param {string} [string=''] The string to trim. @param {string} [chars=whitespace] The characters to trim. @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. @returns {string} Returns the trimmed string. @example _.trimEnd(' abc '); // => ' abc' _.trimEnd('-_-abc-_-', '_-'); // => '-_-abc'
javascript
lodash.js
15,101
[ "string", "chars", "guard" ]
false
6
7.36
lodash/lodash
61,490
jsdoc
false
toUnescapedString
public String toUnescapedString() { return toString(false); }
Return a string representation of the path without any escaping. @return the unescaped string representation
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
827
[]
String
true
1
6.32
spring-projects/spring-boot
79,428
javadoc
false
applyInvalidArgumentTypeError
function applyInvalidArgumentTypeError(error: InvalidArgumentTypeError, args: ArgumentsRenderingTree) { const argName = error.argument.name const selection = args.arguments.getDeepSubSelectionValue(error.selectionPath)?.asObject() if (selection) { selection.getDeepFieldValue(error.argumentPath)?.markAsError() } args.addErrorMessage((colors) => { const expected = joinWithPreposition( 'or', error.argument.typeNames.map((type) => colors.green(type)), ) // TODO: print value return `Argument \`${colors.bold(argName)}\`: Invalid value provided. Expected ${expected}, provided ${colors.red( error.inferredType, )}.` }) }
Given the validation error and arguments rendering tree, applies corresponding formatting to an error tree and adds all relevant messages. @param error @param args
typescript
packages/client/src/runtime/core/errorRendering/applyValidationError.ts
418
[ "error", "args" ]
false
2
6.08
prisma/prisma
44,834
jsdoc
false
estimator_checks_generator
def estimator_checks_generator( estimator, *, legacy: bool = True, expected_failed_checks: dict[str, str] | None = None, mark: Literal["xfail", "skip", None] = None, xfail_strict: bool | None = None, ): """Iteratively yield all check callables for an estimator. This function is used by :func:`~sklearn.utils.estimator_checks.parametrize_with_checks` and :func:`~sklearn.utils.estimator_checks.check_estimator` to yield all check callables for an estimator. In most cases, these functions should be used instead. When implementing a custom equivalent, please refer to their source code to understand how `estimator_checks_generator` is intended to be used. .. versionadded:: 1.6 Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. legacy : bool, default=True Whether to include legacy checks. Over time we remove checks from this category and move them into their specific category. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. mark : {"xfail", "skip"} or None, default=None Whether to mark the checks that are expected to fail as xfail(`pytest.mark.xfail`) or skip. Marking a test as "skip" is done via wrapping the check in a function that raises a :class:`~sklearn.exceptions.SkipTest` exception. xfail_strict : bool, default=None Whether to run checks in xfail strict mode. This option is ignored unless `mark="xfail"`. If True, checks that are expected to fail but actually pass will lead to a test failure. If False, unexpectedly passing tests will be marked as xpass. If None, the default pytest behavior is used. .. versionadded:: 1.8 Returns ------- estimator_checks_generator : generator Generator that yields (estimator, check) tuples. 
""" if mark == "xfail": import pytest else: pytest = None # type: ignore[assignment] name = type(estimator).__name__ # First check that the estimator is cloneable which is needed for the rest # of the checks to run yield estimator, partial(check_estimator_cloneable, name) for check in _yield_all_checks(estimator, legacy=legacy): check_with_name = partial(check, name) for check_instance in _yield_instances_for_check(check, estimator): yield _maybe_mark( check_instance, check_with_name, expected_failed_checks=expected_failed_checks, mark=mark, pytest=pytest, xfail_strict=xfail_strict, )
Iteratively yield all check callables for an estimator. This function is used by :func:`~sklearn.utils.estimator_checks.parametrize_with_checks` and :func:`~sklearn.utils.estimator_checks.check_estimator` to yield all check callables for an estimator. In most cases, these functions should be used instead. When implementing a custom equivalent, please refer to their source code to understand how `estimator_checks_generator` is intended to be used. .. versionadded:: 1.6 Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. legacy : bool, default=True Whether to include legacy checks. Over time we remove checks from this category and move them into their specific category. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. mark : {"xfail", "skip"} or None, default=None Whether to mark the checks that are expected to fail as xfail(`pytest.mark.xfail`) or skip. Marking a test as "skip" is done via wrapping the check in a function that raises a :class:`~sklearn.exceptions.SkipTest` exception. xfail_strict : bool, default=None Whether to run checks in xfail strict mode. This option is ignored unless `mark="xfail"`. If True, checks that are expected to fail but actually pass will lead to a test failure. If False, unexpectedly passing tests will be marked as xpass. If None, the default pytest behavior is used. .. versionadded:: 1.8 Returns ------- estimator_checks_generator : generator Generator that yields (estimator, check) tuples.
python
sklearn/utils/estimator_checks.py
513
[ "estimator", "legacy", "expected_failed_checks", "mark", "xfail_strict" ]
true
5
6.48
scikit-learn/scikit-learn
64,340
numpy
false
partial_fit
def partial_fit(self, X, y=None, W=None, H=None): """Update the model using the data in `X` as a mini-batch. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once (see :ref:`scaling_strategies`). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data matrix to be decomposed. y : Ignored Not used, present here for API consistency by convention. W : array-like of shape (n_samples, n_components), default=None If `init='custom'`, it is used as initial guess for the solution. Only used for the first call to `partial_fit`. H : array-like of shape (n_components, n_features), default=None If `init='custom'`, it is used as initial guess for the solution. Only used for the first call to `partial_fit`. Returns ------- self Returns the instance itself. """ has_components = hasattr(self, "components_") X = validate_data( self, X, accept_sparse=("csr", "csc"), dtype=[np.float64, np.float32], reset=not has_components, ) if not has_components: # This instance has not been fitted yet (fit or partial_fit) self._check_params(X) _, H = self._check_w_h(X, W=W, H=H, update_H=True) self._components_numerator = H.copy() self._components_denominator = np.ones(H.shape, dtype=H.dtype) self.n_steps_ = 0 else: H = self.components_ self._minibatch_step(X, None, H, update_H=True) self.n_components_ = H.shape[0] self.components_ = H self.n_steps_ += 1 return self
Update the model using the data in `X` as a mini-batch. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once (see :ref:`scaling_strategies`). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data matrix to be decomposed. y : Ignored Not used, present here for API consistency by convention. W : array-like of shape (n_samples, n_components), default=None If `init='custom'`, it is used as initial guess for the solution. Only used for the first call to `partial_fit`. H : array-like of shape (n_components, n_features), default=None If `init='custom'`, it is used as initial guess for the solution. Only used for the first call to `partial_fit`. Returns ------- self Returns the instance itself.
python
sklearn/decomposition/_nmf.py
2,343
[ "self", "X", "y", "W", "H" ]
false
3
6.08
scikit-learn/scikit-learn
64,340
numpy
false
transformAndEmitBlock
function transformAndEmitBlock(node: Block): void { if (containsYield(node)) { transformAndEmitStatements(node.statements); } else { emitStatement(visitNode(node, visitor, isStatement)); } }
Visits an ElementAccessExpression that contains a YieldExpression. @param node The node to visit.
typescript
src/compiler/transformers/generators.ts
1,343
[ "node" ]
true
3
6.08
microsoft/TypeScript
107,154
jsdoc
false
randomDouble
public double randomDouble(final double startInclusive, final double endExclusive) { Validate.isTrue(endExclusive >= startInclusive, "Start value must be smaller or equal to end value."); Validate.isTrue(startInclusive >= 0, "Both range values must be non-negative."); if (startInclusive == endExclusive) { return startInclusive; } return startInclusive + (endExclusive - startInclusive) * random().nextDouble(); }
Generates a random double within the specified range. @param startInclusive the smallest value that can be returned, must be non-negative. @param endExclusive the upper bound (not included). @throws IllegalArgumentException if {@code startInclusive > endExclusive} or if {@code startInclusive} is negative. @return the random double @since 3.16.0
java
src/main/java/org/apache/commons/lang3/RandomUtils.java
342
[ "startInclusive", "endExclusive" ]
true
2
7.44
apache/commons-lang
2,896
javadoc
false
supportsEventType
@Override public boolean supportsEventType(ResolvableType eventType) { return (this.delegate == null || this.delegate.supportsEventType(eventType)); }
Create a SourceFilteringListener for the given event source, expecting subclasses to override the {@link #onApplicationEventInternal} method (instead of specifying a delegate listener). @param source the event source that this listener filters for, only processing events from this source
java
spring-context/src/main/java/org/springframework/context/event/SourceFilteringListener.java
77
[ "eventType" ]
true
2
6
spring-projects/spring-framework
59,386
javadoc
false
doResolveBeanClass
private @Nullable Class<?> doResolveBeanClass(RootBeanDefinition mbd, Class<?>... typesToMatch) throws ClassNotFoundException { ClassLoader beanClassLoader = getBeanClassLoader(); ClassLoader dynamicLoader = beanClassLoader; boolean freshResolve = false; if (!ObjectUtils.isEmpty(typesToMatch)) { // When just doing type checks (i.e. not creating an actual instance yet), // use the specified temporary class loader (for example, in a weaving scenario). ClassLoader tempClassLoader = getTempClassLoader(); if (tempClassLoader != null) { dynamicLoader = tempClassLoader; freshResolve = true; if (tempClassLoader instanceof DecoratingClassLoader dcl) { for (Class<?> typeToMatch : typesToMatch) { dcl.excludeClass(typeToMatch.getName()); } } } } String className = mbd.getBeanClassName(); if (className != null) { Object evaluated = evaluateBeanDefinitionString(className, mbd); if (!className.equals(evaluated)) { // A dynamically resolved expression, supported as of 4.2... if (evaluated instanceof Class<?> clazz) { return clazz; } else if (evaluated instanceof String name) { className = name; freshResolve = true; } else { throw new IllegalStateException("Invalid class name expression result: " + evaluated); } } if (freshResolve) { // When resolving against a temporary class loader, exit early in order // to avoid storing the resolved Class in the bean definition. if (dynamicLoader != null) { try { return dynamicLoader.loadClass(className); } catch (ClassNotFoundException ex) { if (logger.isTraceEnabled()) { logger.trace("Could not load class [" + className + "] from " + dynamicLoader + ": " + ex); } } } return ClassUtils.forName(className, dynamicLoader); } } // Resolve regularly, caching the result in the BeanDefinition... return mbd.resolveBeanClass(beanClassLoader); }
Resolve the bean class for the specified bean definition, resolving a bean class name into a Class reference (if necessary) and storing the resolved Class in the bean definition for further use. @param mbd the merged bean definition to determine the class for @param beanName the name of the bean (for error handling purposes) @param typesToMatch the types to match in case of internal type matching purposes (also signals that the returned {@code Class} will never be exposed to application code) @return the resolved bean class (or {@code null} if none) @throws CannotLoadBeanClassException if we failed to load the class
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
1,582
[ "mbd" ]
true
12
7.68
spring-projects/spring-framework
59,386
javadoc
false
is_lazy_array
def is_lazy_array(x: object) -> bool: """Return True if x is potentially a future or it may be otherwise impossible or expensive to eagerly read its contents, regardless of their size, e.g. by calling ``bool(x)`` or ``float(x)``. Return False otherwise; e.g. ``bool(x)`` etc. is guaranteed to succeed and to be cheap as long as the array has the right dtype and size. Note ---- This function errs on the side of caution for array types that may or may not be lazy, e.g. JAX arrays, by always returning True for them. """ # **JAX note:** while it is possible to determine if you're inside or outside # jax.jit by testing the subclass of a jax.Array object, as well as testing bool() # as we do below for unknown arrays, this is not recommended by JAX best practices. # **Dask note:** Dask eagerly computes the graph on __bool__, __float__, and so on. # This behaviour, while impossible to change without breaking backwards # compatibility, is highly detrimental to performance as the whole graph will end # up being computed multiple times. # Note: skipping reclassification of JAX zero gradient arrays, as one will # exclusively get them once they leave a jax.grad JIT context. cls = cast(Hashable, type(x)) res = _is_lazy_cls(cls) if res is not None: return res if not hasattr(x, "__array_namespace__"): return False # Unknown Array API compatible object. Note that this test may have dire consequences # in terms of performance, e.g. for a lazy object that eagerly computes the graph # on __bool__ (dask is one such example, which however is special-cased above). # Select a single point of the array s = size(cast("HasShape[Collection[SupportsIndex | None]]", x)) if s is None: return True xp = array_namespace(x) if s > 1: x = xp.reshape(x, (-1,))[0] # Cast to dtype=bool and deal with size 0 arrays x = xp.any(x) try: bool(x) return False # The Array API standard dictactes that __bool__ should raise TypeError if the # output cannot be defined. 
# Here we allow for it to raise arbitrary exceptions, e.g. like Dask does. except Exception: return True
Return True if x is potentially a future or it may be otherwise impossible or expensive to eagerly read its contents, regardless of their size, e.g. by calling ``bool(x)`` or ``float(x)``. Return False otherwise; e.g. ``bool(x)`` etc. is guaranteed to succeed and to be cheap as long as the array has the right dtype and size. Note ---- This function errs on the side of caution for array types that may or may not be lazy, e.g. JAX arrays, by always returning True for them.
python
sklearn/externals/array_api_compat/common/_helpers.py
973
[ "x" ]
bool
true
5
6
scikit-learn/scikit-learn
64,340
unknown
false
maybe_lift_tracked_freevar_to_input
def maybe_lift_tracked_freevar_to_input(self, arg: Any) -> Any: """ If arg is a free variable, then lift it to be an input. Returns the new lifted arg (if arg was a freevar), else the original arg. """ if not isinstance(arg, torch.fx.Proxy): # Note: arg can be a python built-in slice type e.g. # x[:max_seq] is represented as get_item(t, (slice(None, max_seq, None))) # we need to also look into the slice variable itself to lift the # proxies there. if isinstance(arg, slice): return slice( *( self.maybe_lift_tracked_freevar_to_input(sub_arg) for sub_arg in (arg.start, arg.stop, arg.step) ) ) else: return arg elif arg.tracer == self: return arg return self.lift_tracked_freevar_to_input(arg)
If arg is a free variable, then lift it to be an input. Returns the new lifted arg (if arg was a freevar), else the original arg.
python
torch/_dynamo/output_graph.py
3,502
[ "self", "arg" ]
Any
true
5
6
pytorch/pytorch
96,034
unknown
false
allequal
def allequal(a, b, fill_value=True): """ Return True if all entries of a and b are equal, using fill_value as a truth value where either or both are masked. Parameters ---------- a, b : array_like Input arrays to compare. fill_value : bool, optional Whether masked values in a or b are considered equal (True) or not (False). Returns ------- y : bool Returns True if the two arrays are equal within the given tolerance, False otherwise. If either array contains NaN, then False is returned. See Also -------- all, any numpy.ma.allclose Examples -------- >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], mask=[False, False, True], fill_value=1e+20) >>> b = np.array([1e10, 1e-7, -42.0]) >>> b array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) >>> np.ma.allequal(a, b, fill_value=False) False >>> np.ma.allequal(a, b) True """ m = mask_or(getmask(a), getmask(b)) if m is nomask: x = getdata(a) y = getdata(b) d = umath.equal(x, y) return d.all() elif fill_value: x = getdata(a) y = getdata(b) d = umath.equal(x, y) dm = array(d, mask=m, copy=False) return dm.filled(True).all(None) else: return False
Return True if all entries of a and b are equal, using fill_value as a truth value where either or both are masked. Parameters ---------- a, b : array_like Input arrays to compare. fill_value : bool, optional Whether masked values in a or b are considered equal (True) or not (False). Returns ------- y : bool Returns True if the two arrays are equal within the given tolerance, False otherwise. If either array contains NaN, then False is returned. See Also -------- all, any numpy.ma.allclose Examples -------- >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], mask=[False, False, True], fill_value=1e+20) >>> b = np.array([1e10, 1e-7, -42.0]) >>> b array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) >>> np.ma.allequal(a, b, fill_value=False) False >>> np.ma.allequal(a, b) True
python
numpy/ma/core.py
8,387
[ "a", "b", "fill_value" ]
false
4
7.6
numpy/numpy
31,054
numpy
false
skipTimeZone
static boolean skipTimeZone(final String tzId) { return tzId.equalsIgnoreCase(TimeZones.GMT_ID); }
Tests whether to skip the given time zone, true if TimeZone.getTimeZone(). <p> On Java 25 and up, skips short IDs if {@code ignoreTimeZoneShortIDs} is true. </p> <p> This method is package private only for testing. </p> @param tzId the ID to test. @return Whether to skip the given time zone ID.
java
src/main/java/org/apache/commons/lang3/time/FastDateParser.java
522
[ "tzId" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
_reorder_for_extension_array_stack
def _reorder_for_extension_array_stack( arr: ExtensionArray, n_rows: int, n_columns: int ) -> ExtensionArray: """ Re-orders the values when stacking multiple extension-arrays. The indirect stacking method used for EAs requires a followup take to get the order correct. Parameters ---------- arr : ExtensionArray n_rows, n_columns : int The number of rows and columns in the original DataFrame. Returns ------- taken : ExtensionArray The original `arr` with elements re-ordered appropriately Examples -------- >>> arr = np.array(["a", "b", "c", "d", "e", "f"]) >>> _reorder_for_extension_array_stack(arr, 2, 3) array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1') >>> _reorder_for_extension_array_stack(arr, 3, 2) array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1') """ # final take to get the order correct. # idx is an indexer like # [c0r0, c1r0, c2r0, ..., # c0r1, c1r1, c2r1, ...] idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.reshape(-1) return arr.take(idx)
Re-orders the values when stacking multiple extension-arrays. The indirect stacking method used for EAs requires a followup take to get the order correct. Parameters ---------- arr : ExtensionArray n_rows, n_columns : int The number of rows and columns in the original DataFrame. Returns ------- taken : ExtensionArray The original `arr` with elements re-ordered appropriately Examples -------- >>> arr = np.array(["a", "b", "c", "d", "e", "f"]) >>> _reorder_for_extension_array_stack(arr, 2, 3) array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1') >>> _reorder_for_extension_array_stack(arr, 3, 2) array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
python
pandas/core/reshape/reshape.py
933
[ "arr", "n_rows", "n_columns" ]
ExtensionArray
true
1
6.8
pandas-dev/pandas
47,362
numpy
false
hasActiveExternalCalls
private boolean hasActiveExternalCalls(Collection<Call> calls) { for (Call call : calls) { if (!call.isInternal()) { return true; } } return false; }
Unassign calls that have not yet been sent based on some predicate. For example, this is used to reassign the calls that have been assigned to a disconnected node. @param shouldUnassign Condition for reassignment. If the predicate is true, then the calls will be put back in the pendingCalls collection and they will be reassigned
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
1,418
[ "calls" ]
true
2
6.88
apache/kafka
31,560
javadoc
false
shuffleSelf
function shuffleSelf(array, size) { var index = -1, length = array.length, lastIndex = length - 1; size = size === undefined ? length : size; while (++index < size) { var rand = baseRandom(index, lastIndex), value = array[rand]; array[rand] = array[index]; array[index] = value; } array.length = size; return array; }
A specialized version of `_.shuffle` which mutates and sets the size of `array`. @private @param {Array} array The array to shuffle. @param {number} [size=array.length] The size of `array`. @returns {Array} Returns `array`.
javascript
lodash.js
6,814
[ "array", "size" ]
false
3
6.08
lodash/lodash
61,490
jsdoc
false
excluding
public ErrorAttributeOptions excluding(Include... excludes) { EnumSet<Include> updated = copyIncludes(); Arrays.stream(excludes).forEach(updated::remove); return new ErrorAttributeOptions(Collections.unmodifiableSet(updated)); }
Return an {@code ErrorAttributeOptions} that excludes the specified attribute {@link Include} options. @param excludes error attributes to exclude @return an {@code ErrorAttributeOptions}
java
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java
79
[]
ErrorAttributeOptions
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
get_api_items
def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper'). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = "pandas" previous_line = current_section = current_subsection = "" position = None for line in api_doc_fd: line_stripped = line.strip() if len(line_stripped) == len(previous_line): if set(line_stripped) == set("-"): current_section = previous_line continue if set(line_stripped) == set("~"): current_subsection = previous_line continue if line_stripped.startswith(".. currentmodule::"): current_module = line_stripped.replace(".. currentmodule::", "").strip() continue if line_stripped == ".. autosummary::": position = "autosummary" continue if position == "autosummary": if line_stripped == "": position = "items" continue if position == "items": if line_stripped == "": position = None continue if line_stripped in IGNORE_VALIDATION: continue func = importlib.import_module(current_module) for part in line_stripped.split("."): func = getattr(func, part) yield ( f"{current_module}.{line_stripped}", func, current_section, current_subsection, ) previous_line = line_stripped
Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper'). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located.
python
scripts/validate_docstrings.py
87
[ "api_doc_fd" ]
false
13
6
pandas-dev/pandas
47,362
numpy
false
_parse_simple_yaml
def _parse_simple_yaml(yaml_str: str) -> dict: """Simple YAML parser for basic configs (without external dependencies). Supports: - key: value pairs - booleans (true/false) - null values - integers and floats - strings (quoted and unquoted) - lists in JSON format [item1, item2, ...] - comments (lines starting with # or after #) Args: yaml_str: YAML content as string Returns: Dictionary containing parsed YAML content """ config = {} for line in yaml_str.split("\n"): # Remove comments line = line.split("#")[0].strip() if not line or ":" not in line: continue key, value = line.split(":", 1) key = key.strip() value = value.strip() # Parse value based on type if value.lower() == "true": config[key] = True elif value.lower() == "false": config[key] = False elif value.lower() in ("null", "none", ""): config[key] = None elif value.startswith("[") and value.endswith("]"): # Parse list - handle quoted strings properly pattern = r'"([^"]+)"|\'([^\']+)\'|([^,\[\]\s]+)' matches = re.findall(pattern, value[1:-1]) # Remove [ ] parsed_items = [] for match in matches: # match is a tuple of (double_quoted, single_quoted, unquoted) item = match[0] or match[1] or match[2] item = item.strip() if item: try: parsed_items.append(int(item)) except ValueError: parsed_items.append(item) config[key] = parsed_items elif value.startswith(('"', "'")): config[key] = value.strip("\"'") else: # Try to parse as number try: config[key] = int(value) except ValueError: try: config[key] = float(value) except ValueError: config[key] = value return config
Simple YAML parser for basic configs (without external dependencies). Supports: - key: value pairs - booleans (true/false) - null values - integers and floats - strings (quoted and unquoted) - lists in JSON format [item1, item2, ...] - comments (lines starting with # or after #) Args: yaml_str: YAML content as string Returns: Dictionary containing parsed YAML content
python
benchmarks/transformer/config_utils.py
70
[ "yaml_str" ]
dict
true
15
8
pytorch/pytorch
96,034
google
false
median
def median(self, numeric_only: bool = False): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None`` and defaults to False. Returns ------- Series or DataFrame Median of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 3, 4, 5], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").median() 2023-01-01 2.0 2023-02-01 4.0 Freq: MS, dtype: float64 """ return self._downsample("median", numeric_only=numeric_only)
Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts ``None`` and defaults to False. Returns ------- Series or DataFrame Median of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 3, 4, 5], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").median() 2023-01-01 2.0 2023-02-01 4.0 Freq: MS, dtype: float64
python
pandas/core/resample.py
1,429
[ "self", "numeric_only" ]
true
1
7.12
pandas-dev/pandas
47,362
numpy
false
getTypeForFactoryBean
protected @Nullable Class<?> getTypeForFactoryBean(FactoryBean<?> factoryBean) { try { return factoryBean.getObjectType(); } catch (Throwable ex) { // Thrown from the FactoryBean's getObjectType implementation. logger.info("FactoryBean threw exception from getObjectType, despite the contract saying " + "that it should return null if the type of its object cannot be determined yet", ex); return null; } }
Determine the type for the given FactoryBean. @param factoryBean the FactoryBean instance to check @return the FactoryBean's object type, or {@code null} if the type cannot be determined yet
java
spring-beans/src/main/java/org/springframework/beans/factory/support/FactoryBeanRegistrySupport.java
55
[ "factoryBean" ]
true
2
7.92
spring-projects/spring-framework
59,386
javadoc
false
load
static @Nullable PemContent load(@Nullable String content, ResourceLoader resourceLoader) throws IOException { if (!StringUtils.hasLength(content)) { return null; } if (isPresentInText(content)) { return new PemContent(content); } try (InputStream in = resourceLoader.getResource(content).getInputStream()) { return load(in); } catch (IOException | UncheckedIOException ex) { throw new IOException("Error reading certificate or key from file '%s'".formatted(content), ex); } }
Load {@link PemContent} from the given content (either the PEM content itself or a reference to the resource to load). @param content the content to load @param resourceLoader the resource loader used to load content @return a new {@link PemContent} instance or {@code null} @throws IOException on IO error
java
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemContent.java
119
[ "content", "resourceLoader" ]
PemContent
true
4
7.76
spring-projects/spring-boot
79,428
javadoc
false
createWithExpectedSize
public static <E extends @Nullable Object> CompactLinkedHashSet<E> createWithExpectedSize( int expectedSize) { return new CompactLinkedHashSet<>(expectedSize); }
Creates a {@code CompactLinkedHashSet} instance, with a high enough "initial capacity" that it <i>should</i> hold {@code expectedSize} elements without rebuilding internal data structures. @param expectedSize the number of elements you expect to add to the returned set @return a new, empty {@code CompactLinkedHashSet} with enough capacity to hold {@code expectedSize} elements without resizing @throws IllegalArgumentException if {@code expectedSize} is negative
java
android/guava/src/com/google/common/collect/CompactLinkedHashSet.java
95
[ "expectedSize" ]
true
1
6
google/guava
51,352
javadoc
false
ensureExclusiveFields
private static void ensureExclusiveFields(List<List<String>> exclusiveFields) { StringBuilder message = null; for (List<String> fieldset : exclusiveFields) { if (fieldset.size() > 1) { if (message == null) { message = new StringBuilder(); } message.append("The following fields are not allowed together: ").append(fieldset).append(" "); } } if (message != null && message.length() > 0) { throw new IllegalArgumentException(message.toString()); } }
Parses a Value from the given {@link XContentParser} @param parser the parser to build a value from @param value the value to fill from the parser @param context a context that is passed along to all declared field parsers @return the parsed value @throws IOException if an IOException occurs.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
345
[ "exclusiveFields" ]
void
true
5
7.6
elastic/elasticsearch
75,680
javadoc
false
fromInteger
public static Inet4Address fromInteger(int address) { return getInet4Address(Ints.toByteArray(address)); }
Returns an Inet4Address having the integer value specified by the argument. @param address {@code int}, the 32bit integer address to be converted @return {@link Inet4Address} equivalent of the argument
java
android/guava/src/com/google/common/net/InetAddresses.java
1,082
[ "address" ]
Inet4Address
true
1
6.64
google/guava
51,352
javadoc
false
checkStrictModePostfixUnaryExpression
function checkStrictModePostfixUnaryExpression(node: PostfixUnaryExpression) { // Grammar checking // The identifier eval or arguments may not appear as the LeftHandSideExpression of an // Assignment operator(11.13) or of a PostfixExpression(11.3) or as the UnaryExpression // operated upon by a Prefix Increment(11.4.4) or a Prefix Decrement(11.4.5) operator. if (inStrictMode) { checkStrictModeEvalOrArguments(node, node.operand as Identifier); } }
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names. @param symbolTable - The symbol table which node will be added to. @param parent - node's parent declaration. @param node - The declaration to be added to the symbol table @param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.) @param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
typescript
src/compiler/binder.ts
2,710
[ "node" ]
false
2
6.08
microsoft/TypeScript
107,154
jsdoc
false
asWriter
public static Writer asWriter(Appendable target) { if (target instanceof Writer) { return (Writer) target; } return new AppendableWriter(target); }
Returns a Writer that sends all output to the given {@link Appendable} target. Closing the writer will close the target if it is {@link Closeable}, and flushing the writer will flush the target if it is {@link java.io.Flushable}. @param target the object to which output will be sent @return a new Writer object, unless target is a Writer, in which case the target is returned
java
android/guava/src/com/google/common/io/CharStreams.java
359
[ "target" ]
Writer
true
2
8.24
google/guava
51,352
javadoc
false
adapt
@Nullable R adapt(T value);
Adapt the given value. @param value the value to adapt @return an adapted value or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
386
[ "value" ]
R
true
1
6.8
spring-projects/spring-boot
79,428
javadoc
false
toRootLowerCase
public static String toRootLowerCase(final String source) { return source == null ? null : source.toLowerCase(Locale.ROOT); }
Converts the given source String as a lower-case using the {@link Locale#ROOT} locale in a null-safe manner. @param source A source String or null. @return the given source String as a lower-case using the {@link Locale#ROOT} locale or null. @since 3.10
java
src/main/java/org/apache/commons/lang3/StringUtils.java
8,672
[ "source" ]
String
true
2
8.16
apache/commons-lang
2,896
javadoc
false
matchesProperty
public static boolean matchesProperty(String registeredPath, String propertyPath) { if (!registeredPath.startsWith(propertyPath)) { return false; } if (registeredPath.length() == propertyPath.length()) { return true; } if (registeredPath.charAt(propertyPath.length()) != PropertyAccessor.PROPERTY_KEY_PREFIX_CHAR) { return false; } return (registeredPath.indexOf(PropertyAccessor.PROPERTY_KEY_SUFFIX_CHAR, propertyPath.length() + 1) == registeredPath.length() - 1); }
Determine whether the given registered path matches the given property path, either indicating the property itself or an indexed element of the property. @param propertyPath the property path (typically without index) @param registeredPath the registered path (potentially with index) @return whether the paths match
java
spring-beans/src/main/java/org/springframework/beans/PropertyAccessorUtils.java
120
[ "registeredPath", "propertyPath" ]
true
4
7.28
spring-projects/spring-framework
59,386
javadoc
false
registerCandidateTypeForIncludeFilter
private void registerCandidateTypeForIncludeFilter(String className, TypeFilter filter) { if (this.componentsIndex != null) { if (filter instanceof AnnotationTypeFilter annotationTypeFilter) { Class<? extends Annotation> annotationType = annotationTypeFilter.getAnnotationType(); if (isStereotypeAnnotationForIndex(annotationType)) { this.componentsIndex.registerCandidateType(className, annotationType.getName()); } } else if (filter instanceof AssignableTypeFilter assignableTypeFilter) { Class<?> target = assignableTypeFilter.getTargetType(); if (AnnotationUtils.isAnnotationDeclaredLocally(Indexed.class, target)) { this.componentsIndex.registerCandidateType(className, target.getName()); } } } }
Register the given class as a candidate type with the runtime-populated index, if any. @param className the fully-qualified class name of the candidate type @param filter the include filter to introspect for the associated stereotype
java
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
364
[ "className", "filter" ]
void
true
6
6.4
spring-projects/spring-framework
59,386
javadoc
false
resolveAddresses
private void resolveAddresses() throws UnknownHostException { // (Re-)initialize list addresses = ClientUtils.resolve(host, hostResolver); if (log.isDebugEnabled()) { log.debug("Resolved host {} to addresses {}", host, addresses); } addressIndex = 0; // We re-resolve DNS after disconnecting, but we don't want to immediately reconnect to the address we // just disconnected from, in case we disconnected due to a problem with that IP (such as a load // balancer instance failure). Check the first address in the list and skip it if it was the last address // we tried and there are multiple addresses to choose from. if (addresses.size() > 1 && addresses.get(addressIndex).equals(lastAttemptedAddress)) { addressIndex++; } }
Jumps to the next available resolved address for this node. If no other addresses are available, marks the list to be refreshed on the next {@link #currentAddress()} call.
java
clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
534
[]
void
true
4
6.88
apache/kafka
31,560
javadoc
false
patch_dynamo_config
def patch_dynamo_config( arg1: Optional[Union[str, dict[str, Any], tuple[tuple[str, Any], ...]]] = None, arg2: Any = None, **kwargs: Any, ) -> DynamoConfigPatchProxy: """ A wrapper around torch._dynamo.config.patch that can be traced by Dynamo to temporarily change config values DURING tracing. See _allowed_config_patches for the list of allowed config patches. Arguments are the same as with torch._dynamo.config.patch. Can be used as a decorator or a context manager. User code SHOULD NOT MODIFY the return value of this function. WARNING: changing Dynamo config during tracing can lead to unpredictable tracing behavior! Proceed only as advised! """ if isinstance(arg1, tuple): arg1 = dict(arg1) config_patch = torch._dynamo.config.patch(arg1, arg2, **kwargs) _patch_dynamo_config_check(config_patch.changes) # check for valid patching using config_patch.changes return DynamoConfigPatchProxy(config_patch)
A wrapper around torch._dynamo.config.patch that can be traced by Dynamo to temporarily change config values DURING tracing. See _allowed_config_patches for the list of allowed config patches. Arguments are the same as with torch._dynamo.config.patch. Can be used as a decorator or a context manager. User code SHOULD NOT MODIFY the return value of this function. WARNING: changing Dynamo config during tracing can lead to unpredictable tracing behavior! Proceed only as advised!
python
torch/_dynamo/decorators.py
936
[ "arg1", "arg2" ]
DynamoConfigPatchProxy
true
2
6.72
pytorch/pytorch
96,034
unknown
false
fit
def fit(self, X, y=None): """Learn the idf vector (global term weights). Parameters ---------- X : sparse matrix of shape (n_samples, n_features) A matrix of term/token counts. y : None This parameter is not needed to compute tf-idf. Returns ------- self : object Fitted transformer. """ # large sparse data is not supported for 32bit platforms because # _document_frequency uses np.bincount which works on arrays of # dtype NPY_INTP which is int32 for 32bit platforms. See #20923 X = validate_data( self, X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT ) if not sp.issparse(X): X = sp.csr_matrix(X) dtype = X.dtype if X.dtype in (np.float64, np.float32) else np.float64 if self.use_idf: n_samples, _ = X.shape df = _document_frequency(X) df = df.astype(dtype, copy=False) # perform idf smoothing if required df += float(self.smooth_idf) n_samples += int(self.smooth_idf) # log+1 instead of log makes sure terms with zero idf don't get # suppressed entirely. # Force the dtype of `idf_` to be the same as `df`. In NumPy < 2, the dtype # was depending on the value of `n_samples`. self.idf_ = np.full_like(df, fill_value=n_samples, dtype=dtype) self.idf_ /= df # `np.log` preserves the dtype of `df` and thus `dtype`. np.log(self.idf_, out=self.idf_) self.idf_ += 1.0 return self
Learn the idf vector (global term weights). Parameters ---------- X : sparse matrix of shape (n_samples, n_features) A matrix of term/token counts. y : None This parameter is not needed to compute tf-idf. Returns ------- self : object Fitted transformer.
python
sklearn/feature_extraction/text.py
1,645
[ "self", "X", "y" ]
false
4
6.24
scikit-learn/scikit-learn
64,340
numpy
false
get_bin_seeds
def get_bin_seeds(X, bin_size, min_bin_freq=1): """Find seeds for mean_shift. Finds seeds by first binning data onto a grid whose lines are spaced bin_size apart, and then choosing those bins with at least min_bin_freq points. Parameters ---------- X : array-like of shape (n_samples, n_features) Input points, the same points that will be used in mean_shift. bin_size : float Controls the coarseness of the binning. Smaller values lead to more seeding (which is computationally more expensive). If you're not sure how to set this, set it to the value of the bandwidth used in clustering.mean_shift. min_bin_freq : int, default=1 Only bins with at least min_bin_freq will be selected as seeds. Raising this value decreases the number of seeds found, which makes mean_shift computationally cheaper. Returns ------- bin_seeds : array-like of shape (n_samples, n_features) Points used as initial kernel positions in clustering.mean_shift. """ if bin_size == 0: return X # Bin points bin_sizes = defaultdict(int) for point in X: binned_point = np.round(point / bin_size) bin_sizes[tuple(binned_point)] += 1 # Select only those bins as seeds which have enough members bin_seeds = np.array( [point for point, freq in bin_sizes.items() if freq >= min_bin_freq], dtype=np.float32, ) if len(bin_seeds) == len(X): warnings.warn( "Binning data failed with provided bin_size=%f, using data points as seeds." % bin_size ) return X bin_seeds = bin_seeds * bin_size return bin_seeds
Find seeds for mean_shift. Finds seeds by first binning data onto a grid whose lines are spaced bin_size apart, and then choosing those bins with at least min_bin_freq points. Parameters ---------- X : array-like of shape (n_samples, n_features) Input points, the same points that will be used in mean_shift. bin_size : float Controls the coarseness of the binning. Smaller values lead to more seeding (which is computationally more expensive). If you're not sure how to set this, set it to the value of the bandwidth used in clustering.mean_shift. min_bin_freq : int, default=1 Only bins with at least min_bin_freq will be selected as seeds. Raising this value decreases the number of seeds found, which makes mean_shift computationally cheaper. Returns ------- bin_seeds : array-like of shape (n_samples, n_features) Points used as initial kernel positions in clustering.mean_shift.
python
sklearn/cluster/_mean_shift.py
247
[ "X", "bin_size", "min_bin_freq" ]
false
4
6.24
scikit-learn/scikit-learn
64,340
numpy
false
appendIfMissing
@Deprecated public static String appendIfMissing(final String str, final CharSequence suffix, final CharSequence... suffixes) { return Strings.CS.appendIfMissing(str, suffix, suffixes); }
Appends the suffix to the end of the string if the string does not already end with any of the suffixes. <pre> StringUtils.appendIfMissing(null, null) = null StringUtils.appendIfMissing("abc", null) = "abc" StringUtils.appendIfMissing("", "xyz" = "xyz" StringUtils.appendIfMissing("abc", "xyz") = "abcxyz" StringUtils.appendIfMissing("abcxyz", "xyz") = "abcxyz" StringUtils.appendIfMissing("abcXYZ", "xyz") = "abcXYZxyz" </pre> <p> With additional suffixes, </p> <pre> StringUtils.appendIfMissing(null, null, null) = null StringUtils.appendIfMissing("abc", null, null) = "abc" StringUtils.appendIfMissing("", "xyz", null) = "xyz" StringUtils.appendIfMissing("abc", "xyz", new CharSequence[]{null}) = "abcxyz" StringUtils.appendIfMissing("abc", "xyz", "") = "abc" StringUtils.appendIfMissing("abc", "xyz", "mno") = "abcxyz" StringUtils.appendIfMissing("abcxyz", "xyz", "mno") = "abcxyz" StringUtils.appendIfMissing("abcmno", "xyz", "mno") = "abcmno" StringUtils.appendIfMissing("abcXYZ", "xyz", "mno") = "abcXYZxyz" StringUtils.appendIfMissing("abcMNO", "xyz", "mno") = "abcMNOxyz" </pre> @param str The string. @param suffix The suffix to append to the end of the string. @param suffixes Additional suffixes that are valid terminators. @return A new String if suffix was appended, the same string otherwise. @since 3.2 @deprecated Use {@link Strings#appendIfMissing(String, CharSequence, CharSequence...) Strings.CS.appendIfMissing(String, CharSequence, CharSequence...)}.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
464
[ "str", "suffix" ]
String
true
1
6.32
apache/commons-lang
2,896
javadoc
false
getPropertyDescriptor
@Override public PropertyDescriptor getPropertyDescriptor(String propertyName) throws InvalidPropertyException { BeanWrapperImpl nestedBw = (BeanWrapperImpl) getPropertyAccessorForPropertyPath(propertyName); String finalPath = getFinalPath(nestedBw, propertyName); PropertyDescriptor pd = nestedBw.getCachedIntrospectionResults().getPropertyDescriptor(finalPath); if (pd == null) { throw new InvalidPropertyException(getRootClass(), getNestedPath() + propertyName, "No property '" + propertyName + "' found"); } return pd; }
Convert the given value for the specified property to the latter's type. <p>This method is only intended for optimizations in a BeanFactory. Use the {@code convertIfNecessary} methods for programmatic conversion. @param value the value to convert @param propertyName the target property (note that nested or indexed properties are not supported here) @return the new value, possibly the result of type conversion @throws TypeMismatchException if type conversion failed
java
spring-beans/src/main/java/org/springframework/beans/BeanWrapperImpl.java
214
[ "propertyName" ]
PropertyDescriptor
true
2
7.44
spring-projects/spring-framework
59,386
javadoc
false
toArray
public static boolean[] toArray(Collection<Boolean> collection) { if (collection instanceof BooleanArrayAsList) { return ((BooleanArrayAsList) collection).toBooleanArray(); } Object[] boxedArray = collection.toArray(); int len = boxedArray.length; boolean[] array = new boolean[len]; for (int i = 0; i < len; i++) { // checkNotNull for GWT (do not optimize) array[i] = (Boolean) checkNotNull(boxedArray[i]); } return array; }
Copies a collection of {@code Boolean} instances into a new array of primitive {@code boolean} values. <p>Elements are copied from the argument collection as if by {@code collection.toArray()}. Calling this method is as thread-safe as calling that method. <p><b>Note:</b> consider representing the collection as a {@link java.util.BitSet} instead. @param collection a collection of {@code Boolean} objects @return an array containing the same values as {@code collection}, in the same order, converted to primitives @throws NullPointerException if {@code collection} or any of its elements is null
java
android/guava/src/com/google/common/primitives/Booleans.java
355
[ "collection" ]
true
3
7.6
google/guava
51,352
javadoc
false
createExportExpression
function createExportExpression(name: ModuleExportName, value: Expression, location?: TextRange, liveBinding?: boolean) { return setTextRange( liveBinding ? factory.createCallExpression( factory.createPropertyAccessExpression( factory.createIdentifier("Object"), "defineProperty", ), /*typeArguments*/ undefined, [ factory.createIdentifier("exports"), factory.createStringLiteralFromNode(name), factory.createObjectLiteralExpression([ factory.createPropertyAssignment("enumerable", factory.createTrue()), factory.createPropertyAssignment( "get", factory.createFunctionExpression( /*modifiers*/ undefined, /*asteriskToken*/ undefined, /*name*/ undefined, /*typeParameters*/ undefined, /*parameters*/ [], /*type*/ undefined, factory.createBlock([factory.createReturnStatement(value)]), ), ), ]), ], ) : factory.createAssignment( name.kind === SyntaxKind.StringLiteral ? factory.createElementAccessExpression( factory.createIdentifier("exports"), factory.cloneNode(name), ) : factory.createPropertyAccessExpression( factory.createIdentifier("exports"), factory.cloneNode(name), ), value, ), location, ); }
Creates a call to the current file's export function to export a value. @param name The bound name of the export. @param value The exported value. @param location The location to use for source maps and comments for the export.
typescript
src/compiler/transformers/module/module.ts
2,185
[ "name", "value", "location?", "liveBinding?" ]
false
3
6.24
microsoft/TypeScript
107,154
jsdoc
false
_wrap_aggregated_output
def _wrap_aggregated_output( self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None = None, ): """ Wraps the output of GroupBy aggregations into the expected result. Parameters ---------- result : Series, DataFrame Returns ------- Series or DataFrame """ # ATM we do not get here for SeriesGroupBy; when we do, we will # need to require that result.name already match self.obj.name if not self.as_index: # `not self.as_index` is only relevant for DataFrameGroupBy, # enforced in __init__ result = self._insert_inaxis_grouper(result, qs=qs) result = result._consolidate() result.index = default_index(len(result)) else: index = self._grouper.result_index if qs is not None: # We get here with len(qs) != 1 and not self.as_index # in test_pass_args_kwargs index = _insert_quantile_level(index, qs) result.index = index return result
Wraps the output of GroupBy aggregations into the expected result. Parameters ---------- result : Series, DataFrame Returns ------- Series or DataFrame
python
pandas/core/groupby/groupby.py
1,306
[ "self", "result", "qs" ]
true
4
6.24
pandas-dev/pandas
47,362
numpy
false
flatten_descr
def flatten_descr(ndtype): """ Flatten a structured data-type description. Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])]) >>> rfn.flatten_descr(ndtype) (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32'))) """ names = ndtype.names if names is None: return (('', ndtype),) else: descr = [] for field in names: (typ, _) = ndtype.fields[field] if typ.names is not None: descr.extend(flatten_descr(typ)) else: descr.append((field, typ)) return tuple(descr)
Flatten a structured data-type description. Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])]) >>> rfn.flatten_descr(ndtype) (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
python
numpy/lib/recfunctions.py
169
[ "ndtype" ]
false
6
6.32
numpy/numpy
31,054
unknown
false
extractTopicPartition
public static <K, V> TopicPartition extractTopicPartition(ProducerRecord<K, V> record) { return new TopicPartition(record.topic(), record.partition() == null ? RecordMetadata.UNKNOWN_PARTITION : record.partition()); }
This method is called when sending the record fails in {@link ProducerInterceptor#onSend (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception, Headers)} method for each interceptor @param record The record from client @param interceptTopicPartition The topic/partition for the record if an error occurred after partition gets assigned; the topic part of interceptTopicPartition is the same as in record. @param exception The exception thrown during processing of this record.
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java
140
[ "record" ]
TopicPartition
true
2
6.16
apache/kafka
31,560
javadoc
false
cluster_status
def cluster_status(self, cluster_identifier: str) -> str | None: """ Get status of a cluster. .. seealso:: - :external+boto3:py:meth:`Redshift.Client.describe_clusters` :param cluster_identifier: unique identifier of a cluster """ try: response = self.conn.describe_clusters(ClusterIdentifier=cluster_identifier)["Clusters"] return response[0]["ClusterStatus"] if response else None except self.conn.exceptions.ClusterNotFoundFault: return "cluster_not_found"
Get status of a cluster. .. seealso:: - :external+boto3:py:meth:`Redshift.Client.describe_clusters` :param cluster_identifier: unique identifier of a cluster
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_cluster.py
79
[ "self", "cluster_identifier" ]
str | None
true
2
6.08
apache/airflow
43,597
sphinx
false
equals
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } return this.name.equals(((Option) obj).name); }
Return a description of the option. @return the option description
java
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
331
[ "obj" ]
true
4
6.88
spring-projects/spring-boot
79,428
javadoc
false
isReturnVoidStatementInConstructorWithCapturedSuper
function isReturnVoidStatementInConstructorWithCapturedSuper(node: Node): boolean { return (hierarchyFacts & HierarchyFacts.ConstructorWithSuperCall) !== 0 && node.kind === SyntaxKind.ReturnStatement && !(node as ReturnStatement).expression; }
Restores the `HierarchyFacts` for this node's ancestor after visiting this node's subtree, propagating specific facts from the subtree. @param ancestorFacts The `HierarchyFacts` of the ancestor to restore after visiting the subtree. @param excludeFacts The existing `HierarchyFacts` of the subtree that should not be propagated. @param includeFacts The new `HierarchyFacts` of the subtree that should be propagated.
typescript
src/compiler/transformers/es2015.ts
571
[ "node" ]
true
3
6.24
microsoft/TypeScript
107,154
jsdoc
false
removeFromChain
@GuardedBy("this") @Nullable E removeFromChain(E first, E entry) { int newCount = count; E newFirst = entry.getNext(); for (E e = first; e != entry; e = e.getNext()) { E next = copyEntry(e, newFirst); if (next != null) { newFirst = next; } else { newCount--; } } this.count = newCount; return newFirst; }
Removes an entry from within a table. All entries following the removed node can stay, but all preceding ones need to be cloned. <p>This method does not decrement count for the removed entry, but does decrement count for all partially collected entries which are skipped. As such callers which are modifying count must re-read it after calling removeFromChain. @param first the first entry of the table @param entry the entry being removed from the table @return the new first entry for the table
java
android/guava/src/com/google/common/collect/MapMakerInternalMap.java
1,825
[ "first", "entry" ]
E
true
3
8.08
google/guava
51,352
javadoc
false
tryWithResources
@SafeVarargs public static void tryWithResources(final FailableRunnable<? extends Throwable> action, final FailableConsumer<Throwable, ? extends Throwable> errorHandler, final FailableRunnable<? extends Throwable>... resources) { final org.apache.commons.lang3.function.FailableRunnable<?>[] fr = new org.apache.commons.lang3.function.FailableRunnable[resources.length]; Arrays.setAll(fr, i -> () -> resources[i].run()); Failable.tryWithResources(action::run, errorHandler != null ? errorHandler::accept : null, fr); }
A simple try-with-resources implementation, that can be used, if your objects do not implement the {@link AutoCloseable} interface. The method executes the {@code action}. The method guarantees, that <em>all</em> the {@code resources} are being executed, in the given order, afterwards, and regardless of success, or failure. If either the original action, or any of the resource action fails, then the <em>first</em> failure (AKA {@link Throwable}) is rethrown. Example use: <pre> {@code final FileInputStream fis = new FileInputStream("my.file"); Functions.tryWithResources(useInputStream(fis), null, () -> fis.close()); }</pre> @param action The action to execute. This object <em>will</em> always be invoked. @param errorHandler An optional error handler, which will be invoked finally, if any error occurred. The error handler will receive the first error, AKA {@link Throwable}. @param resources The resource actions to execute. <em>All</em> resource actions will be invoked, in the given order. A resource action is an instance of {@link FailableRunnable}, which will be executed. @see #tryWithResources(FailableRunnable, FailableRunnable...)
java
src/main/java/org/apache/commons/lang3/Functions.java
626
[ "action", "errorHandler" ]
void
true
2
6.4
apache/commons-lang
2,896
javadoc
false
trySend
long trySend(long now) { long pollDelayMs = maxPollTimeoutMs; // send any requests that can be sent now for (Node node : unsent.nodes()) { Iterator<ClientRequest> iterator = unsent.requestIterator(node); if (iterator.hasNext()) pollDelayMs = Math.min(pollDelayMs, client.pollDelayMs(node, now)); while (iterator.hasNext()) { ClientRequest request = iterator.next(); if (client.ready(node, now)) { client.send(request, now); iterator.remove(); } else { // try next node when current node is not ready break; } } } return pollDelayMs; }
Check whether there is pending request. This includes both requests that have been transmitted (i.e. in-flight requests) and those which are awaiting transmission. @return A boolean indicating whether there is pending request
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
504
[ "now" ]
true
4
6.88
apache/kafka
31,560
javadoc
false
getRegistrations
@SuppressWarnings("unchecked") <S> List<Registration<S, ?>> getRegistrations(S source, boolean required) { Class<S> sourceType = (Class<S>) source.getClass(); List<Registration<S, ?>> result = new ArrayList<>(); for (Registration<?, ?> candidate : this.registrations) { if (candidate.sourceType().isAssignableFrom(sourceType)) { result.add((Registration<S, ?>) candidate); } } if (required && result.isEmpty()) { throw new ConnectionDetailsFactoryNotFoundException(source); } result.sort(Comparator.comparing(Registration::factory, AnnotationAwareOrderComparator.INSTANCE)); return List.copyOf(result); }
Return a {@link Map} of {@link ConnectionDetails} interface type to {@link ConnectionDetails} instance created from the factories associated with the given source. @param <S> the source type @param source the source @param required if a connection details result is required @return a map of {@link ConnectionDetails} instances @throws ConnectionDetailsFactoryNotFoundException if a result is required but no connection details factory is registered for the source @throws ConnectionDetailsNotFoundException if a result is required but no connection details instance was created from a registered factory
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/service/connection/ConnectionDetailsFactories.java
101
[ "source", "required" ]
true
4
7.28
spring-projects/spring-boot
79,428
javadoc
false
set_alignment
def set_alignment(torch_layout, op_element) -> bool: """ Helper method to update the alignment of a given CUTLASS GEMM op operand's element. This method modifies the alignment of the given Cutlass GEMM op operand's element to match the layout of the corresponding ir.Buffer node. Args: torch_layout: The layout of the corresponding ir.Buffer node. op_element: The Cutlass GEMM op operand's element whose alignment is to be updated. Returns: bool: True if the alignment was successfully updated, False otherwise. """ alignment = cutlass_utils.get_max_alignment(torch_layout) cuda_arch = cutlass_utils.get_cuda_arch() if cuda_arch and int(cuda_arch) >= 90 and alignment < op_element.alignment: return False else: op_element.alignment = alignment return True
Helper method to update the alignment of a given CUTLASS GEMM op operand's element. This method modifies the alignment of the given Cutlass GEMM op operand's element to match the layout of the corresponding ir.Buffer node. Args: torch_layout: The layout of the corresponding ir.Buffer node. op_element: The Cutlass GEMM op operand's element whose alignment is to be updated. Returns: bool: True if the alignment was successfully updated, False otherwise.
python
torch/_inductor/codegen/cuda/gemm_template.py
700
[ "torch_layout", "op_element" ]
bool
true
5
7.92
pytorch/pytorch
96,034
google
false
score
def score(self, X, y=None, sample_weight=None, **params): """Transform the data, and apply `score` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `score` method. Only valid if the final estimator implements `score`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as ``sample_weight`` keyword argument to the ``score`` method of the final estimator. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Result of calling `score` on the final estimator. """ check_is_fitted(self) Xt = X if not _routing_enabled(): for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt) score_params = {} if sample_weight is not None: score_params["sample_weight"] = sample_weight return self.steps[-1][1].score(Xt, y, **score_params) # metadata routing is enabled. routed_params = process_routing( self, "score", sample_weight=sample_weight, **params ) Xt = X for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt, **routed_params[name].transform) return self.steps[-1][1].score(Xt, y, **routed_params[self.steps[-1][0]].score)
Transform the data, and apply `score` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `score` method. Only valid if the final estimator implements `score`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as ``sample_weight`` keyword argument to the ``score`` method of the final estimator. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Result of calling `score` on the final estimator.
python
sklearn/pipeline.py
1,092
[ "self", "X", "y", "sample_weight" ]
false
5
6.08
scikit-learn/scikit-learn
64,340
numpy
false
tz_convert
def tz_convert(self, tz) -> Self: """ Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Datetme Array/Index with target `tz`. Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range( ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin" ... ) >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='h') >>> dti.tz_convert("US/Central") DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='h') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range( ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin" ... 
) >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[us, Europe/Berlin]', freq='h') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[us]', freq='h') """ # noqa: E501 tz = timezones.maybe_get_tz(tz) if self.tz is None: # tz naive, use tz_localize raise TypeError( "Cannot convert tz-naive timestamps, use tz_localize to localize" ) # No conversion since timestamps are all UTC to begin with dtype = tz_to_dtype(tz, unit=self.unit) new_freq = None if isinstance(self.freq, Tick): new_freq = self.freq return self._simple_new(self._ndarray, dtype=dtype, freq=new_freq)
Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Datetme Array/Index with target `tz`. Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range( ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin" ... ) >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='h') >>> dti.tz_convert("US/Central") DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='h') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range( ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin" ... ) >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[us, Europe/Berlin]', freq='h') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[us]', freq='h')
python
pandas/core/arrays/datetimes.py
860
[ "self", "tz" ]
Self
true
3
8.08
pandas-dev/pandas
47,362
numpy
false
trimmed
public ImmutableIntArray trimmed() { return isPartialView() ? new ImmutableIntArray(toArray()) : this; }
Returns an immutable array containing the same values as {@code this} array. This is logically a no-op, and in some circumstances {@code this} itself is returned. However, if this instance is a {@link #subArray} view of a larger array, this method will copy only the appropriate range of values, resulting in an equivalent array with a smaller memory footprint.
java
android/guava/src/com/google/common/primitives/ImmutableIntArray.java
632
[]
ImmutableIntArray
true
2
6.64
google/guava
51,352
javadoc
false
saturatedCast
public static int saturatedCast(long value) { if (value > Integer.MAX_VALUE) { return Integer.MAX_VALUE; } if (value < Integer.MIN_VALUE) { return Integer.MIN_VALUE; } return (int) value; }
Returns the {@code int} nearest in value to {@code value}. @param value any {@code long} value @return the same value cast to {@code int} if it is in the range of the {@code int} type, {@link Integer#MAX_VALUE} if it is too large, or {@link Integer#MIN_VALUE} if it is too small
java
android/guava/src/com/google/common/primitives/Ints.java
107
[ "value" ]
true
3
7.92
google/guava
51,352
javadoc
false
synchronizedMultimap
@J2ktIncompatible // Synchronized public static <K extends @Nullable Object, V extends @Nullable Object> Multimap<K, V> synchronizedMultimap(Multimap<K, V> multimap) { return Synchronized.multimap(multimap, null); }
Returns a synchronized (thread-safe) multimap backed by the specified multimap. In order to guarantee serial access, it is critical that <b>all</b> access to the backing multimap is accomplished through the returned multimap. <p>It is imperative that the user manually synchronize on the returned multimap when accessing any of its collection views: {@snippet : Multimap<K, V> multimap = Multimaps.synchronizedMultimap(HashMultimap.create()); ... Collection<V> values = multimap.get(key); // Needn't be in synchronized block ... synchronized (multimap) { // Synchronizing on multimap, not values! Iterator<V> i = values.iterator(); // Must be in synchronized block while (i.hasNext()) { foo(i.next()); } } } <p>Failure to follow this advice may result in non-deterministic behavior. <p>Note that the generated multimap's {@link Multimap#removeAll} and {@link Multimap#replaceValues} methods return collections that aren't synchronized. <p>The returned multimap will be serializable if the specified multimap is serializable. @param multimap the multimap to be wrapped in a synchronized view @return a synchronized view of the specified multimap
java
android/guava/src/com/google/common/collect/Multimaps.java
637
[ "multimap" ]
true
1
6.24
google/guava
51,352
javadoc
false
_boost
def _boost(self, iboost, X, y, sample_weight, random_state): """Implement a single boost for regression Perform a single boost according to the AdaBoost.R2 algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape (n_samples,) The current sample weights. random_state : RandomState The RandomState instance used if the base estimator accepts a `random_state` attribute. Controls also the bootstrap of the weights used to train the weak learner. Returns ------- sample_weight : array-like of shape (n_samples,) or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The regression error for the current boost. If None then boosting has terminated early. 
""" estimator = self._make_estimator(random_state=random_state) # Weighted sampling of the training set with replacement bootstrap_idx = random_state.choice( np.arange(_num_samples(X)), size=_num_samples(X), replace=True, p=sample_weight, ) # Fit on the bootstrapped sample and obtain a prediction # for all samples in the training set X_ = _safe_indexing(X, bootstrap_idx) y_ = _safe_indexing(y, bootstrap_idx) estimator.fit(X_, y_) y_predict = estimator.predict(X) error_vect = np.abs(y_predict - y) sample_mask = sample_weight > 0 masked_sample_weight = sample_weight[sample_mask] masked_error_vector = error_vect[sample_mask] error_max = masked_error_vector.max() if error_max != 0: masked_error_vector /= error_max if self.loss == "square": masked_error_vector **= 2 elif self.loss == "exponential": masked_error_vector = 1.0 - np.exp(-masked_error_vector) # Calculate the average loss estimator_error = (masked_sample_weight * masked_error_vector).sum() if estimator_error <= 0: # Stop if fit is perfect return sample_weight, 1.0, 0.0 elif estimator_error >= 0.5: # Discard current estimator only if it isn't the only one if len(self.estimators_) > 1: self.estimators_.pop(-1) return None, None, None beta = estimator_error / (1.0 - estimator_error) # Boost weight using AdaBoost.R2 alg estimator_weight = self.learning_rate * np.log(1.0 / beta) if not iboost == self.n_estimators - 1: sample_weight[sample_mask] *= np.power( beta, (1.0 - masked_error_vector) * self.learning_rate ) return sample_weight, estimator_weight, estimator_error
Implement a single boost for regression Perform a single boost according to the AdaBoost.R2 algorithm and return the updated sample weights. Parameters ---------- iboost : int The index of the current boost iteration. X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape (n_samples,) The current sample weights. random_state : RandomState The RandomState instance used if the base estimator accepts a `random_state` attribute. Controls also the bootstrap of the weights used to train the weak learner. Returns ------- sample_weight : array-like of shape (n_samples,) or None The reweighted sample weights. If None then boosting has terminated early. estimator_weight : float The weight for the current boost. If None then boosting has terminated early. estimator_error : float The regression error for the current boost. If None then boosting has terminated early.
python
sklearn/ensemble/_weight_boosting.py
969
[ "self", "iboost", "X", "y", "sample_weight", "random_state" ]
false
8
6
scikit-learn/scikit-learn
64,340
numpy
false
apply
public static <T, R, E extends Throwable> R apply(final FailableFunction<T, R, E> function, final T input) { return get(() -> function.apply(input)); }
Applies a function and rethrows any exception as a {@link RuntimeException}. @param function the function to apply @param input the input to apply {@code function} on @param <T> the type of the argument the function accepts @param <R> the return type of the function @param <E> the type of checked exception the function may throw @return the value returned from the function
java
src/main/java/org/apache/commons/lang3/function/Failable.java
161
[ "function", "input" ]
R
true
1
6.48
apache/commons-lang
2,896
javadoc
false
all
public KafkaFuture<Map<String, ClassicGroupDescription>> all() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0])).thenApply( nil -> { Map<String, ClassicGroupDescription> descriptions = new HashMap<>(futures.size()); futures.forEach((key, future) -> { try { descriptions.put(key, future.get()); } catch (InterruptedException | ExecutionException e) { // This should be unreachable, since the KafkaFuture#allOf already ensured // that all of the futures completed successfully. throw new RuntimeException(e); } }); return descriptions; }); }
Return a future which yields all ClassicGroupDescription objects, if all the describes succeed.
java
clients/src/main/java/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.java
48
[]
true
2
6.4
apache/kafka
31,560
javadoc
false
poll
public boolean poll(RequestFuture<?> future, Timer timer, boolean disableWakeup) { do { poll(timer, future, disableWakeup); } while (!future.isDone() && timer.notExpired()); return future.isDone(); }
Block until the provided request future request has finished or the timeout has expired. @param future The request future to wait for @param timer Timer bounding how long this method can block @param disableWakeup true if we should not check for wakeups, false otherwise @return true if the future is done, false otherwise @throws WakeupException if {@link #wakeup()} is called from another thread and `disableWakeup` is false @throws InterruptException if the calling thread is interrupted
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
230
[ "future", "timer", "disableWakeup" ]
true
2
7.6
apache/kafka
31,560
javadoc
false
asBiFunction
@SuppressWarnings("unchecked") public static <T, U, R> BiFunction<T, U, R> asBiFunction(final Method method) { return asInterfaceInstance(BiFunction.class, method); }
Produces a {@link BiFunction} for a given a <em>function</em> Method. You call the BiFunction with two arguments: (1) the object receiving the method call, and (2) the method argument. The BiFunction return type must match the method's return type. <p> For example to invoke {@link String#charAt(int)}: </p> <pre>{@code final Method method = String.class.getMethod("charAt", int.class); final BiFunction<String, Integer, Character> function = MethodInvokers.asBiFunction(method); assertEquals('C', function.apply("ABC", 2)); }</pre> @param <T> the type of the first argument to the function: The type containing the method. @param <U> the type of the second argument to the function: the method argument type. @param <R> the type of the result of the function: The method return type. @param method the method to invoke. @return a correctly-typed wrapper for the given target.
java
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
108
[ "method" ]
true
1
6.32
apache/commons-lang
2,896
javadoc
false
weighted_mode
def weighted_mode(a, w, *, axis=0): """Return an array of the weighted modal (most common) value in the passed array. If there is more than one such value, only the first is returned. The bin-count for the modal bins is also returned. This is an extension of the algorithm in scipy.stats.mode. Parameters ---------- a : array-like of shape (n_samples,) Array of which values to find mode(s). w : array-like of shape (n_samples,) Array of weights for each value. axis : int, default=0 Axis along which to operate. Default is 0, i.e. the first axis. Returns ------- vals : ndarray Array of modal values. score : ndarray Array of weighted counts for each mode. See Also -------- scipy.stats.mode: Calculates the Modal (most common) value of array elements along specified axis. Examples -------- >>> from sklearn.utils.extmath import weighted_mode >>> x = [4, 1, 4, 2, 4, 2] >>> weights = [1, 1, 1, 1, 1, 1] >>> weighted_mode(x, weights) (array([4.]), array([3.])) The value 4 appears three times: with uniform weights, the result is simply the mode of the distribution. >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's >>> weighted_mode(x, weights) (array([2.]), array([3.5])) The value 2 has the highest score: it appears twice with weights of 1.5 and 2: the sum of these is 3.5. """ if axis is None: a = np.ravel(a) w = np.ravel(w) axis = 0 else: a = np.asarray(a) w = np.asarray(w) if a.shape != w.shape: w = np.full(a.shape, w, dtype=w.dtype) scores = np.unique(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape) oldcounts = np.zeros(testshape) for score in scores: template = np.zeros(a.shape) ind = a == score template[ind] = w[ind] counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return mostfrequent, oldcounts
Return an array of the weighted modal (most common) value in the passed array. If there is more than one such value, only the first is returned. The bin-count for the modal bins is also returned. This is an extension of the algorithm in scipy.stats.mode. Parameters ---------- a : array-like of shape (n_samples,) Array of which values to find mode(s). w : array-like of shape (n_samples,) Array of weights for each value. axis : int, default=0 Axis along which to operate. Default is 0, i.e. the first axis. Returns ------- vals : ndarray Array of modal values. score : ndarray Array of weighted counts for each mode. See Also -------- scipy.stats.mode: Calculates the Modal (most common) value of array elements along specified axis. Examples -------- >>> from sklearn.utils.extmath import weighted_mode >>> x = [4, 1, 4, 2, 4, 2] >>> weights = [1, 1, 1, 1, 1, 1] >>> weighted_mode(x, weights) (array([4.]), array([3.])) The value 4 appears three times: with uniform weights, the result is simply the mode of the distribution. >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's >>> weighted_mode(x, weights) (array([2.]), array([3.5])) The value 2 has the highest score: it appears twice with weights of 1.5 and 2: the sum of these is 3.5.
python
sklearn/utils/extmath.py
787
[ "a", "w", "axis" ]
false
5
7.6
scikit-learn/scikit-learn
64,340
numpy
false
reset
public StrTokenizer reset(final char[] input) { reset(); this.chars = ArrayUtils.clone(input); return this; }
Reset this tokenizer, giving it a new input string to parse. In this manner you can re-use a tokenizer with the same settings on multiple input lines. @param input the new character array to tokenize, not cloned, null sets no text to parse. @return {@code this} instance.
java
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
870
[ "input" ]
StrTokenizer
true
1
7.04
apache/commons-lang
2,896
javadoc
false
hasPath
function hasPath(object, path, hasFunc) { path = castPath(path, object); var index = -1, length = path.length, result = false; while (++index < length) { var key = toKey(path[index]); if (!(result = object != null && hasFunc(object, key))) { break; } object = object[key]; } if (result || ++index != length) { return result; } length = object == null ? 0 : object.length; return !!length && isLength(length) && isIndex(key, length) && (isArray(object) || isArguments(object)); }
Checks if `path` exists on `object`. @private @param {Object} object The object to query. @param {Array|string} path The path to check. @param {Function} hasFunc The function to check properties. @returns {boolean} Returns `true` if `path` exists, else `false`.
javascript
lodash.js
6,222
[ "object", "path", "hasFunc" ]
false
11
6.08
lodash/lodash
61,490
jsdoc
false
stop
def stop(self) -> int: """ The value of the `stop` parameter. This property returns the `stop` value of the RangeIndex, which defines the upper (or lower, in case of negative steps) bound of the index range. The `stop` value is exclusive, meaning the RangeIndex includes values up to but not including this value. See Also -------- RangeIndex : Immutable index representing a range of integers. RangeIndex.start : The start value of the RangeIndex. RangeIndex.step : The step size between elements in the RangeIndex. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.stop 5 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.stop -10 """ return self._range.stop
The value of the `stop` parameter. This property returns the `stop` value of the RangeIndex, which defines the upper (or lower, in case of negative steps) bound of the index range. The `stop` value is exclusive, meaning the RangeIndex includes values up to but not including this value. See Also -------- RangeIndex : Immutable index representing a range of integers. RangeIndex.start : The start value of the RangeIndex. RangeIndex.step : The step size between elements in the RangeIndex. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.stop 5 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.stop -10
python
pandas/core/indexes/range.py
344
[ "self" ]
int
true
1
7.28
pandas-dev/pandas
47,362
unknown
false
apply
R apply(T t, U u, V v);
Applies this function to the given arguments. @param t the first function argument @param u the second function argument @param v the third function argument @return the function result
java
src/main/java/org/apache/commons/lang3/function/TriFunction.java
64
[ "t", "u", "v" ]
R
true
1
6.64
apache/commons-lang
2,896
javadoc
false
check_axis_name
def check_axis_name(name: str) -> bool: """Check if the name is a valid axis name. Args: name (str): the axis name to check Returns: bool: whether the axis name is valid """ is_valid, _ = ParsedExpression.check_axis_name_return_reason(name) return is_valid
Check if the name is a valid axis name. Args: name (str): the axis name to check Returns: bool: whether the axis name is valid
python
functorch/einops/_parsing.py
199
[ "name" ]
bool
true
1
6.56
pytorch/pytorch
96,034
google
false
getGenericArgumentValue
public @Nullable ValueHolder getGenericArgumentValue(@Nullable Class<?> requiredType, @Nullable String requiredName, @Nullable Set<ValueHolder> usedValueHolders) { for (ValueHolder valueHolder : this.genericArgumentValues) { if (usedValueHolders != null && usedValueHolders.contains(valueHolder)) { continue; } if (valueHolder.getName() != null && (requiredName == null || (!requiredName.isEmpty() && !requiredName.equals(valueHolder.getName())))) { continue; } if (valueHolder.getType() != null && (requiredType == null || !ClassUtils.matchesTypeName(requiredType, valueHolder.getType()))) { continue; } if (requiredType != null && valueHolder.getType() == null && valueHolder.getName() == null && !ClassUtils.isAssignableValue(requiredType, valueHolder.getValue())) { continue; } return valueHolder; } return null; }
Look for the next generic argument value that matches the given type, ignoring argument values that have already been used in the current resolution process. @param requiredType the type to match (can be {@code null} to find an arbitrary next generic argument value) @param requiredName the name to match (can be {@code null} to not match argument values by name, or empty String to match any name) @param usedValueHolders a Set of ValueHolder objects that have already been used in the current resolution process and should therefore not be returned again @return the ValueHolder for the argument, or {@code null} if none found
java
spring-beans/src/main/java/org/springframework/beans/factory/config/ConstructorArgumentValues.java
274
[ "requiredType", "requiredName", "usedValueHolders" ]
ValueHolder
true
14
7.6
spring-projects/spring-framework
59,386
javadoc
false
getDatabase
@Nullable static Database getDatabase(final String databaseType) { Database database = null; if (Strings.hasText(databaseType)) { final String databaseTypeLowerCase = databaseType.toLowerCase(Locale.ROOT); if (databaseTypeLowerCase.startsWith(IPINFO_PREFIX)) { database = getIpinfoDatabase(databaseTypeLowerCase); // all lower case! } else { // for historical reasons, fall back to assuming maxmind-like type parsing database = getMaxmindDatabase(databaseType); } } return database; }
Parses the passed-in databaseType and return the Database instance that is associated with that databaseType. @param databaseType the database type String from the metadata of the database file @return the Database instance that is associated with the databaseType (or null)
java
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IpDataLookupFactories.java
43
[ "databaseType" ]
Database
true
3
7.92
elastic/elasticsearch
75,680
javadoc
false
createScopedProxy
public static BeanDefinitionHolder createScopedProxy(BeanDefinitionHolder definition, BeanDefinitionRegistry registry, boolean proxyTargetClass) { String originalBeanName = definition.getBeanName(); BeanDefinition targetDefinition = definition.getBeanDefinition(); String targetBeanName = getTargetBeanName(originalBeanName); // Create a scoped proxy definition for the original bean name, // "hiding" the target bean in an internal target definition. RootBeanDefinition proxyDefinition = new RootBeanDefinition(ScopedProxyFactoryBean.class); proxyDefinition.setDecoratedDefinition(new BeanDefinitionHolder(targetDefinition, targetBeanName)); proxyDefinition.setOriginatingBeanDefinition(targetDefinition); proxyDefinition.setSource(definition.getSource()); proxyDefinition.setRole(targetDefinition.getRole()); proxyDefinition.getPropertyValues().add("targetBeanName", targetBeanName); if (proxyTargetClass) { targetDefinition.setAttribute(AutoProxyUtils.PRESERVE_TARGET_CLASS_ATTRIBUTE, Boolean.TRUE); // ScopedProxyFactoryBean's "proxyTargetClass" default is TRUE, so we don't need to set it explicitly here. } else { proxyDefinition.getPropertyValues().add("proxyTargetClass", Boolean.FALSE); } // Copy autowire settings from original bean definition. proxyDefinition.setAutowireCandidate(targetDefinition.isAutowireCandidate()); proxyDefinition.setPrimary(targetDefinition.isPrimary()); proxyDefinition.setFallback(targetDefinition.isFallback()); if (targetDefinition instanceof AbstractBeanDefinition abd) { proxyDefinition.setDefaultCandidate(abd.isDefaultCandidate()); proxyDefinition.copyQualifiersFrom(abd); } // The target bean should be ignored in favor of the scoped proxy. targetDefinition.setAutowireCandidate(false); targetDefinition.setPrimary(false); targetDefinition.setFallback(false); if (targetDefinition instanceof AbstractBeanDefinition abd) { abd.setDefaultCandidate(false); } // Register the target bean as separate bean in the factory. 
registry.registerBeanDefinition(targetBeanName, targetDefinition); // Return the scoped proxy definition as primary bean definition // (potentially an inner bean). return new BeanDefinitionHolder(proxyDefinition, originalBeanName, definition.getAliases()); }
Generate a scoped proxy for the supplied target bean, registering the target bean with an internal name and setting 'targetBeanName' on the scoped proxy. @param definition the original bean definition @param registry the bean definition registry @param proxyTargetClass whether to create a target class proxy @return the scoped proxy definition @see #getTargetBeanName(String) @see #getOriginalBeanName(String)
java
spring-aop/src/main/java/org/springframework/aop/scope/ScopedProxyUtils.java
58
[ "definition", "registry", "proxyTargetClass" ]
BeanDefinitionHolder
true
4
7.44
spring-projects/spring-framework
59,386
javadoc
false
h3ToFaceIjk
public static FaceIJK h3ToFaceIjk(long h3) { int baseCell = H3Index.H3_get_base_cell(h3); if (baseCell < 0 || baseCell >= Constants.NUM_BASE_CELLS) { // LCOV_EXCL_BR_LINE // Base cells less than zero can not be represented in an index // To prevent reading uninitialized memory, we zero the output. throw new IllegalArgumentException(); } // adjust for the pentagonal missing sequence; all of sub-sequence 5 needs // to be adjusted (and some of sub-sequence 4 below) if (BaseCells.isBaseCellPentagon(baseCell) && h3LeadingNonZeroDigit(h3) == 5) { h3 = h3Rotate60cw(h3); } // start with the "home" face and ijk+ coordinates for the base cell of c FaceIJK fijk = BaseCells.getBaseFaceIJK(baseCell); if (h3ToFaceIjkWithInitializedFijk(h3, fijk) == false) { return fijk; // no overage is possible; h lies on this face } // if we're here we have the potential for an "overage"; i.e., it is // possible that c lies on an adjacent face int origI = fijk.coord.i; int origJ = fijk.coord.j; int origK = fijk.coord.k; // if we're in Class III, drop into the next finer Class II grid int res = H3Index.H3_get_resolution(h3); if (isResolutionClassIII(res)) { // Class III fijk.coord.downAp7r(); res++; } // adjust for overage if needed // a pentagon base cell with a leading 4 digit requires special handling boolean pentLeading4 = (BaseCells.isBaseCellPentagon(baseCell) && h3LeadingNonZeroDigit(h3) == 4); if (fijk.adjustOverageClassII(res, pentLeading4, false) != FaceIJK.Overage.NO_OVERAGE) { // if the base cell is a pentagon we have the potential for secondary // overages if (BaseCells.isBaseCellPentagon(baseCell)) { FaceIJK.Overage overage; do { overage = fijk.adjustOverageClassII(res, false, false); } while (overage != FaceIJK.Overage.NO_OVERAGE); } if (res != H3Index.H3_get_resolution(h3)) { fijk.coord.upAp7r(); } } else if (res != H3Index.H3_get_resolution(h3)) { fijk.coord.reset(origI, origJ, origK); } return fijk; }
Convert an H3Index to a FaceIJK address. @param h3 The H3Index.
java
libs/h3/src/main/java/org/elasticsearch/h3/H3Index.java
189
[ "h3" ]
FaceIJK
true
12
6.88
elastic/elasticsearch
75,680
javadoc
false
Future
Future(Future&& other) noexcept : cs_(std::move(other.cs_)), state_(std::exchange(other.state_, nullptr)), ct_(std::move(other.ct_)), hasCancelTokenOverride_( std::exchange(other.hasCancelTokenOverride_, false)) {}
Construct an empty Future. This object is not valid use until you initialize it with move assignment.
cpp
folly/coro/Promise.h
225
[ "other" ]
true
2
6.88
facebook/folly
30,157
doxygen
false
SuspendedByGroup
function SuspendedByGroup({ bridge, element, inspectedElement, store, name, environment, suspendedBy, minTime, maxTime, }: GroupProps) { const [isOpen, setIsOpen] = useState(false); let start = Infinity; let end = -Infinity; let isRejected = false; for (let i = 0; i < suspendedBy.length; i++) { const asyncInfo: SerializedAsyncInfo = suspendedBy[i].value; const ioInfo = asyncInfo.awaited; if (ioInfo.start < start) { start = ioInfo.start; } if (ioInfo.end > end) { end = ioInfo.end; } const value: any = ioInfo.value; if ( value !== null && typeof value === 'object' && value[meta.name] === 'rejected Thenable' ) { isRejected = true; } } const timeScale = 100 / (maxTime - minTime); let left = (start - minTime) * timeScale; let width = (end - start) * timeScale; if (width < 5) { // Use at least a 5% width to avoid showing too small indicators. width = 5; if (left > 95) { left = 95; } } const pluralizedName = pluralize(name); return ( <div className={styles.CollapsableRow}> <Button className={styles.CollapsableHeader} onClick={() => { setIsOpen(prevIsOpen => !prevIsOpen); }} title={pluralizedName}> <ButtonIcon className={styles.CollapsableHeaderIcon} type={isOpen ? 'expanded' : 'collapsed'} /> <span className={styles.CollapsableHeaderTitle}>{pluralizedName}</span> <div className={styles.CollapsableHeaderFiller} /> {isOpen ? null : ( <div className={ styles.TimeBarContainer + ' ' + getClassNameForEnvironment(environment) }> <div className={ !isRejected ? styles.TimeBarSpan : styles.TimeBarSpanErrored } style={{ left: left.toFixed(2) + '%', width: width.toFixed(2) + '%', }} /> </div> )} </Button> {isOpen && suspendedBy.map(({value, index}) => ( <SuspendedByRow key={index} index={index} asyncInfo={value} bridge={bridge} element={element} inspectedElement={inspectedElement} store={store} minTime={minTime} maxTime={maxTime} skipName={true} /> ))} </div> ); }
Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. @flow
javascript
packages/react-devtools-shared/src/devtools/views/Components/InspectedElementSuspendedBy.js
360
[]
false
13
6.32
facebook/react
241,750
jsdoc
false
maybeFailWithError
private void maybeFailWithError() { if (!hasError()) { return; } // for ProducerFencedException, do not wrap it as a KafkaException // but create a new instance without the call trace since it was not thrown because of the current call if (lastError instanceof ProducerFencedException) { throw new ProducerFencedException("Producer with transactionalId '" + transactionalId + "' and " + producerIdAndEpoch + " has been fenced by another producer " + "with the same transactionalId"); } if (lastError instanceof InvalidProducerEpochException) { throw new InvalidProducerEpochException("Producer with transactionalId '" + transactionalId + "' and " + producerIdAndEpoch + " attempted to produce with an old epoch"); } if (lastError instanceof IllegalStateException) { throw new IllegalStateException("Producer with transactionalId '" + transactionalId + "' and " + producerIdAndEpoch + " cannot execute transactional method because of previous invalid state transition attempt", lastError); } throw new KafkaException("Cannot execute transactional method because we are in an error state", lastError); }
If the transaction manager is in an error state, rethrow the last recorded error: ProducerFencedException, InvalidProducerEpochException and IllegalStateException are rethrown as fresh instances with contextual messages (transactionalId and producer id/epoch), and any other error is wrapped in a KafkaException. Returns without effect when no error is recorded.
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
1,154
[]
void
true
5
8.4
apache/kafka
31,560
javadoc
false
sensor
public synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents) { return this.sensor(name, config, Sensor.RecordingLevel.INFO, parents); }
Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will receive every value recorded with this sensor. This uses a default recording level of INFO. @param name The name of the sensor @param config A default configuration to use for this sensor for metrics that don't have their own config @param parents The parent sensors @return The sensor that is created
java
clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
372
[ "name", "config" ]
Sensor
true
1
6.96
apache/kafka
31,560
javadoc
false
parse
public Value parse(XContentParser parser, Value value, Context context) throws IOException { XContentParser.Token token; if (parser.currentToken() != XContentParser.Token.START_OBJECT) { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { throwExpectedStartObject(parser, token); } } final List<String[]> requiredFields = this.requiredFieldSets.isEmpty() ? null : new ArrayList<>(this.requiredFieldSets); final List<List<String>> exclusiveFields; if (exclusiveFieldSets.isEmpty()) { exclusiveFields = null; } else { exclusiveFields = new ArrayList<>(); for (int i = 0; i < this.exclusiveFieldSets.size(); i++) { exclusiveFields.add(new ArrayList<>()); } } FieldParser fieldParser; String currentFieldName; XContentLocation currentPosition; final Map<String, FieldParser> parsers = fieldParserMap.getOrDefault(parser.getRestApiVersion(), Collections.emptyMap()); while ((currentFieldName = parser.nextFieldName()) != null) { currentPosition = parser.getTokenLocation(); fieldParser = parsers.get(currentFieldName); token = parser.nextToken(); if (fieldParser == null) { unknownFieldParser.acceptUnknownField(this, currentFieldName, currentPosition, parser, value, context); } else { fieldParser.assertSupports(name, parser, token, currentFieldName); if (requiredFields != null) { // Check to see if this field is a required field, if it is we can // remove the entry as the requirement is satisfied maybeMarkRequiredField(currentFieldName, requiredFields); } if (exclusiveFields != null) { // Check if this field is in an exclusive set, if it is then mark // it as seen. 
maybeMarkExclusiveField(currentFieldName, exclusiveFields); } parseSub(parser, fieldParser, token, currentFieldName, value, context); } } // Check for a) multiple entries appearing in exclusive field sets and b) empty required field entries if (exclusiveFields != null) { ensureExclusiveFields(exclusiveFields); } if (requiredFields != null && requiredFields.isEmpty() == false) { throwMissingRequiredFields(requiredFields); } return value; }
Parses a Value from the given {@link XContentParser} @param parser the parser to build a value from @param value the value to fill from the parser @param context a context that is passed along to all declared field parsers @return the parsed value @throws IOException if an IOException occurs.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
271
[ "parser", "value", "context" ]
Value
true
13
7.68
elastic/elasticsearch
75,680
javadoc
false
normalizeUpperBounds
public static Type[] normalizeUpperBounds(final Type[] bounds) { Objects.requireNonNull(bounds, "bounds"); // don't bother if there's only one (or none) type if (bounds.length < 2) { return bounds; } final Set<Type> types = new HashSet<>(bounds.length); for (final Type type1 : bounds) { boolean subtypeFound = false; for (final Type type2 : bounds) { if (type1 != type2 && isAssignable(type2, type1, null)) { subtypeFound = true; break; } } if (!subtypeFound) { types.add(type1); } } return types.toArray(ArrayUtils.EMPTY_TYPE_ARRAY); }
Strips out the redundant upper bound types in type variable types and wildcard types (or it would with wildcard types if multiple upper bounds were allowed). <p> Example, with the variable type declaration: </p> <pre>{@code <K extends java.util.Collection<String> & java.util.List<String>> }</pre> <p> since {@link List} is a subinterface of {@link Collection}, this method will return the bounds as if the declaration had been: </p> <pre>{@code <K extends java.util.List<String>> }</pre> @param bounds an array of types representing the upper bounds of either {@link WildcardType} or {@link TypeVariable}, not {@code null}. @return an array containing the values from {@code bounds} minus the redundant types. @throws NullPointerException if {@code bounds} is {@code null}.
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
1,346
[ "bounds" ]
true
5
7.76
apache/commons-lang
2,896
javadoc
false
lazyModule
function lazyModule() { return $Module ??= require('internal/modules/cjs/loader').Module; }
Import the Module class on first use. @returns {object}
javascript
lib/internal/modules/helpers.js
135
[]
false
1
6.16
nodejs/node
114,839
jsdoc
false
checkAndGetCoordinator
protected synchronized Node checkAndGetCoordinator() { if (coordinator != null && client.isUnavailable(coordinator)) { markCoordinatorUnknown(true, "coordinator unavailable"); return null; } return this.coordinator; }
Get the coordinator if its connection is still active. Otherwise mark it unknown and return null. @return the current coordinator or null if it is unknown
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
983
[]
Node
true
3
8.08
apache/kafka
31,560
javadoc
false
mapToIndexScaleZero
private static long mapToIndexScaleZero(double value) { long rawBits = Double.doubleToLongBits(value); long rawExponent = (rawBits & EXPONENT_BIT_MASK) >> SIGNIFICAND_WIDTH; long rawSignificand = rawBits & SIGNIFICAND_BIT_MASK; if (rawExponent == 0) { rawExponent -= Long.numberOfLeadingZeros(rawSignificand - 1) - EXPONENT_WIDTH - 1; } int ieeeExponent = (int) (rawExponent - EXPONENT_BIAS); if (rawSignificand == 0) { return ieeeExponent - 1; } return ieeeExponent; }
Compute the exact bucket index for scale zero by extracting the exponent. @see <a href="https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#scale-zero-extract-the-exponent">Scale Zero: Extract the Exponent</a>
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/Base2ExponentialHistogramIndexer.java
93
[ "value" ]
true
3
6.4
elastic/elasticsearch
75,680
javadoc
false
updatePartitionLeadership
public synchronized Set<TopicPartition> updatePartitionLeadership(Map<TopicPartition, LeaderIdAndEpoch> partitionLeaders, List<Node> leaderNodes) { Map<Integer, Node> newNodes = leaderNodes.stream().collect(Collectors.toMap(Node::id, node -> node)); // Insert non-overlapping nodes from existing-nodes into new-nodes. this.metadataSnapshot.cluster().nodes().forEach(node -> newNodes.putIfAbsent(node.id(), node)); // Create partition-metadata for all updated partitions. Exclude updates for partitions - // 1. for which the corresponding partition has newer leader in existing metadata. // 2. for which corresponding leader's node is missing in the new-nodes. // 3. for which the existing metadata doesn't know about the partition. List<PartitionMetadata> updatePartitionMetadata = new ArrayList<>(); for (Entry<TopicPartition, Metadata.LeaderIdAndEpoch> partitionLeader: partitionLeaders.entrySet()) { TopicPartition partition = partitionLeader.getKey(); Metadata.LeaderAndEpoch currentLeader = currentLeader(partition); Metadata.LeaderIdAndEpoch newLeader = partitionLeader.getValue(); if (newLeader.epoch.isEmpty() || newLeader.leaderId.isEmpty()) { log.debug("For {}, incoming leader information is incomplete {}", partition, newLeader); continue; } if (currentLeader.epoch.isPresent() && newLeader.epoch.get() <= currentLeader.epoch.get()) { log.debug("For {}, incoming leader({}) is not-newer than the one in the existing metadata {}, so ignoring.", partition, newLeader, currentLeader); continue; } if (!newNodes.containsKey(newLeader.leaderId.get())) { log.debug("For {}, incoming leader({}), the corresponding node information for node-id {} is missing, so ignoring.", partition, newLeader, newLeader.leaderId.get()); continue; } if (this.metadataSnapshot.partitionMetadata(partition).isEmpty()) { log.debug("For {}, incoming leader({}), partition metadata is no longer cached, ignoring.", partition, newLeader); continue; } MetadataResponse.PartitionMetadata existingMetadata = 
this.metadataSnapshot.partitionMetadata(partition).get(); MetadataResponse.PartitionMetadata updatedMetadata = new MetadataResponse.PartitionMetadata( existingMetadata.error, partition, newLeader.leaderId, newLeader.epoch, existingMetadata.replicaIds, existingMetadata.inSyncReplicaIds, existingMetadata.offlineReplicaIds ); updatePartitionMetadata.add(updatedMetadata); lastSeenLeaderEpochs.put(partition, newLeader.epoch.get()); } if (updatePartitionMetadata.isEmpty()) { log.debug("No relevant metadata updates."); return new HashSet<>(); } Set<String> updatedTopics = updatePartitionMetadata.stream().map(MetadataResponse.PartitionMetadata::topic).collect(Collectors.toSet()); // Get topic-ids for updated topics from existing topic-ids. Map<String, Uuid> existingTopicIds = this.metadataSnapshot.topicIds(); Map<String, Uuid> topicIdsForUpdatedTopics = updatedTopics.stream() .filter(existingTopicIds::containsKey) .collect(Collectors.toMap(e -> e, existingTopicIds::get)); if (log.isDebugEnabled()) { updatePartitionMetadata.forEach( partMetadata -> log.debug("For {} updating leader information, updated metadata is {}.", partMetadata.topicPartition, partMetadata) ); } // Fetch responses can include partition level leader changes, when this happens, we perform a partial // metadata update, by keeping the unchanged partition and update the changed partitions. this.metadataSnapshot = metadataSnapshot.mergeWith( metadataSnapshot.clusterResource().clusterId(), newNodes, updatePartitionMetadata, Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), metadataSnapshot.cluster().controller(), topicIdsForUpdatedTopics, (topic, isInternal) -> true); clusterResourceListeners.onUpdate(metadataSnapshot.clusterResource()); return updatePartitionMetadata.stream() .map(metadata -> metadata.topicPartition) .collect(Collectors.toSet()); }
Updates the partition-leadership info in the metadata. Update is done by merging existing metadata with the input leader information and nodes. This is called whenever partition-leadership updates are returned in a response from broker(ex - ProduceResponse & FetchResponse). Note that the updates via Metadata RPC are handled separately in ({@link #update}). Both partitionLeader and leaderNodes override the existing metadata. Non-overlapping metadata is kept as it is. @param partitionLeaders map of new leadership information for partitions. @param leaderNodes a list of nodes for leaders in the above map. @return a set of partitions, for which leaders were updated.
java
clients/src/main/java/org/apache/kafka/clients/Metadata.java
381
[ "partitionLeaders", "leaderNodes" ]
true
9
7.84
apache/kafka
31,560
javadoc
false
process
private void process(final StreamsOnTasksRevokedCallbackCompletedEvent event) { if (requestManagers.streamsMembershipManager.isEmpty()) { log.warn("An internal error occurred; the Streams membership manager was not present, so the notification " + "of the onTasksRevoked callback execution could not be sent"); return; } requestManagers.streamsMembershipManager.get().onTasksRevokedCallbackCompleted(event); }
Process event indicating whether the AcknowledgeCommitCallbackHandler is configured by the user. @param event Event containing a boolean to indicate if the callback handler is configured or not.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
687
[ "event" ]
void
true
2
6.08
apache/kafka
31,560
javadoc
false
delete_any_fargate_profiles
def delete_any_fargate_profiles(self) -> None: """ Delete all EKS Fargate profiles for a provided Amazon EKS Cluster. EKS Fargate profiles must be deleted one at a time, so we must wait for one to be deleted before sending the next delete command. """ fargate_profiles = self.hook.list_fargate_profiles(clusterName=self.cluster_name) if fargate_profiles: self.log.info(CAN_NOT_DELETE_MSG.format(compute=FARGATE_FULL_NAME, count=len(fargate_profiles))) self.log.info("Waiting for Fargate profiles to delete. This will take some time.") for profile in fargate_profiles: # The API will return a (cluster) ResourceInUseException if you try # to delete Fargate profiles in parallel the way we can with nodegroups, # so each must be deleted sequentially self.hook.delete_fargate_profile(clusterName=self.cluster_name, fargateProfileName=profile) self.hook.conn.get_waiter("fargate_profile_deleted").wait( clusterName=self.cluster_name, fargateProfileName=profile ) self.log.info(SUCCESS_MSG.format(compute=FARGATE_FULL_NAME))
Delete all EKS Fargate profiles for a provided Amazon EKS Cluster. EKS Fargate profiles must be deleted one at a time, so we must wait for one to be deleted before sending the next delete command.
python
providers/amazon/src/airflow/providers/amazon/aws/operators/eks.py
781
[ "self" ]
None
true
3
6
apache/airflow
43,597
unknown
false
empty
static ExponentialHistogram empty() { return EmptyExponentialHistogram.INSTANCE; }
Returns the shared, immutable empty histogram. @return the singleton {@link EmptyExponentialHistogram} instance.
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogram.java
219
[]
ExponentialHistogram
true
1
6
elastic/elasticsearch
75,680
javadoc
false
md5
def md5(string: ReadableBuffer = b"", /) -> hashlib._Hash: """ Safely allows calling the ``hashlib.md5`` function when ``usedforsecurity`` is disabled in configuration. :param string: The data to hash. Default to empty str byte. :return: The hashed value. """ return hashlib.md5(string, usedforsecurity=False)
Safely allows calling the ``hashlib.md5`` function when ``usedforsecurity`` is disabled in configuration. :param string: The data to hash. Default to empty str byte. :return: The hashed value.
python
airflow-core/src/airflow/utils/hashlib_wrapper.py
27
[ "string" ]
hashlib._Hash
true
1
6.72
apache/airflow
43,597
sphinx
false
patternKeyCompare
function patternKeyCompare(a, b) { const aPatternIndex = StringPrototypeIndexOf(a, '*'); const bPatternIndex = StringPrototypeIndexOf(b, '*'); const baseLenA = aPatternIndex === -1 ? a.length : aPatternIndex + 1; const baseLenB = bPatternIndex === -1 ? b.length : bPatternIndex + 1; if (baseLenA > baseLenB) { return -1; } if (baseLenB > baseLenA) { return 1; } if (aPatternIndex === -1) { return 1; } if (bPatternIndex === -1) { return -1; } if (a.length > b.length) { return -1; } if (b.length > a.length) { return 1; } return 0; }
Compares two strings that may contain a wildcard character ('*') and returns a value indicating their order. @param {string} a - The first string to compare. @param {string} b - The second string to compare. @returns {number} - A negative number if `a` should come before `b`, a positive number if `a` should come after `b`, or 0 if they are equal.
javascript
lib/internal/modules/esm/resolve.js
671
[ "a", "b" ]
false
9
6.08
nodejs/node
114,839
jsdoc
false
compression
@Override public double compression() { return compression; }
@return the compression factor configured for this digest.
java
libs/tdigest/src/main/java/org/elasticsearch/tdigest/AVLTreeDigest.java
367
[]
true
1
6.96
elastic/elasticsearch
75,680
javadoc
false
quantile
def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * (x-i)/(j-i)`, where `(x-i)/(j-i)` is the fractional part of the index surrounded by `i > j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(0.5) 2.5 >>> s.quantile([0.25, 0.5, 0.75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0]
Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * (x-i)/(j-i)`, where `(x-i)/(j-i)` is the fractional part of the index surrounded by `i > j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(0.5) 2.5 >>> s.quantile([0.25, 0.5, 0.75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64
python
pandas/core/series.py
2,608
[ "self", "q", "interpolation" ]
float | Series
true
4
8.56
pandas-dev/pandas
47,362
numpy
false
removeDebugFlags
private String[] removeDebugFlags(String[] args) { List<String> rtn = new ArrayList<>(args.length); boolean appArgsDetected = false; for (String arg : args) { // Allow apps to have a --debug argument appArgsDetected |= "--".equals(arg); if ("--debug".equals(arg) && !appArgsDetected) { continue; } rtn.add(arg); } return StringUtils.toStringArray(rtn); }
Removes any {@code --debug} flag that occurs before the {@code --} separator, so that applications can still receive a {@code --debug} argument of their own. @param args the input arguments @return the arguments with the CLI debug flag removed
java
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/CommandRunner.java
189
[ "args" ]
true
3
8.4
spring-projects/spring-boot
79,428
javadoc
false
equals
@Override public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; FileAccessTree that = (FileAccessTree) o; return Objects.deepEquals(readPaths, that.readPaths) && Objects.deepEquals(writePaths, that.writePaths); }
Two {@code FileAccessTree} instances are equal when their read paths and write paths are deeply equal. @param o the object to compare with @return {@code true} if {@code o} is a {@code FileAccessTree} with equal read and write paths
java
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileAccessTree.java
393
[ "o" ]
true
4
7.04
elastic/elasticsearch
75,680
javadoc
false