function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
_arg_trim_zeros
def _arg_trim_zeros(filt): """Return indices of the first and last non-zero element. Parameters ---------- filt : array_like Input array. Returns ------- start, stop : ndarray Two arrays containing the indices of the first and last non-zero element in each dimension. See also -------- trim_zeros Examples -------- >>> import numpy as np >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) (array([2]), array([3])) """ nonzero = ( np.argwhere(filt) if filt.dtype != np.object_ # Historically, `trim_zeros` treats `None` in an object array # as non-zero while argwhere doesn't, account for that else np.argwhere(filt != 0) ) if nonzero.size == 0: start = stop = np.array([], dtype=np.intp) else: start = nonzero.min(axis=0) stop = nonzero.max(axis=0) return start, stop
Return indices of the first and last non-zero element. Parameters ---------- filt : array_like Input array. Returns ------- start, stop : ndarray Two arrays containing the indices of the first and last non-zero element in each dimension. See also -------- trim_zeros Examples -------- >>> import numpy as np >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) (array([2]), array([3]))
python
numpy/lib/_function_base_impl.py
1,897
[ "filt" ]
false
4
7.68
numpy/numpy
31,054
numpy
false
replaceAll
/**
 * Replaces every match of {@code matcher} within the builder with {@code replaceStr}.
 *
 * @param matcher the matcher to use to find the deletion, null causes no action
 * @param replaceStr the replace string, null is equivalent to an empty string
 * @return {@code this} instance, to allow chaining
 */
public StrBuilder replaceAll(final StrMatcher matcher, final String replaceStr) {
    // Delegate to the general replace implementation: scan from index 0 to the
    // current size; -1 means "no limit on the number of replacements".
    return replace(matcher, replaceStr, 0, size, -1);
}
Replaces all matches within the builder with the replace string. <p> Matchers can be used to perform advanced replace behavior. For example you could write a matcher to replace all occurrences where the character 'a' is followed by a number. </p> @param matcher the matcher to use to find the deletion, null causes no action @param replaceStr the replace string, null is equivalent to an empty string @return {@code this} instance.
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
2,615
[ "matcher", "replaceStr" ]
StrBuilder
true
1
6.64
apache/commons-lang
2,896
javadoc
false
_replace_with_mask
def _replace_with_mask(
    cls,
    values: pa.Array | pa.ChunkedArray,
    mask: npt.NDArray[np.bool_] | bool,
    replacements: ArrayLike | Scalar,
) -> pa.Array | pa.ChunkedArray:
    """
    Replace items selected with a mask.

    Analogous to pyarrow.compute.replace_with_mask, with logic
    to fallback to numpy for unsupported types.

    Parameters
    ----------
    values : pa.Array or pa.ChunkedArray
    mask : npt.NDArray[np.bool_] or bool
    replacements : ArrayLike or Scalar
        Replacement value(s)

    Returns
    -------
    pa.Array or pa.ChunkedArray
    """
    if isinstance(replacements, pa.ChunkedArray):
        # replacements must be array or scalar, not ChunkedArray
        replacements = replacements.combine_chunks()
    if isinstance(values, pa.ChunkedArray) and pa.types.is_boolean(values.type):
        # GH#52059 replace_with_mask segfaults for chunked array
        # https://github.com/apache/arrow/issues/34634
        values = values.combine_chunks()
    # Fast path: let Arrow do the replacement natively; fall through to the
    # numpy object-array fallback only for types Arrow does not support.
    try:
        return pc.replace_with_mask(values, mask, replacements)
    except pa.ArrowNotImplementedError:
        pass
    # Fallback: coerce both the replacements and the values to numpy object
    # arrays, assign through the mask, and rebuild an Arrow array of the
    # original type.
    if isinstance(replacements, pa.Array):
        replacements = np.array(replacements, dtype=object)
    elif isinstance(replacements, pa.Scalar):
        replacements = replacements.as_py()
    result = np.array(values, dtype=object)
    result[mask] = replacements
    return pa.array(result, type=values.type, from_pandas=is_nan_na())
Replace items selected with a mask. Analogous to pyarrow.compute.replace_with_mask, with logic to fallback to numpy for unsupported types. Parameters ---------- values : pa.Array or pa.ChunkedArray mask : npt.NDArray[np.bool_] or bool replacements : ArrayLike or Scalar Replacement value(s) Returns ------- pa.Array or pa.ChunkedArray
python
pandas/core/arrays/arrow/array.py
2,545
[ "cls", "values", "mask", "replacements" ]
pa.Array | pa.ChunkedArray
true
6
6.4
pandas-dev/pandas
47,362
numpy
false
describe_pipeline_exec
def describe_pipeline_exec(self, pipeline_exec_arn: str, verbose: bool = False):
    """
    Get info about a SageMaker pipeline execution.

    .. seealso::
        - :external+boto3:py:meth:`SageMaker.Client.describe_pipeline_execution`
        - :external+boto3:py:meth:`SageMaker.Client.list_pipeline_execution_steps`

    :param pipeline_exec_arn: arn of the pipeline execution
    :param verbose: Whether to log details about the steps status in the pipeline execution
    """
    if verbose:
        steps = self.conn.list_pipeline_execution_steps(
            PipelineExecutionArn=pipeline_exec_arn
        )["PipelineExecutionSteps"]
        self.log.info(
            "state of the pipeline steps: %s",
            Counter(step["StepStatus"] for step in steps),
        )
        self.log.info(
            "steps currently in progress: %s",
            [step["StepName"] for step in steps if step["StepStatus"] == "Executing"],
        )
    return self.conn.describe_pipeline_execution(PipelineExecutionArn=pipeline_exec_arn)
Get info about a SageMaker pipeline execution. .. seealso:: - :external+boto3:py:meth:`SageMaker.Client.describe_pipeline_execution` - :external+boto3:py:meth:`SageMaker.Client.list_pipeline_execution_steps` :param pipeline_exec_arn: arn of the pipeline execution :param verbose: Whether to log details about the steps status in the pipeline execution
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
1,079
[ "self", "pipeline_exec_arn", "verbose" ]
true
2
6.08
apache/airflow
43,597
sphinx
false
config_list
def config_list(**configs):
    """Generate configs based on the list of input shapes.

    This function will take input shapes specified in a list from user. Besides
    that, all other parameters will be cross produced first and each of the
    generated list will be merged with the input shapes list.

    Reserved Args:
        attr_names(reserved): a list of names for input shapes.
        attrs(reserved): a list of values for each input shape.
        cross_product_configs: a dictionary of attributes which will be
            cross produced with the input shapes.
        tags(reserved): a tag used to filter inputs.

    Here is an example:
        attrs = [
            [1, 2],
            [4, 5],
        ],
        attr_names = ['M', 'N'],
        cross_product_configs={
            'device': ['cpu', 'cuda'],
        },

    we will generate [[{'M': 1}, {'N' : 2}, {'device' : 'cpu'}],
                      [{'M': 1}, {'N' : 2}, {'device' : 'cuda'}],
                      [{'M': 4}, {'N' : 5}, {'device' : 'cpu'}],
                      [{'M': 4}, {'N' : 5}, {'device' : 'cuda'}]]
    """
    generated_configs = []
    # All three reserved keys must be present; fail fast otherwise.
    reserved_names = ["attrs", "attr_names", "tags"]
    if any(attr not in configs for attr in reserved_names):
        raise ValueError("Missing attrs in configs")
    _validate(configs)
    cross_configs = None
    if "cross_product_configs" in configs:
        # Pre-expand the cross-product attributes once; each expansion is
        # appended to every generated shape config below.
        cross_configs = cross_product_configs(**configs["cross_product_configs"])
    for inputs in configs["attrs"]:
        # Pair each input value with its positional attribute name.
        tmp_result = [
            {configs["attr_names"][i]: input_value}
            for i, input_value in enumerate(inputs)
        ]
        # TODO(mingzhe0908):
        # If multiple 'tags' were provided, do they get concat?
        # If a config has both ['short', 'medium'], it should match
        # both 'short' and 'medium' tag-filter?
        tmp_result.append({"tags": "_".join(configs["tags"])})
        if cross_configs:
            generated_configs += [tmp_result + list(config) for config in cross_configs]
        else:
            generated_configs.append(tmp_result)
    return generated_configs
Generate configs based on the list of input shapes. This function will take input shapes specified in a list from user. Besides that, all other parameters will be cross produced first and each of the generated list will be merged with the input shapes list. Reserved Args: attr_names(reserved): a list of names for input shapes. attrs(reserved): a list of values for each input shape. corss_product: a dictionary of attributes which will be cross produced with the input shapes. tags(reserved): a tag used to filter inputs. Here is an example: attrs = [ [1, 2], [4, 5], ], attr_names = ['M', 'N'], cross_product_configs={ 'device': ['cpu', 'cuda'], }, we will generate [[{'M': 1}, {'N' : 2}, {'device' : 'cpu'}], [{'M': 1}, {'N' : 2}, {'device' : 'cuda'}], [{'M': 4}, {'N' : 5}, {'device' : 'cpu'}], [{'M': 4}, {'N' : 5}, {'device' : 'cuda'}]]
python
benchmarks/operator_benchmark/benchmark_utils.py
134
[]
false
6
6.24
pytorch/pytorch
96,034
unknown
false
orFrom
/**
 * Return a source that will use the given supplier to obtain a fallback
 * value to use in place of {@code null}.
 *
 * @param fallback the fallback supplier, must not be null
 * @return a new {@link Source} instance
 */
public Source<T> orFrom(Supplier<? extends @Nullable T> fallback) {
    Assert.notNull(fallback, "'fallback' must not be null");
    // Lazily consult the fallback: it is only invoked when getValue() yields null.
    Supplier<@Nullable T> supplier = () -> {
        T value = getValue();
        return (value != null) ? value : fallback.get();
    };
    // Reuse the current predicate so filtering behavior is unchanged.
    return new Source<>(supplier, this.predicate);
}
Return a source that will use the given supplier to obtain a fallback value to use in place of {@code null}. @param fallback the fallback supplier @return a new {@link Source} instance @since 4.0.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
172
[ "fallback" ]
true
2
8.24
spring-projects/spring-boot
79,428
javadoc
false
fill
/**
 * Fills and returns the given array, assigning the given {@code byte} value to
 * each element of the array.
 *
 * @param a the array to be filled (may be null, in which case null is returned).
 * @param val the value to be stored in all elements of the array.
 * @return the given array.
 * @see Arrays#fill(byte[],byte)
 */
public static byte[] fill(final byte[] a, final byte val) {
    // Guard clause: a null array is simply passed through untouched.
    if (a == null) {
        return null;
    }
    Arrays.fill(a, val);
    return a;
}
Fills and returns the given array, assigning the given {@code byte} value to each element of the array. @param a the array to be filled (may be null). @param val the value to be stored in all elements of the array. @return the given array. @see Arrays#fill(byte[],byte)
java
src/main/java/org/apache/commons/lang3/ArrayFill.java
56
[ "a", "val" ]
true
2
8.08
apache/commons-lang
2,896
javadoc
false
pipe
def pipe(
    obj: _T,
    func: Callable[Concatenate[_T, P], T] | tuple[Callable[..., T], str],
    *args: Any,
    **kwargs: Any,
) -> T:
    """
    Apply a function ``func`` to object ``obj`` either by passing obj as the
    first argument to the function or, in the case that the func is a tuple,
    interpret the first element of the tuple as a function and pass the obj to
    that function as a keyword argument whose key is the value of the second
    element of the tuple.

    Parameters
    ----------
    func : callable or tuple of (callable, str)
        Function to apply to this object or, alternatively, a
        ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of ``callable`` that expects the
        object.
    *args : iterable, optional
        Positional arguments passed into ``func``.
    **kwargs : dict, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.
    """
    # Plain-callable form: obj is simply the first positional argument.
    if not isinstance(func, tuple):
        return func(obj, *args, **kwargs)
    # Tuple form: unpack the callable and the keyword that receives obj.
    target_func, keyword = func
    if keyword in kwargs:
        # The caller must not also supply the target keyword explicitly.
        raise ValueError(f"{keyword} is both the pipe target and a keyword argument")
    kwargs[keyword] = obj
    return target_func(*args, **kwargs)
Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, str) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the object. *args : iterable, optional Positional arguments passed into ``func``. **kwargs : dict, optional A dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``.
python
pandas/core/common.py
490
[ "obj", "func" ]
T
true
4
6.88
pandas-dev/pandas
47,362
numpy
false
pullAll
/**
 * Removes all given values from `array`, mutating it in place.
 * No-ops (returning `array` unchanged) when either input is
 * missing or empty.
 *
 * @param {Array} array The array to modify.
 * @param {Array} values The values to remove.
 * @returns {Array} Returns `array`.
 */
function pullAll(array, values) {
  if (!(array && array.length) || !(values && values.length)) {
    return array;
  }
  return basePullAll(array, values);
}
This method is like `_.pull` except that it accepts an array of values to remove. **Note:** Unlike `_.difference`, this method mutates `array`. @static @memberOf _ @since 4.0.0 @category Array @param {Array} array The array to modify. @param {Array} values The values to remove. @returns {Array} Returns `array`. @example var array = ['a', 'b', 'c', 'a', 'b', 'c']; _.pullAll(array, ['a', 'c']); console.log(array); // => ['b', 'b']
javascript
lodash.js
7,823
[ "array", "values" ]
false
5
7.52
lodash/lodash
61,490
jsdoc
false
pausedPartitions
/**
 * Returns the partitions for which {@link TopicPartitionState#isPaused} holds,
 * as gathered by {@code collectPartitions}.
 *
 * @return the currently paused partitions
 */
public synchronized Set<TopicPartition> pausedPartitions() {
    return collectPartitions(TopicPartitionState::isPaused);
}
Returns the set of assigned partitions that are currently paused. @return the paused partitions
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
393
[]
true
1
6.48
apache/kafka
31,560
javadoc
false
owners
/**
 * Sets the owners that we want to describe delegation tokens for.
 * If owners is null, all the user owned tokens and tokens where the user
 * has Describe permission will be returned.
 *
 * @param owners the owners to filter on, may be null
 * @return this instance, to allow chaining
 */
public DescribeDelegationTokenOptions owners(List<KafkaPrincipal> owners) {
    this.owners = owners;
    return this;
}
If owners is null, all the user owned tokens and tokens where user have Describe permission will be returned. @param owners The owners that we want to describe delegation tokens for @return this instance
java
clients/src/main/java/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java
36
[ "owners" ]
DescribeDelegationTokenOptions
true
1
6.96
apache/kafka
31,560
javadoc
false
getGenericParameterTypes
/**
 * Returns the generic parameter types of the underlying constructor, stripping
 * the synthetic enclosing-instance ("hidden this") parameter when the
 * constructor belongs to an inner class and the raw parameter list shows it.
 */
@Override
Type[] getGenericParameterTypes() {
    Type[] types = constructor.getGenericParameterTypes();
    if (types.length > 0 && mayNeedHiddenThis()) {
        Class<?>[] rawParamTypes = constructor.getParameterTypes();
        // Only strip when the generic and raw lists line up and the first raw
        // parameter is exactly the enclosing class of the declaring class.
        if (types.length == rawParamTypes.length
            && rawParamTypes[0] == getDeclaringClass().getEnclosingClass()) {
            // first parameter is the hidden 'this'
            return Arrays.copyOfRange(types, 1, types.length);
        }
    }
    return types;
}
If the class is parameterized, such as {@link java.util.ArrayList ArrayList}, this returns {@code ArrayList<E>}.
java
android/guava/src/com/google/common/reflect/Invokable.java
434
[]
true
5
6.56
google/guava
51,352
javadoc
false
asJarArchiveEntry
/**
 * Adapts the given {@link ZipEntry} to a {@link JarArchiveEntry}, reusing the
 * instance when it already is one.
 *
 * @param entry the entry to adapt
 * @return a {@link JarArchiveEntry} equivalent to the given entry
 * @throws ZipException if the entry cannot be converted
 */
private JarArchiveEntry asJarArchiveEntry(ZipEntry entry) throws ZipException {
    return (entry instanceof JarArchiveEntry existing) ? existing : new JarArchiveEntry(entry);
}
Create a new {@link JarWriter} instance. @param file the file to write @param lastModifiedTime an optional last modified time to apply to the written entries @throws IOException if the file cannot be opened @throws FileNotFoundException if the file cannot be found @since 4.0.0
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/JarWriter.java
85
[ "entry" ]
JarArchiveEntry
true
2
6.72
spring-projects/spring-boot
79,428
javadoc
false
getWildcardDirectoryFromSpec
/**
 * Gets the directory in an include spec that should be watched for changes.
 * Returns the watched path plus a flag saying whether the watch must be
 * recursive (any wildcard before the last directory separator implies
 * recursion), or `undefined` when the spec yields nothing watchable.
 */
function getWildcardDirectoryFromSpec(spec: string, useCaseSensitiveFileNames: boolean): { key: CanonicalKey; path: string; flags: WatchDirectoryFlags; } | undefined {
    const match = wildcardDirectoryPattern.exec(spec);
    if (match) {
        // We check this with a few `indexOf` calls because 3 `indexOf`/`lastIndexOf` calls is
        // less algorithmically complex (roughly O(3n) worst-case) than the regex we used to use,
        // \/[^/]*?[*?][^/]*\/ which was polynominal in v8, since arbitrary sequences of wildcard
        // characters could match any of the central patterns, resulting in bad backtracking.
        const questionWildcardIndex = spec.indexOf("?");
        const starWildcardIndex = spec.indexOf("*");
        const lastDirectorySeperatorIndex = spec.lastIndexOf(directorySeparator);
        return {
            key: toCanonicalKey(match[0], useCaseSensitiveFileNames),
            path: match[0],
            // A wildcard occurring before the final separator means the wildcard
            // spans directories, so the whole subtree must be watched.
            flags: (questionWildcardIndex !== -1 && questionWildcardIndex < lastDirectorySeperatorIndex)
                    || (starWildcardIndex !== -1 && starWildcardIndex < lastDirectorySeperatorIndex) ? WatchDirectoryFlags.Recursive : WatchDirectoryFlags.None,
        };
    }
    // No explicit wildcard: a final path segment that is an implicit glob
    // (e.g. a bare directory name) still requires a recursive watch.
    if (isImplicitGlob(spec.substring(spec.lastIndexOf(directorySeparator) + 1))) {
        const path = removeTrailingDirectorySeparator(spec);
        return {
            key: toCanonicalKey(path, useCaseSensitiveFileNames),
            path,
            flags: WatchDirectoryFlags.Recursive,
        };
    }
    return undefined;
}
Gets directories in a set of include patterns that should be watched for changes.
typescript
src/compiler/commandLineParser.ts
4,151
[ "spec", "useCaseSensitiveFileNames" ]
true
7
6
microsoft/TypeScript
107,154
jsdoc
false
reinstall_if_different_sources
def reinstall_if_different_sources(airflow_sources: Path) -> bool:
    """
    Reinstall Breeze when the Airflow sources being operated on differ from
    the ones Breeze was installed with.

    :param airflow_sources: source for airflow code that we are operating on
    :return: True if a reinstall was triggered.
    """
    installed_from = get_installation_airflow_sources()
    # No recorded installation source, or the sources match: nothing to do.
    if not installed_from or installed_from == airflow_sources:
        return False
    reinstall_breeze(airflow_sources / "dev" / "breeze")
    return True
Prints warning if detected airflow sources are not the ones that Breeze was installed with. :param airflow_sources: source for airflow code that we are operating on :return: True if warning was printed.
python
dev/breeze/src/airflow_breeze/utils/path_utils.py
148
[ "airflow_sources" ]
bool
true
3
8.24
apache/airflow
43,597
sphinx
false
toObject
/**
 * Converts an array of primitive booleans to objects.
 *
 * <p>This method returns {@code null} for a {@code null} input array.</p>
 *
 * @param array a {@code boolean} array.
 * @return a {@link Boolean} array, {@code null} if null array input.
 */
public static Boolean[] toObject(final boolean[] array) {
    if (array == null) {
        return null;
    }
    if (array.length == 0) {
        // Return the shared empty-array constant rather than allocating.
        return EMPTY_BOOLEAN_OBJECT_ARRAY;
    }
    // Box each element using the canonical Boolean constants.
    final Boolean[] boxed = new Boolean[array.length];
    for (int i = 0; i < array.length; i++) {
        boxed[i] = array[i] ? Boolean.TRUE : Boolean.FALSE;
    }
    return boxed;
}
Converts an array of primitive booleans to objects. <p>This method returns {@code null} for a {@code null} input array.</p> @param array a {@code boolean} array. @return a {@link Boolean} array, {@code null} if null array input.
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
8,672
[ "array" ]
true
4
8.24
apache/commons-lang
2,896
javadoc
false
prepare_docker_build_cache_command
def prepare_docker_build_cache_command(
    image_params: CommonBuildParams,
) -> list[str]:
    """
    Constructs docker build_cache command based on the parameters passed.

    :param image_params: parameters of the image
    :return: Command to run as list of string
    """
    # PROD images use the plain Dockerfile; everything else builds the CI image.
    dockerfile = "Dockerfile" if isinstance(image_params, BuildProdParams) else "Dockerfile.ci"
    command: list[str] = ["docker"]
    command += ["buildx", "build", "--builder", get_and_use_docker_context(image_params.builder), "--progress=auto"]
    command += image_params.common_docker_build_flags
    command += ["--pull"]
    command += image_params.prepare_arguments_for_docker_build_command()
    command += ["--target", "main", "."]
    command += ["-f", dockerfile]
    command += ["--platform", image_params.platform]
    # Push the full (mode=max) layer cache for this platform to the registry.
    command += [f"--cache-to=type=registry,ref={image_params.get_cache(image_params.platform)},mode=max"]
    return command
Constructs docker build_cache command based on the parameters passed. :param image_params: parameters of the image :return: Command to run as list of string
python
dev/breeze/src/airflow_breeze/utils/docker_command_utils.py
364
[ "image_params" ]
list[str]
true
2
7.76
apache/airflow
43,597
sphinx
false
applyAsInt
/**
 * Applies this operator to the given operands.
 *
 * @param left the first operand
 * @param right the second operand
 * @return the operator result
 * @throws E if the operation fails
 */
int applyAsInt(int left, int right) throws E;
Applies this operator to the given operands. @param left the first operand @param right the second operand @return the operator result @throws E if the operation fails
java
src/main/java/org/apache/commons/lang3/function/FailableIntBinaryOperator.java
39
[ "left", "right" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
toArray
/**
 * Returns a new {@code String[]} containing the tokenizer elements.
 *
 * @return a new {@code String[]}.
 */
public String[] toArray() {
    // Materialize the tokens as a List, then copy into a String[]; the shared
    // empty-array constant avoids allocating a throwaway sizing array.
    return toList().toArray(ArrayUtils.EMPTY_STRING_ARRAY);
}
Returns a new {@code String[]} containing the tokenizer elements. @return a new {@code String[]}.
java
src/main/java/org/apache/commons/lang3/util/IterableStringTokenizer.java
91
[]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
getBean
/**
 * Return the bean instance that uniquely matches the given type.
 *
 * @param requiredType the type the bean must match
 * @return the single matching bean instance
 * @throws NoSuchBeanDefinitionException if no bean of the given type was found
 * @throws NoUniqueBeanDefinitionException if more than one bean of the given type was found
 * @throws BeansException if the bean could not be obtained
 */
@Override
public <T> T getBean(Class<T> requiredType) throws BeansException {
    String[] candidates = getBeanNamesForType(requiredType);
    switch (candidates.length) {
        case 0:
            throw new NoSuchBeanDefinitionException(requiredType);
        case 1:
            return getBean(candidates[0], requiredType);
        default:
            throw new NoUniqueBeanDefinitionException(requiredType, candidates);
    }
}
Return the bean instance that uniquely matches the given type, if any. @param requiredType type the bean must match @return an instance of the single matching bean @throws NoSuchBeanDefinitionException if no bean of the given type was found @throws NoUniqueBeanDefinitionException if more than one bean of the given type was found
java
spring-beans/src/main/java/org/springframework/beans/factory/support/StaticListableBeanFactory.java
170
[ "requiredType" ]
T
true
3
6.88
spring-projects/spring-framework
59,386
javadoc
false
descendingEntryIterator
/**
 * Returns an iterator over (upper-bound cut, range) entries in descending
 * order of upper bound, restricted by {@code upperBoundWindow}. The window
 * constrains the keys (upper bounds) only; the values are the full ranges.
 */
@Override
Iterator<Entry<Cut<C>, Range<C>>> descendingEntryIterator() {
    Collection<Range<C>> candidates;
    if (upperBoundWindow.hasUpperBound()) {
        // Only ranges whose lower bound is strictly below the window's upper
        // endpoint can have an upper bound inside the window.
        candidates = rangesByLowerBound
            .headMap(upperBoundWindow.upperEndpoint(), false)
            .descendingMap()
            .values();
    } else {
        candidates = rangesByLowerBound.descendingMap().values();
    }
    PeekingIterator<Range<C>> backingItr = peekingIterator(candidates.iterator());
    // Skip the first candidate when its upper bound overshoots the window.
    if (backingItr.hasNext()
        && upperBoundWindow.upperBound.isLessThan(backingItr.peek().upperBound)) {
        backingItr.next();
    }
    return new AbstractIterator<Entry<Cut<C>, Range<C>>>() {
        @Override
        protected @Nullable Entry<Cut<C>, Range<C>> computeNext() {
            if (!backingItr.hasNext()) {
                return endOfData();
            }
            Range<C> range = backingItr.next();
            // Stop once a range's upper bound drops below the window's lower
            // bound — all following candidates are smaller still.
            return upperBoundWindow.lowerBound.isLessThan(range.upperBound)
                ? immutableEntry(range.upperBound, range)
                : endOfData();
        }
    };
}
upperBoundWindow represents the headMap/subMap/tailMap view of the entire "ranges by upper bound" map; it's a constraint on the *keys*, and does not affect the values.
java
android/guava/src/com/google/common/collect/TreeRangeSet.java
403
[]
true
6
6.56
google/guava
51,352
javadoc
false
invoke
/**
 * Invokes the supplier against the callback instance when the configured
 * filter matches, guarding against {@link ClassCastException}s caused by
 * lambda generic-type erasure.
 *
 * @param callbackInstance the callback instance to test and invoke
 * @param supplier the invocation to perform when the filter matches
 * @return the invocation result, or {@code InvocationResult.noResult()} when
 *         the filter does not match or a lambda generic mismatch was detected
 */
protected final <R> InvocationResult<R> invoke(C callbackInstance, Supplier<@Nullable R> supplier) {
    if (this.filter.match(this.callbackType, callbackInstance, this.argument, this.additionalArguments)) {
        try {
            return InvocationResult.of(supplier.get());
        } catch (ClassCastException ex) {
            // A ClassCastException may just mean the lambda's erased generic
            // signature didn't match; only swallow it in that specific case.
            if (!isLambdaGenericProblem(ex)) {
                throw ex;
            }
            logNonMatchingType(callbackInstance, ex);
        }
    }
    return InvocationResult.noResult();
}
Use a specific filter to determine when a callback should apply. If no explicit filter is set filter will be attempted using the generic type on the callback type. @param filter the filter to use @return this instance @since 3.4.8
java
core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java
158
[ "callbackInstance", "supplier" ]
true
4
8.24
spring-projects/spring-boot
79,428
javadoc
false
resolveConfigVariables
/**
 * Instantiates the given config providers and resolves the actual values of
 * config variables via those providers, overlaying the resolved values on the
 * raw originals.
 *
 * @param configProviderProps the map of config provider configs; when null or
 *        empty, providers are discovered from {@code originals} instead
 * @param originals the map of raw configs
 * @return map of config keys to resolved values
 */
private Map<String, ?> resolveConfigVariables(Map<String, ?> configProviderProps, Map<String, Object> originals) {
    Map<String, String> providerConfigString;
    Map<String, ?> configProperties;
    Predicate<String> classNameFilter;
    Map<String, Object> resolvedOriginals = new HashMap<>();
    // As variable configs are strings, parse the originals and obtain the potential variable configs.
    Map<String, String> indirectVariables = extractPotentialVariables(originals);
    resolvedOriginals.putAll(originals);
    if (configProviderProps == null || configProviderProps.isEmpty()) {
        // No explicit provider configs: discover providers among the originals,
        // restricted by the automatic-provider class-name filter.
        providerConfigString = indirectVariables;
        configProperties = originals;
        classNameFilter = automaticConfigProvidersFilter();
    } else {
        // Explicit provider configs supplied: accept any provider class.
        providerConfigString = extractPotentialVariables(configProviderProps);
        configProperties = configProviderProps;
        classNameFilter = ignored -> true;
    }
    Map<String, ConfigProvider> providers = instantiateConfigProviders(providerConfigString, configProperties, classNameFilter);
    if (!providers.isEmpty()) {
        ConfigTransformer configTransformer = new ConfigTransformer(providers);
        ConfigTransformerResult result = configTransformer.transform(indirectVariables);
        if (!result.data().isEmpty()) {
            resolvedOriginals.putAll(result.data());
        }
    }
    // Providers may hold resources; close them best-effort once resolution is done.
    providers.values().forEach(x -> Utils.closeQuietly(x, "config provider"));
    return new ResolvingMap<>(resolvedOriginals, originals);
}
Instantiates given list of config providers and fetches the actual values of config variables from the config providers. returns a map of config key and resolved values. @param configProviderProps The map of config provider configs @param originals The map of raw configs. @return map of resolved config variable.
java
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
535
[ "configProviderProps", "originals" ]
true
5
8.08
apache/kafka
31,560
javadoc
false
throwCause
/**
 * Rethrows the cause of the given exception, optionally combining the stack
 * traces of the cause and its wrapper. When the cause is neither an
 * {@link Exception} nor an {@link Error} — or there is no cause at all — the
 * original wrapper exception is thrown instead.
 *
 * @param e the wrapper exception whose cause should be propagated
 * @param combineStackTraces whether to append the wrapper's stack trace to the cause's
 * @return never returns normally; declared so call sites can write {@code throw throwCause(...)}
 * @throws Exception always
 */
private static Exception throwCause(Exception e, boolean combineStackTraces) throws Exception {
    Throwable cause = e.getCause();
    if (cause == null) {
        throw e;
    }
    if (combineStackTraces) {
        // Present one seamless trace: cause frames first, then the frames of
        // the wrapper that captured it.
        StackTraceElement[] combined =
            ObjectArrays.concat(cause.getStackTrace(), e.getStackTrace(), StackTraceElement.class);
        cause.setStackTrace(combined);
    }
    if (cause instanceof Exception) {
        throw (Exception) cause;
    }
    if (cause instanceof Error) {
        throw (Error) cause;
    }
    // The cause is a weird kind of Throwable, so throw the outer exception.
    throw e;
}
Creates a TimeLimiter instance using the given executor service to execute method calls. <p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills up, any time callers spend waiting for a thread may count toward their time limit, and in this case the call may even time out before the target method is ever invoked. @param executor the ExecutorService that will execute the method calls on the target objects; for example, a {@link Executors#newCachedThreadPool()}. @since 22.0
java
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
221
[ "e", "combineStackTraces" ]
Exception
true
5
6.88
google/guava
51,352
javadoc
false
toString
/**
 * Returns a string representation for this MapMaker instance. Only fields that
 * have been explicitly configured are included; the exact form of the returned
 * string is not specified.
 */
@Override
public String toString() {
    MoreObjects.ToStringHelper s = MoreObjects.toStringHelper(this);
    // UNSET_INT / null mark "not configured" — skip them so the output
    // reflects only what the caller actually set.
    if (initialCapacity != UNSET_INT) {
        s.add("initialCapacity", initialCapacity);
    }
    if (concurrencyLevel != UNSET_INT) {
        s.add("concurrencyLevel", concurrencyLevel);
    }
    if (keyStrength != null) {
        s.add("keyStrength", Ascii.toLowerCase(keyStrength.toString()));
    }
    if (valueStrength != null) {
        s.add("valueStrength", Ascii.toLowerCase(valueStrength.toString()));
    }
    if (keyEquivalence != null) {
        // Value intentionally omitted; presence alone signals a custom equivalence.
        s.addValue("keyEquivalence");
    }
    return s.toString();
}
Returns a string representation for this MapMaker instance. The exact form of the returned string is not specified.
java
android/guava/src/com/google/common/collect/MapMaker.java
294
[]
String
true
6
6.56
google/guava
51,352
javadoc
false
getSingletonInstance
/**
 * Return the singleton instance of this class's proxy object, lazily creating
 * it if it hasn't been created already.
 *
 * @return the shared singleton proxy
 */
private synchronized Object getSingletonInstance() {
    if (this.singletonInstance == null) {
        this.targetSource = freshTargetSource();
        // Auto-detect interfaces only when none were set explicitly and we are
        // not forced into a class-based (CGLIB) proxy.
        if (this.autodetectInterfaces && getProxiedInterfaces().length == 0 && !isProxyTargetClass()) {
            // Rely on AOP infrastructure to tell us what interfaces to proxy.
            Class<?> targetClass = getTargetClass();
            if (targetClass == null) {
                throw new FactoryBeanNotInitializedException("Cannot determine target class for proxy");
            }
            setInterfaces(ClassUtils.getAllInterfacesForClass(targetClass, this.proxyClassLoader));
        }
        // Initialize the shared singleton instance.
        super.setFrozen(this.freezeProxy);
        this.singletonInstance = getProxy(createAopProxy());
    }
    return this.singletonInstance;
}
Return the singleton instance of this class's proxy object, lazily creating it if it hasn't been created already. @return the shared singleton proxy
java
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
300
[]
Object
true
6
8.08
spring-projects/spring-framework
59,386
javadoc
false
unwrapListeners
/**
 * Returns a copy of the given listeners array in which every wrapped
 * listener (an entry carrying a `.listener` function) is replaced by its
 * original listener function.
 *
 * @param {Array} arr The listeners array to unwrap.
 * @returns {Array} A new array of unwrapped listeners.
 */
function unwrapListeners(arr) {
  const copy = arrayClone(arr);
  for (let idx = 0; idx < copy.length; ++idx) {
    const original = copy[idx].listener;
    if (typeof original === 'function')
      copy[idx] = original;
  }
  return copy;
}
Returns a copy of the given listeners array with any wrapped listeners replaced by their original listener functions. @param {Array} arr @returns {Array}
javascript
lib/events.js
877
[ "arr" ]
false
3
6.08
nodejs/node
114,839
jsdoc
false
createJarFileForStream
/**
 * Copies the given stream to a temporary file and opens it as a
 * {@link UrlJarFile}. The temporary file is marked delete-on-exit; on failure
 * it is deleted eagerly when possible.
 *
 * @param in the stream containing the jar content
 * @param version the release version to open the jar with
 * @param closeAction the action to call when the jar file is closed
 * @return a new {@link JarFile} instance
 * @throws IOException on I/O error
 */
private JarFile createJarFileForStream(InputStream in, Version version, Consumer<JarFile> closeAction) throws IOException {
    Path local = Files.createTempFile("jar_cache", null);
    try {
        Files.copy(in, local, StandardCopyOption.REPLACE_EXISTING);
        JarFile jarFile = new UrlJarFile(local.toFile(), version, closeAction);
        // Only schedule delete-on-exit once the jar opened successfully; the
        // catch block handles eager cleanup for the failure path.
        local.toFile().deleteOnExit();
        return jarFile;
    } catch (Throwable ex) {
        deleteIfPossible(local, ex);
        throw ex;
    }
}
Create a new {@link UrlJarFile} or {@link UrlNestedJarFile} instance. @param jarFileUrl the jar file URL @param closeAction the action to call when the file is closed @return a new {@link JarFile} instance @throws IOException on I/O error
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFileFactory.java
95
[ "in", "version", "closeAction" ]
JarFile
true
2
7.92
spring-projects/spring-boot
79,428
javadoc
false
addPropertyValue
/**
 * Add a PropertyValue object, replacing any existing one for the corresponding
 * property or getting merged with it (if applicable).
 *
 * @param pv the PropertyValue object to add
 * @return this in order to allow for adding multiple property values in a chain
 */
public MutablePropertyValues addPropertyValue(PropertyValue pv) {
    for (int i = 0; i < this.propertyValueList.size(); i++) {
        PropertyValue currentPv = this.propertyValueList.get(i);
        if (currentPv.getName().equals(pv.getName())) {
            // Same property already present: merge if supported, then replace
            // in place so its original position in the list is preserved.
            pv = mergeIfRequired(pv, currentPv);
            setPropertyValueAt(pv, i);
            return this;
        }
    }
    // New property: append at the end.
    this.propertyValueList.add(pv);
    return this;
}
Add a PropertyValue object, replacing any existing one for the corresponding property or getting merged with it (if applicable). @param pv the PropertyValue object to add @return this in order to allow for adding multiple property values in a chain
java
spring-beans/src/main/java/org/springframework/beans/MutablePropertyValues.java
173
[ "pv" ]
MutablePropertyValues
true
3
7.6
spring-projects/spring-framework
59,386
javadoc
false
on
/**
 * Maps the given instances as unchecked.
 *
 * @param <T> The result type returned by the Futures' {@code get()} methods.
 * @param futures The Futures to uncheck.
 * @return a new collection.
 */
static <T> Collection<UncheckedFuture<T>> on(final Collection<Future<T>> futures) {
    // Wrap each future via the shared map() helper and collect eagerly.
    return map(futures).collect(Collectors.toList());
}
Maps the given instances as unchecked. @param <T> The result type returned by the Futures' {@link #get()} and {@link #get(long, TimeUnit)} methods. @param futures The Futures to uncheck. @return a new collection.
java
src/main/java/org/apache/commons/lang3/concurrent/UncheckedFuture.java
58
[ "futures" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
escape
/**
 * Returns the escaped form of a given literal string. Implementations may
 * treat input characters differently (e.g. UTF-16 surrogate handling varies
 * between escaper subclasses).
 *
 * @param string the literal string to be escaped
 * @return the escaped form of {@code string}
 * @throws NullPointerException if {@code string} is null
 * @throws IllegalArgumentException if {@code string} cannot be escaped
 */
public abstract String escape(String string);
Returns the escaped form of a given literal string. <p>Note that this method may treat input characters differently depending on the specific escaper implementation. <ul> <li>{@link UnicodeEscaper} handles <a href="http://en.wikipedia.org/wiki/UTF-16">UTF-16</a> correctly, including surrogate character pairs. If the input is badly formed the escaper should throw {@link IllegalArgumentException}. <li>{@link CharEscaper} handles Java characters independently and does not verify the input for well formed characters. A {@code CharEscaper} should not be used in situations where input is not guaranteed to be restricted to the Basic Multilingual Plane (BMP). </ul> @param string the literal string to be escaped @return the escaped form of {@code string} @throws NullPointerException if {@code string} is null @throws IllegalArgumentException if {@code string} contains badly formed UTF-16 or cannot be escaped for any other reason
java
android/guava/src/com/google/common/escape/Escaper.java
86
[ "string" ]
String
true
1
6.16
google/guava
51,352
javadoc
false
H
def H(self): """ Returns the (complex) conjugate transpose of `self`. Equivalent to ``np.transpose(self)`` if `self` is real-valued. Parameters ---------- None Returns ------- ret : matrix object complex conjugate transpose of `self` Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))) >>> z = x - 1j*x; z matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) >>> z.getH() matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], [ 1. +1.j, 5. +5.j, 9. +9.j], [ 2. +2.j, 6. +6.j, 10.+10.j], [ 3. +3.j, 7. +7.j, 11.+11.j]]) """ if issubclass(self.dtype.type, N.complexfloating): return self.transpose().conjugate() else: return self.transpose()
Returns the (complex) conjugate transpose of `self`. Equivalent to ``np.transpose(self)`` if `self` is real-valued. Parameters ---------- None Returns ------- ret : matrix object complex conjugate transpose of `self` Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))) >>> z = x - 1j*x; z matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) >>> z.getH() matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], [ 1. +1.j, 5. +5.j, 9. +9.j], [ 2. +2.j, 6. +6.j, 10.+10.j], [ 3. +3.j, 7. +7.j, 11.+11.j]])
python
numpy/matrixlib/defmatrix.py
976
[ "self" ]
false
3
7.68
numpy/numpy
31,054
numpy
false
_maybe_convert_platform_interval
def _maybe_convert_platform_interval(values) -> ArrayLike: """ Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array """ if isinstance(values, (list, tuple)) and len(values) == 0: # GH 19016 # empty lists/tuples get object dtype by default, but this is # prohibited for IntervalArray, so coerce to integer instead return np.array([], dtype=np.int64) elif not is_list_like(values) or isinstance(values, ABCDataFrame): # This will raise later, but we avoid passing to maybe_convert_platform return values elif isinstance(getattr(values, "dtype", None), CategoricalDtype): values = np.asarray(values) elif not hasattr(values, "dtype") and not isinstance(values, (list, tuple, range)): # TODO: should we just cast these to list? return values else: values = extract_array(values, extract_numpy=True) if not hasattr(values, "dtype"): values = np.asarray(values) if values.dtype.kind in "iu" and values.dtype != np.int64: values = values.astype(np.int64) return values
Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array
python
pandas/core/arrays/interval.py
2,162
[ "values" ]
ArrayLike
true
12
6.4
pandas-dev/pandas
47,362
numpy
false
unsplit
public void unsplit() { if (splitState != SplitState.SPLIT) { throw new IllegalStateException("Stopwatch has not been split."); } splitState = SplitState.UNSPLIT; splits.remove(splits.size() - 1); }
Removes the split. <p> This method clears the stop time. The start time is unaffected, enabling timing from the original start point to continue. </p> @throws IllegalStateException if this StopWatch has not been split.
java
src/main/java/org/apache/commons/lang3/time/StopWatch.java
831
[]
void
true
2
6.72
apache/commons-lang
2,896
javadoc
false
maybePropagateMetadataError
private void maybePropagateMetadataError() { try { metadata.maybeThrowAnyException(); } catch (Exception e) { if (notifyMetadataErrorsViaErrorQueue) { backgroundEventHandler.add(new ErrorEvent(e)); } else { metadataError = Optional.of(e); } } }
This method will try to send the unsent requests, poll for responses, and check the disconnected nodes. @param timeoutMs timeout time @param currentTimeMs current time @param onClose True when the network thread is closing.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java
172
[]
void
true
3
6.72
apache/kafka
31,560
javadoc
false
processDatabase
private void processDatabase(final String name, final Checksum checksum, final CheckedSupplier<InputStream, IOException> source) { Metadata metadata = state.getDatabases().getOrDefault(name, Metadata.EMPTY); if (checksum.matches(metadata)) { updateTimestamp(name, metadata); return; } logger.debug("downloading database [{}]", name); long start = System.currentTimeMillis(); try (InputStream is = source.get()) { int firstChunk = metadata.lastChunk() + 1; // if there is no metadata, then Metadata.EMPTY + 1 = 0 Tuple<Integer, String> tuple = indexChunks(name, is, firstChunk, checksum, start); int lastChunk = tuple.v1(); String md5 = tuple.v2(); // the md5 of the bytes as they passed through indexChunks if (lastChunk > firstChunk) { // if there is a sha256 for this download, then record it (otherwise record null for it, which is also fine) String sha256 = checksum.type == Checksum.Type.SHA256 ? checksum.checksum : null; state = state.put(name, new Metadata(start, firstChunk, lastChunk - 1, md5, start, sha256)); updateTaskState(); logger.info("successfully downloaded database [{}]", name); deleteOldChunks(name, firstChunk); } } catch (Exception e) { logger.error(() -> "error downloading database [" + name + "]", e); } }
This method fetches the database file for the given database from the passed-in source, then indexes that database file into the .geoip_databases Elasticsearch index, deleting any old versions of the database from the index if they exist. @param name The name of the database to be downloaded and indexed into an Elasticsearch index @param checksum The checksum to compare to the computed checksum of the downloaded file @param source The supplier of an InputStream that will actually download the file
java
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java
277
[ "name", "checksum", "source" ]
void
true
5
6.72
elastic/elasticsearch
75,680
javadoc
false
doParse
@Override protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) { NamedNodeMap attributes = element.getAttributes(); for (int x = 0; x < attributes.getLength(); x++) { Attr attribute = (Attr) attributes.item(x); if (isEligibleAttribute(attribute, parserContext)) { String propertyName = extractPropertyName(attribute.getLocalName()); Assert.state(StringUtils.hasText(propertyName), "Illegal property name returned from 'extractPropertyName(String)': cannot be null or empty."); builder.addPropertyValue(propertyName, attribute.getValue()); } } postProcess(builder, element); }
Parse the supplied {@link Element} and populate the supplied {@link BeanDefinitionBuilder} as required. <p>This implementation maps any attributes present on the supplied element to {@link org.springframework.beans.PropertyValue} instances, and {@link BeanDefinitionBuilder#addPropertyValue(String, Object) adds them} to the {@link org.springframework.beans.factory.config.BeanDefinition builder}. <p>The {@link #extractPropertyName(String)} method is used to reconcile the name of an attribute with the name of a JavaBean property. @param element the XML element being parsed @param builder used to define the {@code BeanDefinition} @see #extractPropertyName(String)
java
spring-beans/src/main/java/org/springframework/beans/factory/xml/AbstractSimpleBeanDefinitionParser.java
126
[ "element", "parserContext", "builder" ]
void
true
3
6.24
spring-projects/spring-framework
59,386
javadoc
false
lastIndexOf
public int lastIndexOf(final StrMatcher matcher) { return lastIndexOf(matcher, size); }
Searches the string builder using the matcher to find the last match. <p> Matchers can be used to perform advanced searching behavior. For example you could write a matcher to find the character 'a' followed by a number. </p> @param matcher the matcher to use, null returns -1 @return the last index matched, or -1 if not found
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
2,375
[ "matcher" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
from_spmatrix
def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame: """ Create a new DataFrame from a scipy sparse matrix. Parameters ---------- data : scipy.sparse.spmatrix Must be convertible to csc format. index, columns : Index, optional Row and column labels to use for the resulting DataFrame. Defaults to a RangeIndex. Returns ------- DataFrame Each column of the DataFrame is stored as a :class:`arrays.SparseArray`. See Also -------- DataFrame.sparse.to_coo : Return the contents of the frame as a sparse SciPy COO matrix. Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.eye(3, dtype=int) >>> pd.DataFrame.sparse.from_spmatrix(mat) 0 1 2 0 1 0 0 1 0 1 0 2 0 0 1 """ from pandas._libs.sparse import IntIndex from pandas import DataFrame data = data.tocsc() index, columns = cls._prep_index(data, index, columns) n_rows, n_columns = data.shape # We need to make sure indices are sorted, as we create # IntIndex with no input validation (i.e. check_integrity=False ). # Indices may already be sorted in scipy in which case this adds # a small overhead. data.sort_indices() indices = data.indices indptr = data.indptr array_data = data.data dtype = SparseDtype(array_data.dtype) arrays = [] for i in range(n_columns): sl = slice(indptr[i], indptr[i + 1]) idx = IntIndex(n_rows, indices[sl], check_integrity=False) arr = SparseArray._simple_new(array_data[sl], idx, dtype) arrays.append(arr) return DataFrame._from_arrays( arrays, columns=columns, index=index, verify_integrity=False )
Create a new DataFrame from a scipy sparse matrix. Parameters ---------- data : scipy.sparse.spmatrix Must be convertible to csc format. index, columns : Index, optional Row and column labels to use for the resulting DataFrame. Defaults to a RangeIndex. Returns ------- DataFrame Each column of the DataFrame is stored as a :class:`arrays.SparseArray`. See Also -------- DataFrame.sparse.to_coo : Return the contents of the frame as a sparse SciPy COO matrix. Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.eye(3, dtype=int) >>> pd.DataFrame.sparse.from_spmatrix(mat) 0 1 2 0 1 0 0 1 0 1 0 2 0 0 1
python
pandas/core/arrays/sparse/accessor.py
309
[ "cls", "data", "index", "columns" ]
DataFrame
true
2
8.56
pandas-dev/pandas
47,362
numpy
false
multiple_chunks
def multiple_chunks(self, chunk_size=None): """ Return ``True`` if you can expect multiple chunks. NB: If a particular file representation is in memory, subclasses should always return ``False`` -- there's no good reason to read from memory in chunks. """ return self.size > (chunk_size or self.DEFAULT_CHUNK_SIZE)
Return ``True`` if you can expect multiple chunks. NB: If a particular file representation is in memory, subclasses should always return ``False`` -- there's no good reason to read from memory in chunks.
python
django/core/files/base.py
65
[ "self", "chunk_size" ]
false
2
6.08
django/django
86,204
unknown
false
load
static ZipLocalFileHeaderRecord load(DataBlock dataBlock, long pos) throws IOException { debug.log("Loading LocalFileHeaderRecord from position %s", pos); ByteBuffer buffer = ByteBuffer.allocate(MINIMUM_SIZE); buffer.order(ByteOrder.LITTLE_ENDIAN); dataBlock.readFully(buffer, pos); buffer.rewind(); if (buffer.getInt() != SIGNATURE) { throw new IOException("Zip 'Local File Header Record' not found at position " + pos); } return new ZipLocalFileHeaderRecord(buffer.getShort(), buffer.getShort(), buffer.getShort(), buffer.getShort(), buffer.getShort(), buffer.getInt(), buffer.getInt(), buffer.getInt(), buffer.getShort(), buffer.getShort()); }
Load the {@link ZipLocalFileHeaderRecord} from the given data block. @param dataBlock the source data block @param pos the position of the record @return a new {@link ZipLocalFileHeaderRecord} instance @throws IOException on I/O error
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipLocalFileHeaderRecord.java
111
[ "dataBlock", "pos" ]
ZipLocalFileHeaderRecord
true
2
7.44
spring-projects/spring-boot
79,428
javadoc
false
remainderIsDashes
private boolean remainderIsDashes(Elements elements, int element, int index) { if (elements.getType(element).isIndexed()) { return false; } int length = elements.getLength(element); do { char c = elements.charAt(element, index++); if (c != '-') { return false; } } while (index < length); return true; }
Returns {@code true} if this element is an ancestor (immediate or nested parent) of the specified name. @param name the name to check @return {@code true} if this name is an ancestor
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
512
[ "elements", "element", "index" ]
true
3
8.24
spring-projects/spring-boot
79,428
javadoc
false
iterator
@Override public Iterator<E> iterator() { Set<E> delegate = delegateOrNull(); if (delegate != null) { return delegate.iterator(); } return new Iterator<E>() { int expectedMetadata = metadata; int currentIndex = firstEntryIndex(); int indexToRemove = -1; @Override public boolean hasNext() { return currentIndex >= 0; } @Override @ParametricNullness public E next() { checkForConcurrentModification(); if (!hasNext()) { throw new NoSuchElementException(); } indexToRemove = currentIndex; E result = element(currentIndex); currentIndex = getSuccessor(currentIndex); return result; } @Override public void remove() { checkForConcurrentModification(); checkRemove(indexToRemove >= 0); incrementExpectedModCount(); CompactHashSet.this.remove(element(indexToRemove)); currentIndex = adjustAfterRemove(currentIndex, indexToRemove); indexToRemove = -1; } void incrementExpectedModCount() { expectedMetadata += CompactHashing.MODIFICATION_COUNT_INCREMENT; } private void checkForConcurrentModification() { if (metadata != expectedMetadata) { throw new ConcurrentModificationException(); } } }; }
Updates the index an iterator is pointing to after a call to remove: returns the index of the entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the index that *was* the next entry that would be looked at.
java
android/guava/src/com/google/common/collect/CompactHashSet.java
535
[]
true
4
6.4
google/guava
51,352
javadoc
false
rehydrateCachedInfo
function rehydrateCachedInfo(info: CachedSymbolExportInfo): SymbolExportInfo { if (info.symbol && info.moduleSymbol) return info as SymbolExportInfo; const { id, exportKind, targetFlags, isFromPackageJson, moduleFileName } = info; const [cachedSymbol, cachedModuleSymbol] = symbols.get(id) || emptyArray; if (cachedSymbol && cachedModuleSymbol) { return { symbol: cachedSymbol, moduleSymbol: cachedModuleSymbol, moduleFileName, exportKind, targetFlags, isFromPackageJson, }; } const checker = (isFromPackageJson ? host.getPackageJsonAutoImportProvider()! : host.getCurrentProgram()!).getTypeChecker(); const moduleSymbol = info.moduleSymbol || cachedModuleSymbol || Debug.checkDefined( info.moduleFile ? checker.getMergedSymbol(info.moduleFile.symbol) : checker.tryFindAmbientModule(info.moduleName), ); const symbol = info.symbol || cachedSymbol || Debug.checkDefined( exportKind === ExportKind.ExportEquals ? checker.resolveExternalModuleSymbol(moduleSymbol) : checker.tryGetMemberInModuleExportsAndProperties(unescapeLeadingUnderscores(info.symbolTableKey), moduleSymbol), `Could not find symbol '${info.symbolName}' by key '${info.symbolTableKey}' in module ${moduleSymbol.name}`, ); symbols.set(id, [symbol, moduleSymbol]); return { symbol, moduleSymbol, moduleFileName, exportKind, targetFlags, isFromPackageJson, }; }
Key: node_modules package name (no @types). Value: path to deepest node_modules folder seen that is both visible to `usableByFileName` and contains the package. Later, we can see if a given SymbolExportInfo is shadowed by a another installation of the same package in a deeper node_modules folder by seeing if its path starts with the value stored here.
typescript
src/services/exportInfoMap.ts
281
[ "info" ]
true
13
6.72
microsoft/TypeScript
107,154
jsdoc
false
size
public int size() { return size; }
Gets the length of the string builder. <p> This method is the same as {@link #length()} and is provided to match the API of Collections. </p> @return the length
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
2,851
[]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
atLeast
public boolean atLeast(final JavaVersion requiredVersion) { return this.value >= requiredVersion.value; }
Tests whether this version of Java is at least the version of Java passed in. <p> For example: </p> <pre> {@code myVersion.atLeast(JavaVersion.JAVA_1_8) }</pre> @param requiredVersion the version to check against, not null. @return true if this version is equal to or greater than the specified version.
java
src/main/java/org/apache/commons/lang3/JavaVersion.java
380
[ "requiredVersion" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
buildWiringInfo
protected BeanWiringInfo buildWiringInfo(Object beanInstance, Configurable annotation) { if (!Autowire.NO.equals(annotation.autowire())) { // Autowiring by name or by type return new BeanWiringInfo(annotation.autowire().value(), annotation.dependencyCheck()); } else if (!annotation.value().isEmpty()) { // Explicitly specified bean name for bean definition to take property values from return new BeanWiringInfo(annotation.value(), false); } else { // Default bean name for bean definition to take property values from return new BeanWiringInfo(getDefaultBeanName(beanInstance), true); } }
Build the {@link BeanWiringInfo} for the given {@link Configurable} annotation. @param beanInstance the bean instance @param annotation the Configurable annotation found on the bean class @return the resolved BeanWiringInfo
java
spring-beans/src/main/java/org/springframework/beans/factory/annotation/AnnotationBeanWiringInfoResolver.java
54
[ "beanInstance", "annotation" ]
BeanWiringInfo
true
3
7.44
spring-projects/spring-framework
59,386
javadoc
false
nonNull
public static <E> Stream<E> nonNull(final Collection<E> collection) { return of(collection).filter(Objects::nonNull); }
Streams the non-null elements of a collection. @param <E> the type of elements in the collection. @param collection the collection to stream or null. @return A non-null stream that filters out null elements. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/stream/Streams.java
626
[ "collection" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
get_window_bounds
def get_window_bounds( self, num_values: int = 0, min_periods: int | None = None, center: bool | None = None, closed: str | None = None, step: int | None = None, ) -> tuple[np.ndarray, np.ndarray]: """ Computes the bounds of a window. Parameters ---------- num_values : int, default 0 number of values that will be aggregated over window_size : int, default 0 the number of rows in a window min_periods : int, default None min_periods passed from the top level rolling API center : bool, default None center passed from the top level rolling API closed : str, default None closed passed from the top level rolling API step : int, default None step passed from the top level rolling API win_type : str, default None win_type passed from the top level rolling API Returns ------- A tuple of ndarray[int64]s, indicating the boundaries of each window """ if center: raise ValueError("Forward-looking windows can't have center=True") if closed is not None: raise ValueError( "Forward-looking windows don't support setting the closed argument" ) if step is None: step = 1 start = np.arange(0, num_values, step, dtype="int64") end = start + self.window_size if self.window_size: end = np.clip(end, 0, num_values) return start, end
Computes the bounds of a window. Parameters ---------- num_values : int, default 0 number of values that will be aggregated over window_size : int, default 0 the number of rows in a window min_periods : int, default None min_periods passed from the top level rolling API center : bool, default None center passed from the top level rolling API closed : str, default None closed passed from the top level rolling API step : int, default None step passed from the top level rolling API win_type : str, default None win_type passed from the top level rolling API Returns ------- A tuple of ndarray[int64]s, indicating the boundaries of each window
python
pandas/core/indexers/objects.py
480
[ "self", "num_values", "min_periods", "center", "closed", "step" ]
tuple[np.ndarray, np.ndarray]
true
5
6.4
pandas-dev/pandas
47,362
numpy
false
intToHexDigitMsb0
public static char intToHexDigitMsb0(final int nibble) { switch (nibble) { case 0x0: return '0'; case 0x1: return '8'; case 0x2: return '4'; case 0x3: return 'c'; case 0x4: return '2'; case 0x5: return 'a'; case 0x6: return '6'; case 0x7: return 'e'; case 0x8: return '1'; case 0x9: return '9'; case 0xA: return '5'; case 0xB: return 'd'; case 0xC: return '3'; case 0xD: return 'b'; case 0xE: return '7'; case 0xF: return 'f'; default: throw new IllegalArgumentException("nibble value not between 0 and 15: " + nibble); } }
Converts the 4 LSB of an int to a hexadecimal digit encoded using the MSB0 bit ordering. <p> 0 returns '0' </p> <p> 1 returns '8' </p> <p> 10 returns '5' and so on... </p> @param nibble the 4 bits to convert. @return a hexadecimal digit representing the 4 LSB of {@code nibble}. @throws IllegalArgumentException if {@code nibble < 0} or {@code nibble > 15}.
java
src/main/java/org/apache/commons/lang3/Conversion.java
1,005
[ "nibble" ]
true
1
6.88
apache/commons-lang
2,896
javadoc
false
idxmax
def idxmax( self, skipna: bool = True, numeric_only: bool = False, ) -> DataFrame: """ Return index of first occurrence of maximum in each group. Parameters ---------- skipna : bool, default True Exclude NA values. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- DataFrame Indexes of maxima in each column according to the group. Raises ------ ValueError When there are no valid values for a group. Then can happen if: * There is an unobserved group and ``observed=False``. * All values for a group are NA. * Some values for a group are NA and ``skipna=False``. .. versionchanged:: 3.0.0 Previously if all values for a group are NA or some values for a group are NA and ``skipna=False``, this method would return NA. Now it raises instead. See Also -------- Series.idxmax : Return index of the maximum element. DataFrame.idxmax : Indexes of maxima along the specified axis. Notes ----- This method is the DataFrame version of ``ndarray.argmax``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame( ... { ... "consumption": [10.51, 103.11, 55.48], ... "co2_emissions": [37.2, 19.66, 1712], ... "food_type": ["meat", "plant", "meat"], ... }, ... index=["Pork", "Wheat Products", "Beef"], ... ) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the maximum value in each column according to the group. >>> df.groupby("food_type").idxmax() consumption co2_emissions food_type animal Beef Beef plant Wheat Products Wheat Products """ return self._idxmax_idxmin("idxmax", numeric_only=numeric_only, skipna=skipna)
Return index of first occurrence of maximum in each group. Parameters ---------- skipna : bool, default True Exclude NA values. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- DataFrame Indexes of maxima in each column according to the group. Raises ------ ValueError When there are no valid values for a group. Then can happen if: * There is an unobserved group and ``observed=False``. * All values for a group are NA. * Some values for a group are NA and ``skipna=False``. .. versionchanged:: 3.0.0 Previously if all values for a group are NA or some values for a group are NA and ``skipna=False``, this method would return NA. Now it raises instead. See Also -------- Series.idxmax : Return index of the maximum element. DataFrame.idxmax : Indexes of maxima along the specified axis. Notes ----- This method is the DataFrame version of ``ndarray.argmax``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame( ... { ... "consumption": [10.51, 103.11, 55.48], ... "co2_emissions": [37.2, 19.66, 1712], ... "food_type": ["meat", "plant", "meat"], ... }, ... index=["Pork", "Wheat Products", "Beef"], ... ) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the maximum value in each column according to the group. >>> df.groupby("food_type").idxmax() consumption co2_emissions food_type animal Beef Beef plant Wheat Products Wheat Products
python
pandas/core/groupby/generic.py
2,991
[ "self", "skipna", "numeric_only" ]
DataFrame
true
1
7.04
pandas-dev/pandas
47,362
numpy
false
categories
def categories(self) -> Index: """ The categories of this categorical. Setting assigns new values to each category (effectively a rename of each individual category). The assigned value has to be a list-like object. All items must be unique and the number of items in the new categories must be the same as the number of items in the old categories. Raises ------ ValueError If the new categories do not validate as categories or if the number of new categories is unequal the number of old categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- For :class:`pandas.Series`: >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category") >>> ser.cat.categories Index(['a', 'b', 'c'], dtype='str') >>> raw_cat = pd.Categorical([None, "b", "c", None], categories=["b", "c", "d"]) >>> ser = pd.Series(raw_cat) >>> ser.cat.categories Index(['b', 'c', 'd'], dtype='str') For :class:`pandas.Categorical`: >>> cat = pd.Categorical(["a", "b"], ordered=True) >>> cat.categories Index(['a', 'b'], dtype='str') For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(["a", "c", "b", "a", "c", "b"]) >>> ci.categories Index(['a', 'b', 'c'], dtype='str') >>> ci = pd.CategoricalIndex(["a", "c"], categories=["c", "b", "a"]) >>> ci.categories Index(['c', 'b', 'a'], dtype='str') """ return self.dtype.categories
The categories of this categorical. Setting assigns new values to each category (effectively a rename of each individual category). The assigned value has to be a list-like object. All items must be unique and the number of items in the new categories must be the same as the number of items in the old categories. Raises ------ ValueError If the new categories do not validate as categories or if the number of new categories is unequal the number of old categories See Also -------- rename_categories : Rename categories. reorder_categories : Reorder categories. add_categories : Add new categories. remove_categories : Remove the specified categories. remove_unused_categories : Remove categories which are not used. set_categories : Set the categories to the specified ones. Examples -------- For :class:`pandas.Series`: >>> ser = pd.Series(["a", "b", "c", "a"], dtype="category") >>> ser.cat.categories Index(['a', 'b', 'c'], dtype='str') >>> raw_cat = pd.Categorical([None, "b", "c", None], categories=["b", "c", "d"]) >>> ser = pd.Series(raw_cat) >>> ser.cat.categories Index(['b', 'c', 'd'], dtype='str') For :class:`pandas.Categorical`: >>> cat = pd.Categorical(["a", "b"], ordered=True) >>> cat.categories Index(['a', 'b'], dtype='str') For :class:`pandas.CategoricalIndex`: >>> ci = pd.CategoricalIndex(["a", "c", "b", "a", "c", "b"]) >>> ci.categories Index(['a', 'b', 'c'], dtype='str') >>> ci = pd.CategoricalIndex(["a", "c"], categories=["c", "b", "a"]) >>> ci.categories Index(['c', 'b', 'a'], dtype='str')
python
pandas/core/arrays/categorical.py
793
[ "self" ]
Index
true
1
6.64
pandas-dev/pandas
47,362
unknown
false
listGroups
default ListGroupsResult listGroups() { return listGroups(new ListGroupsOptions()); }
List the groups available in the cluster with the default options. <p>This is a convenience method for {@link #listGroups(ListGroupsOptions)} with default options. See the overload for more details. @return The ListGroupsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
1,070
[]
ListGroupsResult
true
1
6.32
apache/kafka
31,560
javadoc
false
substituteElementAccessExpression
function substituteElementAccessExpression(node: ElementAccessExpression) { if (node.expression.kind === SyntaxKind.SuperKeyword) { return createSuperElementAccessInAsyncMethod( node.argumentExpression, node, ); } return node; }
Hooks node substitutions. @param hint The context for the emitter. @param node The node to substitute.
typescript
src/compiler/transformers/es2018.ts
1,424
[ "node" ]
false
2
6.08
microsoft/TypeScript
107,154
jsdoc
false
visitForOfStatement
function visitForOfStatement(node: ForOfStatement): VisitResult<Statement> { if (isVariableDeclarationList(node.initializer) && !(node.initializer.flags & NodeFlags.BlockScoped)) { const exportStatements = appendExportsOfVariableDeclarationList(/*statements*/ undefined, node.initializer, /*isForInOrOfInitializer*/ true); const initializer = visitNode(node.initializer, discardedValueVisitor, isForInitializer); const expression = visitNode(node.expression, visitor, isExpression); let body = visitIterationBody(node.statement, topLevelNestedVisitor, context); if (some(exportStatements)) { body = isBlock(body) ? factory.updateBlock(body, [...exportStatements, ...body.statements]) : factory.createBlock([...exportStatements, body], /*multiLine*/ true); } return factory.updateForOfStatement(node, node.awaitModifier, initializer, expression, body); } return factory.updateForOfStatement( node, node.awaitModifier, visitNode(node.initializer, discardedValueVisitor, isForInitializer), visitNode(node.expression, visitor, isExpression), visitIterationBody(node.statement, topLevelNestedVisitor, context), ); }
Visits the body of a ForOfStatement to hoist declarations. @param node The node to visit.
typescript
src/compiler/transformers/module/module.ts
958
[ "node" ]
true
5
6.56
microsoft/TypeScript
107,154
jsdoc
false
findSingleMainClass
public static @Nullable String findSingleMainClass(File rootDirectory, @Nullable String annotationName) throws IOException { SingleMainClassCallback callback = new SingleMainClassCallback(annotationName); MainClassFinder.doWithMainClasses(rootDirectory, callback); return callback.getMainClassName(); }
Find a single main class from the given {@code rootDirectory}. A main class annotated with an annotation with the given {@code annotationName} will be preferred over a main class with no such annotation. @param rootDirectory the root directory to search @param annotationName the name of the annotation that may be present on the main class @return the main class or {@code null} @throws IOException if the directory cannot be read
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/MainClassFinder.java
111
[ "rootDirectory", "annotationName" ]
String
true
1
6.56
spring-projects/spring-boot
79,428
javadoc
false
isIterateeCall
function isIterateeCall(value, index, object) { if (!isObject(object)) { return false; } var type = typeof index; if (type == 'number' ? (isArrayLike(object) && isIndex(index, object.length)) : (type == 'string' && index in object) ) { return eq(object[index], value); } return false; }
Checks if the given arguments are from an iteratee call. @private @param {*} value The potential iteratee value argument. @param {*} index The potential iteratee index or key argument. @param {*} object The potential iteratee object argument. @returns {boolean} Returns `true` if the arguments are from an iteratee call, else `false`.
javascript
lodash.js
6,383
[ "value", "index", "object" ]
false
6
6.08
lodash/lodash
61,490
jsdoc
false
eye
def eye(n, M=None, k=0, dtype=float, order='C'): """ Return a matrix with ones on the diagonal and zeros elsewhere. Parameters ---------- n : int Number of rows in the output. M : int, optional Number of columns in the output, defaults to `n`. k : int, optional Index of the diagonal: 0 refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : dtype, optional Data-type of the returned matrix. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- I : matrix A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- numpy.eye : Equivalent array function. identity : Square identity matrix. Examples -------- >>> import numpy.matlib >>> np.matlib.eye(3, k=1, dtype=np.float64) matrix([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]) """ return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
Return a matrix with ones on the diagonal and zeros elsewhere. Parameters ---------- n : int Number of rows in the output. M : int, optional Number of columns in the output, defaults to `n`. k : int, optional Index of the diagonal: 0 refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : dtype, optional Data-type of the returned matrix. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. Returns ------- I : matrix A `n` x `M` matrix where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- numpy.eye : Equivalent array function. identity : Square identity matrix. Examples -------- >>> import numpy.matlib >>> np.matlib.eye(3, k=1, dtype=np.float64) matrix([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]])
python
numpy/matlib.py
191
[ "n", "M", "k", "dtype", "order" ]
false
1
6.32
numpy/numpy
31,054
numpy
false
count
def count(self): """ Compute count of group, excluding missing values. Returns ------- Series or DataFrame Count of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").count() 2023-01-01 2 2023-02-01 2 Freq: MS, dtype: int64 """ result = self._downsample("count") if not len(self.ax): if self._selected_obj.ndim == 1: result = type(self._selected_obj)( [], index=result.index, dtype="int64", name=self._selected_obj.name ) else: from pandas import DataFrame result = DataFrame( [], index=result.index, columns=result.columns, dtype="int64" ) return result
Compute count of group, excluding missing values. Returns ------- Series or DataFrame Count of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 >>> ser.resample("MS").count() 2023-01-01 2 2023-02-01 2 Freq: MS, dtype: int64
python
pandas/core/resample.py
1,845
[ "self" ]
false
4
7.68
pandas-dev/pandas
47,362
unknown
false
_validate_or_indexify_columns
def _validate_or_indexify_columns( content: list[np.ndarray], columns: Index | None ) -> Index: """ If columns is None, make numbers as column names; Otherwise, validate that columns have valid length. Parameters ---------- content : list of np.ndarrays columns : Index or None Returns ------- Index If columns is None, assign positional column index value as columns. Raises ------ 1. AssertionError when content is not composed of list of lists, and if length of columns is not equal to length of content. 2. ValueError when content is list of lists, but length of each sub-list is not equal 3. ValueError when content is list of lists, but length of sub-list is not equal to length of content """ if columns is None: columns = default_index(len(content)) else: # Add mask for data which is composed of list of lists is_mi_list = isinstance(columns, list) and all( isinstance(col, list) for col in columns ) if not is_mi_list and len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... raise AssertionError( f"{len(columns)} columns passed, passed data had {len(content)} columns" ) if is_mi_list: # check if nested list column, length of each sub-list should be equal if len({len(col) for col in columns}) > 1: raise ValueError( "Length of columns passed for MultiIndex columns is different" ) # if columns is not empty and length of sublist is not equal to content if columns and len(columns[0]) != len(content): raise ValueError( f"{len(columns[0])} columns passed, passed data had " f"{len(content)} columns" ) return columns
If columns is None, make numbers as column names; Otherwise, validate that columns have valid length. Parameters ---------- content : list of np.ndarrays columns : Index or None Returns ------- Index If columns is None, assign positional column index value as columns. Raises ------ 1. AssertionError when content is not composed of list of lists, and if length of columns is not equal to length of content. 2. ValueError when content is list of lists, but length of each sub-list is not equal 3. ValueError when content is list of lists, but length of sub-list is not equal to length of content
python
pandas/core/internals/construction.py
894
[ "content", "columns" ]
Index
true
10
7.04
pandas-dev/pandas
47,362
numpy
false
getBeanName
public static String getBeanName(MethodInvocation mi) throws IllegalStateException { if (!(mi instanceof ProxyMethodInvocation pmi)) { throw new IllegalArgumentException("MethodInvocation is not a Spring ProxyMethodInvocation: " + mi); } String beanName = (String) pmi.getUserAttribute(BEAN_NAME_ATTRIBUTE); if (beanName == null) { throw new IllegalStateException("Cannot get bean name; not set on MethodInvocation: " + mi); } return beanName; }
Find the bean name for the given invocation. Assumes that an ExposeBeanNameAdvisor has been included in the interceptor chain. @param mi the MethodInvocation that should contain the bean name as an attribute @return the bean name (never {@code null}) @throws IllegalStateException if the bean name has not been exposed
java
spring-aop/src/main/java/org/springframework/aop/interceptor/ExposeBeanNameAdvisors.java
70
[ "mi" ]
String
true
3
7.76
spring-projects/spring-framework
59,386
javadoc
false
prepare_import
def prepare_import(path: str) -> str: """Given a filename this will try to calculate the python path, add it to the search path and return the actual module name that is expected. """ path = os.path.realpath(path) fname, ext = os.path.splitext(path) if ext == ".py": path = fname if os.path.basename(path) == "__init__": path = os.path.dirname(path) module_name = [] # move up until outside package structure (no __init__.py) while True: path, name = os.path.split(path) module_name.append(name) if not os.path.exists(os.path.join(path, "__init__.py")): break if sys.path[0] != path: sys.path.insert(0, path) return ".".join(module_name[::-1])
Given a filename this will try to calculate the python path, add it to the search path and return the actual module name that is expected.
python
src/flask/cli.py
200
[ "path" ]
str
true
6
6
pallets/flask
70,946
unknown
false
h3Rotate60ccw
public static long h3Rotate60ccw(long h) { for (int r = 1, res = H3_get_resolution(h); r <= res; r++) { h = H3_set_index_digit(h, r, CoordIJK.rotate60ccw(H3_get_index_digit(h, r))); } return h; }
Rotate an H3Index 60 degrees counter-clockwise. @param h The H3Index.
java
libs/h3/src/main/java/org/elasticsearch/h3/H3Index.java
303
[ "h" ]
true
2
6.56
elastic/elasticsearch
75,680
javadoc
false
threadNamePrefix
public ThreadPoolTaskSchedulerBuilder threadNamePrefix(@Nullable String threadNamePrefix) { return new ThreadPoolTaskSchedulerBuilder(this.poolSize, this.awaitTermination, this.awaitTerminationPeriod, threadNamePrefix, this.taskDecorator, this.customizers); }
Set the prefix to use for the names of newly created threads. @param threadNamePrefix the thread name prefix to set @return a new builder instance
java
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskSchedulerBuilder.java
116
[ "threadNamePrefix" ]
ThreadPoolTaskSchedulerBuilder
true
1
6.64
spring-projects/spring-boot
79,428
javadoc
false
_prepared
def _prepared(self, tasks, partial_args, group_id, root_id, app, CallableSignature=abstract.CallableSignature, from_dict=Signature.from_dict, isinstance=isinstance, tuple=tuple): """Recursively unroll the group into a generator of its tasks. This is used by :meth:`apply_async` and :meth:`apply` to unroll the group into a list of tasks that can be evaluated. Note: This does not change the group itself, it only returns a generator of the tasks that the group would evaluate to. Arguments: tasks (list): List of tasks in the group (may contain nested groups). partial_args (list): List of arguments to be prepended to the arguments of each task. group_id (str): The group id of the group. root_id (str): The root id of the group. app (Celery): The Celery app instance. CallableSignature (class): The signature class of the group's tasks. from_dict (fun): Function to create a signature from a dict. isinstance (fun): Function to check if an object is an instance of a class. tuple (class): A tuple-like class. Returns: generator: A generator for the unrolled group tasks. The generator yields tuples of the form ``(task, AsyncResult, group_id)``. """ for index, task in enumerate(tasks): if isinstance(task, CallableSignature): # local sigs are always of type Signature, and we # clone them to make sure we don't modify the originals. task = task.clone() else: # serialized sigs must be converted to Signature. task = from_dict(task, app=app) if isinstance(task, group): # needs yield_from :( unroll = task._prepared( task.tasks, partial_args, group_id, root_id, app, ) yield from unroll else: if partial_args and not task.immutable: task.args = tuple(partial_args) + tuple(task.args) yield task, task.freeze(group_id=group_id, root_id=root_id, group_index=index), group_id
Recursively unroll the group into a generator of its tasks. This is used by :meth:`apply_async` and :meth:`apply` to unroll the group into a list of tasks that can be evaluated. Note: This does not change the group itself, it only returns a generator of the tasks that the group would evaluate to. Arguments: tasks (list): List of tasks in the group (may contain nested groups). partial_args (list): List of arguments to be prepended to the arguments of each task. group_id (str): The group id of the group. root_id (str): The root id of the group. app (Celery): The Celery app instance. CallableSignature (class): The signature class of the group's tasks. from_dict (fun): Function to create a signature from a dict. isinstance (fun): Function to check if an object is an instance of a class. tuple (class): A tuple-like class. Returns: generator: A generator for the unrolled group tasks. The generator yields tuples of the form ``(task, AsyncResult, group_id)``.
python
celery/canvas.py
1,693
[ "self", "tasks", "partial_args", "group_id", "root_id", "app", "CallableSignature", "from_dict", "isinstance", "tuple" ]
false
8
7.44
celery/celery
27,741
google
false
optJSONArray
public JSONArray optJSONArray(String name) { Object object = opt(name); return object instanceof JSONArray ? (JSONArray) object : null; }
Returns the value mapped by {@code name} if it exists and is a {@code JSONArray}. Returns null otherwise. @param name the name of the property @return the value or {@code null}
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
612
[ "name" ]
JSONArray
true
2
8
spring-projects/spring-boot
79,428
javadoc
false
buildOrThrow
@Override public ImmutableBiMap<K, V> buildOrThrow() { switch (size) { case 0: return of(); case 1: // requireNonNull is safe because the first `size` elements have been filled in. Entry<K, V> onlyEntry = requireNonNull(entries[0]); return of(onlyEntry.getKey(), onlyEntry.getValue()); default: /* * If entries is full, or if hash flooding is detected, then this implementation may end * up using the entries array directly and writing over the entry objects with * non-terminal entries, but this is safe; if this Builder is used further, it will grow * the entries array (so it can't affect the original array), and future build() calls * will always copy any entry objects that cannot be safely reused. */ if (valueComparator != null) { if (entriesUsed) { entries = Arrays.copyOf(entries, size); } sort( (Entry<K, V>[]) entries, // Entries up to size are not null 0, size, Ordering.from(valueComparator).onResultOf(Entry::getValue)); } entriesUsed = true; return RegularImmutableBiMap.fromEntryArray(size, entries); } }
Returns a newly-created immutable bimap, or throws an exception if any key or value was added more than once. The iteration order of the returned bimap is the order in which entries were inserted into the builder, unless {@link #orderEntriesByValue} was called, in which case entries are sorted by value. @throws IllegalArgumentException if duplicate keys or values were added @since 31.0
java
guava/src/com/google/common/collect/ImmutableBiMap.java
451
[]
true
3
6.88
google/guava
51,352
javadoc
false
negate
public static Boolean negate(final Boolean bool) { if (bool == null) { return null; } return bool.booleanValue() ? Boolean.FALSE : Boolean.TRUE; }
Negates the specified boolean. <p>If {@code null} is passed in, {@code null} will be returned.</p> <p>NOTE: This returns {@code null} and will throw a {@link NullPointerException} if unboxed to a boolean.</p> <pre> BooleanUtils.negate(Boolean.TRUE) = Boolean.FALSE; BooleanUtils.negate(Boolean.FALSE) = Boolean.TRUE; BooleanUtils.negate(null) = null; </pre> @param bool the Boolean to negate, may be null @return the negated Boolean, or {@code null} if {@code null} input
java
src/main/java/org/apache/commons/lang3/BooleanUtils.java
262
[ "bool" ]
Boolean
true
3
7.76
apache/commons-lang
2,896
javadoc
false
isExcluded
private boolean isExcluded(EndpointId endpointId) { if (this.exclude.isEmpty()) { return false; } return this.exclude.matches(endpointId); }
Return {@code true} if the filter matches. @param endpointId the endpoint ID to check @return {@code true} if the filter matches @since 2.6.0
java
module/spring-boot-actuator-autoconfigure/src/main/java/org/springframework/boot/actuate/autoconfigure/endpoint/expose/IncludeExcludeEndpointFilter.java
136
[ "endpointId" ]
true
2
7.92
spring-projects/spring-boot
79,428
javadoc
false
allDescriptions
public KafkaFuture<Map<Integer, Map<String, LogDirDescription>>> allDescriptions() { return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture<?>[0])). thenApply(v -> { Map<Integer, Map<String, LogDirDescription>> descriptions = new HashMap<>(futures.size()); for (Map.Entry<Integer, KafkaFuture<Map<String, LogDirDescription>>> entry : futures.entrySet()) { try { descriptions.put(entry.getKey(), entry.getValue().get()); } catch (InterruptedException | ExecutionException e) { // This should be unreachable, because allOf ensured that all the futures completed successfully. throw new RuntimeException(e); } } return descriptions; }); }
Return a future which succeeds only if all the brokers have responded without error. The result of the future is a map from brokerId to a map from broker log directory path to a description of that log directory.
java
clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java
50
[]
true
2
6.72
apache/kafka
31,560
javadoc
false
secondsFrac
public double secondsFrac() { return ((double) nanos()) / C3; }
@return the number of {@link #timeUnit()} units this value contains
java
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
186
[]
true
1
6
elastic/elasticsearch
75,680
javadoc
false
nop
@SuppressWarnings("unchecked") static <E extends Throwable> FailableDoubleToIntFunction<E> nop() { return NOP; }
Gets the NOP singleton. @param <E> The kind of thrown exception or error. @return The NOP singleton.
java
src/main/java/org/apache/commons/lang3/function/FailableDoubleToIntFunction.java
41
[]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
_generate_strided_index
def _generate_strided_index(self, index: sympy.Expr) -> str: """ Generate JAX code to compute an index array for strided/complex indexing patterns. For expressions like `2 * x3 + 32 * x2 + 256 * x1 + 1024 * x0`, we generate code that computes the flattened index array using broadcasting. The iteration variables (x0, x1, x2, x3) are already defined as jnp.arange arrays in the kernel. We just need to convert the sympy expression to JAX code. """ # Get iteration variables iter_vars = OrderedSet(self.range_tree_nodes.keys()) free_symbols = index.free_symbols # Check that all free symbols are iteration variables (no indirect vars) used_vars = free_symbols & iter_vars if used_vars != free_symbols: raise Unsupported( f"Pallas backend does not yet support mixed index pattern: {index}" ) # Convert sympy expression to Python/JAX code string # The iteration variables are already defined as jnp.arange arrays index_str = self.kexpr(index) # Mark this as requiring flatten access return index_str
Generate JAX code to compute an index array for strided/complex indexing patterns. For expressions like `2 * x3 + 32 * x2 + 256 * x1 + 1024 * x0`, we generate code that computes the flattened index array using broadcasting. The iteration variables (x0, x1, x2, x3) are already defined as jnp.arange arrays in the kernel. We just need to convert the sympy expression to JAX code.
python
torch/_inductor/codegen/pallas.py
984
[ "self", "index" ]
str
true
2
6
pytorch/pytorch
96,034
unknown
false
replaceWithNullptr
static void replaceWithNullptr(ClangTidyCheck &Check, SourceManager &SM, SourceLocation StartLoc, SourceLocation EndLoc) { const CharSourceRange Range(SourceRange(StartLoc, EndLoc), true); // Add a space if nullptr follows an alphanumeric character. This happens // whenever there is an c-style explicit cast to nullptr not surrounded by // parentheses and right beside a return statement. const SourceLocation PreviousLocation = StartLoc.getLocWithOffset(-1); const bool NeedsSpace = isAlphanumeric(*SM.getCharacterData(PreviousLocation)); Check.diag(Range.getBegin(), "use nullptr") << FixItHint::CreateReplacement( Range, NeedsSpace ? " nullptr" : "nullptr"); }
Returns true if and only if a replacement was made.
cpp
clang-tools-extra/clang-tidy/modernize/UseNullptrCheck.cpp
93
[ "StartLoc", "EndLoc" ]
true
2
6
llvm/llvm-project
36,021
doxygen
false
replace
public static <V> String replace(final Object source, final Map<String, V> valueMap, final String prefix, final String suffix) { return new StrSubstitutor(valueMap, prefix, suffix).replace(source); }
Replaces all the occurrences of variables in the given source object with their matching values from the map. This method allows to specify a custom variable prefix and suffix. @param <V> the type of the values in the map. @param source the source text containing the variables to substitute, null returns null. @param valueMap the map with the values, may be null. @param prefix the prefix of variables, not null. @param suffix the suffix of variables, not null. @return the result of the replace operation. @throws IllegalArgumentException if the prefix or suffix is null.
java
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
194
[ "source", "valueMap", "prefix", "suffix" ]
String
true
1
6.8
apache/commons-lang
2,896
javadoc
false
from
static SslManagerBundle from(@Nullable SslStoreBundle storeBundle, @Nullable SslBundleKey key) { return new DefaultSslManagerBundle(storeBundle, key); }
Factory method to create a new {@link SslManagerBundle} backed by the given {@link SslBundle} and {@link SslBundleKey}. @param storeBundle the SSL store bundle @param key the key reference @return a new {@link SslManagerBundle} instance
java
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslManagerBundle.java
125
[ "storeBundle", "key" ]
SslManagerBundle
true
1
6.48
spring-projects/spring-boot
79,428
javadoc
false
replaceEachRepeatedly
public static String replaceEachRepeatedly(final String text, final String[] searchList, final String[] replacementList) { final int timeToLive = Math.max(ArrayUtils.getLength(searchList), DEFAULT_TTL); return replaceEach(text, searchList, replacementList, true, timeToLive); }
Replaces all occurrences of Strings within another String. <p> A {@code null} reference passed to this method is a no-op, or if any "search string" or "string to replace" is null, that replace will be ignored. </p> <pre> StringUtils.replaceEachRepeatedly(null, *, *) = null StringUtils.replaceEachRepeatedly("", *, *) = "" StringUtils.replaceEachRepeatedly("aba", null, null) = "aba" StringUtils.replaceEachRepeatedly("aba", new String[0], null) = "aba" StringUtils.replaceEachRepeatedly("aba", null, new String[0]) = "aba" StringUtils.replaceEachRepeatedly("aba", new String[]{"a"}, null) = "aba" StringUtils.replaceEachRepeatedly("aba", new String[]{"a"}, new String[]{""}) = "b" StringUtils.replaceEachRepeatedly("aba", new String[]{null}, new String[]{"a"}) = "aba" StringUtils.replaceEachRepeatedly("abcde", new String[]{"ab", "d"}, new String[]{"w", "t"}) = "wcte" (example of how it repeats) StringUtils.replaceEachRepeatedly("abcde", new String[]{"ab", "d"}, new String[]{"d", "t"}) = "tcte" StringUtils.replaceEachRepeatedly("abcde", new String[]{"ab", "d"}, new String[]{"d", "ab"}) = Throws {@link IllegalStateException} </pre> @param text text to search and replace in, no-op if null. @param searchList the Strings to search for, no-op if null. @param replacementList the Strings to replace them with, no-op if null. @return the text with any replacements processed, {@code null} if null String input. @throws IllegalStateException if the search is repeating and there is an endless loop due to outputs of one being inputs to another. @throws IllegalArgumentException if the lengths of the arrays are not the same (null is ok, and/or size 0). @since 2.4
java
src/main/java/org/apache/commons/lang3/StringUtils.java
6,549
[ "text", "searchList", "replacementList" ]
String
true
1
6.48
apache/commons-lang
2,896
javadoc
false
ignore_warnings
def ignore_warnings(obj=None, category=Warning): """Context manager and decorator to ignore warnings. Note: Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging, this is not your tool of choice. Parameters ---------- obj : callable, default=None callable where you want to ignore the warnings. category : warning class, default=Warning The category to filter. If Warning, all categories will be muted. Examples -------- >>> import warnings >>> from sklearn.utils._testing import ignore_warnings >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42 """ if isinstance(obj, type) and issubclass(obj, Warning): # Avoid common pitfall of passing category as the first positional # argument which result in the test not being run warning_name = obj.__name__ raise ValueError( "'obj' should be a callable where you want to ignore warnings. " "You passed a warning class instead: 'obj={warning_name}'. " "If you want to pass a warning class to ignore_warnings, " "you should use 'category={warning_name}'".format(warning_name=warning_name) ) elif callable(obj): return _IgnoreWarnings(category=category)(obj) else: return _IgnoreWarnings(category=category)
Context manager and decorator to ignore warnings. Note: Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging, this is not your tool of choice. Parameters ---------- obj : callable, default=None callable where you want to ignore the warnings. category : warning class, default=Warning The category to filter. If Warning, all categories will be muted. Examples -------- >>> import warnings >>> from sklearn.utils._testing import ignore_warnings >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42
python
sklearn/utils/_testing.py
70
[ "obj", "category" ]
false
5
7.36
scikit-learn/scikit-learn
64,340
numpy
false
mergeIfPossible
@SuppressWarnings("unchecked") private static void mergeIfPossible(Map<String, Object> source, MutablePropertySources sources, Map<String, Object> resultingSource) { PropertySource<?> existingSource = sources.get(NAME); if (existingSource != null) { Object underlyingSource = existingSource.getSource(); if (underlyingSource instanceof Map) { resultingSource.putAll((Map<String, Object>) underlyingSource); } resultingSource.putAll(source); } }
Add a new {@link DefaultPropertiesPropertySource} or merge with an existing one. @param source the {@code Map} source @param sources the existing sources @since 2.4.4
java
core/spring-boot/src/main/java/org/springframework/boot/env/DefaultPropertiesPropertySource.java
100
[ "source", "sources", "resultingSource" ]
void
true
3
6.88
spring-projects/spring-boot
79,428
javadoc
false
isBinaryOpContext
function isBinaryOpContext(context: FormattingContext): boolean { switch (context.contextNode.kind) { case SyntaxKind.BinaryExpression: return (context.contextNode as BinaryExpression).operatorToken.kind !== SyntaxKind.CommaToken; case SyntaxKind.ConditionalExpression: case SyntaxKind.ConditionalType: case SyntaxKind.AsExpression: case SyntaxKind.ExportSpecifier: case SyntaxKind.ImportSpecifier: case SyntaxKind.TypePredicate: case SyntaxKind.UnionType: case SyntaxKind.IntersectionType: case SyntaxKind.SatisfiesExpression: return true; // equals in binding elements: function foo([[x, y] = [1, 2]]) case SyntaxKind.BindingElement: // equals in type X = ... // falls through case SyntaxKind.TypeAliasDeclaration: // equal in import a = module('a'); // falls through case SyntaxKind.ImportEqualsDeclaration: // equal in export = 1 // falls through case SyntaxKind.ExportAssignment: // equal in let a = 0 // falls through case SyntaxKind.VariableDeclaration: // equal in p = 0 // falls through case SyntaxKind.Parameter: case SyntaxKind.EnumMember: case SyntaxKind.PropertyDeclaration: case SyntaxKind.PropertySignature: return context.currentTokenSpan.kind === SyntaxKind.EqualsToken || context.nextTokenSpan.kind === SyntaxKind.EqualsToken; // "in" keyword in for (let x in []) { } case SyntaxKind.ForInStatement: // "in" keyword in [P in keyof T]: T[P] // falls through case SyntaxKind.TypeParameter: return context.currentTokenSpan.kind === SyntaxKind.InKeyword || context.nextTokenSpan.kind === SyntaxKind.InKeyword || context.currentTokenSpan.kind === SyntaxKind.EqualsToken || context.nextTokenSpan.kind === SyntaxKind.EqualsToken; // Technically, "of" is not a binary operator, but format it the same way as "in" case SyntaxKind.ForOfStatement: return context.currentTokenSpan.kind === SyntaxKind.OfKeyword || context.nextTokenSpan.kind === SyntaxKind.OfKeyword; } return false; }
A rule takes a two tokens (left/right) and a particular context for which you're meant to look at them. You then declare what should the whitespace annotation be between these tokens via the action param. @param debugName Name to print @param left The left side of the comparison @param right The right side of the comparison @param context A set of filters to narrow down the space in which this formatter rule applies @param action a declaration of the expected whitespace @param flags whether the rule deletes a line or not, defaults to no-op
typescript
src/services/formatting/rules.ts
501
[ "context" ]
true
6
6.4
microsoft/TypeScript
107,154
jsdoc
false
detectRuntime
function detectRuntime(): RuntimeName { // Note: we're currently not taking 'fastly' into account. Why? const runtimeChecks = [ [isNetlify, 'netlify'], [isEdgeLight, 'edge-light'], [isWorkerd, 'workerd'], [isDeno, 'deno'], [isBun, 'bun'], [isNode, 'node'], ] as const const detectedRuntime = runtimeChecks // TODO: Transforming destructuring to the configured target environment ('chrome58', 'edge16', 'firefox57', 'safari11') is not supported yet, // so we can't write the following code yet: // ``` // .flatMap(([isCurrentRuntime, runtime]) => isCurrentRuntime() ? [runtime] : []) // ``` .flatMap((check) => (check[0]() ? [check[1]] : [])) .at(0) ?? '' return detectedRuntime }
Indicates if running in Cloudflare Workers runtime. See: https://developers.cloudflare.com/workers/runtime-apis/web-standards/#navigatoruseragent
typescript
packages/client/src/runtime/utils/getRuntime.ts
37
[]
true
2
6.56
prisma/prisma
44,834
jsdoc
false
onEmitNode
function onEmitNode(hint: EmitHint, node: Node, emitCallback: (hint: EmitHint, node: Node) => void) { if (enabledSubstitutions & ES2015SubstitutionFlags.CapturedThis && isFunctionLike(node)) { // If we are tracking a captured `this`, keep track of the enclosing function. const ancestorFacts = enterSubtree( HierarchyFacts.FunctionExcludes, getEmitFlags(node) & EmitFlags.CapturesThis ? HierarchyFacts.FunctionIncludes | HierarchyFacts.CapturesThis : HierarchyFacts.FunctionIncludes, ); previousOnEmitNode(hint, node, emitCallback); exitSubtree(ancestorFacts, HierarchyFacts.None, HierarchyFacts.None); return; } previousOnEmitNode(hint, node, emitCallback); }
Called by the printer just before a node is printed. @param hint A hint as to the intended usage of the node. @param node The node to be printed. @param emitCallback The callback used to emit the node.
typescript
src/compiler/transformers/es2015.ts
4,855
[ "hint", "node", "emitCallback" ]
false
4
6.08
microsoft/TypeScript
107,154
jsdoc
false
elementIterator
@Override Iterator<E> elementIterator() { throw new AssertionError("should never be called"); }
Sets the number of occurrences of {@code element} to {@code newCount}, but only if the count is currently {@code expectedOldCount}. If {@code element} does not appear in the multiset exactly {@code expectedOldCount} times, no changes will be made. @return {@code true} if the change was successful. This usually indicates that the multiset has been modified, but not always: in the case that {@code expectedOldCount == newCount}, the method will return {@code true} if the condition was met. @throws IllegalArgumentException if {@code expectedOldCount} or {@code newCount} is negative
java
android/guava/src/com/google/common/collect/ConcurrentHashMultiset.java
502
[]
true
1
6.64
google/guava
51,352
javadoc
false
weakKeys
@GwtIncompatible // java.lang.ref.WeakReference @CanIgnoreReturnValue public CacheBuilder<K, V> weakKeys() { return setKeyStrength(Strength.WEAK); }
Specifies that each key (not value) stored in the cache should be wrapped in a {@link WeakReference} (by default, strong references are used). <p><b>Warning:</b> when this method is used, the resulting cache will use identity ({@code ==}) comparison to determine equality of keys. Its {@link Cache#asMap} view will therefore technically violate the {@link Map} specification (in the same way that {@link IdentityHashMap} does). <p>Entries with keys that have been garbage collected may be counted in {@link Cache#size}, but will never be visible to read or write operations; such entries are cleaned up as part of the routine maintenance described in the class javadoc. @return this {@code CacheBuilder} instance (for chaining) @throws IllegalStateException if the key strength was already set
java
android/guava/src/com/google/common/cache/CacheBuilder.java
626
[]
true
1
6.72
google/guava
51,352
javadoc
false
try_finally
def try_finally( self, code_options: dict[str, Any], cleanup: list[Instruction] ) -> list[Instruction]: """ Codegen based off of: load args enter context try: (rest) finally: exit context """ # NOTE: we assume that TOS is a context manager CLASS! load_args = [] if self.target_values: load_args = [create_load_const(val) for val in self.target_values] ctx_name = unique_id(f"___context_manager_{self.stack_index}") if ctx_name not in code_options["co_varnames"]: code_options["co_varnames"] += (ctx_name,) for name in ["__enter__", "__exit__"]: if name not in code_options["co_names"]: code_options["co_names"] += (name,) create_ctx: list[Instruction] = [] _initial_push_null(create_ctx) create_ctx.extend( [ *load_args, *create_call_function(len(load_args), False), create_instruction("STORE_FAST", argval=ctx_name), ] ) def _template(ctx: AbstractContextManager[Any], dummy: Any) -> None: ctx.__enter__() try: dummy finally: ctx.__exit__(None, None, None) setup_try_finally, epilogue = _bytecode_from_template_with_split( _template, self.stack_index, varname_map={"ctx": ctx_name} ) cleanup[:] = epilogue + cleanup return create_ctx + setup_try_finally
Codegen based off of: load args enter context try: (rest) finally: exit context
python
torch/_dynamo/resume_execution.py
145
[ "self", "code_options", "cleanup" ]
list[Instruction]
true
5
6.4
pytorch/pytorch
96,034
unknown
false
run
@SuppressWarnings("ShortCircuitBoolean") @Override public void run() { Thread currentThread = Thread.currentThread(); if (currentThread != submitting) { /* * requireNonNull is safe because we set `task` before submitting this Runnable to an * Executor, and we don't null it out until here. */ Runnable localTask = requireNonNull(task); task = null; localTask.run(); return; } // Executor called reentrantly! Make sure that further calls don't overflow stack. Further // reentrant calls will see that their current thread is the same as the one set in // latestTaskQueue, and queue rather than calling execute() directly. ThreadConfinedTaskQueue executingTaskQueue = new ThreadConfinedTaskQueue(); executingTaskQueue.thread = currentThread; /* * requireNonNull is safe because we don't null out `sequencer` except: * * - after the requireNonNull call below. (And this object has its Runnable.run override * called only once, just as it has its Executor.execute override called only once.) * * - if we return immediately from `execute` (in which case we never get here) * * - in the "reentrant submit" case of `execute` (in which case we must have started running a * user task -- which means that we already got past this code (or else we exited early * above)) */ // Unconditionally set; there is no risk of throwing away a queued task from another thread, // because in order for the current task to run on this executor the previous task must have // already started execution. Because each task on a TaskNonReentrantExecutor can only produce // one execute() call to another instance from the same ExecutionSequencer, we know by // induction that the task that launched this one must not have added any other runnables to // that thread's queue, and thus we cannot be replacing a TaskAndThread object that would // otherwise have another task queued on to it. 
Note the exception to this, cancellation, is // specially handled in execute() - execute() calls triggered by cancellation are no-ops, and // thus don't count. requireNonNull(sequencer).latestTaskQueue = executingTaskQueue; sequencer = null; try { // requireNonNull is safe, as discussed above. Runnable localTask = requireNonNull(task); task = null; localTask.run(); // Now check if our task attempted to reentrantly execute the next task. Runnable queuedTask; Executor queuedExecutor; // Intentionally using non-short-circuit operator while ((queuedTask = executingTaskQueue.nextTask) != null && (queuedExecutor = executingTaskQueue.nextExecutor) != null) { executingTaskQueue.nextTask = null; executingTaskQueue.nextExecutor = null; queuedExecutor.execute(queuedTask); } } finally { // Null out the thread field, so that we don't leak a reference to Thread, and so that // future `thread == currentThread()` calls from this thread don't incorrectly queue instead // of executing. Don't null out the latestTaskQueue field, because the work done here // may have scheduled more operations on another thread, and if those operations then // trigger reentrant calls that thread will have updated the latestTaskQueue field, and // we'd be interfering with their operation. executingTaskQueue.thread = null; } }
Thread that called execute(). Set in execute, cleared when delegate.execute() returns.
java
android/guava/src/com/google/common/util/concurrent/ExecutionSequencer.java
378
[]
void
true
4
6.64
google/guava
51,352
javadoc
false
afterPropertiesSet
@Override public void afterPropertiesSet() { if (this.name == null) { this.name = this.beanName; } if (this.group == null) { this.group = Scheduler.DEFAULT_GROUP; } if (this.jobDetail != null) { this.jobDataMap.put("jobDetail", this.jobDetail); } if (this.startDelay > 0 || this.startTime == null) { this.startTime = new Date(System.currentTimeMillis() + this.startDelay); } SimpleTriggerImpl sti = new SimpleTriggerImpl(); sti.setName(this.name != null ? this.name : toString()); sti.setGroup(this.group); if (this.jobDetail != null) { sti.setJobKey(this.jobDetail.getKey()); } sti.setJobDataMap(this.jobDataMap); sti.setStartTime(this.startTime); sti.setRepeatInterval(this.repeatInterval); sti.setRepeatCount(this.repeatCount); sti.setPriority(this.priority); sti.setMisfireInstruction(this.misfireInstruction); sti.setDescription(this.description); this.simpleTrigger = sti; }
Associate a textual description with this trigger.
java
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SimpleTriggerFactoryBean.java
238
[]
void
true
8
6.4
spring-projects/spring-framework
59,386
javadoc
false
setArgumentNamesFromStringArray
public void setArgumentNamesFromStringArray(@Nullable String... argumentNames) { this.argumentNames = new String[argumentNames.length]; for (int i = 0; i < argumentNames.length; i++) { String argumentName = argumentNames[i]; this.argumentNames[i] = argumentName != null ? argumentName.strip() : null; if (!isVariableName(this.argumentNames[i])) { throw new IllegalArgumentException( "'argumentNames' property of AbstractAspectJAdvice contains an argument name '" + this.argumentNames[i] + "' that is not a valid Java identifier"); } } if (this.aspectJAdviceMethod.getParameterCount() == this.argumentNames.length + 1) { // May need to add implicit join point arg name... for (int i = 0; i < this.aspectJAdviceMethod.getParameterCount(); i++) { Class<?> argType = this.aspectJAdviceMethod.getParameterTypes()[i]; if (argType == JoinPoint.class || argType == ProceedingJoinPoint.class || argType == JoinPoint.StaticPart.class) { @Nullable String[] oldNames = this.argumentNames; this.argumentNames = new String[oldNames.length + 1]; System.arraycopy(oldNames, 0, this.argumentNames, 0, i); this.argumentNames[i] = "THIS_JOIN_POINT"; System.arraycopy(oldNames, i, this.argumentNames, i + 1, oldNames.length - i); break; } } } }
Set by the creator of this advice object if the argument names are known. <p>This could be for example because they have been explicitly specified in XML or in an advice annotation. @param argumentNames list of argument names
java
spring-aop/src/main/java/org/springframework/aop/aspectj/AbstractAspectJAdvice.java
262
[]
void
true
9
6.72
spring-projects/spring-framework
59,386
javadoc
false
dumps
def dumps(obj: t.Any, **kwargs: t.Any) -> str: """Serialize data as JSON. If :data:`~flask.current_app` is available, it will use its :meth:`app.json.dumps() <flask.json.provider.JSONProvider.dumps>` method, otherwise it will use :func:`json.dumps`. :param obj: The data to serialize. :param kwargs: Arguments passed to the ``dumps`` implementation. .. versionchanged:: 2.3 The ``app`` parameter was removed. .. versionchanged:: 2.2 Calls ``current_app.json.dumps``, allowing an app to override the behavior. .. versionchanged:: 2.0.2 :class:`decimal.Decimal` is supported by converting to a string. .. versionchanged:: 2.0 ``encoding`` will be removed in Flask 2.1. .. versionchanged:: 1.0.3 ``app`` can be passed directly, rather than requiring an app context for configuration. """ if current_app: return current_app.json.dumps(obj, **kwargs) kwargs.setdefault("default", _default) return _json.dumps(obj, **kwargs)
Serialize data as JSON. If :data:`~flask.current_app` is available, it will use its :meth:`app.json.dumps() <flask.json.provider.JSONProvider.dumps>` method, otherwise it will use :func:`json.dumps`. :param obj: The data to serialize. :param kwargs: Arguments passed to the ``dumps`` implementation. .. versionchanged:: 2.3 The ``app`` parameter was removed. .. versionchanged:: 2.2 Calls ``current_app.json.dumps``, allowing an app to override the behavior. .. versionchanged:: 2.0.2 :class:`decimal.Decimal` is supported by converting to a string. .. versionchanged:: 2.0 ``encoding`` will be removed in Flask 2.1. .. versionchanged:: 1.0.3 ``app`` can be passed directly, rather than requiring an app context for configuration.
python
src/flask/json/__init__.py
13
[ "obj" ]
str
true
2
6.24
pallets/flask
70,946
sphinx
false
toString
@Override public String toString() { return this.path; }
Determine if this template location exists using the specified {@link ResourcePatternResolver}. @param resolver the resolver used to test if the location exists @return {@code true} if the location exists.
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/template/TemplateLocation.java
77
[]
String
true
1
6.16
spring-projects/spring-boot
79,428
javadoc
false
generateResolverForConstructor
private CodeBlock generateResolverForConstructor(ConstructorDescriptor descriptor) { CodeBlock parameterTypes = generateParameterTypesCode(descriptor.constructor().getParameterTypes()); return CodeBlock.of("return $T.<$T>forConstructor($L)", BeanInstanceSupplier.class, descriptor.publicType(), parameterTypes); }
Generate the instance supplier code. @param registeredBean the bean to handle @param instantiationDescriptor the executable to use to create the bean @return the generated code @since 6.1.7
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
247
[ "descriptor" ]
CodeBlock
true
1
6.24
spring-projects/spring-framework
59,386
javadoc
false
compareParameterTypes
private static int compareParameterTypes(final Executable left, final Executable right, final Class<?>[] actual) { final float leftCost = getTotalTransformationCost(actual, left); final float rightCost = getTotalTransformationCost(actual, right); return Float.compare(leftCost, rightCost); }
Compares the relative fitness of two Executables in terms of how well they match a set of runtime parameter types, such that a list ordered by the results of the comparison would return the best match first (least). @param left the "left" Executable. @param right the "right" Executable. @param actual the runtime parameter types to match against. {@code left}/{@code right}. @return int consistent with {@code compare} semantics.
java
src/main/java/org/apache/commons/lang3/reflect/MemberUtils.java
122
[ "left", "right", "actual" ]
true
1
6.88
apache/commons-lang
2,896
javadoc
false
removeAll
public static float[] removeAll(final float[] array, final int... indices) { return (float[]) removeAll((Object) array, indices); }
Removes the elements at the specified positions from the specified array. All remaining elements are shifted to the left. <p> This method returns a new array with the same elements of the input array except those at the specified positions. The component type of the returned array is always the same as that of the input array. </p> <p> If the input array is {@code null}, an IndexOutOfBoundsException will be thrown, because in that case no valid index can be specified. </p> <pre> ArrayUtils.removeAll([1], 0) = [] ArrayUtils.removeAll([2, 6], 0) = [6] ArrayUtils.removeAll([2, 6], 0, 1) = [] ArrayUtils.removeAll([2, 6, 3], 1, 2) = [2] ArrayUtils.removeAll([2, 6, 3], 0, 2) = [6] ArrayUtils.removeAll([2, 6, 3], 0, 1, 2) = [] </pre> @param array the array to remove the element from, may not be {@code null}. @param indices the positions of the elements to be removed. @return A new array containing the existing elements except those at the specified positions. @throws IndexOutOfBoundsException if any index is out of range (index &lt; 0 || index &gt;= array.length), or if the array is {@code null}. @since 3.0.1
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
5,080
[ "array" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
set_multiprocessing_start_method
def set_multiprocessing_start_method(): """Set multiprocessing start method to 'fork' if not on Linux.""" if platform.system() != "Linux": try: multiprocessing.set_start_method("fork") except RuntimeError: # The method is already set pass
Set multiprocessing start method to 'fork' if not on Linux.
python
t/integration/test_tasks.py
33
[]
false
2
6.4
celery/celery
27,741
unknown
false
buffer_to_ndarray
def buffer_to_ndarray( buffer: Buffer, dtype: tuple[DtypeKind, int, str, str], *, length: int, offset: int = 0, ) -> np.ndarray: """ Build a NumPy array from the passed buffer. Parameters ---------- buffer : Buffer Buffer to build a NumPy array from. dtype : tuple Data type of the buffer conforming protocol dtypes format. offset : int, default: 0 Number of elements to offset from the start of the buffer. length : int, optional If the buffer is a bit-mask, specifies a number of bits to read from the buffer. Has no effect otherwise. Returns ------- np.ndarray Notes ----- The returned array doesn't own the memory. The caller of this function is responsible for keeping the memory owner object alive as long as the returned NumPy array is being used. """ kind, bit_width, _, _ = dtype column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None) if column_dtype is None: raise NotImplementedError(f"Conversion for {dtype} is not yet supported.") # TODO: No DLPack yet, so need to construct a new ndarray from the data pointer # and size in the buffer plus the dtype on the column. Use DLPack as NumPy supports # it since https://github.com/numpy/numpy/pull/19083 ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype) if bit_width == 1: assert length is not None, "`length` must be specified for a bit-mask buffer." pa = import_optional_dependency("pyarrow") arr = pa.BooleanArray.from_buffers( pa.bool_(), length, [None, pa.foreign_buffer(buffer.ptr, length)], offset=offset, ) return np.asarray(arr) else: data_pointer = ctypes.cast( buffer.ptr + (offset * bit_width // 8), ctypes.POINTER(ctypes_type) ) if length > 0: return np.ctypeslib.as_array(data_pointer, shape=(length,)) return np.array([], dtype=ctypes_type)
Build a NumPy array from the passed buffer. Parameters ---------- buffer : Buffer Buffer to build a NumPy array from. dtype : tuple Data type of the buffer conforming protocol dtypes format. offset : int, default: 0 Number of elements to offset from the start of the buffer. length : int, optional If the buffer is a bit-mask, specifies a number of bits to read from the buffer. Has no effect otherwise. Returns ------- np.ndarray Notes ----- The returned array doesn't own the memory. The caller of this function is responsible for keeping the memory owner object alive as long as the returned NumPy array is being used.
python
pandas/core/interchange/from_dataframe.py
466
[ "buffer", "dtype", "length", "offset" ]
np.ndarray
true
5
6.96
pandas-dev/pandas
47,362
numpy
false
poll
boolean poll(Timer timer) { return poll(timer, true); }
Fetch the committed offsets for a set of partitions. This is a non-blocking call. The returned future can be polled to get the actual offsets returned from the broker. @param partitions The set of partitions to get offsets for. @return A request future containing the committed offsets.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
1,704
[ "timer" ]
true
1
6.96
apache/kafka
31,560
javadoc
false
detectDeprecation
public CodeWarnings detectDeprecation(AnnotatedElement... elements) { for (AnnotatedElement element : elements) { registerDeprecationIfNecessary(element); } return this; }
Detect the presence of {@link Deprecated} on the specified elements. @param elements the elements to check @return {@code this} instance
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/CodeWarnings.java
64
[]
CodeWarnings
true
1
6.88
spring-projects/spring-framework
59,386
javadoc
false
codegen
def codegen( self, output_name: str, input_names: list[str], output_spec: Spec ) -> str: """Generate code for masked_select with synthesized inputs to match size. Constructs an input tensor and mask so that exactly k elements are selected, where k = output_spec.size[0]. No data-dependent guards. """ if len(input_names) != 2: raise ValueError("MaskedSelectOperator requires exactly two inputs") if not isinstance(output_spec, TensorSpec) or len(output_spec.size) != 1: raise ValueError("MaskedSelectOperator requires 1D TensorSpec output") k = output_spec.size[0] # Build a 1D input of length >= k and a mask with first k positions True # Use input's device and output dtype to avoid mismatches return ( f"_x_ms = torch.arange(max({k}, 1), device={input_names[0]}.device).to({input_names[0]}.dtype)\n" f"_mask_ms = torch.zeros_like(_x_ms, dtype=torch.bool)\n" f"_mask_ms[:{k}] = True\n" f"{output_name} = torch.masked_select(_x_ms, _mask_ms)" )
Generate code for masked_select with synthesized inputs to match size. Constructs an input tensor and mask so that exactly k elements are selected, where k = output_spec.size[0]. No data-dependent guards.
python
tools/experimental/torchfuzz/operators/masked_select.py
44
[ "self", "output_name", "input_names", "output_spec" ]
str
true
4
6
pytorch/pytorch
96,034
unknown
false
cond
function cond(pairs) { var length = pairs == null ? 0 : pairs.length, toIteratee = getIteratee(); pairs = !length ? [] : arrayMap(pairs, function(pair) { if (typeof pair[1] != 'function') { throw new TypeError(FUNC_ERROR_TEXT); } return [toIteratee(pair[0]), pair[1]]; }); return baseRest(function(args) { var index = -1; while (++index < length) { var pair = pairs[index]; if (apply(pair[0], this, args)) { return apply(pair[1], this, args); } } }); }
Creates a function that iterates over `pairs` and invokes the corresponding function of the first predicate to return truthy. The predicate-function pairs are invoked with the `this` binding and arguments of the created function. @static @memberOf _ @since 4.0.0 @category Util @param {Array} pairs The predicate-function pairs. @returns {Function} Returns the new composite function. @example var func = _.cond([ [_.matches({ 'a': 1 }), _.constant('matches A')], [_.conforms({ 'b': _.isNumber }), _.constant('matches B')], [_.stubTrue, _.constant('no match')] ]); func({ 'a': 1, 'b': 2 }); // => 'matches A' func({ 'a': 0, 'b': 1 }); // => 'matches B' func({ 'a': '1', 'b': '2' }); // => 'no match'
javascript
lodash.js
15,434
[ "pairs" ]
false
6
7.68
lodash/lodash
61,490
jsdoc
false
shouldDisassemble
static bool shouldDisassemble(const BinaryFunction &BF) { if (BF.isPseudo()) return false; if (opts::processAllFunctions()) return true; return !BF.isIgnored(); }
Return true if the function \p BF should be disassembled.
cpp
bolt/lib/Rewrite/RewriteInstance.cpp
486
[]
true
3
7.04
llvm/llvm-project
36,021
doxygen
false