function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
_validate_usecols_names
def _validate_usecols_names(self, usecols: SequenceT, names: Sequence) -> SequenceT: """ Validates that all usecols are present in a given list of names. If not, raise a ValueError that shows what usecols are missing. Parameters ---------- usecols : iterable of usecols The columns to validate are present in names. names : iterable of names The column names to check against. Returns ------- usecols : iterable of usecols The `usecols` parameter if the validation succeeds. Raises ------ ValueError : Columns were missing. Error message will list them. """ missing = [c for c in usecols if c not in names] if len(missing) > 0: raise ValueError( f"Usecols do not match columns, columns expected but not found: " f"{missing}" ) return usecols
Validates that all usecols are present in a given list of names. If not, raise a ValueError that shows what usecols are missing. Parameters ---------- usecols : iterable of usecols The columns to validate are present in names. names : iterable of names The column names to check against. Returns ------- usecols : iterable of usecols The `usecols` parameter if the validation succeeds. Raises ------ ValueError : Columns were missing. Error message will list them.
python
pandas/io/parsers/base_parser.py
638
[ "self", "usecols", "names" ]
SequenceT
true
2
6.72
pandas-dev/pandas
47,362
numpy
false
splitByCharacterType
private static String[] splitByCharacterType(final String str, final boolean camelCase) { if (str == null) { return null; } if (str.isEmpty()) { return ArrayUtils.EMPTY_STRING_ARRAY; } final char[] c = str.toCharArray(); final List<String> list = new ArrayList<>(); int tokenStart = 0; int currentType = Character.getType(c[tokenStart]); for (int pos = tokenStart + 1; pos < c.length; pos++) { final int type = Character.getType(c[pos]); if (type == currentType) { continue; } if (camelCase && type == Character.LOWERCASE_LETTER && currentType == Character.UPPERCASE_LETTER) { final int newTokenStart = pos - 1; if (newTokenStart != tokenStart) { list.add(new String(c, tokenStart, newTokenStart - tokenStart)); tokenStart = newTokenStart; } } else { list.add(new String(c, tokenStart, pos - tokenStart)); tokenStart = pos; } currentType = type; } list.add(new String(c, tokenStart, c.length - tokenStart)); return list.toArray(ArrayUtils.EMPTY_STRING_ARRAY); }
Splits a String by Character type as returned by {@code java.lang.Character.getType(char)}. Groups of contiguous characters of the same type are returned as complete tokens, with the following exception: if {@code camelCase} is {@code true}, the character of type {@code Character.UPPERCASE_LETTER}, if any, immediately preceding a token of type {@code Character.LOWERCASE_LETTER} will belong to the following token rather than to the preceding, if any, {@code Character.UPPERCASE_LETTER} token. @param str the String to split, may be {@code null}. @param camelCase whether to use so-called "camel-case" for letter types. @return an array of parsed Strings, {@code null} if null String input. @since 2.4
java
src/main/java/org/apache/commons/lang3/StringUtils.java
7,166
[ "str", "camelCase" ]
true
9
7.6
apache/commons-lang
2,896
javadoc
false
current
int current() { return currentUsages.get(); }
Prepares the database for lookup by incrementing the usage count. If the usage count is already negative, it indicates that the database is being closed, and this method will return false to indicate that no lookup should be performed. @return true if the database is ready for lookup, false if it is being closed
java
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
112
[]
true
1
6.8
elastic/elasticsearch
75,680
javadoc
false
pandas_validate
def pandas_validate(func_name: str): """ Call the numpydoc validation, and add the errors specific to pandas. Parameters ---------- func_name : str Name of the object of the docstring to validate. Returns ------- dict Information about the docstring and the errors found. """ func_obj = Validator._load_obj(func_name) # Some objects are instances, e.g. IndexSlice, which numpydoc can't validate doc_obj = get_doc_object(func_obj, doc=func_obj.__doc__) doc = PandasDocstring(func_name, doc_obj) if func_obj.__doc__ is not None: result = validate(doc_obj) else: result = { "docstring": "", "file": None, "file_line": None, "errors": [("GL08", "The object does not have a docstring")], } mentioned_errs = doc.mentioned_private_classes if mentioned_errs: result["errors"].append( pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs)) ) if doc.see_also: result["errors"].extend( pandas_error( "SA05", reference_name=rel_name, right_reference=rel_name[len("pandas.") :], ) for rel_name in doc.see_also if rel_name.startswith("pandas.") ) result["examples_errs"] = "" if doc.examples: for error_code, error_message, line_number, col_number in doc.validate_pep8(): result["errors"].append( pandas_error( "EX03", error_code=error_code, error_message=error_message, line_number=line_number, col_number=col_number, ) ) examples_source_code = "".join(doc.examples_source_code) result["errors"].extend( pandas_error("EX04", imported_library=wrong_import) for wrong_import in ("numpy", "pandas") if f"import {wrong_import}" in examples_source_code ) if doc.non_hyphenated_array_like(): result["errors"].append(pandas_error("PD01")) plt.close("all") return result
Call the numpydoc validation, and add the errors specific to pandas. Parameters ---------- func_name : str Name of the object of the docstring to validate. Returns ------- dict Information about the docstring and the errors found.
python
scripts/validate_docstrings.py
233
[ "func_name" ]
true
8
6.8
pandas-dev/pandas
47,362
numpy
false
export_archived_records
def export_archived_records( export_format: str, output_path: str, table_names: list[str] | None = None, drop_archives: bool = False, needs_confirm: bool = True, session: Session = NEW_SESSION, ) -> None: """Export archived data to the given output path in the given format.""" archived_table_names = _get_archived_table_names(table_names, session) # If user chose to drop archives, check there are archive tables that exists # before asking for confirmation if drop_archives and archived_table_names and needs_confirm: _confirm_drop_archives(tables=sorted(archived_table_names)) export_count = 0 dropped_count = 0 for table_name in archived_table_names: logger.info("Exporting table %s", table_name) _dump_table_to_file( target_table=table_name, file_path=os.path.join(output_path, f"{table_name}.{export_format}"), export_format=export_format, session=session, ) export_count += 1 if drop_archives: logger.info("Dropping archived table %s", table_name) session.execute(text(f"DROP TABLE {table_name}")) dropped_count += 1 logger.info("Total exported tables: %s, Total dropped tables: %s", export_count, dropped_count)
Export archived data to the given output path in the given format.
python
airflow-core/src/airflow/utils/db_cleanup.py
556
[ "export_format", "output_path", "table_names", "drop_archives", "needs_confirm", "session" ]
None
true
6
6
apache/airflow
43,597
unknown
false
value_counts
def value_counts(self, dropna: bool = True) -> Series: """ Return a Series containing counts of unique values. Parameters ---------- dropna : bool, default True Don't include counts of NA values. Returns ------- Series """ if self.ndim != 1: raise NotImplementedError from pandas import ( Index, Series, ) if dropna: # error: Unsupported operand type for ~ ("ExtensionArray") values = self[~self.isna()]._ndarray # type: ignore[operator] else: values = self._ndarray result = value_counts(values, sort=False, dropna=dropna) index_arr = self._from_backing_data(np.asarray(result.index._data)) index = Index(index_arr, name=result.index.name) return Series(result._values, index=index, name=result.name, copy=False)
Return a Series containing counts of unique values. Parameters ---------- dropna : bool, default True Don't include counts of NA values. Returns ------- Series
python
pandas/core/arrays/_mixins.py
469
[ "self", "dropna" ]
Series
true
4
6.72
pandas-dev/pandas
47,362
numpy
false
forField
public static AutowiredFieldValueResolver forField(String fieldName) { return new AutowiredFieldValueResolver(fieldName, false, null); }
Create a new {@link AutowiredFieldValueResolver} for the specified field where injection is optional. @param fieldName the field name @return a new {@link AutowiredFieldValueResolver} instance
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredFieldValueResolver.java
78
[ "fieldName" ]
AutowiredFieldValueResolver
true
1
6
spring-projects/spring-framework
59,386
javadoc
false
positiveBuckets
@Override public ExponentialHistogram.Buckets positiveBuckets() { return positiveBuckets; }
Attempts to add a bucket to the positive or negative range of this histogram. <br> Callers must adhere to the following rules: <ul> <li>All buckets for the negative values range must be provided before the first one from the positive values range.</li> <li>For both the negative and positive ranges, buckets must be provided with their indices in ascending order.</li> <li>It is not allowed to provide the same bucket more than once.</li> <li>It is not allowed to add empty buckets ({@code count <= 0}).</li> </ul> If any of these rules are violated, this call will fail with an exception. If the bucket cannot be added because the maximum capacity has been reached, the call will not modify the state of this histogram and will return {@code false}. @param index the index of the bucket to add @param count the count to associate with the given bucket @param isPositive {@code true} if the bucket belongs to the positive range, {@code false} if it belongs to the negative range @return {@code true} if the bucket was added, {@code false} if it could not be added due to insufficient capacity
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
201
[]
true
1
6.64
elastic/elasticsearch
75,680
javadoc
false
keySet
@Override public Set<K> keySet() { // does not impact recency ordering Set<K> ks = keySet; return (ks != null) ? ks : (keySet = new KeySet()); }
Returns the internal entry for the specified key. The entry may be loading, expired, or partially collected.
java
android/guava/src/com/google/common/cache/LocalCache.java
4,178
[]
true
2
7.04
google/guava
51,352
javadoc
false
of
public static <L, R> MutablePair<L, R> of(final L left, final R right) { return new MutablePair<>(left, right); }
Creates a mutable pair of two objects inferring the generic types. @param <L> the left element type. @param <R> the right element type. @param left the left element, may be null. @param right the right element, may be null. @return a mutable pair formed from the two parameters, not null.
java
src/main/java/org/apache/commons/lang3/tuple/MutablePair.java
68
[ "left", "right" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
bench
def bench(ctx, tests, compare, verbose, quick, factor, cpu_affinity, commits, build_dir): """🏋 Run benchmarks. \b Examples: \b $ spin bench -t bench_lib $ spin bench -t bench_random.Random $ spin bench -t Random -t Shuffle Two benchmark runs can be compared. By default, `HEAD` is compared to `main`. You can also specify the branches/commits to compare: \b $ spin bench --compare $ spin bench --compare main $ spin bench --compare main HEAD You can also choose which benchmarks to run in comparison mode: $ spin bench -t Random --compare """ if not commits: commits = ('main', 'HEAD') elif len(commits) == 1: commits = commits + ('HEAD',) elif len(commits) > 2: raise click.ClickException( 'Need a maximum of two revisions to compare' ) bench_args = [] for t in tests: bench_args += ['--bench', t] if verbose: bench_args = ['-v'] + bench_args if quick: bench_args = ['--quick'] + bench_args if cpu_affinity: bench_args += ['--cpu-affinity', cpu_affinity] if not compare: # No comparison requested; we build and benchmark the current version click.secho( "Invoking `build` prior to running benchmarks:", bold=True, fg="bright_green" ) ctx.invoke(build) meson._set_pythonpath(build_dir) p = spin.util.run( [sys.executable, '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, output=False ) os.chdir('..') np_ver = p.stdout.strip().decode('ascii') click.secho( f'Running benchmarks on NumPy {np_ver}', bold=True, fg="bright_green" ) cmd = [ 'asv', 'run', '--dry-run', '--show-stderr', '--python=same' ] + bench_args _run_asv(cmd) else: # Ensure that we don't have uncommitted changes commit_a, commit_b = [_commit_to_sha(c) for c in commits] if commit_b == 'HEAD' and _dirty_git_working_dir(): click.secho( "WARNING: you have uncommitted changes --- " "these will NOT be benchmarked!", fg="red" ) cmd_compare = [ 'asv', 'continuous', '--factor', str(factor), ] + bench_args + [commit_a, commit_b] _run_asv(cmd_compare)
🏋 Run benchmarks. \b Examples: \b $ spin bench -t bench_lib $ spin bench -t bench_random.Random $ spin bench -t Random -t Shuffle Two benchmark runs can be compared. By default, `HEAD` is compared to `main`. You can also specify the branches/commits to compare: \b $ spin bench --compare $ spin bench --compare main $ spin bench --compare main HEAD You can also choose which benchmarks to run in comparison mode: $ spin bench -t Random --compare
python
.spin/cmds.py
412
[ "ctx", "tests", "compare", "verbose", "quick", "factor", "cpu_affinity", "commits", "build_dir" ]
false
12
6.72
numpy/numpy
31,054
unknown
false
process
private void process(final AsyncPollEvent event) { // Trigger a reconciliation that can safely commit offsets if needed to rebalance, // as we're processing before any new fetching starts requestManagers.consumerMembershipManager.ifPresent(consumerMembershipManager -> consumerMembershipManager.maybeReconcile(true)); if (requestManagers.commitRequestManager.isPresent()) { CommitRequestManager commitRequestManager = requestManagers.commitRequestManager.get(); commitRequestManager.updateTimerAndMaybeCommit(event.pollTimeMs()); requestManagers.consumerHeartbeatRequestManager.ifPresent(hrm -> { ConsumerMembershipManager membershipManager = hrm.membershipManager(); maybeUpdatePatternSubscription(membershipManager::onSubscriptionUpdated); membershipManager.onConsumerPoll(); hrm.resetPollTimer(event.pollTimeMs()); }); requestManagers.streamsGroupHeartbeatRequestManager.ifPresent(hrm -> { StreamsMembershipManager membershipManager = hrm.membershipManager(); maybeUpdatePatternSubscription(membershipManager::onSubscriptionUpdated); membershipManager.onConsumerPoll(); hrm.resetPollTimer(event.pollTimeMs()); }); } CompletableFuture<Void> updatePositionsFuture = requestManagers.offsetsRequestManager.updateFetchPositions(event.deadlineMs()); event.markValidatePositionsComplete(); updatePositionsFuture.whenComplete((__, updatePositionsError) -> { if (maybeCompleteAsyncPollEventExceptionally(event, updatePositionsError)) return; requestManagers.fetchRequestManager.createFetchRequests().whenComplete((___, fetchError) -> { if (maybeCompleteAsyncPollEventExceptionally(event, fetchError)) return; event.completeSuccessfully(); }); }); }
Process event indicating whether the AcknowledgeCommitCallbackHandler is configured by the user. @param event Event containing a boolean to indicate if the callback handler is configured or not.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
714
[ "event" ]
void
true
4
6.24
apache/kafka
31,560
javadoc
false
ensureCapacity
public StrBuilder ensureCapacity(final int capacity) { if (capacity > buffer.length) { buffer = ArrayUtils.arraycopy(buffer, 0, 0, size, () -> new char[capacity * 2]); } return this; }
Checks the capacity and ensures that it is at least the size specified. @param capacity the capacity to ensure @return {@code this} instance.
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
1,841
[ "capacity" ]
StrBuilder
true
2
8.08
apache/commons-lang
2,896
javadoc
false
writeNativeImageProperties
private void writeNativeImageProperties(List<String> args) { if (CollectionUtils.isEmpty(args)) { return; } StringBuilder sb = new StringBuilder(); sb.append("Args = "); sb.append(String.join(String.format(" \\%n"), args)); Path file = getSettings().getResourceOutput().resolve("META-INF/native-image/" + getSettings().getGroupId() + "/" + getSettings().getArtifactId() + "/native-image.properties"); try { if (!Files.exists(file)) { Files.createDirectories(file.getParent()); Files.createFile(file); } Files.writeString(file, sb.toString()); } catch (IOException ex) { throw new IllegalStateException("Failed to write native-image.properties", ex); } }
Return the native image arguments to use. <p>By default, the main class to use, as well as standard application flags are added. <p>If the returned list is empty, no {@code native-image.properties} is contributed. @param applicationClassName the fully qualified class name of the application entry point @return the native image options to contribute
java
spring-context/src/main/java/org/springframework/context/aot/ContextAotProcessor.java
154
[ "args" ]
void
true
4
7.92
spring-projects/spring-framework
59,386
javadoc
false
setPriority
@CanIgnoreReturnValue public ThreadFactoryBuilder setPriority(int priority) { // Thread#setPriority() already checks for validity. These error messages // are nicer though and will fail-fast. checkArgument( priority >= Thread.MIN_PRIORITY, "Thread priority (%s) must be >= %s", priority, Thread.MIN_PRIORITY); checkArgument( priority <= Thread.MAX_PRIORITY, "Thread priority (%s) must be <= %s", priority, Thread.MAX_PRIORITY); this.priority = priority; return this; }
Sets the priority for new threads created with this ThreadFactory. <p><b>Warning:</b> relying on the thread scheduler is <a href="http://errorprone.info/bugpattern/ThreadPriorityCheck">discouraged</a>. <p><b>Java 21+ users:</b> use {@link Thread.Builder.OfPlatform#priority(int)} instead. @param priority the priority for new Threads created with this ThreadFactory @return this for the builder pattern
java
android/guava/src/com/google/common/util/concurrent/ThreadFactoryBuilder.java
120
[ "priority" ]
ThreadFactoryBuilder
true
1
6.4
google/guava
51,352
javadoc
false
clear
def clear( self, task_ids: Collection[str | tuple[str, int]] | None = None, *, run_id: str | None = None, start_date: datetime.datetime | None = None, end_date: datetime.datetime | None = None, only_failed: bool = False, only_running: bool = False, dag_run_state: DagRunState = DagRunState.QUEUED, dry_run: bool = False, session: Session = NEW_SESSION, exclude_task_ids: frozenset[str] | frozenset[tuple[str, int]] | None = frozenset(), exclude_run_ids: frozenset[str] | None = frozenset(), run_on_latest_version: bool = False, ) -> int | Iterable[TaskInstance]: """ Clear a set of task instances associated with the current dag for a specified date range. :param task_ids: List of task ids or (``task_id``, ``map_index``) tuples to clear :param run_id: The run_id for which the tasks should be cleared :param start_date: The minimum logical_date to clear :param end_date: The maximum logical_date to clear :param only_failed: Only clear failed tasks :param only_running: Only clear running tasks. :param dag_run_state: state to set DagRun to. If set to False, dagrun state will not be changed. :param dry_run: Find the tasks to clear but don't clear them. 
:param run_on_latest_version: whether to run on latest serialized DAG and Bundle version :param session: The sqlalchemy session to use :param exclude_task_ids: A set of ``task_id`` or (``task_id``, ``map_index``) tuples that should not be cleared :param exclude_run_ids: A set of ``run_id`` or (``run_id``) """ from airflow.models.taskinstance import clear_task_instances state: list[TaskInstanceState] = [] if only_failed: state += [TaskInstanceState.FAILED, TaskInstanceState.UPSTREAM_FAILED] if only_running: # Yes, having `+=` doesn't make sense, but this was the existing behaviour state += [TaskInstanceState.RUNNING] tis_result = self._get_task_instances( task_ids=task_ids, start_date=start_date, end_date=end_date, run_id=run_id, state=state, session=session, exclude_task_ids=exclude_task_ids, exclude_run_ids=exclude_run_ids, ) if dry_run: return list(tis_result) tis = list(tis_result) count = len(tis) if count == 0: return 0 clear_task_instances( list(tis), session, dag_run_state=dag_run_state, run_on_latest_version=run_on_latest_version, ) session.flush() return count
Clear a set of task instances associated with the current dag for a specified date range. :param task_ids: List of task ids or (``task_id``, ``map_index``) tuples to clear :param run_id: The run_id for which the tasks should be cleared :param start_date: The minimum logical_date to clear :param end_date: The maximum logical_date to clear :param only_failed: Only clear failed tasks :param only_running: Only clear running tasks. :param dag_run_state: state to set DagRun to. If set to False, dagrun state will not be changed. :param dry_run: Find the tasks to clear but don't clear them. :param run_on_latest_version: whether to run on latest serialized DAG and Bundle version :param session: The sqlalchemy session to use :param exclude_task_ids: A set of ``task_id`` or (``task_id``, ``map_index``) tuples that should not be cleared :param exclude_run_ids: A set of ``run_id`` or (``run_id``)
python
airflow-core/src/airflow/serialization/serialized_objects.py
3,535
[ "self", "task_ids", "run_id", "start_date", "end_date", "only_failed", "only_running", "dag_run_state", "dry_run", "session", "exclude_task_ids", "exclude_run_ids", "run_on_latest_version" ]
int | Iterable[TaskInstance]
true
5
6.96
apache/airflow
43,597
sphinx
false
daemon
public Builder daemon(final boolean daemon) { this.daemon = Boolean.valueOf(daemon); return this; }
Sets the daemon flag for the new {@link BasicThreadFactory}. If this flag is set to <strong>true</strong> the new thread factory will create daemon threads. @param daemon the value of the daemon flag @return a reference to this {@link Builder}
java
src/main/java/org/apache/commons/lang3/concurrent/BasicThreadFactory.java
162
[ "daemon" ]
Builder
true
1
6.64
apache/commons-lang
2,896
javadoc
false
targetAssignmentReconciled
private boolean targetAssignmentReconciled() { return currentAssignment.equals(currentTargetAssignment); }
@return True if there are no assignments waiting to be resolved from metadata or reconciled.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
746
[]
true
1
6.8
apache/kafka
31,560
javadoc
false
generateConstructorDecorationExpression
function generateConstructorDecorationExpression(node: ClassExpression | ClassDeclaration) { const allDecorators = getAllDecoratorsOfClass(node, /*useLegacyDecorators*/ true); const decoratorExpressions = transformAllDecoratorsOfDeclaration(allDecorators); if (!decoratorExpressions) { return undefined; } const classAlias = classAliases && classAliases[getOriginalNodeId(node)]; // When we transform to ES5/3 this will be moved inside an IIFE and should reference the name // without any block-scoped variable collision handling const localName = languageVersion < ScriptTarget.ES2015 ? factory.getInternalName(node, /*allowComments*/ false, /*allowSourceMaps*/ true) : factory.getDeclarationName(node, /*allowComments*/ false, /*allowSourceMaps*/ true); const decorate = emitHelpers().createDecorateHelper(decoratorExpressions, localName); const expression = factory.createAssignment(localName, classAlias ? factory.createAssignment(classAlias, decorate) : decorate); setEmitFlags(expression, EmitFlags.NoComments); setSourceMapRange(expression, moveRangePastModifiers(node)); return expression; }
Generates a __decorate helper call for a class constructor. @param node The class node.
typescript
src/compiler/transformers/legacyDecorators.ts
681
[ "node" ]
false
5
6.08
microsoft/TypeScript
107,154
jsdoc
false
charBuffer
@Override public CharBuffer charBuffer() throws IOException { try { return CharBuffer.wrap(parser.getTextCharacters(), parser.getTextOffset(), parser.getTextLength()); } catch (IOException e) { throw handleParserException(e); } }
Handle parser exception depending on type. This converts known exceptions to XContentParseException and rethrows them.
java
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
168
[]
CharBuffer
true
2
6.08
elastic/elasticsearch
75,680
javadoc
false
definePackageIfNecessary
protected final void definePackageIfNecessary(String className) { if (className.startsWith("java.")) { return; } int lastDot = className.lastIndexOf('.'); if (lastDot >= 0) { String packageName = className.substring(0, lastDot); if (getDefinedPackage(packageName) == null) { try { definePackage(className, packageName); } catch (IllegalArgumentException ex) { tolerateRaceConditionDueToBeingParallelCapable(ex, packageName); } } } }
Define a package before a {@code findClass} call is made. This is necessary to ensure that the appropriate manifest for nested JARs is associated with the package. @param className the class name being found
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrlClassLoader.java
120
[ "className" ]
void
true
5
6.56
spring-projects/spring-boot
79,428
javadoc
false
getPackageCanonicalName
public static String getPackageCanonicalName(final Object object, final String valueIfNull) { if (object == null) { return valueIfNull; } return getPackageCanonicalName(object.getClass().getName()); }
Gets the package name from the class name of an {@link Object}. @param object the class to get the package name for, may be null. @param valueIfNull the value to return if null. @return the package name of the object, or the null value. @since 2.4
java
src/main/java/org/apache/commons/lang3/ClassUtils.java
744
[ "object", "valueIfNull" ]
String
true
2
7.76
apache/commons-lang
2,896
javadoc
false
runInputLoop
private void runInputLoop() throws Exception { String line; while ((line = this.consoleReader.readLine(getPrompt())) != null) { while (line.endsWith("\\")) { line = line.substring(0, line.length() - 1); line += this.consoleReader.readLine("> "); } if (StringUtils.hasLength(line)) { String[] args = this.argumentDelimiter.parseArguments(line); this.commandRunner.runAndHandleErrors(args); } } }
Run the shell until the user exists. @throws Exception on error
java
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/shell/Shell.java
149
[]
void
true
4
7.04
spring-projects/spring-boot
79,428
javadoc
false
log_event_start
def log_event_start( self, event_name: str, time_ns: int, metadata: dict[str, Any], log_pt2_compile_event: bool = False, compile_id: Optional[CompileId] = None, ) -> None: """ Logs the start of a single event. :param str event_name Name of event to appear in trace :param time_ns Timestamp in nanoseconds :param metadata: Any extra metadata associated with this event :param log_pt2_compile_event: If True, log to pt2_compile_events :param compile_id: Explicit compile_id (rather than using the current context) """ compile_id = compile_id or torch._guards.CompileContext.current_compile_id() metadata["compile_id"] = str(compile_id) self._log_timed_event( event_name, time_ns, "B", metadata, ) self.get_stack().append(event_name) # Add metadata from start event self.add_event_data(event_name, **metadata) if log_pt2_compile_event: self.get_pt2_compile_substack().append(event_name)
Logs the start of a single event. :param str event_name Name of event to appear in trace :param time_ns Timestamp in nanoseconds :param metadata: Any extra metadata associated with this event :param log_pt2_compile_event: If True, log to pt2_compile_events :param compile_id: Explicit compile_id (rather than using the current context)
python
torch/_dynamo/utils.py
1,920
[ "self", "event_name", "time_ns", "metadata", "log_pt2_compile_event", "compile_id" ]
None
true
3
6.56
pytorch/pytorch
96,034
sphinx
false
show_versions
def show_versions(): """Print useful debugging information. .. versionadded:: 0.20 Examples -------- >>> from sklearn import show_versions >>> show_versions() # doctest: +SKIP """ sys_info = _get_sys_info() deps_info = _get_deps_info() print("\nSystem:") for k, stat in sys_info.items(): print("{k:>10}: {stat}".format(k=k, stat=stat)) print("\nPython dependencies:") for k, stat in deps_info.items(): print("{k:>13}: {stat}".format(k=k, stat=stat)) print( "\n{k}: {stat}".format( k="Built with OpenMP", stat=_openmp_parallelism_enabled() ) ) # show threadpoolctl results threadpool_results = threadpool_info() if threadpool_results: print() print("threadpoolctl info:") for i, result in enumerate(threadpool_results): for key, val in result.items(): print(f"{key:>15}: {val}") if i != len(threadpool_results) - 1: print()
Print useful debugging information. .. versionadded:: 0.20 Examples -------- >>> from sklearn import show_versions >>> show_versions() # doctest: +SKIP
python
sklearn/utils/_show_versions.py
77
[]
false
7
7.52
scikit-learn/scikit-learn
64,340
unknown
false
onApplicationEventInternal
protected void onApplicationEventInternal(ApplicationEvent event) { if (this.delegate == null) { throw new IllegalStateException( "Must specify a delegate object or override the onApplicationEventInternal method"); } this.delegate.onApplicationEvent(event); }
Actually process the event, after having filtered according to the desired event source already. <p>The default implementation invokes the specified delegate, if any. @param event the event to process (matching the specified source)
java
spring-context/src/main/java/org/springframework/context/event/SourceFilteringListener.java
104
[ "event" ]
void
true
2
6.4
spring-projects/spring-framework
59,386
javadoc
false
join
def join(self, sep: str): """ Join lists contained as elements in the Series/Index with passed delimiter. If the elements of a Series are lists themselves, join the content of these lists using the delimiter passed to the function. This function is an equivalent to :meth:`str.join`. Parameters ---------- sep : str Delimiter to use between list entries. Returns ------- Series/Index: object The list entries concatenated by intervening occurrences of the delimiter. Raises ------ AttributeError If the supplied Series contains neither strings nor lists. See Also -------- str.join : Standard library version of this method. Series.str.split : Split strings around given separator/delimiter. Notes ----- If any of the list items is not a string object, the result of the join will be `NaN`. Examples -------- Example with a list that contains non-string elements. >>> s = pd.Series( ... [ ... ["lion", "elephant", "zebra"], ... [1.1, 2.2, 3.3], ... ["cat", np.nan, "dog"], ... ["cow", 4.5, "goat"], ... ["duck", ["swan", "fish"], "guppy"], ... ] ... ) >>> s 0 [lion, elephant, zebra] 1 [1.1, 2.2, 3.3] 2 [cat, nan, dog] 3 [cow, 4.5, goat] 4 [duck, [swan, fish], guppy] dtype: object Join all lists using a '-'. The lists containing object(s) of types other than str will produce a NaN. >>> s.str.join("-") 0 lion-elephant-zebra 1 NaN 2 NaN 3 NaN 4 NaN dtype: object """ result = self._data.array._str_join(sep) return self._wrap_result(result)
Join lists contained as elements in the Series/Index with passed delimiter. If the elements of a Series are lists themselves, join the content of these lists using the delimiter passed to the function. This function is an equivalent to :meth:`str.join`. Parameters ---------- sep : str Delimiter to use between list entries. Returns ------- Series/Index: object The list entries concatenated by intervening occurrences of the delimiter. Raises ------ AttributeError If the supplied Series contains neither strings nor lists. See Also -------- str.join : Standard library version of this method. Series.str.split : Split strings around given separator/delimiter. Notes ----- If any of the list items is not a string object, the result of the join will be `NaN`. Examples -------- Example with a list that contains non-string elements. >>> s = pd.Series( ... [ ... ["lion", "elephant", "zebra"], ... [1.1, 2.2, 3.3], ... ["cat", np.nan, "dog"], ... ["cow", 4.5, "goat"], ... ["duck", ["swan", "fish"], "guppy"], ... ] ... ) >>> s 0 [lion, elephant, zebra] 1 [1.1, 2.2, 3.3] 2 [cat, nan, dog] 3 [cow, 4.5, goat] 4 [duck, [swan, fish], guppy] dtype: object Join all lists using a '-'. The lists containing object(s) of types other than str will produce a NaN. >>> s.str.join("-") 0 lion-elephant-zebra 1 NaN 2 NaN 3 NaN 4 NaN dtype: object
python
pandas/core/strings/accessor.py
1,152
[ "self", "sep" ]
true
1
7.2
pandas-dev/pandas
47,362
numpy
false
nextTokenCanFollowDefaultKeyword
function nextTokenCanFollowDefaultKeyword(): boolean { nextToken(); return token() === SyntaxKind.ClassKeyword || token() === SyntaxKind.FunctionKeyword || token() === SyntaxKind.InterfaceKeyword || token() === SyntaxKind.AtToken || (token() === SyntaxKind.AbstractKeyword && lookAhead(nextTokenIsClassKeywordOnSameLine)) || (token() === SyntaxKind.AsyncKeyword && lookAhead(nextTokenIsFunctionKeywordOnSameLine)); }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
2,824
[]
true
8
6.72
microsoft/TypeScript
107,154
jsdoc
false
ceiling
public static Date ceiling(final Object date, final int field) { Objects.requireNonNull(date, "date"); if (date instanceof Date) { return ceiling((Date) date, field); } if (date instanceof Calendar) { return ceiling((Calendar) date, field).getTime(); } throw new ClassCastException("Could not find ceiling of for type: " + date.getClass()); }
Gets a date ceiling, leaving the field specified as the most significant field. <p>For example, if you had the date-time of 28 Mar 2002 13:45:01.231, if you passed with HOUR, it would return 28 Mar 2002 14:00:00.000. If this was passed with MONTH, it would return 1 Apr 2002 0:00:00.000.</p> @param date the date to work with, either {@link Date} or {@link Calendar}, not null. @param field the field from {@link Calendar} or {@code SEMI_MONTH}. @return the different ceil date, not null. @throws NullPointerException if the date is {@code null}. @throws ClassCastException if the object type is not a {@link Date} or {@link Calendar}. @throws ArithmeticException if the year is over 280 million. @since 2.5
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
390
[ "date", "field" ]
Date
true
3
7.92
apache/commons-lang
2,896
javadoc
false
_create_subgraph_for_node
def _create_subgraph_for_node(graph: fx.Graph, node: fx.Node) -> fx.GraphModule: """ Create a subgraph that exactly recreates a node's operation. The subgraph takes only the fx.Node arguments and recreates the operation with the exact target, args structure, and kwargs. Args: graph: The parent graph node: The node to wrap in a subgraph Returns: A GraphModule containing the subgraph """ # Get the owning module # torch.distributed.breakpoint(0) owning_module = graph.owning_module # Create a new graph for the subgraph subgraph = fx.Graph(owning_module) new_args: list[Any] = [] placeholder_idx = 0 for _, arg in enumerate(node.args): if not isinstance(arg, fx.Node): new_args.append(arg) continue placeholder = subgraph.placeholder(f"arg_{placeholder_idx}") placeholder_idx += 1 if "val" in arg.meta: placeholder.meta.update(arg.meta) new_args.append(placeholder) # type: ignore[arg-type] new_kwargs: dict[str, Any] = {} for key, value in node.kwargs.items(): if not isinstance(value, fx.Node): new_kwargs[key] = value continue placeholder = subgraph.placeholder(f"kwarg_{key}") if "val" in value.meta: placeholder.meta.update(value.meta) new_kwargs[key] = placeholder # type: ignore[assignment] # Recreate the exact original operation in the subgraph assert callable(node.target) result = subgraph.call_function( node.target, tuple(new_args), new_kwargs, # type: ignore[arg-type] ) # Copy metadata from the original node result.meta.update(node.meta) out = subgraph.output(result) if "val" in result.meta: out.meta["val"] = result.meta["val"] return fx.GraphModule(owning_module, subgraph)
Create a subgraph that exactly recreates a node's operation. The subgraph takes only the fx.Node arguments and recreates the operation with the exact target, args structure, and kwargs. Args: graph: The parent graph node: The node to wrap in a subgraph Returns: A GraphModule containing the subgraph
python
torch/_inductor/fx_passes/control_dependencies.py
166
[ "graph", "node" ]
fx.GraphModule
true
8
8.08
pytorch/pytorch
96,034
google
false
_should_compare
def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ # NB: we use inferred_type rather than is_bool_dtype to catch # object_dtype_of_bool and categorical[object_dtype_of_bool] cases if ( other.inferred_type == "boolean" and is_any_real_numeric_dtype(self.dtype) ) or ( self.inferred_type == "boolean" and is_any_real_numeric_dtype(other.dtype) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False dtype = _unpack_nested_dtype(other) return ( self._is_comparable_dtype(dtype) or is_object_dtype(dtype) or is_string_dtype(dtype) )
Check if `self == other` can ever have non-False entries.
python
pandas/core/indexes/base.py
6,426
[ "self", "other" ]
bool
true
7
6
pandas-dev/pandas
47,362
unknown
false
trimmed
public ImmutableDoubleArray trimmed() { return isPartialView() ? new ImmutableDoubleArray(toArray()) : this; }
Returns an immutable array containing the same values as {@code this} array. This is logically a no-op, and in some circumstances {@code this} itself is returned. However, if this instance is a {@link #subArray} view of a larger array, this method will copy only the appropriate range of values, resulting in an equivalent array with a smaller memory footprint.
java
android/guava/src/com/google/common/primitives/ImmutableDoubleArray.java
642
[]
ImmutableDoubleArray
true
2
6.64
google/guava
51,352
javadoc
false
from
static @Nullable ConfigurationPropertySource from(PropertySource<?> source) { if (source instanceof ConfigurationPropertySourcesPropertySource) { return null; } return SpringConfigurationPropertySource.from(source); }
Return a single new {@link ConfigurationPropertySource} adapted from the given Spring {@link PropertySource} or {@code null} if the source cannot be adapted. @param source the Spring property source to adapt @return an adapted source or {@code null} {@link SpringConfigurationPropertySource} @since 2.4.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertySource.java
105
[ "source" ]
ConfigurationPropertySource
true
2
7.6
spring-projects/spring-boot
79,428
javadoc
false
propagateChildrenFlags
function propagateChildrenFlags(children: NodeArray<Node> | undefined): TransformFlags { return children ? children.transformFlags : TransformFlags.None; }
Creates a `NodeFactory` that can be used to create and update a syntax tree. @param flags Flags that control factory behavior. @param baseFactory A `BaseNodeFactory` used to create the base `Node` objects. @internal
typescript
src/compiler/factory/nodeFactory.ts
7,305
[ "children" ]
true
2
6.32
microsoft/TypeScript
107,154
jsdoc
false
maybe_promote
def maybe_promote(dtype: np.dtype, fill_value=np.nan): """ Find the minimal dtype that can hold both the given dtype and fill_value. Parameters ---------- dtype : np.dtype fill_value : scalar, default np.nan Returns ------- dtype Upcasted from dtype argument if necessary. fill_value Upcasted from fill_value argument if necessary. Raises ------ ValueError If fill_value is a non-scalar and dtype is not object. """ orig = fill_value orig_is_nat = False if checknull(fill_value): # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740 # avoid cache misses with NaN/NaT values that are not singletons if fill_value is not NA: try: orig_is_nat = np.isnat(fill_value) except TypeError: pass fill_value = _canonical_nans.get(type(fill_value), fill_value) # for performance, we are using a cached version of the actual implementation # of the function in _maybe_promote. However, this doesn't always work (in case # of non-hashable arguments), so we fallback to the actual implementation if needed try: # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type # "Type[Any]"; expected "Hashable" [arg-type] dtype, fill_value = _maybe_promote_cached( dtype, fill_value, type(fill_value), # type: ignore[arg-type] ) except TypeError: # if fill_value is not hashable (required for caching) dtype, fill_value = _maybe_promote(dtype, fill_value) if (dtype == _dtype_obj and orig is not None) or ( orig_is_nat and np.datetime_data(orig)[0] != "ns" ): # GH#51592,53497 restore our potentially non-canonical fill_value fill_value = orig return dtype, fill_value
Find the minimal dtype that can hold both the given dtype and fill_value. Parameters ---------- dtype : np.dtype fill_value : scalar, default np.nan Returns ------- dtype Upcasted from dtype argument if necessary. fill_value Upcasted from fill_value argument if necessary. Raises ------ ValueError If fill_value is a non-scalar and dtype is not object.
python
pandas/core/dtypes/cast.py
452
[ "dtype", "fill_value" ]
true
7
6.72
pandas-dev/pandas
47,362
numpy
false
addMethodName
public NameMatchMethodPointcut addMethodName(String mappedNamePattern) { this.mappedNamePatterns.add(mappedNamePattern); return this; }
Add another method name pattern, in addition to those already configured. <p>Like the "set" methods, this method is for use when configuring proxies, before a proxy is used. <p><b>NOTE:</b> This method does not work after the proxy is in use, since advice chains will be cached. @param mappedNamePattern the additional method name pattern @return this pointcut to allow for method chaining @see #setMappedNames(String...) @see #setMappedName(String)
java
spring-aop/src/main/java/org/springframework/aop/support/NameMatchMethodPointcut.java
82
[ "mappedNamePattern" ]
NameMatchMethodPointcut
true
1
6.64
spring-projects/spring-framework
59,386
javadoc
false
newWriter
public static BufferedWriter newWriter(File file, Charset charset) throws FileNotFoundException { checkNotNull(file); checkNotNull(charset); return new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), charset)); }
Returns a buffered writer that writes to a file using the given character set. <p><b>{@link java.nio.file.Path} equivalent:</b> {@link java.nio.file.Files#newBufferedWriter(java.nio.file.Path, Charset, java.nio.file.OpenOption...)}. @param file the file to write to @param charset the charset used to encode the output stream; see {@link StandardCharsets} for helpful predefined constants @return the buffered writer
java
android/guava/src/com/google/common/io/Files.java
104
[ "file", "charset" ]
BufferedWriter
true
1
6.4
google/guava
51,352
javadoc
false
valueComparator
@Nullable Comparator<? super V> valueComparator() { return emptySet instanceof ImmutableSortedSet ? ((ImmutableSortedSet<V>) emptySet).comparator() : null; }
@serialData number of distinct keys, and then for each distinct key: the key, the number of values for that key, and the key's values
java
android/guava/src/com/google/common/collect/ImmutableSetMultimap.java
683
[]
true
2
6.24
google/guava
51,352
javadoc
false
get_default_pool
def get_default_pool(session: Session = NEW_SESSION) -> Pool | None: """ Get the Pool of the default_pool from the Pools. :param session: SQLAlchemy ORM Session :return: the pool object """ return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)
Get the Pool of the default_pool from the Pools. :param session: SQLAlchemy ORM Session :return: the pool object
python
airflow-core/src/airflow/models/pool.py
90
[ "session" ]
Pool | None
true
1
6.88
apache/airflow
43,597
sphinx
false
addSeconds
public static Date addSeconds(final Date date, final int amount) { return add(date, Calendar.SECOND, amount); }
Adds a number of seconds to a date returning a new object. The original {@link Date} is unchanged. @param date the date, not null. @param amount the amount to add, may be negative. @return the new {@link Date} with the amount added. @throws NullPointerException if the date is null.
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
302
[ "date", "amount" ]
Date
true
1
6.8
apache/commons-lang
2,896
javadoc
false
create
public static <T extends @Nullable Object> BloomFilter<T> create( Funnel<? super T> funnel, long expectedInsertions, double fpp) { return create(funnel, expectedInsertions, fpp, BloomFilterStrategies.MURMUR128_MITZ_64); }
Creates a {@link BloomFilter} with the expected number of insertions and expected false positive probability. <p>Note that overflowing a {@code BloomFilter} with significantly more elements than specified, will result in its saturation, and a sharp deterioration of its false positive probability. <p>The constructed {@code BloomFilter} will be serializable if the provided {@code Funnel<T>} is. <p>It is recommended that the funnel be implemented as a Java enum. This has the benefit of ensuring proper serialization and deserialization, which is important since {@link #equals} also relies on object identity of funnels. @param funnel the funnel of T's that the constructed {@code BloomFilter} will use @param expectedInsertions the number of expected insertions to the constructed {@code BloomFilter}; must be positive @param fpp the desired false positive probability (must be positive and less than 1.0) @return a {@code BloomFilter} @since 19.0
java
android/guava/src/com/google/common/hash/BloomFilter.java
422
[ "funnel", "expectedInsertions", "fpp" ]
true
1
6.48
google/guava
51,352
javadoc
false
parse
public static FetchSnapshotResponse parse(Readable readable, short version) { return new FetchSnapshotResponse(new FetchSnapshotResponseData(readable, version)); }
Finds the PartitionSnapshot for a given topic partition. @param data the fetch snapshot response data @param topicPartition the topic partition to find @return the response partition snapshot if found, otherwise an empty Optional
java
clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java
101
[ "readable", "version" ]
FetchSnapshotResponse
true
1
6.32
apache/kafka
31,560
javadoc
false
optLong
public long optLong(String name, long fallback) { Object object = opt(name); Long result = JSON.toLong(object); return result != null ? result : fallback; }
Returns the value mapped by {@code name} if it exists and is a long or can be coerced to a long. Returns {@code fallback} otherwise. Note that JSON represents numbers as doubles, so this is <a href="#lossy">lossy</a>; use strings to transfer numbers over JSON. @param name the name of the property @param fallback a fallback value @return the value or {@code fallback}
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
544
[ "name", "fallback" ]
true
2
8.24
spring-projects/spring-boot
79,428
javadoc
false
prepare_function_arguments
def prepare_function_arguments( func: Callable, args: tuple, kwargs: dict, *, num_required_args: int ) -> tuple[tuple, dict]: """ Prepare arguments for jitted function. As numba functions do not support kwargs, we try to move kwargs into args if possible. Parameters ---------- func : function User defined function args : tuple User input positional arguments kwargs : dict User input keyword arguments num_required_args : int The number of leading positional arguments we will pass to udf. These are not supplied by the user. e.g. for groupby we require "values", "index" as the first two arguments: `numba_func(group, group_index, *args)`, in this case num_required_args=2. See :func:`pandas.core.groupby.numba_.generate_numba_agg_func` Returns ------- tuple[tuple, dict] args, kwargs """ if not kwargs: return args, kwargs # the udf should have this pattern: def udf(arg1, arg2, ..., *args, **kwargs):... signature = inspect.signature(func) arguments = signature.bind(*[_sentinel] * num_required_args, *args, **kwargs) arguments.apply_defaults() # Ref: https://peps.python.org/pep-0362/ # Arguments which could be passed as part of either *args or **kwargs # will be included only in the BoundArguments.args attribute. args = arguments.args kwargs = arguments.kwargs if kwargs: # Note: in case numba supports keyword-only arguments in # a future version, we should remove this check. But this # seems unlikely to happen soon. raise NumbaUtilError( "numba does not support keyword-only arguments" "https://github.com/numba/numba/issues/2916, " "https://github.com/numba/numba/issues/6846" ) args = args[num_required_args:] return args, kwargs
Prepare arguments for jitted function. As numba functions do not support kwargs, we try to move kwargs into args if possible. Parameters ---------- func : function User defined function args : tuple User input positional arguments kwargs : dict User input keyword arguments num_required_args : int The number of leading positional arguments we will pass to udf. These are not supplied by the user. e.g. for groupby we require "values", "index" as the first two arguments: `numba_func(group, group_index, *args)`, in this case num_required_args=2. See :func:`pandas.core.groupby.numba_.generate_numba_agg_func` Returns ------- tuple[tuple, dict] args, kwargs
python
pandas/core/util/numba_.py
97
[ "func", "args", "kwargs", "num_required_args" ]
tuple[tuple, dict]
true
3
6.4
pandas-dev/pandas
47,362
numpy
false
requestingClass
Class<?> requestingClass(Class<?> callerClass) { if (callerClass != null) { // fast path return callerClass; } Optional<Class<?>> result = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) .walk(frames -> findRequestingFrame(frames).map(StackWalker.StackFrame::getDeclaringClass)); return result.orElse(null); }
Walks the stack to determine which class should be checked for entitlements. @param callerClass when non-null will be returned; this is a fast-path check that can avoid the stack walk in cases where the caller class is available. @return the requesting class, or {@code null} if the entire call stack comes from the entitlement library itself.
java
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyCheckerImpl.java
408
[ "callerClass" ]
true
2
8.24
elastic/elasticsearch
75,680
javadoc
false
describeTopics
default DescribeTopicsResult describeTopics(Collection<String> topicNames, DescribeTopicsOptions options) { return describeTopics(TopicCollection.ofTopicNames(topicNames), options); }
Describe some topics in the cluster. @param topicNames The names of the topics to describe. @param options The options to use when describing the topic. @return The DescribeTopicsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
306
[ "topicNames", "options" ]
DescribeTopicsResult
true
1
6.8
apache/kafka
31,560
javadoc
false
equals
@Override public boolean equals(final Object obj) { if (obj instanceof MutableBoolean) { return value == ((MutableBoolean) obj).booleanValue(); } return false; }
Compares this object to the specified object. The result is {@code true} if and only if the argument is not {@code null} and is an {@link MutableBoolean} object that contains the same {@code boolean} value as this object. @param obj the object to compare with, null returns false @return {@code true} if the objects are the same; {@code false} otherwise.
java
src/main/java/org/apache/commons/lang3/mutable/MutableBoolean.java
104
[ "obj" ]
true
2
8.08
apache/commons-lang
2,896
javadoc
false
getEndOfTrailingComment
static SourceLocation getEndOfTrailingComment(SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) { // We consider any following comment token that is indented more than the // first comment to be part of the trailing comment. const unsigned Column = SM.getPresumedColumnNumber(Loc); std::optional<Token> Tok = Lexer::findNextToken(Loc, SM, LangOpts, /*IncludeComments=*/true); while (Tok && Tok->is(tok::comment) && SM.getPresumedColumnNumber(Tok->getLocation()) > Column) { Loc = Tok->getEndLoc(); Tok = Lexer::findNextToken(Loc, SM, LangOpts, /*IncludeComments=*/true); } return Loc; }
Returns the end of the trailing comments after `Loc`.
cpp
clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp
330
[ "Loc" ]
true
4
6
llvm/llvm-project
36,021
doxygen
false
compare
private int compare(ConfigurationPropertyName n1, ConfigurationPropertyName n2) { int l1 = n1.getNumberOfElements(); int l2 = n2.getNumberOfElements(); int i1 = 0; int i2 = 0; while (i1 < l1 || i2 < l2) { try { ElementType type1 = (i1 < l1) ? n1.elements.getType(i1) : null; ElementType type2 = (i2 < l2) ? n2.elements.getType(i2) : null; String e1 = (i1 < l1) ? n1.getElement(i1++, Form.UNIFORM) : null; String e2 = (i2 < l2) ? n2.getElement(i2++, Form.UNIFORM) : null; int result = compare(e1, type1, e2, type2); if (result != 0) { return result; } } catch (ArrayIndexOutOfBoundsException ex) { throw new RuntimeException(ex); } } return 0; }
Returns {@code true} if this element is an ancestor (immediate or nested parent) of the specified name. @param name the name to check @return {@code true} if this name is an ancestor
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
311
[ "n1", "n2" ]
true
9
8
spring-projects/spring-boot
79,428
javadoc
false
FunctionLocation
function FunctionLocation({location, displayName}: FunctionLocationProps) { // TODO: We should support symbolication here as well, but // symbolicating the whole stack can be expensive const [canViewSource, viewSource] = useOpenResource(location, null); return ( <li> <Button className={ canViewSource ? styles.ClickableSource : styles.UnclickableSource } disabled={!canViewSource} onClick={viewSource}> {displayName} </Button> </li> ); }
Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. @flow
javascript
packages/react-devtools-shared/src/devtools/views/Profiler/SidebarEventInfo.js
35
[]
false
2
6.4
facebook/react
241,750
jsdoc
false
hashCode
@Deprecated public static int hashCode(final Object obj) { // hashCode(Object) for performance vs. hashCodeMulti(Object[]), as hash code is often critical return Objects.hashCode(obj); }
Gets the hash code of an object returning zero when the object is {@code null}. <pre> ObjectUtils.hashCode(null) = 0 ObjectUtils.hashCode(obj) = obj.hashCode() </pre> @param obj the object to obtain the hash code of, may be {@code null}. @return the hash code of the object, or zero if null. @since 2.1 @deprecated this method has been replaced by {@code java.util.Objects.hashCode(Object)} in Java 7 and will be removed in future releases.
java
src/main/java/org/apache/commons/lang3/ObjectUtils.java
711
[ "obj" ]
true
1
7.04
apache/commons-lang
2,896
javadoc
false
of
@Contract("_, _, !null, _ -> !null") static @Nullable ConfigurationProperty of(@Nullable ConfigurationPropertySource source, ConfigurationPropertyName name, @Nullable Object value, @Nullable Origin origin) { if (value == null) { return null; } return new ConfigurationProperty(source, name, value, origin); }
Return the value of the configuration property. @return the configuration property value
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationProperty.java
136
[ "source", "name", "value", "origin" ]
ConfigurationProperty
true
2
6.4
spring-projects/spring-boot
79,428
javadoc
false
forOperation
public CacheEvaluationContext forOperation(CacheExpressionRootObject rootObject, Method targetMethod, @Nullable Object[] args) { CacheEvaluationContext evaluationContext = new CacheEvaluationContext( rootObject, targetMethod, args, getParameterNameDiscoverer()); this.originalContext.applyDelegatesTo(evaluationContext); return evaluationContext; }
Creates a {@link CacheEvaluationContext} for the specified operation. @param rootObject the {@code root} object to use for the context @param targetMethod the target cache {@link Method} @param args the arguments of the method invocation @return a context suitable for this cache operation
java
spring-context/src/main/java/org/springframework/cache/interceptor/CacheEvaluationContextFactory.java
67
[ "rootObject", "targetMethod", "args" ]
CacheEvaluationContext
true
1
6.4
spring-projects/spring-framework
59,386
javadoc
false
applyRollingPolicy
private <T> void applyRollingPolicy(RollingPolicySystemProperty property, PropertyResolver resolver, Class<T> type) { T value = getProperty(resolver, property.getApplicationPropertyName(), type); if (value != null) { String stringValue = String.valueOf((value instanceof DataSize dataSize) ? dataSize.toBytes() : value); setSystemProperty(property.getEnvironmentVariableName(), stringValue); } }
Create a new {@link LoggingSystemProperties} instance. @param environment the source environment @param defaultValueResolver function used to resolve default values or {@code null} @param setter setter used to apply the property or {@code null} for system properties @since 3.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/logging/logback/LogbackLoggingSystemProperties.java
106
[ "property", "resolver", "type" ]
void
true
3
6.24
spring-projects/spring-boot
79,428
javadoc
false
renew_from_kt
def renew_from_kt(principal: str | None, keytab: str, exit_on_fail: bool = True): """ Renew kerberos token from keytab. :param principal: principal :param keytab: keytab file :return: None """ # The config is specified in seconds. But we ask for that same amount in # minutes to give ourselves a large renewal buffer. renewal_lifetime = f"{conf.getint('kerberos', 'reinit_frequency')}m" cmd_principal = get_kerberos_principal(principal) if conf.getboolean("kerberos", "forwardable"): forwardable = "-f" else: forwardable = "-F" if conf.getboolean("kerberos", "include_ip"): include_ip = "-a" else: include_ip = "-A" cmdv: list[str] = [ conf.get_mandatory_value("kerberos", "kinit_path"), forwardable, include_ip, "-r", renewal_lifetime, "-k", # host ticket "-t", keytab, # specify keytab "-c", conf.get_mandatory_value("kerberos", "ccache"), # specify credentials cache cmd_principal, ] log.info("Re-initialising kerberos from keytab: %s", " ".join(shlex.quote(f) for f in cmdv)) with subprocess.Popen( cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, bufsize=-1, universal_newlines=True, ) as subp: subp.wait() if subp.returncode != 0: log.error( "Couldn't reinit from keytab! `kinit` exited with %s.\n%s\n%s", subp.returncode, "\n".join(subp.stdout.readlines() if subp.stdout else []), "\n".join(subp.stderr.readlines() if subp.stderr else []), ) if exit_on_fail: sys.exit(subp.returncode) else: return subp.returncode if detect_conf_var(): # (From: HUE-640). Kerberos clock have seconds level granularity. Make sure we # renew the ticket after the initial valid time. time.sleep(1.5) ret = perform_krb181_workaround(cmd_principal) if exit_on_fail and ret != 0: sys.exit(ret) else: return ret return 0
Renew kerberos token from keytab. :param principal: principal :param keytab: keytab file :return: None
python
airflow-core/src/airflow/security/kerberos.py
67
[ "principal", "keytab", "exit_on_fail" ]
true
14
8
apache/airflow
43,597
sphinx
false
determineDefaultCandidate
@Nullable private String determineDefaultCandidate(Map<String, Object> candidates) { String defaultBeanName = null; for (String candidateBeanName : candidates.keySet()) { if (AutowireUtils.isDefaultCandidate(this, candidateBeanName)) { if (defaultBeanName != null) { return null; } defaultBeanName = candidateBeanName; } } return defaultBeanName; }
Return a unique "default-candidate" among remaining non-default candidates. @param candidates a Map of candidate names and candidate instances (or candidate classes if not created yet) that match the required type @return the name of the default candidate, or {@code null} if none found @since 6.2.4 @see AbstractBeanDefinition#isDefaultCandidate()
java
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
2,233
[ "candidates" ]
String
true
3
7.44
spring-projects/spring-framework
59,386
javadoc
false
concat
public static ByteSource concat(Iterator<? extends ByteSource> sources) { return concat(ImmutableList.copyOf(sources)); }
Concatenates multiple {@link ByteSource} instances into a single source. Streams returned from the source will contain the concatenated data from the streams of the underlying sources. <p>Only one underlying stream will be open at a time. Closing the concatenated stream will close the open underlying stream. <p>Note: The input {@code Iterator} will be copied to an {@code ImmutableList} when this method is called. This will fail if the iterator is infinite and may cause problems if the iterator eagerly fetches data for each source when iterated (rather than producing sources that only load data through their streams). Prefer using the {@link #concat(Iterable)} overload if possible. @param sources the sources to concatenate @return a {@code ByteSource} containing the concatenated data @throws NullPointerException if any of {@code sources} is {@code null} @since 15.0
java
android/guava/src/com/google/common/io/ByteSource.java
396
[ "sources" ]
ByteSource
true
1
6.64
google/guava
51,352
javadoc
false
__getitem__
def __getitem__(self, target): """ Return the value of the given string attribute node, None if the node doesn't exist. Can also take a tuple as a parameter, (target, child), where child is the index of the attribute in the WKT. For example: >>> wkt = ( ... 'GEOGCS["WGS 84",' ... ' DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]' ... ']' ... ) >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326 >>> print(srs['GEOGCS']) WGS 84 >>> print(srs['DATUM']) WGS_1984 >>> print(srs['AUTHORITY']) EPSG >>> print(srs['AUTHORITY', 1]) # The authority value 4326 >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt 0 >>> # For the units authority, have to use the pipe symbole. >>> print(srs['UNIT|AUTHORITY']) EPSG >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units 9122 """ if isinstance(target, tuple): return self.attr_value(*target) else: return self.attr_value(target)
Return the value of the given string attribute node, None if the node doesn't exist. Can also take a tuple as a parameter, (target, child), where child is the index of the attribute in the WKT. For example: >>> wkt = ( ... 'GEOGCS["WGS 84",' ... ' DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]' ... ']' ... ) >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326 >>> print(srs['GEOGCS']) WGS 84 >>> print(srs['DATUM']) WGS_1984 >>> print(srs['AUTHORITY']) EPSG >>> print(srs['AUTHORITY', 1]) # The authority value 4326 >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt 0 >>> # For the units authority, have to use the pipe symbole. >>> print(srs['UNIT|AUTHORITY']) EPSG >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units 9122
python
django/contrib/gis/gdal/srs.py
113
[ "self", "target" ]
false
3
6.32
django/django
86,204
unknown
false
merge
public ZeroBucket merge(ZeroBucket other) { if (other.count == 0) { return this; } else if (count == 0) { return other; } else { long totalCount = count + other.count; // Both are populated, so we need to use the higher zero-threshold. if (this.compareZeroThreshold(other) >= 0) { return new ZeroBucket(this, totalCount); } else { return new ZeroBucket(other, totalCount); } } }
Merges this zero bucket with another one. <ul> <li>If the other zero bucket or both are empty, this instance is returned unchanged.</li> <li>If the this zero bucket is empty and the other one is populated, the other instance is returned unchanged.</li> <li>Otherwise, the zero threshold is increased if necessary (by taking the maximum of the two), and the counts are summed.</li> </ul> @param other The other zero bucket to merge with. @return A new {@link ZeroBucket} representing the merged result.
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java
192
[ "other" ]
ZeroBucket
true
4
8.08
elastic/elasticsearch
75,680
javadoc
false
_shutdown_handler
def _shutdown_handler(worker: Worker, sig='SIGTERM', how='Warm', callback=None, exitcode=EX_OK, verbose=True): """Install signal handler for warm/cold shutdown. The handler will run from the MainProcess. Args: worker (Worker): The worker that received the signal. sig (str, optional): The signal that was received. Defaults to 'TERM'. how (str, optional): The type of shutdown to perform. Defaults to 'Warm'. callback (Callable, optional): Signal handler. Defaults to None. exitcode (int, optional): The exit code to use. Defaults to EX_OK. verbose (bool, optional): Whether to print the type of shutdown. Defaults to True. """ def _handle_request(*args): with in_sighandler(): from celery.worker import state if current_process()._name == 'MainProcess': if callback: callback(worker) if verbose: safe_say(f'worker: {how} shutdown (MainProcess)', sys.__stdout__) signals.worker_shutting_down.send( sender=worker.hostname, sig=sig, how=how, exitcode=exitcode, ) setattr(state, {'Warm': 'should_stop', 'Cold': 'should_terminate'}[how], exitcode) _handle_request.__name__ = str(f'worker_{how}') platforms.signals[sig] = _handle_request
Install signal handler for warm/cold shutdown. The handler will run from the MainProcess. Args: worker (Worker): The worker that received the signal. sig (str, optional): The signal that was received. Defaults to 'TERM'. how (str, optional): The type of shutdown to perform. Defaults to 'Warm'. callback (Callable, optional): Signal handler. Defaults to None. exitcode (int, optional): The exit code to use. Defaults to EX_OK. verbose (bool, optional): Whether to print the type of shutdown. Defaults to True.
python
celery/apps/worker.py
282
[ "worker", "sig", "how", "callback", "exitcode", "verbose" ]
true
4
6.88
celery/celery
27,741
google
false
isAssignable
private static boolean isAssignable(final Type type, final ParameterizedType toParameterizedType, final Map<TypeVariable<?>, Type> typeVarAssigns) { if (type == null) { return true; } // only a null type can be assigned to null type which // would have cause the previous to return true if (toParameterizedType == null) { return false; } // cannot cast an array type to a parameterized type. if (type instanceof GenericArrayType) { return false; } // all types are assignable to themselves if (toParameterizedType.equals(type)) { return true; } // get the target type's raw type final Class<?> toClass = getRawType(toParameterizedType); // get the subject type's type arguments including owner type arguments // and supertype arguments up to and including the target class. final Map<TypeVariable<?>, Type> fromTypeVarAssigns = getTypeArguments(type, toClass, null); // null means the two types are not compatible if (fromTypeVarAssigns == null) { return false; } // compatible types, but there's no type arguments. this is equivalent // to comparing Map< ?, ? > to Map, and raw types are always assignable // to parameterized types. if (fromTypeVarAssigns.isEmpty()) { return true; } // get the target type's type arguments including owner type arguments final Map<TypeVariable<?>, Type> toTypeVarAssigns = getTypeArguments(toParameterizedType, toClass, typeVarAssigns); // now to check each type argument for (final TypeVariable<?> var : toTypeVarAssigns.keySet()) { final Type toTypeArg = unrollVariableAssignments(var, toTypeVarAssigns); final Type fromTypeArg = unrollVariableAssignments(var, fromTypeVarAssigns); if (toTypeArg == null && fromTypeArg instanceof Class) { continue; } // parameters must either be absent from the subject type, within // the bounds of the wildcard type, or be an exact match to the // parameters of the target type. 
if (fromTypeArg != null && toTypeArg != null && !toTypeArg.equals(fromTypeArg) && !(toTypeArg instanceof WildcardType && isAssignable(fromTypeArg, toTypeArg, typeVarAssigns))) { return false; } } return true; }
Tests if the subject type may be implicitly cast to the target parameterized type following the Java generics rules. @param type the subject type to be assigned to the target type. @param toParameterizedType the target parameterized type. @param typeVarAssigns a map with type variables. @return {@code true} if {@code type} is assignable to {@code toType}.
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
1,049
[ "type", "toParameterizedType", "typeVarAssigns" ]
true
14
8
apache/commons-lang
2,896
javadoc
false
_approximate_mode
def _approximate_mode(class_counts, n_draws, rng): """Computes approximate mode of multivariate hypergeometric. This is an approximation to the mode of the multivariate hypergeometric given by class_counts and n_draws. It shouldn't be off by more than one. It is the mostly likely outcome of drawing n_draws many samples from the population given by class_counts. Parameters ---------- class_counts : ndarray of int Population per class. n_draws : int Number of draws (samples to draw) from the overall population. rng : random state Used to break ties. Returns ------- sampled_classes : ndarray of int Number of samples drawn from each class. np.sum(sampled_classes) == n_draws Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import _approximate_mode >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0) array([2, 1]) >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0) array([3, 1]) >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), ... n_draws=2, rng=0) array([0, 1, 1, 0]) >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), ... n_draws=2, rng=42) array([1, 1, 0, 0]) """ rng = check_random_state(rng) # this computes a bad approximation to the mode of the # multivariate hypergeometric given by class_counts and n_draws continuous = class_counts / class_counts.sum() * n_draws # floored means we don't overshoot n_samples, but probably undershoot floored = np.floor(continuous) # we add samples according to how much "left over" probability # they had, until we arrive at n_samples need_to_add = int(n_draws - floored.sum()) if need_to_add > 0: remainder = continuous - floored values = np.sort(np.unique(remainder))[::-1] # add according to remainder, but break ties # randomly to avoid biases for value in values: (inds,) = np.where(remainder == value) # if we need_to_add less than what's in inds # we draw randomly from them. 
# if we need to add more, we add them all and # go to the next value add_now = min(len(inds), need_to_add) inds = rng.choice(inds, size=add_now, replace=False) floored[inds] += 1 need_to_add -= add_now if need_to_add == 0: break return floored.astype(int)
Computes approximate mode of multivariate hypergeometric. This is an approximation to the mode of the multivariate hypergeometric given by class_counts and n_draws. It shouldn't be off by more than one. It is the mostly likely outcome of drawing n_draws many samples from the population given by class_counts. Parameters ---------- class_counts : ndarray of int Population per class. n_draws : int Number of draws (samples to draw) from the overall population. rng : random state Used to break ties. Returns ------- sampled_classes : ndarray of int Number of samples drawn from each class. np.sum(sampled_classes) == n_draws Examples -------- >>> import numpy as np >>> from sklearn.utils.extmath import _approximate_mode >>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0) array([2, 1]) >>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0) array([3, 1]) >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), ... n_draws=2, rng=0) array([0, 1, 1, 0]) >>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]), ... n_draws=2, rng=42) array([1, 1, 0, 0])
python
sklearn/utils/extmath.py
1,412
[ "class_counts", "n_draws", "rng" ]
false
4
7.12
scikit-learn/scikit-learn
64,340
numpy
false
renewDelegationToken
default RenewDelegationTokenResult renewDelegationToken(byte[] hmac) { return renewDelegationToken(hmac, new RenewDelegationTokenOptions()); }
Renew a Delegation Token. <p> This is a convenience method for {@link #renewDelegationToken(byte[], RenewDelegationTokenOptions)} with default options. See the overload for more details. @param hmac HMAC of the Delegation token @return The RenewDelegationTokenResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
751
[ "hmac" ]
RenewDelegationTokenResult
true
1
6.16
apache/kafka
31,560
javadoc
false
create_stack
def create_stack(self, stack_name: str, cloudformation_parameters: dict) -> None: """ Create stack in CloudFormation. .. seealso:: - :external+boto3:py:meth:`CloudFormation.Client.create_stack` :param stack_name: stack_name. :param cloudformation_parameters: parameters to be passed to CloudFormation. """ if "StackName" not in cloudformation_parameters: cloudformation_parameters["StackName"] = stack_name self.get_conn().create_stack(**cloudformation_parameters)
Create stack in CloudFormation. .. seealso:: - :external+boto3:py:meth:`CloudFormation.Client.create_stack` :param stack_name: stack_name. :param cloudformation_parameters: parameters to be passed to CloudFormation.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/cloud_formation.py
67
[ "self", "stack_name", "cloudformation_parameters" ]
None
true
2
6.08
apache/airflow
43,597
sphinx
false
_get_nearest_indexer
def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer
Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples).
python
pandas/core/indexes/base.py
3,973
[ "self", "target", "limit", "tolerance" ]
npt.NDArray[np.intp]
true
4
6
pandas-dev/pandas
47,362
unknown
false
equals
@Override public boolean equals(@Nullable Object other) { return (this == other || (other instanceof NameMatchCacheOperationSource otherCos && ObjectUtils.nullSafeEquals(this.nameMap, otherCos.nameMap))); }
Return if the given method name matches the mapped name. <p>The default implementation checks for "xxx*", "*xxx" and "*xxx*" matches, as well as direct equality. Can be overridden in subclasses. @param methodName the method name of the class @param mappedName the name in the descriptor @return if the names match @see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
java
spring-context/src/main/java/org/springframework/cache/interceptor/NameMatchCacheOperationSource.java
111
[ "other" ]
true
3
7.6
spring-projects/spring-framework
59,386
javadoc
false
checkPositionIndexes
public static void checkPositionIndexes(int start, int end, int size) { // Carefully optimized for execution by hotspot (explanatory comment above) if (start < 0 || end < start || end > size) { throw new IndexOutOfBoundsException(badPositionIndexes(start, end, size)); } }
Ensures that {@code start} and {@code end} specify valid <i>positions</i> in an array, list or string of size {@code size}, and are in order. A position index may range from zero to {@code size}, inclusive. @param start a user-supplied index identifying a starting position in an array, list or string @param end a user-supplied index identifying an ending position in an array, list or string @param size the size of that array, list or string @throws IndexOutOfBoundsException if either index is negative or is greater than {@code size}, or if {@code end} is less than {@code start} @throws IllegalArgumentException if {@code size} is negative
java
android/guava/src/com/google/common/base/Preconditions.java
1,445
[ "start", "end", "size" ]
void
true
4
6.72
google/guava
51,352
javadoc
false
isStartOfParameter
function isStartOfParameter(isJSDocParameter: boolean): boolean { return token() === SyntaxKind.DotDotDotToken || isBindingIdentifierOrPrivateIdentifierOrPattern() || isModifierKind(token()) || token() === SyntaxKind.AtToken || isStartOfType(/*inStartOfParameter*/ !isJSDocParameter); }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
3,993
[ "isJSDocParameter" ]
true
5
6.72
microsoft/TypeScript
107,154
jsdoc
false
mapPropertiesWithReplacement
private @Nullable PropertySource<?> mapPropertiesWithReplacement(PropertiesMigrationReport report, String name, List<PropertyMigration> properties) { report.add(name, properties); List<PropertyMigration> renamed = properties.stream().filter(PropertyMigration::isCompatibleType).toList(); if (renamed.isEmpty()) { return null; } NameTrackingPropertySource nameTrackingPropertySource = new NameTrackingPropertySource(); this.environment.getPropertySources().addFirst(nameTrackingPropertySource); try { String target = "migrate-" + name; Map<String, OriginTrackedValue> content = new LinkedHashMap<>(); for (PropertyMigration candidate : renamed) { String newPropertyName = candidate.getNewPropertyName(); Object value = candidate.getProperty().getValue(); if (nameTrackingPropertySource.isPlaceholderThatAccessesName(value, newPropertyName)) { continue; } OriginTrackedValue originTrackedValue = OriginTrackedValue.of(value, candidate.getProperty().getOrigin()); content.put(newPropertyName, originTrackedValue); } return new OriginTrackedMapPropertySource(target, content); } finally { this.environment.getPropertySources().remove(nameTrackingPropertySource.getName()); } }
Analyse the {@link ConfigurableEnvironment environment} and attempt to rename legacy properties if a replacement exists. @return a report of the migration
java
core/spring-boot-properties-migrator/src/main/java/org/springframework/boot/context/properties/migrator/PropertiesMigrationReporter.java
187
[ "report", "name", "properties" ]
true
3
6.08
spring-projects/spring-boot
79,428
javadoc
false
addInterface
public void addInterface(Class<?> ifc) { Assert.notNull(ifc, "Interface must not be null"); if (!ifc.isInterface()) { throw new IllegalArgumentException("Specified class [" + ifc.getName() + "] must be an interface"); } this.interfaces.add(ifc); }
Add the specified interface to the list of interfaces to introduce. @param ifc the interface to introduce
java
spring-aop/src/main/java/org/springframework/aop/support/DefaultIntroductionAdvisor.java
99
[ "ifc" ]
void
true
2
6.88
spring-projects/spring-framework
59,386
javadoc
false
pathToPackage
public static String pathToPackage(final String path) { return Objects.requireNonNull(path, "path").replace('/', '.'); }
Converts a Java path ('/') to a package name. @param path the source path. @return a package name. @throws NullPointerException if {@code path} is null. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/ClassPathUtils.java
53
[ "path" ]
String
true
1
6.64
apache/commons-lang
2,896
javadoc
false
isInternalLanguageInterface
protected boolean isInternalLanguageInterface(Class<?> ifc) { return (ifc.getName().equals("groovy.lang.GroovyObject") || ifc.getName().endsWith(".cglib.proxy.Factory") || ifc.getName().endsWith(".bytebuddy.MockAccess")); }
Determine whether the given interface is a well-known internal language interface and therefore not to be considered as a reasonable proxy interface. <p>If no reasonable proxy interface is found for a given bean, it will get proxied with its full target class, assuming that as the user's intention. @param ifc the interface to check @return whether the given interface is an internal language interface
java
spring-aop/src/main/java/org/springframework/aop/framework/ProxyProcessorSupport.java
145
[ "ifc" ]
true
3
7.92
spring-projects/spring-framework
59,386
javadoc
false
withCommonFrames
public StandardStackTracePrinter withCommonFrames() { return withOption(Option.SHOW_COMMON_FRAMES); }
Return a new {@link StandardStackTracePrinter} from this one that will print all common frames rather the replacing them with the {@literal "... N more"} message. @return a new {@link StandardStackTracePrinter} instance
java
core/spring-boot/src/main/java/org/springframework/boot/logging/StandardStackTracePrinter.java
159
[]
StandardStackTracePrinter
true
1
6.32
spring-projects/spring-boot
79,428
javadoc
false
instancesOf
public static <E> Stream<E> instancesOf(final Class<? super E> clazz, final Collection<? super E> collection) { return instancesOf(clazz, of(collection)); }
Streams only instances of the give Class in a collection. <p> This method shorthand for: </p> <pre> {@code (Stream<E>) Streams.toStream(collection).filter(collection, SomeClass.class::isInstance);} </pre> @param <E> the type of elements in the collection we want to stream. @param clazz the type of elements in the collection we want to stream. @param collection the collection to stream or null. @return A non-null stream that only provides instances we want. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/stream/Streams.java
609
[ "clazz", "collection" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
max
public static float max(final float a, final float b, final float c) { return Math.max(Math.max(a, b), c); }
Gets the maximum of three {@code float} values. <p> If any value is {@code NaN}, {@code NaN} is returned. Infinity is handled. </p> @param a value 1. @param b value 2. @param c value 3. @return the largest of the values. @see IEEE754rUtils#max(float, float, float) for a version of this method that handles NaN differently.
java
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
969
[ "a", "b", "c" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
rmdir
function rmdir(path, options, callback) { if (typeof options === 'function') { callback = options; options = undefined; } if (options?.recursive !== undefined) { // This API previously accepted a `recursive` option that was deprecated // and removed. However, in order to make the change more visible, we // opted to throw an error if recursive is specified rather than removing it // entirely. throw new ERR_INVALID_ARG_VALUE( 'options.recursive', options.recursive, 'is no longer supported', ); } callback = makeCallback(callback); path = getValidatedPath(path); validateRmdirOptions(options); const req = new FSReqCallback(); req.oncomplete = callback; binding.rmdir(path, req); }
Asynchronously removes a directory. @param {string | Buffer | URL} path @param {object} [options] @param {(err?: Error) => any} callback @returns {void}
javascript
lib/fs.js
1,122
[ "path", "options", "callback" ]
false
3
6.24
nodejs/node
114,839
jsdoc
false
inverse
@Override public BiMap<V, K> inverse() { BiMap<V, K> result = inverse; return (result == null) ? inverse = new Inverse<>(this) : result; }
An {@code Entry} implementation that attempts to follow its key around the map -- that is, if the key is moved, deleted, or reinserted, it will account for that -- while not doing any extra work if the key has not moved. One quirk: The {@link #getValue()} method can return {@code null} even for a map which supposedly does not contain null elements, if the key is not present when {@code getValue()} is called.
java
android/guava/src/com/google/common/collect/HashBiMap.java
949
[]
true
2
6.56
google/guava
51,352
javadoc
false
stripEnd
public static String stripEnd(final String str, final String stripChars) { int end = length(str); if (end == 0) { return str; } if (stripChars == null) { while (end != 0 && Character.isWhitespace(str.charAt(end - 1))) { end--; } } else if (stripChars.isEmpty()) { return str; } else { while (end != 0 && stripChars.indexOf(str.charAt(end - 1)) != INDEX_NOT_FOUND) { end--; } } return str.substring(0, end); }
Strips any of a set of characters from the end of a String. <p> A {@code null} input String returns {@code null}. An empty string ("") input returns the empty string. </p> <p> If the stripChars String is {@code null}, whitespace is stripped as defined by {@link Character#isWhitespace(char)}. </p> <pre> StringUtils.stripEnd(null, *) = null StringUtils.stripEnd("", *) = "" StringUtils.stripEnd("abc", "") = "abc" StringUtils.stripEnd("abc", null) = "abc" StringUtils.stripEnd(" abc", null) = " abc" StringUtils.stripEnd("abc ", null) = "abc" StringUtils.stripEnd(" abc ", null) = " abc" StringUtils.stripEnd(" abcyx", "xyz") = " abc" StringUtils.stripEnd("120.00", ".0") = "12" </pre> @param str the String to remove characters from, may be null. @param stripChars the set of characters to remove, null treated as whitespace. @return the stripped String, {@code null} if null String input.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
7,953
[ "str", "stripChars" ]
String
true
8
7.76
apache/commons-lang
2,896
javadoc
false
opj_mqc_raw_decode
static INLINE OPJ_UINT32 opj_mqc_raw_decode(opj_mqc_t *mqc) { OPJ_UINT32 d; if (mqc->ct == 0) { /* Given opj_mqc_raw_init_dec() we know that at some point we will */ /* have a 0xFF 0xFF artificial marker */ if (mqc->c == 0xff) { if (*mqc->bp > 0x8f) { mqc->c = 0xff; mqc->ct = 8; } else { mqc->c = *mqc->bp; mqc->bp ++; mqc->ct = 7; } } else { mqc->c = *mqc->bp; mqc->bp ++; mqc->ct = 8; } } mqc->ct--; d = ((OPJ_UINT32)mqc->c >> mqc->ct) & 0x01U; return d; }
Decode a symbol using raw-decoder. Cfr p.506 TAUBMAN @param mqc MQC handle @return Returns the decoded symbol (0 or 1)
cpp
3rdparty/openjpeg/openjp2/mqc_inl.h
74
[]
true
6
7.84
opencv/opencv
85,374
doxygen
false
_fill_limit_area_2d
def _fill_limit_area_2d( mask: npt.NDArray[np.bool_], limit_area: Literal["outside", "inside"] ) -> None: """Prepare 2d mask for ffill/bfill with limit_area. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { "outside", "inside" } Whether to limit filling to outside or inside the outer most non-NA value. """ neg_mask = ~mask.T if limit_area == "outside": # Identify inside la_mask = ( np.maximum.accumulate(neg_mask, axis=0) & np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1] ) else: # Identify outside la_mask = ( ~np.maximum.accumulate(neg_mask, axis=0) | ~np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1] ) mask[la_mask.T] = False
Prepare 2d mask for ffill/bfill with limit_area. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { "outside", "inside" } Whether to limit filling to outside or inside the outer most non-NA value.
python
pandas/core/missing.py
992
[ "mask", "limit_area" ]
None
true
3
6.88
pandas-dev/pandas
47,362
numpy
false
_box_pa
def _box_pa( cls, value, pa_type: pa.DataType | None = None ) -> pa.Array | pa.ChunkedArray | pa.Scalar: """ Box value into a pyarrow Array, ChunkedArray or Scalar. Parameters ---------- value : any pa_type : pa.DataType | None Returns ------- pa.Array or pa.ChunkedArray or pa.Scalar """ if isinstance(value, pa.Scalar) or not is_list_like(value): return cls._box_pa_scalar(value, pa_type) return cls._box_pa_array(value, pa_type)
Box value into a pyarrow Array, ChunkedArray or Scalar. Parameters ---------- value : any pa_type : pa.DataType | None Returns ------- pa.Array or pa.ChunkedArray or pa.Scalar
python
pandas/core/arrays/arrow/array.py
508
[ "cls", "value", "pa_type" ]
pa.Array | pa.ChunkedArray | pa.Scalar
true
3
6.72
pandas-dev/pandas
47,362
numpy
false
value
public XContentBuilder value(byte[] value) throws IOException { if (value == null) { return nullValue(); } generator.writeBinary(value); return this; }
@return the value of the "human readable" flag. When the value is equal to true, some types of values are written in a format easier to read for a human.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
777
[ "value" ]
XContentBuilder
true
2
7.04
elastic/elasticsearch
75,680
javadoc
false
may_share_memory
def may_share_memory(a, b, /, max_work=0): """ may_share_memory(a, b, /, max_work=0) Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem. See `shares_memory` for details. Default for ``may_share_memory`` is to do a bounds check. Returns ------- out : bool See Also -------- shares_memory Examples -------- >>> import numpy as np >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) >>> np.may_share_memory(x[:,0], x[:,1]) True """ return (a, b)
may_share_memory(a, b, /, max_work=0) Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. Parameters ---------- a, b : ndarray Input arrays max_work : int, optional Effort to spend on solving the overlap problem. See `shares_memory` for details. Default for ``may_share_memory`` is to do a bounds check. Returns ------- out : bool See Also -------- shares_memory Examples -------- >>> import numpy as np >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) >>> np.may_share_memory(x[:,0], x[:,1]) True
python
numpy/_core/multiarray.py
1,398
[ "a", "b", "max_work" ]
false
1
6.48
numpy/numpy
31,054
numpy
false
getConfiguredInstances
public <T> List<T> getConfiguredInstances(List<String> classNames, Class<T> t, Map<String, Object> configOverrides) { List<T> objects = new ArrayList<>(); if (classNames == null) return objects; Map<String, Object> configPairs = originals(); configPairs.putAll(configOverrides); try { for (Object klass : classNames) { Object o = getConfiguredInstance(klass, t, configPairs); objects.add(t.cast(o)); } } catch (Exception e) { for (Object object : objects) { maybeClose(object, "AutoCloseable object constructed and configured during failed call to getConfiguredInstances"); } throw e; } return objects; }
Get a list of configured instances of the given class specified by the given configuration key. The configuration may specify either null or an empty string to indicate no configured instances. In both cases, this method returns an empty list to indicate no configured instances. @param classNames The list of class names of the instances to create @param t The interface the class should implement @param configOverrides Configuration overrides to use. @return The list of configured instances
java
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
488
[ "classNames", "t", "configOverrides" ]
true
3
7.92
apache/kafka
31,560
javadoc
false
dtype
def dtype(self, idx: int = 0) -> torch.dtype:
    """Return the dtype of the input node at position ``idx``.

    Args:
        idx: Index of the node to get the dtype from (default: 0)

    Returns:
        The dtype reported by the selected input node.
    """
    node = self._input_nodes[idx]
    return node.get_dtype()
Get the dtype of a specific input node. Args: idx: Index of the node to get the dtype from (default: 0) Returns: The dtype of the specified input node
python
torch/_inductor/kernel_inputs.py
165
[ "self", "idx" ]
torch.dtype
true
1
6.56
pytorch/pytorch
96,034
google
false
meanOf
public static double meanOf(double... values) { checkArgument(values.length > 0); double mean = values[0]; for (int index = 1; index < values.length; index++) { double value = values[index]; if (isFinite(value) && isFinite(mean)) { // Art of Computer Programming vol. 2, Knuth, 4.2.2, (15) mean += (value - mean) / (index + 1); } else { mean = calculateNewMeanNonFinite(mean, value); } } return mean; }
Returns the <a href="http://en.wikipedia.org/wiki/Arithmetic_mean">arithmetic mean</a> of the values. The count must be non-zero. <p>The definition of the mean is the same as {@link Stats#mean}. @param values a series of values @throws IllegalArgumentException if the dataset is empty
java
android/guava/src/com/google/common/math/Stats.java
518
[]
true
4
6.72
google/guava
51,352
javadoc
false
get
/**
 * Returns the cached jar file URL for the given jar file, or {@code null} when absent.
 *
 * @param jarFile the jar file to look up
 * @return the cached {@link URL} or {@code null}
 */
URL get(JarFile jarFile) {
    // Guard the backing map so lookups see a consistent view under concurrent updates.
    synchronized (this) {
        URL cachedUrl = this.jarFileToJarFileUrl.get(jarFile);
        return cachedUrl;
    }
}
Get a jar file URL from the cache given a jar file. @param jarFile the jar file @return the cached {@link URL} or {@code null}
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFiles.java
168
[ "jarFile" ]
URL
true
1
6.4
spring-projects/spring-boot
79,428
javadoc
false
containsNoDescendantOf
/**
 * Returns whether none of the given sources may contain a descendant of the given name.
 *
 * @param sources the configuration property sources to check
 * @param name the property name whose descendants are tested
 * @return {@code true} when no source reports a possible descendant
 */
private boolean containsNoDescendantOf(Iterable<ConfigurationPropertySource> sources,
        ConfigurationPropertyName name) {
    for (ConfigurationPropertySource candidate : sources) {
        ConfigurationPropertyState state = candidate.containsDescendantOf(name);
        if (state != ConfigurationPropertyState.ABSENT) {
            return false;
        }
    }
    return true;
}
Bind the specified target {@link Bindable} using this binder's {@link ConfigurationPropertySource property sources} or create a new instance using the type of the {@link Bindable} if the result of the binding is {@code null}. @param name the configuration property name to bind @param target the target bindable @param handler the bind handler (may be {@code null}) @param <T> the bound or created type @return the bound or created object @since 2.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
539
[ "sources", "name" ]
true
2
7.92
spring-projects/spring-boot
79,428
javadoc
false
processBackgroundEvents
// Drains and processes the events produced by the ConsumerNetworkThread. If processing an
// event throws, the first error is captured, the remaining events are still processed, and
// that first error is rethrown once the batch completes. Returns whether any events were
// drained. Visible for testing.
boolean processBackgroundEvents() {
    AtomicReference<KafkaException> firstError = new AtomicReference<>();
    List<BackgroundEvent> events = backgroundEventHandler.drainEvents();
    if (!events.isEmpty()) {
        long startMs = time.milliseconds();
        for (BackgroundEvent event : events) {
            // Record how long this event sat in the queue before being processed.
            asyncConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs());
            try {
                // Track completable events with the reaper so they can be expired if never completed.
                if (event instanceof CompletableEvent)
                    backgroundEventReaper.add((CompletableEvent<?>) event);
                backgroundEventProcessor.process(event);
            } catch (Throwable t) {
                KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t);
                // Keep only the first error; log any subsequent ones so they are not lost silently.
                if (!firstError.compareAndSet(null, e))
                    log.warn("An error occurred when processing the background event: {}", e.getMessage(), e);
            }
        }
        asyncConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs);
    }
    // Expire any tracked completable events whose deadline has passed.
    backgroundEventReaper.reap(time.milliseconds());
    if (firstError.get() != null)
        throw firstError.get();
    return !events.isEmpty();
}
Process the events-if any-that were produced by the {@link ConsumerNetworkThread network thread}. It is possible that {@link ErrorEvent an error} could occur when processing the events. In such cases, the processor will take a reference to the first error, continue to process the remaining events, and then throw the first error that occurred. Visible for testing.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
2,199
[]
true
6
6.72
apache/kafka
31,560
javadoc
false
combine_kwargs
def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict: """ Used to combine two sources of kwargs for the backend engine. Use of kwargs is deprecated, this function is solely for use in 1.3 and should be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs or kwargs must be None or empty respectively. Parameters ---------- engine_kwargs: dict kwargs to be passed through to the engine. kwargs: dict kwargs to be psased through to the engine (deprecated) Returns ------- engine_kwargs combined with kwargs """ if engine_kwargs is None: result = {} else: result = engine_kwargs.copy() result.update(kwargs) return result
Used to combine two sources of kwargs for the backend engine. Use of kwargs is deprecated, this function is solely for use in 1.3 and should be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs or kwargs must be None or empty respectively. Parameters ---------- engine_kwargs: dict kwargs to be passed through to the engine. kwargs: dict kwargs to be passed through to the engine (deprecated) Returns ------- engine_kwargs combined with kwargs
python
pandas/io/excel/_util.py
304
[ "engine_kwargs", "kwargs" ]
dict
true
3
6.88
pandas-dev/pandas
47,362
numpy
false
stringify_path
def stringify_path(
    filepath_or_buffer: FilePath | BaseBufferT,
    convert_file_like: bool = False,
) -> str | BaseBufferT:
    """
    Attempt to convert a path-like object to a string.

    Parameters
    ----------
    filepath_or_buffer : object to be converted
    convert_file_like : bool, default False
        Whether file-like objects should be converted as well.

    Returns
    -------
    str_filepath_or_buffer : maybe a string version of the object

    Notes
    -----
    Objects supporting the fspath protocol are coerced according to its
    __fspath__ method.

    Any other object is passed through unchanged, which includes bytes,
    strings, buffers, or anything else that's not even path-like.
    """
    if not convert_file_like and is_file_like(filepath_or_buffer):
        # GH 38125: some fsspec objects implement os.PathLike but already hold an
        # open file; returning them untouched prevents opening the file twice.
        # infer_compression calls this function with convert_file_like=True.
        return cast(BaseBufferT, filepath_or_buffer)

    if isinstance(filepath_or_buffer, os.PathLike):
        return _expand_user(filepath_or_buffer.__fspath__())
    return _expand_user(filepath_or_buffer)
Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol are coerced according to its __fspath__ method. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like.
python
pandas/io/common.py
243
[ "filepath_or_buffer", "convert_file_like" ]
str | BaseBufferT
true
4
7.2
pandas-dev/pandas
47,362
numpy
false
of
/**
 * Creates a new TaggedFields object from alternating tags and fields.
 *
 * @param fields an array of Integer tags, each followed by its associated Field
 * @return the new {@link TaggedFields}
 */
public static TaggedFields of(Object... fields) {
    if ((fields.length & 1) != 0) {
        throw new RuntimeException("TaggedFields#of takes an even " +
            "number of parameters.");
    }
    TreeMap<Integer, Field> tagToField = new TreeMap<>();
    for (int i = 0; i + 1 < fields.length; i += 2) {
        tagToField.put((Integer) fields[i], (Field) fields[i + 1]);
    }
    return new TaggedFields(tagToField);
}
Create a new TaggedFields object with the given tags and fields. @param fields This is an array containing Integer tags followed by associated Field objects. @return The new {@link TaggedFields}
java
clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java
43
[]
TaggedFields
true
3
7.92
apache/kafka
31,560
javadoc
false
createReplacementArray
@VisibleForTesting static char[][] createReplacementArray(Map<Character, String> map) { checkNotNull(map); // GWT specific check (do not optimize) if (map.isEmpty()) { return EMPTY_REPLACEMENT_ARRAY; } char max = max(map.keySet()); char[][] replacements = new char[max + 1][]; for (Character c : map.keySet()) { replacements[c] = map.get(c).toCharArray(); } return replacements; }
Returns a new ArrayBasedEscaperMap for creating ArrayBasedCharEscaper or ArrayBasedUnicodeEscaper instances. @param replacements a map of characters to their escaped representations
java
android/guava/src/com/google/common/escape/ArrayBasedEscaperMap.java
66
[ "map" ]
true
2
6.08
google/guava
51,352
javadoc
false
advance
/**
 * Closes the current reader and, if another source remains, opens the next one.
 */
private void advance() throws IOException {
    close();
    if (!it.hasNext()) {
        return;
    }
    current = it.next().openStream();
}
Closes the current reader and opens the next one, if any.
java
android/guava/src/com/google/common/io/MultiReader.java
45
[]
void
true
2
7.04
google/guava
51,352
javadoc
false
andThen
/**
 * Returns a composed {@link ByteConsumer} that performs this operation and then the
 * {@code after} operation in sequence. Exceptions thrown by either operation are
 * relayed to the caller of the composed operation; if this operation throws,
 * {@code after} is not performed.
 *
 * @param after the operation to perform after this operation
 * @return a composed {@link ByteConsumer} performing this operation then {@code after}
 * @throws NullPointerException if {@code after} is null
 */
default ByteConsumer andThen(final ByteConsumer after) {
    Objects.requireNonNull(after);
    return (final byte value) -> {
        this.accept(value);
        after.accept(value);
    };
}
Returns a composed {@link ByteConsumer} that performs, in sequence, this operation followed by the {@code after} operation. If performing either operation throws an exception, it is relayed to the caller of the composed operation. If performing this operation throws an exception, the {@code after} operation will not be performed. @param after the operation to perform after this operation @return a composed {@link ByteConsumer} that performs in sequence this operation followed by the {@code after} operation @throws NullPointerException if {@code after} is null
java
src/main/java/org/apache/commons/lang3/function/ByteConsumer.java
61
[ "after" ]
ByteConsumer
true
1
6.56
apache/commons-lang
2,896
javadoc
false
emptyToNull
/**
 * Returns {@code string} itself when it is non-empty, otherwise {@code null}.
 *
 * @param string the string to test and possibly return
 * @return {@code string} if it is not empty; {@code null} otherwise
 */
static @Nullable String emptyToNull(@Nullable String string) {
    if (stringIsNullOrEmpty(string)) {
        return null;
    }
    return string;
}
Returns the string if it is not empty, or a null string otherwise. @param string the string to test and possibly return @return {@code string} if it is not empty; {@code null} otherwise
java
android/guava/src/com/google/common/base/Platform.java
77
[ "string" ]
String
true
2
8.16
google/guava
51,352
javadoc
false
toApiVersion
// Maps this API key to an ApiVersionsResponse entry, or empty when the API is entirely
// disabled (latest stable version below the oldest supported version).
//
// NOTE: to work around a critical librdkafka bug, the version range advertised for PRODUCE
// on the broker listener intentionally differs from the versions `produce` actually
// supports. See `PRODUCE_API_VERSIONS_RESPONSE_MIN_VERSION` for details. Documentation
// generators should not use this method for that reason.
private Optional<ApiVersionsResponseData.ApiVersion> toApiVersion(boolean enableUnstableLastVersion,
                                                                  Optional<ApiMessageType.ListenerType> listenerType) {
    // see `PRODUCE_API_VERSIONS_RESPONSE_MIN_VERSION` for details on why we do this
    short oldestVersion = (this == PRODUCE && listenerType.map(l -> l == ApiMessageType.ListenerType.BROKER).orElse(false)) ?
        PRODUCE_API_VERSIONS_RESPONSE_MIN_VERSION : oldestVersion();
    short latestVersion = latestVersion(enableUnstableLastVersion);
    // API is entirely disabled if latestStableVersion is smaller than oldestVersion.
    if (latestVersion >= oldestVersion) {
        return Optional.of(new ApiVersionsResponseData.ApiVersion()
            .setApiKey(messageType.apiKey())
            .setMinVersion(oldestVersion)
            .setMaxVersion(latestVersion));
    } else {
        return Optional.empty();
    }
}
To workaround a critical bug in librdkafka, the api versions response is inconsistent with the actual versions supported by `produce` - this method handles that. It should be called in the context of the api response protocol handling. It should not be used by code generating protocol documentation - we keep that consistent with the actual versions supported by `produce`. See `PRODUCE_API_VERSIONS_RESPONSE_MIN_VERSION` for details.
java
clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java
285
[ "enableUnstableLastVersion", "listenerType" ]
true
4
6
apache/kafka
31,560
javadoc
false
getMaxAssignmentSize
/**
 * Computes the maximum number of partitions that could be assigned from the given
 * subscribed topics.
 *
 * @param allSubscribedTopics the subscribed topics of a consumer
 * @return maximum assigned partition size
 */
private int getMaxAssignmentSize(List<String> allSubscribedTopics) {
    // Subscribed to every known topic: the answer is simply the precomputed total.
    if (allSubscribedTopics.size() == partitionsPerTopic.size()) {
        return totalPartitionsCount;
    }
    return allSubscribedTopics.stream()
            .mapToInt(topic -> partitionsPerTopic.get(topic).size())
            .sum();
}
get the maximum assigned partition size of the {@code allSubscribedTopics} @param allSubscribedTopics the subscribed topics of a consumer @return maximum assigned partition size
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
1,228
[ "allSubscribedTopics" ]
true
2
6.8
apache/kafka
31,560
javadoc
false
bindAggregate
/**
 * Binds an aggregate (collection, map or array) by delegating to the given
 * {@link AggregateBinder}, tracking the active source and bind depth on the context.
 *
 * @param name the configuration property name to bind
 * @param target the target bindable
 * @param handler the bind handler
 * @param context the current bind context
 * @param aggregateBinder the aggregate binder to delegate to
 * @return the bound aggregate or {@code null}
 */
private <T> @Nullable Object bindAggregate(ConfigurationPropertyName name, Bindable<T> target, BindHandler handler,
        Context context, AggregateBinder<?> aggregateBinder) {
    AggregateElementBinder elementBinder = (itemName, itemTarget, source) -> {
        // Resolve the recursion policy eagerly so it reflects the source at element-bind time.
        boolean allowRecursiveBinding = aggregateBinder.isAllowRecursiveBinding(source);
        return context.withSource(source,
                () -> bind(itemName, itemTarget, handler, context, allowRecursiveBinding, false));
    };
    return context.withIncreasedDepth(() -> aggregateBinder.bind(name, target, elementBinder));
}
Bind the specified target {@link Bindable} using this binder's {@link ConfigurationPropertySource property sources} or create a new instance using the type of the {@link Bindable} if the result of the binding is {@code null}. @param name the configuration property name to bind @param target the target bindable @param handler the bind handler (may be {@code null}) @param <T> the bound or created type @return the bound or created object @since 2.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
462
[ "name", "target", "handler", "context", "aggregateBinder" ]
Object
true
1
6.72
spring-projects/spring-boot
79,428
javadoc
false
opt_func_info
def opt_func_info(func_name=None, signature=None): """ Returns a dictionary containing the currently supported CPU dispatched features for all optimized functions. Parameters ---------- func_name : str (optional) Regular expression to filter by function name. signature : str (optional) Regular expression to filter by data type. Returns ------- dict A dictionary where keys are optimized function names and values are nested dictionaries indicating supported targets based on data types. Examples -------- Retrieve dispatch information for functions named 'add' or 'sub' and data types 'float64' or 'float32': >>> import numpy as np >>> dict = np.lib.introspect.opt_func_info( ... func_name="add|abs", signature="float64|complex64" ... ) >>> import json >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { "current": "SSE41", "available": "SSE41 baseline(SSE SSE2 SSE3)" }, "Ff": { "current": "FMA3__AVX2", "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" }, "Dd": { "current": "FMA3__AVX2", "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" } }, "add": { "ddd": { "current": "FMA3__AVX2", "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" }, "FFF": { "current": "FMA3__AVX2", "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" } } } """ import re from numpy._core._multiarray_umath import __cpu_targets_info__ as targets, dtype if func_name is not None: func_pattern = re.compile(func_name) matching_funcs = { k: v for k, v in targets.items() if func_pattern.search(k) } else: matching_funcs = targets if signature is not None: sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): matching_chars = {} for chars, targets in v.items(): if any( sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars ): matching_chars[chars] = targets # noqa: PERF403 if matching_chars: matching_sigs[k] = matching_chars else: matching_sigs = matching_funcs return matching_sigs
Returns a dictionary containing the currently supported CPU dispatched features for all optimized functions. Parameters ---------- func_name : str (optional) Regular expression to filter by function name. signature : str (optional) Regular expression to filter by data type. Returns ------- dict A dictionary where keys are optimized function names and values are nested dictionaries indicating supported targets based on data types. Examples -------- Retrieve dispatch information for functions named 'add' or 'abs' and data types 'float64' or 'complex64': >>> import numpy as np >>> dict = np.lib.introspect.opt_func_info( ... func_name="add|abs", signature="float64|complex64" ... ) >>> import json >>> print(json.dumps(dict, indent=2)) # may vary (architecture) { "absolute": { "dd": { "current": "SSE41", "available": "SSE41 baseline(SSE SSE2 SSE3)" }, "Ff": { "current": "FMA3__AVX2", "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" }, "Dd": { "current": "FMA3__AVX2", "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" } }, "add": { "ddd": { "current": "FMA3__AVX2", "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" }, "FFF": { "current": "FMA3__AVX2", "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" } } }
python
numpy/lib/introspect.py
8
[ "func_name", "signature" ]
false
10
6.96
numpy/numpy
31,054
numpy
false