function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
hasStaticLocalVariable
static bool hasStaticLocalVariable(const Stmt *Cond) { if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) { if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) if (VD->isStaticLocal()) return true; if (const auto *BD = dyn_cast<BindingDecl>(DRE->getDecl())) if (const auto *DD = dyn_cast<DecompositionDecl>(BD->getDecomposedDecl())) if (DD->isStaticLocal()) return true; } return llvm::any_of(Cond->children(), [](const Stmt *Child) { return Child && hasStaticLocalVariable(Child); }); }
returns true iff `Cond` involves at least one static local variable.
cpp
clang-tools-extra/clang-tidy/bugprone/InfiniteLoopCheck.cpp
225
[]
true
8
6.88
llvm/llvm-project
36,021
doxygen
false
getTypeArguments
private static Map<TypeVariable<?>, Type> getTypeArguments(final Type type, final Class<?> toClass, final Map<TypeVariable<?>, Type> subtypeVarAssigns) { if (type instanceof Class<?>) { return getTypeArguments((Class<?>) type, toClass, subtypeVarAssigns); } if (type instanceof ParameterizedType) { return getTypeArguments((ParameterizedType) type, toClass, subtypeVarAssigns); } if (type instanceof GenericArrayType) { return getTypeArguments(((GenericArrayType) type).getGenericComponentType(), toClass.isArray() ? toClass.getComponentType() : toClass, subtypeVarAssigns); } // since wildcard types are not assignable to classes, should this just // return null? if (type instanceof WildcardType) { for (final Type bound : getImplicitUpperBounds((WildcardType) type)) { // find the first bound that is assignable to the target class if (isAssignable(bound, toClass)) { return getTypeArguments(bound, toClass, subtypeVarAssigns); } } return null; } if (type instanceof TypeVariable<?>) { for (final Type bound : getImplicitBounds((TypeVariable<?>) type)) { // find the first bound that is assignable to the target class if (isAssignable(bound, toClass)) { return getTypeArguments(bound, toClass, subtypeVarAssigns); } } return null; } throw new IllegalStateException("found an unhandled type: " + type); }
Gets a map of the type arguments of {@code type} in the context of {@code toClass}. @param type the type in question. @param toClass the class. @param subtypeVarAssigns a map with type variables. @return the {@link Map} with type arguments.
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
884
[ "type", "toClass", "subtypeVarAssigns" ]
true
9
8.08
apache/commons-lang
2,896
javadoc
false
invoke
@Override public @Nullable Object invoke(final MethodInvocation invocation) throws Throwable { Class<?> targetClass = (invocation.getThis() != null ? AopUtils.getTargetClass(invocation.getThis()) : null); final Method userMethod = BridgeMethodResolver.getMostSpecificMethod(invocation.getMethod(), targetClass); AsyncTaskExecutor executor = determineAsyncExecutor(userMethod); if (executor == null) { throw new IllegalStateException( "No executor specified and no default executor set on AsyncExecutionInterceptor either"); } Callable<Object> task = () -> { try { Object result = invocation.proceed(); if (result instanceof Future<?> future) { return future.get(); } } catch (ExecutionException ex) { Throwable cause = ex.getCause(); handleError(cause == null ? ex : cause, userMethod, invocation.getArguments()); } catch (Throwable ex) { handleError(ex, userMethod, invocation.getArguments()); } return null; }; return doSubmit(task, executor, userMethod.getReturnType()); }
Intercept the given method invocation, submit the actual calling of the method to the correct task executor and return immediately to the caller. @param invocation the method to intercept and make asynchronous @return {@link Future} if the original method returns {@code Future}; {@code null} otherwise.
java
spring-aop/src/main/java/org/springframework/aop/interceptor/AsyncExecutionInterceptor.java
99
[ "invocation" ]
Object
true
7
7.92
spring-projects/spring-framework
59,386
javadoc
false
handleResponse
LookupResult<T> handleResponse(Set<T> keys, AbstractResponse response);
Callback that is invoked when a lookup request returns successfully. The handler should parse the response, check for errors, and return a result indicating which keys were mapped to a brokerId successfully and which keys received a fatal error (e.g. a topic authorization failure). Note that keys which receive a retriable error should be left out of the result. They will be retried automatically. For example, if the response of `FindCoordinator` request indicates an unavailable coordinator, then the key should be left out of the result so that the request will be retried. @param keys the set of keys from the associated request @param response the response received from the broker @return a result indicating which keys mapped successfully to a brokerId and which encountered a fatal error
java
clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminApiLookupStrategy.java
84
[ "keys", "response" ]
true
1
6.48
apache/kafka
31,560
javadoc
false
truncatedEquals
public static boolean truncatedEquals(final Calendar cal1, final Calendar cal2, final int field) { return truncatedCompareTo(cal1, cal2, field) == 0; }
Determines if two calendars are equal up to no more than the specified most significant field. @param cal1 the first calendar, not {@code null}. @param cal2 the second calendar, not {@code null}. @param field the field from {@link Calendar}. @return {@code true} if equal; otherwise {@code false}. @throws NullPointerException if any argument is {@code null}. @see #truncate(Calendar, int) @see #truncatedEquals(Date, Date, int) @since 3.0
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
1,824
[ "cal1", "cal2", "field" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
header_data_from_array_1_0
def header_data_from_array_1_0(array): """ Get the dictionary of header metadata from a numpy.ndarray. Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file. """ d = {'shape': array.shape} if array.flags.c_contiguous: d['fortran_order'] = False elif array.flags.f_contiguous: d['fortran_order'] = True else: # Totally non-contiguous data. We will have to make it C-contiguous # before writing. Note that we need to test for C_CONTIGUOUS first # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. d['fortran_order'] = False d['descr'] = dtype_to_descr(array.dtype) return d
Get the dictionary of header metadata from a numpy.ndarray. Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file.
python
numpy/lib/_format_impl.py
369
[ "array" ]
false
4
6.08
numpy/numpy
31,054
numpy
false
clone
public static int[] clone(final int[] array) { return array != null ? array.clone() : null; }
Clones an array or returns {@code null}. <p> This method returns {@code null} for a {@code null} input array. </p> @param array the array to clone, may be {@code null}. @return the cloned array, {@code null} if {@code null} input.
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
1,518
[ "array" ]
true
2
8.16
apache/commons-lang
2,896
javadoc
false
codeActionForFixWorker
function codeActionForFixWorker( changes: textChanges.ChangeTracker, sourceFile: SourceFile, symbolName: string, fix: ImportFix, includeSymbolNameInDescription: boolean, program: Program, preferences: UserPreferences, ): DiagnosticOrDiagnosticAndArguments { const quotePreference = getQuotePreference(sourceFile, preferences); switch (fix.kind) { case ImportFixKind.UseNamespace: addNamespaceQualifier(changes, sourceFile, fix); return [Diagnostics.Change_0_to_1, symbolName, `${fix.namespacePrefix}.${symbolName}`]; case ImportFixKind.JsdocTypeImport: addImportType(changes, sourceFile, fix, quotePreference); return [Diagnostics.Change_0_to_1, symbolName, getImportTypePrefix(fix.moduleSpecifier, quotePreference) + symbolName]; case ImportFixKind.AddToExisting: { const { importClauseOrBindingPattern, importKind, addAsTypeOnly, moduleSpecifier } = fix; doAddExistingFix( changes, sourceFile, importClauseOrBindingPattern, importKind === ImportKind.Default ? { name: symbolName, addAsTypeOnly } : undefined, importKind === ImportKind.Named ? [{ name: symbolName, addAsTypeOnly }] : emptyArray, /*removeExistingImportSpecifiers*/ undefined, preferences, ); const moduleSpecifierWithoutQuotes = stripQuotes(moduleSpecifier); return includeSymbolNameInDescription ? [Diagnostics.Import_0_from_1, symbolName, moduleSpecifierWithoutQuotes] : [Diagnostics.Update_import_from_0, moduleSpecifierWithoutQuotes]; } case ImportFixKind.AddNew: { const { importKind, moduleSpecifier, addAsTypeOnly, useRequire, qualification } = fix; const getDeclarations = useRequire ? getNewRequires : getNewImports; const defaultImport: Import | undefined = importKind === ImportKind.Default ? { name: symbolName, addAsTypeOnly } : undefined; const namedImports: Import[] | undefined = importKind === ImportKind.Named ? [{ name: symbolName, addAsTypeOnly }] : undefined; const namespaceLikeImport = importKind === ImportKind.Namespace || importKind === ImportKind.CommonJS ? 
{ importKind, name: qualification?.namespacePrefix || symbolName, addAsTypeOnly } : undefined; insertImports( changes, sourceFile, getDeclarations( moduleSpecifier, quotePreference, defaultImport, namedImports, namespaceLikeImport, program.getCompilerOptions(), preferences, ), /*blankLineBetween*/ true, preferences, ); if (qualification) { addNamespaceQualifier(changes, sourceFile, qualification); } return includeSymbolNameInDescription ? [Diagnostics.Import_0_from_1, symbolName, moduleSpecifier] : [Diagnostics.Add_import_from_0, moduleSpecifier]; } case ImportFixKind.PromoteTypeOnly: { const { typeOnlyAliasDeclaration } = fix; const promotedDeclaration = promoteFromTypeOnly(changes, typeOnlyAliasDeclaration, program, sourceFile, preferences); return promotedDeclaration.kind === SyntaxKind.ImportSpecifier ? [Diagnostics.Remove_type_from_import_of_0_from_1, symbolName, getModuleSpecifierText(promotedDeclaration.parent.parent)] : [Diagnostics.Remove_type_from_import_declaration_from_0, getModuleSpecifierText(promotedDeclaration)]; } default: return Debug.assertNever(fix, `Unexpected fix kind ${(fix as ImportFix).kind}`); } }
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`. (In other words, do not allow `const x = require("...")` for JS files.) @internal
typescript
src/services/codefixes/importFixes.ts
1,711
[ "changes", "sourceFile", "symbolName", "fix", "includeSymbolNameInDescription", "program", "preferences" ]
true
13
6.64
microsoft/TypeScript
107,154
jsdoc
false
create_program_file
def create_program_file(python_code: str) -> str: """ Create a temporary Python file from the generated code. Args: python_code: String containing Python code to write Returns: Path to the created temporary file """ import hashlib # Generate a deterministic filename based on code content hash code_hash = hashlib.md5(python_code.encode()).hexdigest()[:8] # noqa: S324 tmp_dir = "/tmp/torchfuzz" os.makedirs(tmp_dir, exist_ok=True) generated_file_path = os.path.join(tmp_dir, f"fuzz_{code_hash}.py") # Write the generated code to the specified file with open(generated_file_path, "w") as f: f.write(python_code) return generated_file_path
Create a temporary Python file from the generated code. Args: python_code: String containing Python code to write Returns: Path to the created temporary file
python
tools/experimental/torchfuzz/codegen.py
722
[ "python_code" ]
str
true
1
6.72
pytorch/pytorch
96,034
google
false
fuzz_spec
def fuzz_spec(template: str = "default") -> Spec: """ Generate a random Spec (either TensorSpec or ScalarSpec) using template's distribution preferences. Args: template: Template name to determine configuration and distribution Returns: Spec: Either a TensorSpec or ScalarSpec according to template's distribution """ # Try to use template's custom distribution if available try: # Instantiate template if template == "dtensor": from torchfuzz.codegen import DTensorFuzzTemplate fuzz_template = DTensorFuzzTemplate() elif template == "unbacked": from torchfuzz.codegen import UnbackedFuzzTemplate fuzz_template = UnbackedFuzzTemplate() else: from torchfuzz.codegen import DefaultFuzzTemplate fuzz_template = DefaultFuzzTemplate() # Use template's custom spec generation return fuzz_template.fuzz_spec_custom() except Exception: # Fallback to original hardcoded behavior if template fails # Get random dtype based on template dtype = fuzz_torch_tensor_type(template) # 20% probability of returning ScalarSpec if random.random() < 0.2: return ScalarSpec(dtype=dtype) # 80% probability of returning TensorSpec # Get random size and corresponding stride size = fuzz_tensor_size() stride = fuzz_valid_stride(size) return TensorSpec(size=size, stride=stride, dtype=dtype)
Generate a random Spec (either TensorSpec or ScalarSpec) using template's distribution preferences. Args: template: Template name to determine configuration and distribution Returns: Spec: Either a TensorSpec or ScalarSpec according to template's distribution
python
tools/experimental/torchfuzz/ops_fuzzer.py
226
[ "template" ]
Spec
true
5
7.44
pytorch/pytorch
96,034
google
false
destroyPrototypeInstance
protected void destroyPrototypeInstance(Object target) { if (logger.isDebugEnabled()) { logger.debug("Destroying instance of bean '" + this.targetBeanName + "'"); } if (getBeanFactory() instanceof ConfigurableBeanFactory cbf) { cbf.destroyBean(getTargetBeanName(), target); } else if (target instanceof DisposableBean disposableBean) { try { disposableBean.destroy(); } catch (Throwable ex) { logger.warn("Destroy method on bean with name '" + this.targetBeanName + "' threw an exception", ex); } } }
Subclasses should call this method to destroy an obsolete prototype instance. @param target the bean instance to destroy
java
spring-aop/src/main/java/org/springframework/aop/target/AbstractPrototypeBasedTargetSource.java
76
[ "target" ]
void
true
5
6.88
spring-projects/spring-framework
59,386
javadoc
false
get_table_primary_key
def get_table_primary_key( self, table: str, database: str, schema: str | None = "public", cluster_identifier: str | None = None, workgroup_name: str | None = None, db_user: str | None = None, secret_arn: str | None = None, statement_name: str | None = None, with_event: bool = False, wait_for_completion: bool = True, poll_interval: int = 10, ) -> list[str] | None: """ Return the table primary key. Copied from ``RedshiftSQLHook.get_table_primary_key()`` :param table: Name of the target table :param database: the name of the database :param schema: Name of the target schema, public by default :param cluster_identifier: unique identifier of a cluster :param workgroup_name: name of the Redshift Serverless workgroup. Mutually exclusive with `cluster_identifier`. Specify this parameter to query Redshift Serverless. More info https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-serverless.html :param db_user: the database username :param secret_arn: the name or ARN of the secret that enables db access :param statement_name: the name of the SQL statement :param with_event: indicates whether to send an event to EventBridge :param wait_for_completion: indicates whether to wait for a result, if True wait, if False don't wait :param poll_interval: how often in seconds to check the query status :return: Primary key columns list """ sql = f""" select kcu.column_name from information_schema.table_constraints tco join information_schema.key_column_usage kcu on kcu.constraint_name = tco.constraint_name and kcu.constraint_schema = tco.constraint_schema and kcu.constraint_name = tco.constraint_name where tco.constraint_type = 'PRIMARY KEY' and kcu.table_schema = {schema} and kcu.table_name = {table} """ stmt_id = self.execute_query( sql=sql, database=database, cluster_identifier=cluster_identifier, workgroup_name=workgroup_name, db_user=db_user, secret_arn=secret_arn, statement_name=statement_name, with_event=with_event, wait_for_completion=wait_for_completion, 
poll_interval=poll_interval, ).statement_id pk_columns = [] token = "" while True: kwargs = {"Id": stmt_id} if token: kwargs["NextToken"] = token response = self.conn.get_statement_result(**kwargs) # we only select a single column (that is a string), # so safe to assume that there is only a single col in the record pk_columns += [y["stringValue"] for x in response["Records"] for y in x] if "NextToken" in response: token = response["NextToken"] else: break return pk_columns or None
Return the table primary key. Copied from ``RedshiftSQLHook.get_table_primary_key()`` :param table: Name of the target table :param database: the name of the database :param schema: Name of the target schema, public by default :param cluster_identifier: unique identifier of a cluster :param workgroup_name: name of the Redshift Serverless workgroup. Mutually exclusive with `cluster_identifier`. Specify this parameter to query Redshift Serverless. More info https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-serverless.html :param db_user: the database username :param secret_arn: the name or ARN of the secret that enables db access :param statement_name: the name of the SQL statement :param with_event: indicates whether to send an event to EventBridge :param wait_for_completion: indicates whether to wait for a result, if True wait, if False don't wait :param poll_interval: how often in seconds to check the query status :return: Primary key columns list
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_data.py
195
[ "self", "table", "database", "schema", "cluster_identifier", "workgroup_name", "db_user", "secret_arn", "statement_name", "with_event", "wait_for_completion", "poll_interval" ]
list[str] | None
true
6
7.52
apache/airflow
43,597
sphinx
false
isocalendar
def isocalendar(self) -> DataFrame: """ Calculate year, week, and day according to the ISO 8601 standard. Returns ------- DataFrame With columns year, week and day. See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start="2019-12-29", freq="D", periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32 """ from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values, reso=self._creso) iso_calendar_df = DataFrame( sarray, columns=["year", "week", "day"], dtype="UInt32" ) if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df
Calculate year, week, and day according to the ISO 8601 standard. Returns ------- DataFrame With columns year, week and day. See Also -------- Timestamp.isocalendar : Function return a 3-tuple containing ISO year, week number, and weekday for the given Timestamp object. datetime.date.isocalendar : Return a named tuple object with three components: year, week and weekday. Examples -------- >>> idx = pd.date_range(start="2019-12-29", freq="D", periods=4) >>> idx.isocalendar() year week day 2019-12-29 2019 52 7 2019-12-30 2020 1 1 2019-12-31 2020 1 2 2020-01-01 2020 1 3 >>> idx.isocalendar().week 2019-12-29 52 2019-12-30 1 2019-12-31 1 2020-01-01 1 Freq: D, Name: week, dtype: UInt32
python
pandas/core/arrays/datetimes.py
1,554
[ "self" ]
DataFrame
true
2
8.48
pandas-dev/pandas
47,362
unknown
false
crossOverUp
int crossOverUp(int index, E x) { if (index == 0) { queue[0] = x; return 0; } int parentIndex = getParentIndex(index); E parentElement = elementData(parentIndex); if (parentIndex != 0) { /* * This is a guard for the case of the childless aunt node. Since the end of the array is * actually the middle of the heap, a smaller childless aunt node can become a child of x * when we bubble up alternate levels, violating the invariant. */ int grandparentIndex = getParentIndex(parentIndex); int auntIndex = getRightChildIndex(grandparentIndex); if (auntIndex != parentIndex && getLeftChildIndex(auntIndex) >= size) { E auntElement = elementData(auntIndex); if (ordering.compare(auntElement, parentElement) < 0) { parentIndex = auntIndex; parentElement = auntElement; } } } if (ordering.compare(parentElement, x) < 0) { queue[index] = parentElement; queue[parentIndex] = x; return parentIndex; } queue[index] = x; return index; }
Moves an element one level up from a min level to a max level (or vice versa). Returns the new position of the element.
java
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
633
[ "index", "x" ]
true
7
6
google/guava
51,352
javadoc
false
equalsPropertyValues
private boolean equalsPropertyValues(AbstractBeanDefinition other) { if (!hasPropertyValues()) { return !other.hasPropertyValues(); } return ObjectUtils.nullSafeEquals(this.propertyValues, other.propertyValues); }
Clone this bean definition. To be implemented by concrete subclasses. @return the cloned bean definition object
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanDefinition.java
1,332
[ "other" ]
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
poll
@Override public ConsumerRecords<K, V> poll(final Duration timeout) { Timer timer = time.timer(timeout); acquireAndEnsureOpen(); try { kafkaConsumerMetrics.recordPollStart(timer.currentTimeMs()); if (subscriptions.hasNoSubscriptionOrUserAssignment()) { throw new IllegalStateException("Consumer is not subscribed to any topics or assigned any partitions"); } // This distinguishes the first pass of the inner do/while loop from subsequent passes for the // inflight poll event logic. boolean firstPass = true; do { // We must not allow wake-ups between polling for fetches and returning the records. // If the polled fetches are not empty the consumed position has already been updated in the polling // of the fetches. A wakeup between returned fetches and returning records would lead to never // returning the records in the fetches. Thus, we trigger a possible wake-up before we poll fetches. wakeupTrigger.maybeTriggerWakeup(); checkInflightPoll(timer, firstPass); firstPass = false; final Fetch<K, V> fetch = pollForFetches(timer); if (!fetch.isEmpty()) { // before returning the fetched records, we can send off the next round of fetches // and avoid block waiting for their responses to enable pipelining while the user // is handling the fetched records. // // NOTE: since the consumed position has already been updated, we must not allow // wakeups or any other errors to be triggered prior to returning the fetched records. sendPrefetches(timer); if (fetch.records().isEmpty()) { log.trace("Returning empty records from `poll()` " + "since the consumer's position has advanced for at least one topic partition"); } return interceptors.onConsume(new ConsumerRecords<>(fetch.records(), fetch.nextOffsets())); } // We will wait for retryBackoffMs } while (timer.notExpired()); return ConsumerRecords.empty(); } finally { kafkaConsumerMetrics.recordPollEnd(timer.currentTimeMs()); release(); } }
poll implementation using {@link ApplicationEventHandler}. 1. Poll for background events. If there's a fetch response event, process the record and return it. If it is another type of event, process it. 2. Send fetches if needed. If the timeout expires, return an empty ConsumerRecord. @param timeout timeout of the poll loop @return ConsumerRecord. It can be empty if time timeout expires. @throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this function is called @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while this function is called @throws org.apache.kafka.common.errors.RecordTooLargeException if the fetched record is larger than the maximum allowable size @throws org.apache.kafka.common.KafkaException for any other unrecoverable errors @throws java.lang.IllegalStateException if the consumer is not subscribed to any topics or manually assigned any partitions to consume from or an unexpected error occurred @throws org.apache.kafka.clients.consumer.OffsetOutOfRangeException if the fetch position of the consumer is out of range and no offset reset policy is configured. @throws org.apache.kafka.common.errors.TopicAuthorizationException if the consumer is not authorized to read from a partition @throws org.apache.kafka.common.errors.SerializationException if the fetched records cannot be deserialized @throws org.apache.kafka.common.errors.UnsupportedAssignorException if the `group.remote.assignor` configuration is set to an assignor that is not available on the broker.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
835
[ "timeout" ]
true
4
7.6
apache/kafka
31,560
javadoc
false
readLines
@Deprecated @InlineMe( replacement = "Files.asCharSource(file, charset).readLines(callback)", imports = "com.google.common.io.Files") @CanIgnoreReturnValue // some processors won't return a useful result @ParametricNullness public static <T extends @Nullable Object> T readLines( File file, Charset charset, LineProcessor<T> callback) throws IOException { return asCharSource(file, charset).readLines(callback); }
Streams lines from a {@link File}, stopping when our callback returns false, or we have read all of the lines. @param file the file to read from @param charset the charset used to decode the input stream; see {@link StandardCharsets} for helpful predefined constants @param callback the {@link LineProcessor} to use to handle the lines @return the output of processing the lines @throws IOException if an I/O error occurs @deprecated Prefer {@code asCharSource(file, charset).readLines(callback)}.
java
android/guava/src/com/google/common/io/Files.java
576
[ "file", "charset", "callback" ]
T
true
1
6.72
google/guava
51,352
javadoc
false
processTemplateIntoString
public static String processTemplateIntoString(Template template, Object model) throws IOException, TemplateException { StringWriter result = new StringWriter(1024); template.process(model, result); return result.toString(); }
Process the specified FreeMarker template with the given model and write the result to a String. <p>When using this method to prepare text for a mail to be sent with Spring's mail support, consider wrapping IO/TemplateException in MailPreparationException. @param model the model object, typically a Map that contains model names as keys and model objects as values @return the result as a String @throws IOException if the template wasn't found or couldn't be read @throws freemarker.template.TemplateException if rendering failed @see org.springframework.mail.MailPreparationException
java
spring-context-support/src/main/java/org/springframework/ui/freemarker/FreeMarkerTemplateUtils.java
47
[ "template", "model" ]
String
true
1
6.4
spring-projects/spring-framework
59,386
javadoc
false
parseIdentifierNameErrorOnUnicodeEscapeSequence
function parseIdentifierNameErrorOnUnicodeEscapeSequence(): Identifier { if (scanner.hasUnicodeEscape() || scanner.hasExtendedUnicodeEscape()) { parseErrorAtCurrentToken(Diagnostics.Unicode_escape_sequence_cannot_appear_here); } return createIdentifier(tokenIsIdentifierOrKeyword(token())); }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
2,696
[]
true
3
6.72
microsoft/TypeScript
107,154
jsdoc
false
mayNeedHiddenThis
private boolean mayNeedHiddenThis() { Class<?> declaringClass = constructor.getDeclaringClass(); if (declaringClass.getEnclosingConstructor() != null) { // Enclosed in a constructor, needs hidden this return true; } Method enclosingMethod = declaringClass.getEnclosingMethod(); if (enclosingMethod != null) { // Enclosed in a method, if it's not static, must need hidden this. return !Modifier.isStatic(enclosingMethod.getModifiers()); } else { // Strictly, this doesn't necessarily indicate a hidden 'this' in the case of // static initializer. But there seems no way to tell in that case. :( // This may cause issues when an anonymous class is created inside a static initializer, // and the class's constructor's first parameter happens to be the enclosing class. // In such case, we may mistakenly think that the class is within a non-static context // and the first parameter is the hidden 'this'. return declaringClass.getEnclosingClass() != null && !Modifier.isStatic(declaringClass.getModifiers()); } }
{@inheritDoc} <p>{@code [<E>]} will be returned for ArrayList's constructor. When both the class and the constructor have type parameters, the class parameters are prepended before those of the constructor's. This is an arbitrary rule since no existing language spec mandates one way or the other. From the declaration syntax, the class type parameter appears first, but the call syntax may show up in opposite order such as {@code new <A>Foo<B>()}.
java
android/guava/src/com/google/common/reflect/Invokable.java
489
[]
true
4
6.88
google/guava
51,352
javadoc
false
pop
def pop(self, name: str, default: t.Any = _sentinel) -> t.Any: """Get and remove an attribute by name. Like :meth:`dict.pop`. :param name: Name of attribute to pop. :param default: Value to return if the attribute is not present, instead of raising a ``KeyError``. .. versionadded:: 0.11 """ if default is _sentinel: return self.__dict__.pop(name) else: return self.__dict__.pop(name, default)
Get and remove an attribute by name. Like :meth:`dict.pop`. :param name: Name of attribute to pop. :param default: Value to return if the attribute is not present, instead of raising a ``KeyError``. .. versionadded:: 0.11
python
src/flask/ctx.py
78
[ "self", "name", "default" ]
t.Any
true
3
6.88
pallets/flask
70,946
sphinx
false
newReference
public static <V> AtomicReference<@Nullable V> newReference() { return new AtomicReference<>(); }
Creates an {@code AtomicReference} instance with no initial value. @return a new {@code AtomicReference} with no initial value
java
android/guava/src/com/google/common/util/concurrent/Atomics.java
37
[]
true
1
6
google/guava
51,352
javadoc
false
readHeader
private void readHeader() throws IOException { // read first 6 bytes into buffer to check magic and FLG/BD descriptor flags if (in.remaining() < 6) { throw new IOException(PREMATURE_EOS); } if (MAGIC != in.getInt()) { throw new IOException(NOT_SUPPORTED); } // mark start of data to checksum in.mark(); flg = FLG.fromByte(in.get()); maxBlockSize = BD.fromByte(in.get()).getBlockMaximumSize(); if (flg.isContentSizeSet()) { if (in.remaining() < 8) { throw new IOException(PREMATURE_EOS); } in.position(in.position() + 8); } // Final byte of Frame Descriptor is HC checksum // Old implementations produced incorrect HC checksums if (ignoreFlagDescriptorChecksum) { in.position(in.position() + 1); return; } int len = in.position() - in.reset().position(); int hash = CHECKSUM.hash(in, in.position(), len, 0); in.position(in.position() + len); if (in.get() != (byte) ((hash >> 8) & 0xFF)) { throw new IOException(DESCRIPTOR_HASH_MISMATCH); } }
Reads the magic number and frame descriptor from input buffer. @throws IOException
java
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockInputStream.java
115
[]
void
true
7
7.04
apache/kafka
31,560
javadoc
false
request
function request(...args) { let options = {}; if (typeof args[0] === 'string') { const urlStr = ArrayPrototypeShift(args); options = urlToHttpOptions(new URL(urlStr)); } else if (isURL(args[0])) { options = urlToHttpOptions(ArrayPrototypeShift(args)); } if (args[0] && typeof args[0] !== 'function') { ObjectAssign(options, ArrayPrototypeShift(args)); } options._defaultAgent = module.exports.globalAgent; ArrayPrototypeUnshift(args, options); return ReflectConstruct(ClientRequest, args); }
Makes a request to a secure web server. @param {...any} args @returns {ClientRequest}
javascript
lib/https.js
612
[]
false
6
7.28
nodejs/node
114,839
jsdoc
false
getHostAndPortFromBracketedHost
private static String[] getHostAndPortFromBracketedHost(String hostPortString) { checkArgument( hostPortString.charAt(0) == '[', "Bracketed host-port string must start with a bracket: %s", hostPortString); int colonIndex = hostPortString.indexOf(':'); int closeBracketIndex = hostPortString.lastIndexOf(']'); checkArgument( colonIndex > -1 && closeBracketIndex > colonIndex, "Invalid bracketed host/port: %s", hostPortString); String host = hostPortString.substring(1, closeBracketIndex); if (closeBracketIndex + 1 == hostPortString.length()) { return new String[] {host, ""}; } else { checkArgument( hostPortString.charAt(closeBracketIndex + 1) == ':', "Only a colon may follow a close bracket: %s", hostPortString); for (int i = closeBracketIndex + 2; i < hostPortString.length(); ++i) { checkArgument( Character.isDigit(hostPortString.charAt(i)), "Port must be numeric: %s", hostPortString); } return new String[] {host, hostPortString.substring(closeBracketIndex + 2)}; } }
Parses a bracketed host-port string, throwing IllegalArgumentException if parsing fails. @param hostPortString the full bracketed host-port specification. Port might not be specified. @return an array with 2 strings: host and port, in that order. @throws IllegalArgumentException if parsing the bracketed host-port string fails.
java
android/guava/src/com/google/common/net/HostAndPort.java
210
[ "hostPortString" ]
true
4
7.6
google/guava
51,352
javadoc
false
run
public int run(boolean waitForProcess, Collection<String> args, Map<String, String> environmentVariables) throws IOException { ProcessBuilder builder = new ProcessBuilder(this.command); builder.directory(this.workingDirectory); builder.command().addAll(args); builder.environment().putAll(environmentVariables); builder.redirectErrorStream(true); builder.inheritIO(); try { Process process = builder.start(); this.process = process; SignalUtils.attachSignalHandler(this::handleSigInt); if (waitForProcess) { try { return process.waitFor(); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); return 1; } } return 5; } finally { if (waitForProcess) { this.endTime = System.currentTimeMillis(); this.process = null; } } }
Creates new {@link RunProcess} instance for the specified working directory and command. @param workingDirectory the working directory of the child process or {@code null} to run in the working directory of the current Java process @param command the program to execute and its arguments
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/RunProcess.java
74
[ "waitForProcess", "args", "environmentVariables" ]
true
4
6.4
spring-projects/spring-boot
79,428
javadoc
false
getLocalPropertyHandler
@Override protected @Nullable PropertyHandler getLocalPropertyHandler(String propertyName) { FieldPropertyHandler propertyHandler = this.fieldMap.get(propertyName); if (propertyHandler == null) { Field field = ReflectionUtils.findField(getWrappedClass(), propertyName); if (field != null) { propertyHandler = new FieldPropertyHandler(field); this.fieldMap.put(propertyName, propertyHandler); } } return propertyHandler; }
Create a new DirectFieldAccessor for the given object, registering a nested path that the object is in. @param object the object wrapped by this DirectFieldAccessor @param nestedPath the nested path of the object @param parent the containing DirectFieldAccessor (must not be {@code null})
java
spring-beans/src/main/java/org/springframework/beans/DirectFieldAccessor.java
75
[ "propertyName" ]
PropertyHandler
true
3
6.4
spring-projects/spring-framework
59,386
javadoc
false
strict
function strict(...args) { innerOk(strict, ...args); }
Expose a strict only variant of assert. @param {...any} args @returns {void}
javascript
lib/assert.js
881
[]
false
1
6.16
nodejs/node
114,839
jsdoc
false
createProxyClassAndInstance
@Override protected Object createProxyClassAndInstance(Enhancer enhancer, Callback[] callbacks) { Class<?> proxyClass = enhancer.createClass(); Object proxyInstance = null; if (objenesis.isWorthTrying()) { try { proxyInstance = objenesis.newInstance(proxyClass, enhancer.getUseCache()); } catch (Throwable ex) { logger.debug("Unable to instantiate proxy using Objenesis, " + "falling back to regular proxy construction", ex); } } if (proxyInstance == null) { // Regular instantiation via default constructor... try { Constructor<?> ctor = (this.constructorArgs != null ? proxyClass.getDeclaredConstructor(this.constructorArgTypes) : proxyClass.getDeclaredConstructor()); ReflectionUtils.makeAccessible(ctor); proxyInstance = (this.constructorArgs != null ? ctor.newInstance(this.constructorArgs) : ctor.newInstance()); } catch (Throwable ex) { throw new AopConfigException("Unable to instantiate proxy using Objenesis, " + "and regular proxy instantiation via default constructor fails as well", ex); } } ((Factory) proxyInstance).setCallbacks(callbacks); return proxyInstance; }
Create a new ObjenesisCglibAopProxy for the given AOP configuration. @param config the AOP configuration as AdvisedSupport object
java
spring-aop/src/main/java/org/springframework/aop/framework/ObjenesisCglibAopProxy.java
60
[ "enhancer", "callbacks" ]
Object
true
7
6.24
spring-projects/spring-framework
59,386
javadoc
false
secure
public static RandomUtils secure() { return SECURE; }
Gets the singleton instance based on {@link SecureRandom#SecureRandom()} which uses the default algorithm and provider of {@link SecureRandom}. <p> The method {@link SecureRandom#SecureRandom()} is called on-demand. </p> @return the singleton instance based on {@link SecureRandom#SecureRandom()}. @see SecureRandom#SecureRandom() @since 3.16.0
java
src/main/java/org/apache/commons/lang3/RandomUtils.java
250
[]
RandomUtils
true
1
6.16
apache/commons-lang
2,896
javadoc
false
get_dagrun
def get_dagrun(self, session: Session = NEW_SESSION) -> DagRun: """ Return the DagRun for this TaskInstance. :param session: SQLAlchemy ORM Session :return: DagRun """ info: Any = inspect(self) if info.attrs.dag_run.loaded_value is not NO_VALUE: if getattr(self, "task", None) is not None: if TYPE_CHECKING: assert self.task self.dag_run.dag = self.task.dag return self.dag_run dr = self._get_dagrun(self.dag_id, self.run_id, session) if getattr(self, "task", None) is not None: if TYPE_CHECKING: assert self.task dr.dag = self.task.dag # Record it in the instance for next time. This means that `self.logical_date` will work correctly set_committed_value(self, "dag_run", dr) return dr
Return the DagRun for this TaskInstance. :param session: SQLAlchemy ORM Session :return: DagRun
python
airflow-core/src/airflow/models/taskinstance.py
1,005
[ "self", "session" ]
DagRun
true
6
7.6
apache/airflow
43,597
sphinx
false
_prepareBenchpressSetup
async function _prepareBenchpressSetup(): Promise<BenchpressSetup> { const module = await loadBenchpressModule(); const { SeleniumWebDriverAdapter, Options, JsonFileReporter, RegressionSlopeValidator, Validator, MultiReporter, ConsoleReporter, SizeValidator, MultiMetric, Runner, } = module; let runId = randomUUID(); if (process.env.GIT_SHA) { runId = process.env.GIT_SHA + ' ' + runId; } const testOutputDirectory = process.env.TEST_UNDECLARED_OUTPUTS_DIR; if (testOutputDirectory === undefined) { throw new Error( 'Unexpected execution outside of a Bazel test. ' + 'Missing `TEST_UNDECLARED_OUTPUTS_DIR` environment variable.', ); } const providers: benchpress.StaticProvider[] = [ SeleniumWebDriverAdapter.PROTRACTOR_PROVIDERS, {provide: Options.FORCE_GC, useValue: globalOptions.forceGc}, {provide: Options.DEFAULT_DESCRIPTION, useValue: {'runId': runId}}, JsonFileReporter.PROVIDERS, {provide: JsonFileReporter.PATH, useValue: testOutputDirectory}, ]; if (!globalOptions.dryRun) { providers.push({provide: Validator, useExisting: RegressionSlopeValidator}); providers.push({ provide: RegressionSlopeValidator.SAMPLE_SIZE, useValue: globalOptions.sampleSize, }); providers.push(MultiReporter.provideWith([ConsoleReporter, JsonFileReporter])); } else { providers.push({provide: Validator, useExisting: SizeValidator}); providers.push({provide: SizeValidator.SAMPLE_SIZE, useValue: 1}); providers.push(MultiReporter.provideWith([])); providers.push(MultiMetric.provideWith([])); } return { runner: new Runner(providers), module, }; }
@license Copyright Google LLC Use of this source code is governed by an MIT-style license that can be found in the LICENSE file at https://angular.io/license
typescript
modules/utilities/perf_util.ts
71
[]
true
5
6.56
angular/angular
99,544
jsdoc
true
from_spmatrix
def from_spmatrix(cls, data: _SparseMatrixLike) -> Self: """ Create a SparseArray from a scipy.sparse matrix. Parameters ---------- data : scipy.sparse.sp_matrix This should be a SciPy sparse matrix where the size of the second dimension is 1. In other words, a sparse matrix with a single column. Returns ------- SparseArray Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.coo_matrix((4, 1)) >>> pd.arrays.SparseArray.from_spmatrix(mat) [0.0, 0.0, 0.0, 0.0] Fill: 0.0 IntIndex Indices: array([], dtype=int32) """ length, ncol = data.shape if ncol != 1: raise ValueError(f"'data' must have a single column, not '{ncol}'") # our sparse index classes require that the positions be strictly # increasing. So we need to sort loc, and arr accordingly. data_csc = data.tocsc() data_csc.sort_indices() arr = data_csc.data idx = data_csc.indices zero = np.array(0, dtype=arr.dtype).item() dtype = SparseDtype(arr.dtype, zero) index = IntIndex(length, idx) return cls._simple_new(arr, index, dtype)
Create a SparseArray from a scipy.sparse matrix. Parameters ---------- data : scipy.sparse.sp_matrix This should be a SciPy sparse matrix where the size of the second dimension is 1. In other words, a sparse matrix with a single column. Returns ------- SparseArray Examples -------- >>> import scipy.sparse >>> mat = scipy.sparse.coo_matrix((4, 1)) >>> pd.arrays.SparseArray.from_spmatrix(mat) [0.0, 0.0, 0.0, 0.0] Fill: 0.0 IntIndex Indices: array([], dtype=int32)
python
pandas/core/arrays/sparse/array.py
522
[ "cls", "data" ]
Self
true
2
8.16
pandas-dev/pandas
47,362
numpy
false
resolveTargetType
protected @Nullable Class<?> resolveTargetType(TypedStringValue value) throws ClassNotFoundException { if (value.hasTargetType()) { return value.getTargetType(); } return value.resolveTargetType(this.beanFactory.getBeanClassLoader()); }
Resolve the target type in the given TypedStringValue. @param value the TypedStringValue to resolve @return the resolved target type (or {@code null} if none specified) @throws ClassNotFoundException if the specified type cannot be resolved @see TypedStringValue#resolveTargetType
java
spring-beans/src/main/java/org/springframework/beans/factory/support/BeanDefinitionValueResolver.java
322
[ "value" ]
true
2
7.28
spring-projects/spring-framework
59,386
javadoc
false
deprecate
def deprecate( klass: type[Warning], name: str, alternative: Callable[..., Any], version: str, alt_name: str | None = None, stacklevel: int = 2, msg: str | None = None, ) -> Callable[[F], F]: """ Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- klass : Warning The warning class to use. name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.' """ alt_name = alt_name or alternative.__name__ warning_msg = msg or f"{name} is deprecated, use {alt_name} instead." @wraps(alternative) def wrapper(*args, **kwargs) -> Callable[..., Any]: warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) # adding deprecated directive to the docstring msg = msg or f"Use `{alt_name}` instead." doc_error_msg = ( "deprecate needs a correctly formatted docstring in " "the target function (should have a one liner short " "summary, and opening quotes should be in their own " f"line). Found:\n{alternative.__doc__}" ) # when python is running in optimized mode (i.e. `-OO`), docstrings are # removed, so we check that a docstring with correct formatting is used # but we allow empty docstrings if alternative.__doc__: if alternative.__doc__.count("\n") < 3: raise AssertionError(doc_error_msg) empty1, summary, empty2, doc_string = alternative.__doc__.split("\n", 3) if empty1 or (empty2 and not summary): raise AssertionError(doc_error_msg) wrapper.__doc__ = dedent( f""" {summary.strip()} .. deprecated:: {version} {msg} {dedent(doc_string)}""" ) # error: Incompatible return value type (got "Callable[[VarArg(Any), KwArg(Any)], # Callable[...,Any]]", expected "Callable[[F], F]") return wrapper  # type: ignore[return-value]
Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- klass : Warning The warning class to use. name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.'
python
pandas/util/_decorators.py
29
[ "klass", "name", "alternative", "version", "alt_name", "stacklevel", "msg" ]
Callable[[F], F]
true
9
6.8
pandas-dev/pandas
47,362
numpy
false
setConfigLocations
public void setConfigLocations(String @Nullable ... locations) { if (locations != null) { Assert.noNullElements(locations, "Config locations must not be null"); this.configLocations = new String[locations.length]; for (int i = 0; i < locations.length; i++) { this.configLocations[i] = resolvePath(locations[i]).trim(); } } else { this.configLocations = null; } }
Set the config locations for this application context. <p>If not set, the implementation may use a default as appropriate.
java
spring-context/src/main/java/org/springframework/context/support/AbstractRefreshableConfigApplicationContext.java
76
[]
void
true
3
6.88
spring-projects/spring-framework
59,386
javadoc
false
hasPrevious
@Override public boolean hasPrevious() { checkTokenized(); return tokenPos > 0; }
Checks whether there are any previous tokens that can be iterated to. @return true if there are previous tokens.
java
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
578
[]
true
1
6.88
apache/commons-lang
2,896
javadoc
false
_validate_extra_conf
def _validate_extra_conf(conf: dict[Any, Any]) -> bool: """ Check configuration values are either strings or ints. :param conf: configuration variable """ if conf: if not isinstance(conf, dict): raise ValueError("'conf' argument must be a dict") if not all(isinstance(v, (str, int)) and v != "" for v in conf.values()): raise ValueError("'conf' values must be either strings or ints") return True
Check configuration values are either strings or ints. :param conf: configuration variable
python
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/analyticdb_spark.py
315
[ "conf" ]
bool
true
5
6.4
apache/airflow
43,597
sphinx
false
getX509CertificateChain
public List<? extends X509Certificate> getX509CertificateChain() { try { final Certificate[] certificates = store.getCertificateChain(alias); if (certificates == null || certificates.length == 0) { return List.of(); } return Stream.of(certificates).filter(c -> c instanceof X509Certificate).map(X509Certificate.class::cast).toList(); } catch (KeyStoreException e) { throw exceptionHandler.apply(e); } }
If this entry is a private key entry (see {@link #isKeyEntry()}), returns the certificate chain that is stored in the entry. If the entry contains any certificates that are not X.509 certificates, they are ignored. If the entry is not a private key entry, or it does not contain any X.509 certificates, then an empty list is returned.
java
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/KeyStoreUtil.java
286
[]
true
4
6.88
elastic/elasticsearch
75,680
javadoc
false
findProperty
private <T> @Nullable ConfigurationProperty findProperty(ConfigurationPropertyName name, Bindable<T> target, Context context) { if (name.isEmpty() || target.hasBindRestriction(BindRestriction.NO_DIRECT_PROPERTY)) { return null; } for (ConfigurationPropertySource source : context.getSources()) { ConfigurationProperty property = source.getConfigurationProperty(name); if (property != null) { return property; } } return null; }
Bind the specified target {@link Bindable} using this binder's {@link ConfigurationPropertySource property sources} or create a new instance using the type of the {@link Bindable} if the result of the binding is {@code null}. @param name the configuration property name to bind @param target the target bindable @param handler the bind handler (may be {@code null}) @param <T> the bound or created type @return the bound or created object @since 2.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
473
[ "name", "target", "context" ]
ConfigurationProperty
true
4
7.92
spring-projects/spring-boot
79,428
javadoc
false
append
public static Formatter append(final CharSequence seq, final Formatter formatter, final int flags, final int width, final int precision, final char padChar, final CharSequence ellipsis) { Validate.isTrue(ellipsis == null || precision < 0 || ellipsis.length() <= precision, "Specified ellipsis '%1$s' exceeds precision of %2$s", ellipsis, Integer.valueOf(precision)); final StringBuilder buf = new StringBuilder(seq); if (precision >= 0 && precision < seq.length()) { final CharSequence actualEllipsis = ObjectUtils.getIfNull(ellipsis, StringUtils.EMPTY); buf.replace(precision - actualEllipsis.length(), seq.length(), actualEllipsis.toString()); } final boolean leftJustify = (flags & FormattableFlags.LEFT_JUSTIFY) == FormattableFlags.LEFT_JUSTIFY; for (int i = buf.length(); i < width; i++) { buf.insert(leftJustify ? i : 0, padChar); } formatter.format(buf.toString()); return formatter; }
Handles the common {@link Formattable} operations of truncate-pad-append. @param seq the string to handle, not null. @param formatter the destination formatter, not null. @param flags the flags for formatting, see {@link Formattable}. @param width the width of the output, see {@link Formattable}. @param precision the precision of the output, see {@link Formattable}. @param padChar the pad character to use. @param ellipsis the ellipsis to use when precision dictates truncation, null or empty causes a hard truncation. @return the {@code formatter} instance, not null.
java
src/main/java/org/apache/commons/lang3/text/FormattableUtils.java
94
[ "seq", "formatter", "flags", "width", "precision", "padChar", "ellipsis" ]
Formatter
true
7
8.08
apache/commons-lang
2,896
javadoc
false
cloneException
private static <T extends Throwable> @Nullable T cloneException(T exception) { try { return SerializationUtils.clone(exception); } catch (Exception ex) { return null; // exception parameter cannot be cloned } }
Rewrite the call stack of the specified {@code exception} so that it matches the current call stack up to (included) the specified method invocation. <p>Clone the specified exception. If the exception is not {@code serializable}, the original exception is returned. If no common ancestor can be found, returns the original exception. <p>Used to make sure that a cached exception has a valid invocation context. @param exception the exception to merge with the current call stack @param className the class name of the common ancestor @param methodName the method name of the common ancestor @return a clone exception with a rewritten call stack composed of the current call stack up to (included) the common ancestor specified by the {@code className} and {@code methodName} arguments, followed by stack trace elements of the specified {@code exception} after the common ancestor.
java
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/CacheResultInterceptor.java
148
[ "exception" ]
T
true
2
7.92
spring-projects/spring-framework
59,386
javadoc
false
round
def round(self, decimals: int = 0, *args, **kwargs): """ Round each value in the array a to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- NumericArray Rounded values of the NumericArray. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Series.round : Round values of a Series. """ if self.dtype.kind == "b": return self nv.validate_round(args, kwargs) values = np.round(self._data, decimals=decimals, **kwargs) # Usually we'll get same type as self, but ndarray[bool] casts to float return self._maybe_mask_result(values, self._mask.copy())
Round each value in the array a to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- NumericArray Rounded values of the NumericArray. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Series.round : Round values of a Series.
python
pandas/core/arrays/masked.py
463
[ "self", "decimals" ]
true
2
6.88
pandas-dev/pandas
47,362
numpy
false
invokeBeanSupplier
private T invokeBeanSupplier(@Nullable Executable executable, ThrowingSupplier<T> beanSupplier) { if (executable instanceof Method method) { return SimpleInstantiationStrategy.instantiateWithFactoryMethod(method, beanSupplier); } return beanSupplier.get(); }
Return a new {@link BeanInstanceSupplier} instance that uses direct bean name injection shortcuts for specific parameters. @param beanNames the bean names to use as shortcut (aligned with the constructor or factory method parameters) @return a new {@link BeanInstanceSupplier} instance that uses the given shortcut bean names @since 6.2
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
227
[ "executable", "beanSupplier" ]
T
true
2
7.28
spring-projects/spring-framework
59,386
javadoc
false
cloneBuffer
function cloneBuffer(buffer, isDeep) { if (isDeep) { return buffer.slice(); } var length = buffer.length, result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length); buffer.copy(result); return result; }
Creates a clone of `buffer`. @private @param {Buffer} buffer The buffer to clone. @param {boolean} [isDeep] Specify a deep clone. @returns {Buffer} Returns the cloned buffer.
javascript
lodash.js
4,607
[ "buffer", "isDeep" ]
false
3
6.08
lodash/lodash
61,490
jsdoc
false
_extract_template_params
def _extract_template_params(bundle_instance: BaseDagBundle) -> dict: """ Extract template parameters from a bundle instance's view_url_template method. :param bundle_instance: The bundle instance to extract parameters from :return: Dictionary of template parameters """ import re params: dict[str, str] = {} template = bundle_instance.view_url_template() if not template: return params # Extract template placeholders using regex # This matches {placeholder} patterns in the template PLACEHOLDER_PATTERN = re.compile(r"\{([^}]+)\}") placeholders = PLACEHOLDER_PATTERN.findall(template) # Extract values for each placeholder found in the template for placeholder in placeholders: field_value = getattr(bundle_instance, placeholder, None) if field_value: params[placeholder] = field_value return params
Extract template parameters from a bundle instance's view_url_template method. :param bundle_instance: The bundle instance to extract parameters from :return: Dictionary of template parameters
python
airflow-core/src/airflow/dag_processing/bundles/manager.py
297
[ "bundle_instance" ]
dict
true
4
7.76
apache/airflow
43,597
sphinx
false
newInstance
public static CorrelationIdConverter newInstance(String @Nullable [] options) { String pattern = (!ObjectUtils.isEmpty(options)) ? options[0] : null; return new CorrelationIdConverter(CorrelationIdFormatter.of(pattern)); }
Factory method to create a new {@link CorrelationIdConverter}. @param options options, may be null or first element contains name of property to format. @return instance of PropertiesPatternConverter.
java
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/CorrelationIdConverter.java
65
[ "options" ]
CorrelationIdConverter
true
2
7.36
spring-projects/spring-boot
79,428
javadoc
false
_allclose_dense_sparse
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9): """Check allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : {array-like, sparse matrix} First array to compare. y : {array-like, sparse matrix} Second array to compare. rtol : float, default=1e-7 Relative tolerance; see numpy.allclose. atol : float, default=1e-9 absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0. """ if sp.issparse(x) and sp.issparse(y): x = x.tocsr() y = y.tocsr() x.sum_duplicates() y.sum_duplicates() return ( np.array_equal(x.indices, y.indices) and np.array_equal(x.indptr, y.indptr) and np.allclose(x.data, y.data, rtol=rtol, atol=atol) ) elif not sp.issparse(x) and not sp.issparse(y): return np.allclose(x, y, rtol=rtol, atol=atol) raise ValueError( "Can only compare two sparse matrices, not a sparse matrix and an array" )
Check allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : {array-like, sparse matrix} First array to compare. y : {array-like, sparse matrix} Second array to compare. rtol : float, default=1e-7 Relative tolerance; see numpy.allclose. atol : float, default=1e-9 absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0.
python
sklearn/utils/validation.py
2,184
[ "x", "y", "rtol", "atol" ]
false
7
6.08
scikit-learn/scikit-learn
64,340
numpy
false
resetIdempotentProducerId
private void resetIdempotentProducerId() { if (isTransactional()) throw new IllegalStateException("Cannot reset producer state for a transactional producer. " + "You must either abort the ongoing transaction or reinitialize the transactional producer instead"); log.debug("Resetting idempotent producer ID. ID and epoch before reset are {}", this.producerIdAndEpoch); setProducerIdAndEpoch(ProducerIdAndEpoch.NONE); transitionTo(State.UNINITIALIZED); }
This method resets the producer ID and epoch and sets the state to UNINITIALIZED, which will trigger a new InitProducerId request. This method is only called when the producer epoch is exhausted; we will bump the epoch instead.
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
620
[]
void
true
2
6.56
apache/kafka
31,560
javadoc
false
ensure_key_mapped
def ensure_key_mapped( values: ArrayLike | Index | Series, key: Callable | None, levels=None ) -> ArrayLike | Index | Series: """ Applies a callable key function to the values function and checks that the resulting value has the same shape. Can be called on Index subclasses, Series, DataFrames, or ndarrays. Parameters ---------- values : Series, DataFrame, Index subclass, or ndarray key : Optional[Callable], key to be called on the values array levels : Optional[List], if values is a MultiIndex, list of levels to apply the key to. """ from pandas.core.indexes.api import Index if not key: return values if isinstance(values, ABCMultiIndex): return _ensure_key_mapped_multiindex(values, key, level=levels) result = key(values.copy()) if len(result) != len(values): raise ValueError( "User-provided `key` function must not change the shape of the array." ) try: if isinstance( values, Index ): # convert to a new Index subclass, not necessarily the same result = Index(result, tupleize_cols=False) else: # try to revert to original type otherwise type_of_values = type(values) # error: Too many arguments for "ExtensionArray" result = type_of_values(result) # type: ignore[call-arg] except TypeError as err: raise TypeError( f"User-provided `key` function returned an invalid type {type(result)} \ which could not be converted to {type(values)}." ) from err return result
Applies a callable key function to the values function and checks that the resulting value has the same shape. Can be called on Index subclasses, Series, DataFrames, or ndarrays. Parameters ---------- values : Series, DataFrame, Index subclass, or ndarray key : Optional[Callable], key to be called on the values array levels : Optional[List], if values is a MultiIndex, list of levels to apply the key to.
python
pandas/core/sorting.py
551
[ "values", "key", "levels" ]
ArrayLike | Index | Series
true
6
6.88
pandas-dev/pandas
47,362
numpy
false
get_sorted_triggers
def get_sorted_triggers(cls, capacity: int, alive_triggerer_ids: list[int] | Select, session: Session): """ Get sorted triggers based on capacity and alive triggerer ids. :param capacity: The capacity of the triggerer. :param alive_triggerer_ids: The alive triggerer ids as a list or a select query. :param session: The database session. """ result: list[Row[Any]] = [] # Add triggers associated to callbacks first, then tasks, then assets # It prioritizes callbacks, then DAGs over event driven scheduling which is fair queries = [ # Callback triggers select(cls.id) .join(Callback, isouter=False) .order_by(Callback.priority_weight.desc(), cls.created_date), # Task Instance triggers select(cls.id) .prefix_with("STRAIGHT_JOIN", dialect="mysql") .join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=False) .where(or_(cls.triggerer_id.is_(None), cls.triggerer_id.not_in(alive_triggerer_ids))) .order_by(coalesce(TaskInstance.priority_weight, 0).desc(), cls.created_date), # Asset triggers select(cls.id).where(cls.assets.any()).order_by(cls.created_date), ] # Process each query while avoiding unnecessary queries when capacity is reached for query in queries: remaining_capacity = capacity - len(result) if remaining_capacity <= 0: break # Limit the number of triggers selected per loop to avoid one triggerer # picking up too many triggers and starving other triggerers for HA setup. remaining_capacity = min(remaining_capacity, cls.max_trigger_to_select_per_loop) locked_query = with_row_locks(query.limit(remaining_capacity), session, skip_locked=True) result.extend(session.execute(locked_query).all()) return result
Get sorted triggers based on capacity and alive triggerer ids. :param capacity: The capacity of the triggerer. :param alive_triggerer_ids: The alive triggerer ids as a list or a select query. :param session: The database session.
python
airflow-core/src/airflow/models/trigger.py
365
[ "cls", "capacity", "alive_triggerer_ids", "session" ]
true
3
7.2
apache/airflow
43,597
sphinx
false
equalsConstructorArgumentValues
private boolean equalsConstructorArgumentValues(AbstractBeanDefinition other) { if (!hasConstructorArgumentValues()) { return !other.hasConstructorArgumentValues(); } return ObjectUtils.nullSafeEquals(this.constructorArgumentValues, other.constructorArgumentValues); }
Clone this bean definition. To be implemented by concrete subclasses. @return the cloned bean definition object
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanDefinition.java
1,325
[ "other" ]
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
cleanUp
void cleanUp() { if (threadStarted) { return; } Reference<?> reference; while ((reference = queue.poll()) != null) { /* * This is for the benefit of phantom references. Weak and soft references will have already * been cleared by this point. */ reference.clear(); try { ((FinalizableReference) reference).finalizeReferent(); } catch (Throwable t) { logger.log(Level.SEVERE, "Error cleaning up after reference.", t); } } }
Repeatedly dequeues references from the queue and invokes {@link FinalizableReference#finalizeReferent()} on them until the queue is empty. This method is a no-op if the background thread was created successfully.
java
android/guava/src/com/google/common/base/FinalizableReferenceQueue.java
236
[]
void
true
4
6.24
google/guava
51,352
javadoc
false
maybeTransitionToErrorState
public synchronized void maybeTransitionToErrorState(RuntimeException exception) { if (exception instanceof ClusterAuthorizationException || exception instanceof TransactionalIdAuthorizationException || exception instanceof ProducerFencedException || exception instanceof UnsupportedVersionException || exception instanceof InvalidPidMappingException) { transitionToFatalError(exception); } else if (isTransactional()) { // RetriableExceptions from the Sender thread are converted to Abortable errors // because they indicate that the transaction cannot be completed after all retry attempts. // This conversion ensures the application layer treats these errors as abortable, // preventing duplicate message delivery. if (exception instanceof RetriableException || exception instanceof InvalidTxnStateException) { exception = new TransactionAbortableException("Transaction Request was aborted after exhausting retries.", exception); } if (needToTriggerEpochBumpFromClient() && !isCompleting()) { clientSideEpochBumpRequired = true; } transitionToAbortableError(exception); } }
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with the lowest sequence number. @return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition. If there are no inflight requests being tracked for this partition, this method will return RecordBatch.NO_SEQUENCE.
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
766
[ "exception" ]
void
true
11
6.88
apache/kafka
31,560
javadoc
false
toIntegerObject
public static Integer toIntegerObject(final boolean bool, final Integer trueValue, final Integer falseValue) { return bool ? trueValue : falseValue; }
Converts a boolean to an Integer specifying the conversion values. <pre> BooleanUtils.toIntegerObject(true, Integer.valueOf(1), Integer.valueOf(0)) = Integer.valueOf(1) BooleanUtils.toIntegerObject(false, Integer.valueOf(1), Integer.valueOf(0)) = Integer.valueOf(0) </pre> @param bool the to convert @param trueValue the value to return if {@code true}, may be {@code null} @param falseValue the value to return if {@code false}, may be {@code null} @return the appropriate value
java
src/main/java/org/apache/commons/lang3/BooleanUtils.java
958
[ "bool", "trueValue", "falseValue" ]
Integer
true
2
7.2
apache/commons-lang
2,896
javadoc
false
expireAfterWrite
@Deprecated // GoodTime @CanIgnoreReturnValue public CacheBuilder<K, V> expireAfterWrite(long duration, TimeUnit unit) { checkState( expireAfterWriteNanos == UNSET_INT, "expireAfterWrite was already set to %s ns", expireAfterWriteNanos); checkArgument(duration >= 0, "duration cannot be negative: %s %s", duration, unit); this.expireAfterWriteNanos = unit.toNanos(duration); return this; }
Specifies that each entry should be automatically removed from the cache once a fixed duration has elapsed after the entry's creation, or the most recent replacement of its value. <p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long) maximumSize}{@code (0)}, ignoring any otherwise-specified maximum size or weight. This can be useful in testing, or to disable caching temporarily without a code change. <p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or write operations. Expired entries are cleaned up as part of the routine maintenance described in the class javadoc. <p>If you can represent the duration as a {@link Duration} (which should be preferred when feasible), use {@link #expireAfterWrite(Duration)} instead. @param duration the length of time after an entry is created that it should be automatically removed @param unit the unit that {@code duration} is expressed in @return this {@code CacheBuilder} instance (for chaining) @throws IllegalArgumentException if {@code duration} is negative @throws IllegalStateException if {@link #expireAfterWrite} was already set @deprecated Use {@link #expireAfterWrite(Duration)} instead.
java
android/guava/src/com/google/common/cache/CacheBuilder.java
756
[ "duration", "unit" ]
true
1
6.24
google/guava
51,352
javadoc
false
transformConstructorBodyWorker
function transformConstructorBodyWorker(statementsOut: Statement[], statementsIn: NodeArray<Statement>, statementOffset: number, superPath: readonly number[], superPathDepth: number, initializerStatements: readonly Statement[]) { const superStatementIndex = superPath[superPathDepth]; const superStatement = statementsIn[superStatementIndex]; addRange(statementsOut, visitNodes(statementsIn, visitor, isStatement, statementOffset, superStatementIndex - statementOffset)); if (isTryStatement(superStatement)) { const tryBlockStatements: Statement[] = []; transformConstructorBodyWorker( tryBlockStatements, superStatement.tryBlock.statements, /*statementOffset*/ 0, superPath, superPathDepth + 1, initializerStatements, ); const tryBlockStatementsArray = factory.createNodeArray(tryBlockStatements); setTextRange(tryBlockStatementsArray, superStatement.tryBlock.statements); statementsOut.push(factory.updateTryStatement( superStatement, factory.updateBlock(superStatement.tryBlock, tryBlockStatements), visitNode(superStatement.catchClause, visitor, isCatchClause), visitNode(superStatement.finallyBlock, visitor, isBlock), )); } else { addRange(statementsOut, visitNodes(statementsIn, visitor, isStatement, superStatementIndex, 1)); addRange(statementsOut, initializerStatements); } addRange(statementsOut, visitNodes(statementsIn, visitor, isStatement, superStatementIndex + 1)); }
Determines whether to emit a function-like declaration. We should not emit the declaration if it does not have a body. @param node The declaration node.
typescript
src/compiler/transformers/ts.ts
1,345
[ "statementsOut", "statementsIn", "statementOffset", "superPath", "superPathDepth", "initializerStatements" ]
false
3
6.08
microsoft/TypeScript
107,154
jsdoc
false
generate_config_file
def generate_config_file( self, eks_cluster_name: str, pod_namespace: str | None, credentials_file, ) -> Generator[str, None, None]: """ Write the kubeconfig file given an EKS Cluster. :param eks_cluster_name: The name of the cluster to generate kubeconfig file for. :param pod_namespace: The namespace to run within kubernetes. """ args = "" if self.region_name is not None: args = args + f" --region-name {self.region_name}" # We need to determine which python executable the host is running in order to correctly # call the eks_get_token.py script. python_executable = f"python{sys.version_info[0]}.{sys.version_info[1]}" # Set up the client eks_client = self.conn session = self.get_session() # Get cluster details cluster = eks_client.describe_cluster(name=eks_cluster_name) cluster_cert = cluster["cluster"]["certificateAuthority"]["data"] cluster_ep = cluster["cluster"]["endpoint"] os.environ["AWS_STS_REGIONAL_ENDPOINTS"] = "regional" try: sts_url = f"{StsHook(region_name=session.region_name).conn_client_meta.endpoint_url}/?Action=GetCallerIdentity&Version=2011-06-15" finally: del os.environ["AWS_STS_REGIONAL_ENDPOINTS"] cluster_config = { "apiVersion": "v1", "kind": "Config", "clusters": [ { "cluster": {"server": cluster_ep, "certificate-authority-data": cluster_cert}, "name": eks_cluster_name, } ], "contexts": [ { "context": { "cluster": eks_cluster_name, "namespace": pod_namespace, "user": _POD_USERNAME, }, "name": _CONTEXT_NAME, } ], "current-context": _CONTEXT_NAME, "preferences": {}, "users": [ { "name": _POD_USERNAME, "user": { "exec": { "apiVersion": AUTHENTICATION_API_VERSION, "command": "sh", "args": [ "-c", COMMAND.format( credentials_file=credentials_file, sts_url=sts_url, python_executable=python_executable, eks_cluster_name=eks_cluster_name, args=args, ), ], "interactiveMode": "Never", } }, } ], } config_text = yaml.dump(cluster_config, default_flow_style=False) with tempfile.NamedTemporaryFile(mode="w") as config_file: config_file.write(config_text) 
config_file.flush() yield config_file.name
Write the kubeconfig file given an EKS Cluster. :param eks_cluster_name: The name of the cluster to generate kubeconfig file for. :param pod_namespace: The namespace to run within kubernetes.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
595
[ "self", "eks_cluster_name", "pod_namespace", "credentials_file" ]
Generator[str, None, None]
true
2
7.12
apache/airflow
43,597
sphinx
false
expireAfterWrite
@J2ObjCIncompatible @GwtIncompatible // Duration @SuppressWarnings("GoodTime") // Duration decomposition @IgnoreJRERequirement // No more dangerous than wherever the caller got the Duration from @CanIgnoreReturnValue public CacheBuilder<K, V> expireAfterWrite(Duration duration) { return expireAfterWrite(toNanosSaturated(duration), NANOSECONDS); }
Specifies that each entry should be automatically removed from the cache once a fixed duration has elapsed after the entry's creation, or the most recent replacement of its value. <p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long) maximumSize}{@code (0)}, ignoring any otherwise-specified maximum size or weight. This can be useful in testing, or to disable caching temporarily without a code change. <p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or write operations. Expired entries are cleaned up as part of the routine maintenance described in the class javadoc. @param duration the length of time after an entry is created that it should be automatically removed @return this {@code CacheBuilder} instance (for chaining) @throws IllegalArgumentException if {@code duration} is negative @throws IllegalStateException if {@link #expireAfterWrite} was already set @throws ArithmeticException for durations greater than +/- approximately 292 years @since 33.3.0 (but since 25.0 in the JRE <a href="https://github.com/google/guava#guava-google-core-libraries-for-java">flavor</a>)
java
android/guava/src/com/google/common/cache/CacheBuilder.java
724
[ "duration" ]
true
1
6.24
google/guava
51,352
javadoc
false
isNarrowingExpression
function isNarrowingExpression(expr: Expression): boolean { switch (expr.kind) { case SyntaxKind.Identifier: case SyntaxKind.ThisKeyword: return true; case SyntaxKind.PropertyAccessExpression: case SyntaxKind.ElementAccessExpression: return containsNarrowableReference(expr); case SyntaxKind.CallExpression: return hasNarrowableArgument(expr as CallExpression); case SyntaxKind.ParenthesizedExpression: if (isJSDocTypeAssertion(expr)) { return false; } // fallthrough case SyntaxKind.NonNullExpression: return isNarrowingExpression((expr as ParenthesizedExpression | NonNullExpression).expression); case SyntaxKind.BinaryExpression: return isNarrowingBinaryExpression(expr as BinaryExpression); case SyntaxKind.PrefixUnaryExpression: return (expr as PrefixUnaryExpression).operator === SyntaxKind.ExclamationToken && isNarrowingExpression((expr as PrefixUnaryExpression).operand); case SyntaxKind.TypeOfExpression: return isNarrowingExpression((expr as TypeOfExpression).expression); } return false; }
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names. @param symbolTable - The symbol table which node will be added to. @param parent - node's parent declaration. @param node - The declaration to be added to the symbol table @param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.) @param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
typescript
src/compiler/binder.ts
1,241
[ "expr" ]
true
3
6.88
microsoft/TypeScript
107,154
jsdoc
false
format
@Deprecated @Override public StringBuffer format(final Object obj, final StringBuffer toAppendTo, final FieldPosition pos) { if (obj instanceof Date) { return format((Date) obj, toAppendTo); } if (obj instanceof Calendar) { return format((Calendar) obj, toAppendTo); } if (obj instanceof Long) { return format(((Long) obj).longValue(), toAppendTo); } throw new IllegalArgumentException("Unknown class: " + ClassUtils.getName(obj, "<null>")); }
Formats a {@link Date}, {@link Calendar} or {@link Long} (milliseconds) object. @deprecated Use {{@link #format(Date)}, {{@link #format(Calendar)}, {{@link #format(long)}. @param obj the object to format. @param toAppendTo the buffer to append to. @param pos the position; ignored. @return the buffer passed in.
java
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
1,238
[ "obj", "toAppendTo", "pos" ]
StringBuffer
true
4
7.6
apache/commons-lang
2,896
javadoc
false
toArray
function toArray(value) { if (!value) { return []; } if (isArrayLike(value)) { return isString(value) ? stringToArray(value) : copyArray(value); } if (symIterator && value[symIterator]) { return iteratorToArray(value[symIterator]()); } var tag = getTag(value), func = tag == mapTag ? mapToArray : (tag == setTag ? setToArray : values); return func(value); }
Converts `value` to an array. @static @since 0.1.0 @memberOf _ @category Lang @param {*} value The value to convert. @returns {Array} Returns the converted array. @example _.toArray({ 'a': 1, 'b': 2 }); // => [1, 2] _.toArray('abc'); // => ['a', 'b', 'c'] _.toArray(1); // => [] _.toArray(null); // => []
javascript
lodash.js
12,426
[ "value" ]
false
8
7.68
lodash/lodash
61,490
jsdoc
false
_if_else
def _if_else( cls, cond: npt.NDArray[np.bool_] | bool, left: ArrayLike | Scalar, right: ArrayLike | Scalar, ) -> pa.Array: """ Choose values based on a condition. Analogous to pyarrow.compute.if_else, with logic to fallback to numpy for unsupported types. Parameters ---------- cond : npt.NDArray[np.bool_] or bool left : ArrayLike | Scalar right : ArrayLike | Scalar Returns ------- pa.Array """ try: return pc.if_else(cond, left, right) except pa.ArrowNotImplementedError: pass def _to_numpy_and_type(value) -> tuple[np.ndarray, pa.DataType | None]: if isinstance(value, (pa.Array, pa.ChunkedArray)): pa_type = value.type elif isinstance(value, pa.Scalar): pa_type = value.type value = value.as_py() else: pa_type = None return np.array(value, dtype=object), pa_type left, left_type = _to_numpy_and_type(left) right, right_type = _to_numpy_and_type(right) pa_type = left_type or right_type result = np.where(cond, left, right) return pa.array(result, type=pa_type, from_pandas=is_nan_na())
Choose values based on a condition. Analogous to pyarrow.compute.if_else, with logic to fallback to numpy for unsupported types. Parameters ---------- cond : npt.NDArray[np.bool_] or bool left : ArrayLike | Scalar right : ArrayLike | Scalar Returns ------- pa.Array
python
pandas/core/arrays/arrow/array.py
2,501
[ "cls", "cond", "left", "right" ]
pa.Array
true
5
6.4
pandas-dev/pandas
47,362
numpy
false
BinarySection
BinarySection(BinarySection &&) = delete;
Patcher used to apply simple changes to sections of the input binary.
cpp
bolt/include/bolt/Core/BinarySection.h
108
[]
true
2
6.64
llvm/llvm-project
36,021
doxygen
false
createTypeDescriptor
private TypeDescriptor createTypeDescriptor(TypeElement element) { TypeDescriptor descriptor = new TypeDescriptor(); process(descriptor, element.asType()); this.typeDescriptors.put(element, descriptor); return descriptor; }
Return the {@link PrimitiveType} of the specified type or {@code null} if the type does not represent a valid wrapper type. @param typeMirror a type @return the primitive type or {@code null} if the type is not a wrapper type
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/TypeUtils.java
228
[ "element" ]
TypeDescriptor
true
1
6.72
spring-projects/spring-boot
79,428
javadoc
false
randomFloat
public float randomFloat() { return randomFloat(0, Float.MAX_VALUE); }
Generates a random float between 0 (inclusive) and Float.MAX_VALUE (exclusive). @return the random float. @see #randomFloat(float, float) @since 3.16.0
java
src/main/java/org/apache/commons/lang3/RandomUtils.java
358
[]
true
1
6.32
apache/commons-lang
2,896
javadoc
false
closeNow
public final void closeNow() { if (closed) throw new IllegalStateException("Attempt to close a channel that has already been closed"); handleCloseOnAuthenticationFailure(channel); closed = true; }
Close the channel now, regardless of whether the delay has expired or not.
java
clients/src/main/java/org/apache/kafka/common/network/Selector.java
1,418
[]
void
true
2
7.04
apache/kafka
31,560
javadoc
false
recordsIterator
private Iterator<Record> recordsIterator() { return new AbstractIterator<>() { private final Iterator<? extends RecordBatch> batches = batches().iterator(); private Iterator<Record> records; @Override protected Record makeNext() { if (records != null && records.hasNext()) return records.next(); if (batches.hasNext()) { records = batches.next().iterator(); return makeNext(); } return allDone(); } }; }
Get an iterator over the deep records. @return An iterator over the records
java
clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java
73
[]
true
4
8.08
apache/kafka
31,560
javadoc
false
invokeJoinpointUsingReflection
public static @Nullable Object invokeJoinpointUsingReflection(@Nullable Object target, Method method, @Nullable Object[] args) throws Throwable { // Use reflection to invoke the method. try { Method originalMethod = BridgeMethodResolver.findBridgedMethod(method); ReflectionUtils.makeAccessible(originalMethod); return (COROUTINES_REACTOR_PRESENT && KotlinDetector.isSuspendingFunction(originalMethod) ? KotlinDelegate.invokeSuspendingFunction(originalMethod, target, args) : originalMethod.invoke(target, args)); } catch (InvocationTargetException ex) { // Invoked method threw a checked exception. // We must rethrow it. The client won't see the interceptor. throw ex.getTargetException(); } catch (IllegalArgumentException ex) { throw new AopInvocationException("AOP configuration seems to be invalid: tried calling method [" + method + "] on target [" + target + "]", ex); } catch (IllegalAccessException | InaccessibleObjectException ex) { throw new AopInvocationException("Could not access method [" + method + "]", ex); } }
Invoke the given target via reflection, as part of an AOP method invocation. @param target the target object @param method the method to invoke @param args the arguments for the method @return the invocation result, if any @throws Throwable if thrown by the target method @throws org.springframework.aop.AopInvocationException in case of a reflection error
java
spring-aop/src/main/java/org/springframework/aop/support/AopUtils.java
351
[ "target", "method", "args" ]
Object
true
6
8.08
spring-projects/spring-framework
59,386
javadoc
false
apply_self_termination_task
def apply_self_termination_task( self, worker: CeleryTestWorker, method: TaskTermination.Method, ) -> AsyncResult: """Apply a task that will terminate itself. Args: worker (CeleryTestWorker): Take the queue of this worker. method (TaskTermination.Method): The method to terminate the task. Returns: AsyncResult: The result of applying the task. """ try: self_termination_sig: Signature = { TaskTermination.Method.SIGKILL: self_termination_sigkill.si(), TaskTermination.Method.SYSTEM_EXIT: self_termination_system_exit.si(), TaskTermination.Method.DELAY_TIMEOUT: self_termination_delay_timeout.si(), TaskTermination.Method.EXHAUST_MEMORY: self_termination_exhaust_memory.si(), }[method] return self_termination_sig.apply_async(queue=worker.worker_queue) finally: # If there's an unexpected bug and the termination of the task caused the worker # to crash, this will refresh the container object with the updated container status # which can be asserted/checked during a test (for dev/debug) worker.container.reload()
Apply a task that will terminate itself. Args: worker (CeleryTestWorker): Take the queue of this worker. method (TaskTermination.Method): The method to terminate the task. Returns: AsyncResult: The result of applying the task.
python
t/smoke/operations/task_termination.py
21
[ "self", "worker", "method" ]
AsyncResult
true
1
6.56
celery/celery
27,741
google
false
listConfigResources
default ListConfigResourcesResult listConfigResources() { return listConfigResources(Set.of(), new ListConfigResourcesOptions()); }
List all configuration resources available in the cluster with the default options. <p> This is a convenience method for {@link #listConfigResources(Set, ListConfigResourcesOptions)} with default options. See the overload for more details. @return The ListConfigurationResourcesResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
1,797
[]
ListConfigResourcesResult
true
1
6.16
apache/kafka
31,560
javadoc
false
contains
@Override public boolean contains(String propertyName) { return (getPropertyValue(propertyName) != null || (this.processedProperties != null && this.processedProperties.contains(propertyName))); }
Get the raw property value, if any. @param propertyName the name to search for @return the raw property value, or {@code null} if none found @since 4.0 @see #getPropertyValue(String) @see PropertyValue#getValue()
java
spring-beans/src/main/java/org/springframework/beans/MutablePropertyValues.java
314
[ "propertyName" ]
true
3
7.76
spring-projects/spring-framework
59,386
javadoc
false
logn
def logn(n, x): """ Take log base n of x. If `x` contains negative inputs, the answer is computed and returned in the complex domain. Parameters ---------- n : array_like The integer base(s) in which the log is taken. x : array_like The value(s) whose log base `n` is (are) required. Returns ------- out : ndarray or scalar The log base `n` of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array is returned. Examples -------- >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.logn(2, [4, 8]) array([2., 3.]) >>> np.emath.logn(2, [-4, -8, 8]) array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) n = _fix_real_lt_zero(n) return nx.log(x) / nx.log(n)
Take log base n of x. If `x` contains negative inputs, the answer is computed and returned in the complex domain. Parameters ---------- n : array_like The integer base(s) in which the log is taken. x : array_like The value(s) whose log base `n` is (are) required. Returns ------- out : ndarray or scalar The log base `n` of the `x` value(s). If `x` was a scalar, so is `out`, otherwise an array is returned. Examples -------- >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.logn(2, [4, 8]) array([2., 3.]) >>> np.emath.logn(2, [-4, -8, 8]) array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
python
numpy/lib/_scimath_impl.py
349
[ "n", "x" ]
false
1
6.48
numpy/numpy
31,054
numpy
false
item
def item(self): """ Return the first element of the underlying data as a Python scalar. Returns ------- scalar The first element of Series or Index. Raises ------ ValueError If the data is not length = 1. See Also -------- Index.values : Returns an array representing the data in the Index. Series.head : Returns the first `n` rows. Examples -------- >>> s = pd.Series([1]) >>> s.item() 1 For an index: >>> s = pd.Series([1], index=["a"]) >>> s.index.item() 'a' """ if len(self) == 1: return next(iter(self)) raise ValueError("can only convert an array of size 1 to a Python scalar")
Return the first element of the underlying data as a Python scalar. Returns ------- scalar The first element of Series or Index. Raises ------ ValueError If the data is not length = 1. See Also -------- Index.values : Returns an array representing the data in the Index. Series.head : Returns the first `n` rows. Examples -------- >>> s = pd.Series([1]) >>> s.item() 1 For an index: >>> s = pd.Series([1], index=["a"]) >>> s.index.item() 'a'
python
pandas/core/base.py
401
[ "self" ]
false
2
6.48
pandas-dev/pandas
47,362
unknown
false
getType
String getType(TypeElement element, TypeMirror type) { if (type == null) { return null; } return type.accept(this.typeExtractor, resolveTypeDescriptor(element)); }
Return the type of the specified {@link TypeMirror} including all its generic information. @param element the {@link TypeElement} in which this {@code type} is declared @param type the type to handle @return a representation of the type including all its generic information
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/TypeUtils.java
134
[ "element", "type" ]
String
true
2
7.76
spring-projects/spring-boot
79,428
javadoc
false
getFromAlreadyDoneTrustedFuture
@ParametricNullness @SuppressWarnings("nullness") // TODO(b/147136275): Remove once our checker understands & and |. /* * TODO: b/112550045 - Use this from Futures.getDone when applicable? Note the small difference in * failure message between the two at present. */ final V getFromAlreadyDoneTrustedFuture() throws ExecutionException { @RetainedLocalRef Object localValue = value(); if (localValue == null | localValue instanceof DelegatingToFuture) { throw new IllegalStateException("Cannot get() on a pending future."); } return getDoneValue(localValue); }
Returns the result of this future or throws in case of failure, just like {@link #get()} except that this method <i>also</i> throws if this future is not done. <p>This method computes its result based on the internal state of {@link AbstractFuture}, so it does not necessarily return the same result as {@link #get()} if {@link #get()} has been overridden. Thus, it should be called only on instances of {@link Trusted} or from within {@link #get()} itself.
java
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
265
[]
V
true
2
7.04
google/guava
51,352
javadoc
false
oneHot
public static Boolean oneHot(final Boolean... array) { return Boolean.valueOf(oneHot(ArrayUtils.toPrimitive(array))); }
Performs a one-hot on an array of booleans. <p> This implementation returns true if one, and only one, of the supplied values is true. </p> <p> Null array elements map to false, like {@code Boolean.parseBoolean(null)} and its callers return false. </p> <p> See also <a href="https://en.wikipedia.org/wiki/One-hot">One-hot</a>. </p> @param array an array of {@code boolean}s @return the result of the one-hot operations @throws NullPointerException if {@code array} is {@code null} @throws IllegalArgumentException if {@code array} is empty.
java
src/main/java/org/apache/commons/lang3/BooleanUtils.java
313
[]
Boolean
true
1
6.32
apache/commons-lang
2,896
javadoc
false
indexChunks
Tuple<Integer, String> indexChunks(String name, InputStream is, int chunk, final Checksum checksum, long timestamp) throws IOException { // we have to calculate and return md5 sums as a matter of course (see actualMd5 being return below), // but we don't have to do it *twice* -- so if the passed-in checksum is also md5, then we'll get null here MessageDigest md5 = MessageDigests.md5(); MessageDigest digest = checksum.digest(); // this returns null for md5 for (byte[] buf = getChunk(is); buf.length != 0; buf = getChunk(is)) { md5.update(buf); if (digest != null) { digest.update(buf); } IndexRequest indexRequest = new IndexRequest(DATABASES_INDEX).id(name + "_" + chunk + "_" + timestamp) .create(true) .source(XContentType.SMILE, "name", name, "chunk", chunk, "data", buf); client.index(indexRequest).actionGet(); chunk++; } // May take some time before automatic flush kicks in: // (otherwise the translog will contain large documents for some time without good reason) FlushRequest flushRequest = new FlushRequest(DATABASES_INDEX); client.admin().indices().flush(flushRequest).actionGet(); // Ensure that the chunk documents are visible: RefreshRequest refreshRequest = new RefreshRequest(DATABASES_INDEX); client.admin().indices().refresh(refreshRequest).actionGet(); String actualMd5 = MessageDigests.toHexString(md5.digest()); String actualChecksum = digest == null ? actualMd5 : MessageDigests.toHexString(digest.digest()); String expectedChecksum = checksum.checksum; if (Objects.equals(expectedChecksum, actualChecksum) == false) { throw new IOException("checksum mismatch, expected [" + expectedChecksum + "], actual [" + actualChecksum + "]"); } return Tuple.tuple(chunk, actualMd5); }
This method fetches the database file for the given database from the passed-in source, then indexes that database file into the .geoip_databases Elasticsearch index, deleting any old versions of the database from the index if they exist. @param name The name of the database to be downloaded and indexed into an Elasticsearch index @param checksum The checksum to compare to the computed checksum of the downloaded file @param source The supplier of an InputStream that will actually download the file
java
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java
334
[ "name", "is", "chunk", "checksum", "timestamp" ]
true
5
6.72
elastic/elasticsearch
75,680
javadoc
false
getEndpointAnnotationElements
Set<TypeElement> getEndpointAnnotationElements() { return this.endpointAnnotations.stream() .map(this.elements::getTypeElement) .filter(Objects::nonNull) .collect(Collectors.toSet()); }
Collect the annotations that are annotated or meta-annotated with the specified {@link TypeElement annotation}. @param element the element to inspect @param annotationType the annotation to discover @return the annotations that are annotated or meta-annotated with this annotation
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
363
[]
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
moveToEnd
public static void moveToEnd(MutablePropertySources propertySources) { PropertySource<?> propertySource = propertySources.remove(NAME); if (propertySource != null) { propertySources.addLast(propertySource); } }
Move the 'defaultProperties' property source so that it's the last source in the given {@link MutablePropertySources}. @param propertySources the property sources to update
java
core/spring-boot/src/main/java/org/springframework/boot/env/DefaultPropertiesPropertySource.java
127
[ "propertySources" ]
void
true
2
6.24
spring-projects/spring-boot
79,428
javadoc
false
get_partitions
def get_partitions( self, database_name: str, table_name: str, expression: str = "", page_size: int | None = None, max_items: int | None = None, ) -> set[tuple]: """ Retrieve the partition values for a table. .. seealso:: - :external+boto3:py:class:`Glue.Paginator.GetPartitions` :param database_name: The name of the catalog database where the partitions reside. :param table_name: The name of the partitions' table. :param expression: An expression filtering the partitions to be returned. Please see official AWS documentation for further information. https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions :param page_size: pagination size :param max_items: maximum items to return :return: set of partition values where each value is a tuple since a partition may be composed of multiple columns. For example: ``{('2018-01-01','1'), ('2018-01-01','2')}`` """ config = { "PageSize": page_size, "MaxItems": max_items, } paginator = self.get_conn().get_paginator("get_partitions") response = paginator.paginate( DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config ) partitions = set() for page in response: for partition in page["Partitions"]: partitions.add(tuple(partition["Values"])) return partitions
Retrieve the partition values for a table. .. seealso:: - :external+boto3:py:class:`Glue.Paginator.GetPartitions` :param database_name: The name of the catalog database where the partitions reside. :param table_name: The name of the partitions' table. :param expression: An expression filtering the partitions to be returned. Please see official AWS documentation for further information. https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions :param page_size: pagination size :param max_items: maximum items to return :return: set of partition values where each value is a tuple since a partition may be composed of multiple columns. For example: ``{('2018-01-01','1'), ('2018-01-01','2')}``
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_catalog.py
87
[ "self", "database_name", "table_name", "expression", "page_size", "max_items" ]
set[tuple]
true
3
7.44
apache/airflow
43,597
sphinx
false
on_chain_start
def on_chain_start(self, chain, **headers) -> dict: """Method that is called on chain stamping start. Arguments: chain (chain): Chain that is stamped. headers (Dict): Partial headers that could be merged with existing headers. Returns: Dict: headers to update. """ return {}
Method that is called on chain stamping start. Arguments: chain (chain): Chain that is stamped. headers (Dict): Partial headers that could be merged with existing headers. Returns: Dict: headers to update.
python
celery/canvas.py
144
[ "self", "chain" ]
dict
true
1
6.88
celery/celery
27,741
google
false
reduceRight
function reduceRight(collection, iteratee, accumulator) { var func = isArray(collection) ? arrayReduceRight : baseReduce, initAccum = arguments.length < 3; return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEachRight); }
This method is like `_.reduce` except that it iterates over elements of `collection` from right to left. @static @memberOf _ @since 0.1.0 @category Collection @param {Array|Object} collection The collection to iterate over. @param {Function} [iteratee=_.identity] The function invoked per iteration. @param {*} [accumulator] The initial value. @returns {*} Returns the accumulated value. @see _.reduce @example var array = [[0, 1], [2, 3], [4, 5]]; _.reduceRight(array, function(flattened, other) { return flattened.concat(other); }, []); // => [4, 5, 2, 3, 0, 1]
javascript
lodash.js
9,813
[ "collection", "iteratee", "accumulator" ]
false
2
7.04
lodash/lodash
61,490
jsdoc
false
ensureBuilderMapNonNull
Map<K, ImmutableCollection.Builder<V>> ensureBuilderMapNonNull() { Map<K, ImmutableCollection.Builder<V>> result = builderMap; if (result == null) { result = Platform.preservesInsertionOrderOnPutsMap(); builderMap = result; } return result; }
Creates a new builder with a hint for the number of distinct keys.
java
android/guava/src/com/google/common/collect/ImmutableMultimap.java
184
[]
true
2
6.88
google/guava
51,352
javadoc
false
conforms
function conforms(source) { return baseConforms(baseClone(source, CLONE_DEEP_FLAG)); }
Creates a function that invokes the predicate properties of `source` with the corresponding property values of a given object, returning `true` if all predicates return truthy, else `false`. **Note:** The created function is equivalent to `_.conformsTo` with `source` partially applied. @static @memberOf _ @since 4.0.0 @category Util @param {Object} source The object of property predicates to conform to. @returns {Function} Returns the new spec function. @example var objects = [ { 'a': 2, 'b': 1 }, { 'a': 1, 'b': 2 } ]; _.filter(objects, _.conforms({ 'b': function(n) { return n > 1; } })); // => [{ 'a': 1, 'b': 2 }]
javascript
lodash.js
15,480
[ "source" ]
false
1
6.24
lodash/lodash
61,490
jsdoc
false
inferKeyTypeOrObjectUnderJ2cl
static <K extends Enum<K>> Class<K> inferKeyTypeOrObjectUnderJ2cl(Map<K, ?> map) { if (map instanceof EnumBiMap) { return ((EnumBiMap<K, ?>) map).keyTypeOrObjectUnderJ2cl; } if (map instanceof EnumHashBiMap) { return ((EnumHashBiMap<K, ?>) map).keyTypeOrObjectUnderJ2cl; } checkArgument(!map.isEmpty()); return getDeclaringClassOrObjectForJ2cl(map.keySet().iterator().next()); }
Returns a new bimap with the same mappings as the specified map. If the specified map is an {@code EnumBiMap}, the new bimap has the same types as the provided map. Otherwise, the specified map must contain at least one mapping, in order to determine the key and value types. @param map the map whose mappings are to be placed in this map @throws IllegalArgumentException if map is not an {@code EnumBiMap} instance and contains no mappings
java
android/guava/src/com/google/common/collect/EnumBiMap.java
97
[ "map" ]
true
3
6.88
google/guava
51,352
javadoc
false
__call__
def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. """ X = np.atleast_2d(X) if Y is None: dists = squareform(pdist(X, metric="euclidean")) arg = np.pi * dists / self.periodicity sin_of_arg = np.sin(arg) K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2) else: if eval_gradient: raise ValueError("Gradient can only be evaluated when Y is None.") dists = cdist(X, Y, metric="euclidean") K = np.exp( -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2 ) if eval_gradient: cos_of_arg = np.cos(arg) # gradient with respect to length_scale if not self.hyperparameter_length_scale.fixed: length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K length_scale_gradient = length_scale_gradient[:, :, np.newaxis] else: # length_scale is kept fixed length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) # gradient with respect to p if not self.hyperparameter_periodicity.fixed: periodicity_gradient = ( 4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K ) periodicity_gradient = periodicity_gradient[:, :, np.newaxis] else: # p is kept fixed periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0)) return K, np.dstack((length_scale_gradient, periodicity_gradient)) else: 
return K
Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True.
python
sklearn/gaussian_process/kernels.py
2,028
[ "self", "X", "Y", "eval_gradient" ]
false
10
6
scikit-learn/scikit-learn
64,340
numpy
false
parameterizeWithOwner
public static final ParameterizedType parameterizeWithOwner(final Type owner, final Class<?> rawClass, final Map<TypeVariable<?>, Type> typeVariableMap) { Objects.requireNonNull(rawClass, "rawClass"); Objects.requireNonNull(typeVariableMap, "typeVariableMap"); return parameterizeWithOwner(owner, rawClass, extractTypeArgumentsFrom(typeVariableMap, rawClass.getTypeParameters())); }
Creates a parameterized type instance. @param owner the owning type. @param rawClass the raw class to create a parameterized type instance for. @param typeVariableMap the map used for parameterization. @return {@link ParameterizedType}. @throws NullPointerException if either {@code rawClass} or {@code typeVariableMap} is {@code null}. @since 3.2
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
1,446
[ "owner", "rawClass", "typeVariableMap" ]
ParameterizedType
true
1
6.24
apache/commons-lang
2,896
javadoc
false
_check_data_length
def _check_data_length( self, columns: Sequence[Hashable], data: Sequence[ArrayLike], ) -> None: """Checks if length of data is equal to length of column names. One set of trailing commas is allowed. self.index_col not False results in a ParserError previously when lengths do not match. Parameters ---------- columns: list of column names data: list of array-likes containing the data column-wise. """ if not self.index_col and len(columns) != len(data) and columns: empty_str = is_object_dtype(data[-1]) and data[-1] == "" # error: No overload variant of "__ror__" of "ndarray" matches # argument type "ExtensionArray" empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator] if len(columns) == len(data) - 1 and np.all(empty_str_or_na): return warnings.warn( "Length of header or names does not match length of data. This leads " "to a loss of data with index_col=False.", ParserWarning, stacklevel=find_stack_level(), )
Checks if length of data is equal to length of column names. One set of trailing commas is allowed. self.index_col not False results in a ParserError previously when lengths do not match. Parameters ---------- columns: list of column names data: list of array-likes containing the data column-wise.
python
pandas/io/parsers/base_parser.py
608
[ "self", "columns", "data" ]
None
true
7
7.04
pandas-dev/pandas
47,362
numpy
false
convert_cmake_value_to_python_value
def convert_cmake_value_to_python_value( cmake_value: str, cmake_type: str ) -> CMakeValue: r"""Convert a CMake value in a string form to a Python value. Args: cmake_value (string): The CMake value in a string form (e.g., "ON", "OFF", "1"). cmake_type (string): The CMake type of :attr:`cmake_value`. Returns: A Python value corresponding to :attr:`cmake_value` with type :attr:`cmake_type`. """ cmake_type = cmake_type.upper() up_val = cmake_value.upper() if cmake_type == "BOOL": # https://cmake.org/cmake/help/latest/manual/cmake-generator-expressions.7.html#genex:BOOL return not ( up_val in ("FALSE", "OFF", "N", "NO", "0", "", "NOTFOUND") or up_val.endswith("-NOTFOUND") ) elif cmake_type == "FILEPATH": if up_val.endswith("-NOTFOUND"): return None else: return cmake_value else: # Directly return the cmake_value. return cmake_value
r"""Convert a CMake value in a string form to a Python value. Args: cmake_value (string): The CMake value in a string form (e.g., "ON", "OFF", "1"). cmake_type (string): The CMake type of :attr:`cmake_value`. Returns: A Python value corresponding to :attr:`cmake_value` with type :attr:`cmake_type`.
python
tools/setup_helpers/cmake_utils.py
15
[ "cmake_value", "cmake_type" ]
CMakeValue
true
7
8.08
pytorch/pytorch
96,034
google
false
successful
def successful(self): """Return true if all tasks successful. Returns: bool: true if all of the tasks finished successfully (i.e. didn't raise an exception). """ return all(result.successful() for result in self.results)
Return true if all tasks successful. Returns: bool: true if all of the tasks finished successfully (i.e. didn't raise an exception).
python
celery/result.py
616
[ "self" ]
false
1
6.08
celery/celery
27,741
unknown
false
join
public static String join(final boolean[] array, final char delimiter, final int startIndex, final int endIndex) { if (array == null) { return null; } if (endIndex - startIndex <= 0) { return EMPTY; } final StringBuilder stringBuilder = new StringBuilder(array.length * 5 + array.length - 1); for (int i = startIndex; i < endIndex; i++) { stringBuilder .append(array[i]) .append(delimiter); } return stringBuilder.substring(0, stringBuilder.length() - 1); }
Joins the elements of the provided array into a single String containing the provided list of elements. <p> No delimiter is added before or after the list. Null objects or empty strings within the array are represented by empty strings. </p> <pre> StringUtils.join(null, *) = null StringUtils.join([], *) = "" StringUtils.join([null], *) = "" StringUtils.join([true, false, true], ';') = "true;false;true" </pre> @param array the array of values to join together, may be null. @param delimiter the separator character to use. @param startIndex the first index to start joining from. It is an error to pass in a start index past the end of the array. @param endIndex the index to stop joining from (exclusive). It is an error to pass in an end index past the end of the array. @return the joined String, {@code null} if null array input. @since 3.12.0
java
src/main/java/org/apache/commons/lang3/StringUtils.java
3,856
[ "array", "delimiter", "startIndex", "endIndex" ]
String
true
4
8.08
apache/commons-lang
2,896
javadoc
false
get
@Override public @Nullable V get(@Nullable Object key) { int entry = findEntryByKey(key); return (entry == ABSENT) ? null : values[entry]; }
Returns {@code true} if this BiMap contains an entry whose value is equal to {@code value} (or, equivalently, if this inverse view contains a key that is equal to {@code value}). <p>Due to the property that values in a BiMap are unique, this will tend to execute in faster-than-linear time. @param value the object to search for in the values of this BiMap @return true if a mapping exists from a key to the specified value
java
android/guava/src/com/google/common/collect/HashBiMap.java
270
[ "key" ]
V
true
2
7.92
google/guava
51,352
javadoc
false
valueSize
public int valueSize() { return buffer.getInt(valueSizeOffset()); }
The length of the value in bytes @return the size in bytes of the value (0 if the value is null)
java
clients/src/main/java/org/apache/kafka/common/record/LegacyRecord.java
181
[]
true
1
6.32
apache/kafka
31,560
javadoc
false
register
@CanIgnoreReturnValue @ParametricNullness public <C extends @Nullable Closeable> C register(@ParametricNullness C closeable) { if (closeable != null) { stack.addFirst(closeable); } return closeable; }
Registers the given {@code closeable} to be closed when this {@code Closer} is {@linkplain #close closed}. @return the given {@code closeable}
java
android/guava/src/com/google/common/io/Closer.java
107
[ "closeable" ]
C
true
2
6.88
google/guava
51,352
javadoc
false
trimResults
public Splitter trimResults(CharMatcher trimmer) { checkNotNull(trimmer); return new Splitter(strategy, omitEmptyStrings, trimmer, limit); }
Returns a splitter that behaves equivalently to {@code this} splitter, but removes all leading or trailing characters matching the given {@code CharMatcher} from each returned substring. For example, {@code Splitter.on(',').trimResults(CharMatcher.is('_')).split("_a ,_b_ ,c__")} returns an iterable containing {@code ["a ", "b_ ", "c"]}. @param trimmer a {@link CharMatcher} that determines whether a character should be removed from the beginning/end of a subsequence @return a splitter with the desired configuration
java
android/guava/src/com/google/common/base/Splitter.java
355
[ "trimmer" ]
Splitter
true
1
6.16
google/guava
51,352
javadoc
false
run_flow
def run_flow( self, flow_name: str, poll_interval: int = 20, wait_for_completion: bool = True, max_attempts: int = 60, ) -> str: """ Execute an AppFlow run. :param flow_name: The flow name :param poll_interval: Time (seconds) to wait between two consecutive calls to check the run status :param wait_for_completion: whether to wait for the run to end to return :param max_attempts: the number of polls to do before timing out/returning a failure. :return: The run execution ID """ response_start = self.conn.start_flow(flowName=flow_name) execution_id = response_start["executionId"] self.log.info("executionId: %s", execution_id) if wait_for_completion: wait( waiter=self.get_waiter("run_complete", {"EXECUTION_ID": execution_id}), waiter_delay=poll_interval, waiter_max_attempts=max_attempts, args={"flowName": flow_name}, failure_message="error while waiting for flow to complete", status_message="waiting for flow completion, status", status_args=[ f"flowExecutions[?executionId=='{execution_id}'].executionStatus", f"flowExecutions[?executionId=='{execution_id}'].executionResult.errorInfo", ], ) self._log_execution_description(flow_name, execution_id) return execution_id
Execute an AppFlow run. :param flow_name: The flow name :param poll_interval: Time (seconds) to wait between two consecutive calls to check the run status :param wait_for_completion: whether to wait for the run to end to return :param max_attempts: the number of polls to do before timing out/returning a failure. :return: The run execution ID
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/appflow.py
46
[ "self", "flow_name", "poll_interval", "wait_for_completion", "max_attempts" ]
str
true
2
7.76
apache/airflow
43,597
sphinx
false
unstack
def unstack(self, unstacker, fill_value) -> BlockManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ new_columns = unstacker.get_new_columns(self.items) new_index = unstacker.new_index allow_fill = not unstacker.mask_all if allow_fill: # calculating the full mask once and passing it to Block._unstack is # faster than letting calculating it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) else: needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool) new_blocks: list[Block] = [] columns_mask: list[np.ndarray] = [] if len(self.items) == 0: factor = 1 else: fac = len(new_columns) / len(self.items) assert fac == int(fac) factor = int(fac) for blk in self.blocks: mgr_locs = blk.mgr_locs new_placement = mgr_locs.tile_for_unstack(factor) blocks, mask = blk._unstack( unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking, ) new_blocks.extend(blocks) columns_mask.extend(mask) # Block._unstack should ensure this holds, assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks) # In turn this ensures that in the BlockManager call below # we have len(new_columns) == sum(x.shape[0] for x in new_blocks) # which suffices to allow us to pass verify_inegrity=False new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) return bm
Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager
python
pandas/core/internals/managers.py
1,699
[ "self", "unstacker", "fill_value" ]
BlockManager
true
6
6.24
pandas-dev/pandas
47,362
numpy
false
firstNode
private @Nullable AvlNode<E> firstNode() { AvlNode<E> root = rootReference.get(); if (root == null) { return null; } AvlNode<E> node; if (range.hasLowerBound()) { // The cast is safe because of the hasLowerBound check. E endpoint = uncheckedCastNullableTToT(range.getLowerEndpoint()); node = root.ceiling(comparator(), endpoint); if (node == null) { return null; } if (range.getLowerBoundType() == BoundType.OPEN && comparator().compare(endpoint, node.getElement()) == 0) { node = node.succ(); } } else { node = header.succ(); } return (node == header || !range.contains(node.getElement())) ? null : node; }
Returns the first node in the tree that is in range.
java
android/guava/src/com/google/common/collect/TreeMultiset.java
400
[]
true
8
6.88
google/guava
51,352
javadoc
false
partitionsNeedingValidation
public synchronized Map<TopicPartition, FetchPosition> partitionsNeedingValidation(long nowMs) { Map<TopicPartition, FetchPosition> result = new HashMap<>(); assignment.forEach((tp, tps) -> { if (tps.awaitingValidation() && !tps.awaitingRetryBackoff(nowMs) && tps.position != null) { result.put(tp, tps.position); } }); return result; }
Request reset for partitions that require a position, using the configured reset strategy. @param initPartitionsToInclude Initializing partitions to include in the reset. Assigned partitions that require a positions but are not included in this set won't be reset. @throws NoOffsetForPartitionException If there are partitions assigned that require a position but there is no reset strategy configured.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
882
[ "nowMs" ]
true
4
6.24
apache/kafka
31,560
javadoc
false