function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
get
protected Object get(String key) { if (!values.containsKey(key)) throw new ConfigException(String.format("Unknown configuration '%s'", key)); used.add(key); return values.get(key); }
Called directly after user configs got parsed (and thus default values got set). This allows to change default values for "secondary defaults" if required. @param parsedValues unmodifiable map of current configuration @return a map of updates that should be applied to the configuration (will be validated to prevent bad updates)
java
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
174
[ "key" ]
Object
true
2
7.6
apache/kafka
31,560
javadoc
false
matchesExclusion
protected abstract boolean matchesExclusion(String pattern, int patternIndex);
Does the exclusion pattern at the given index match the given String? @param pattern the {@code String} pattern to match @param patternIndex index of pattern (starting from 0) @return {@code true} if there is a match, {@code false} otherwise
java
spring-aop/src/main/java/org/springframework/aop/support/AbstractRegexpMethodPointcut.java
195
[ "pattern", "patternIndex" ]
true
1
6.48
spring-projects/spring-framework
59,386
javadoc
false
packageToPath
public static String packageToPath(final String path) { return Objects.requireNonNull(path, "path").replace('.', '/'); }
Converts a package name to a Java path ('/'). @param path the source path. @return a package name. @throws NullPointerException if {@code path} is null. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/ClassPathUtils.java
41
[ "path" ]
String
true
1
6.64
apache/commons-lang
2,896
javadoc
false
sort
public static void sort(Object[] source, SortDefinition sortDefinition) throws BeansException { if (StringUtils.hasText(sortDefinition.getProperty())) { Arrays.sort(source, new PropertyComparator<>(sortDefinition)); } }
Sort the given source according to the given sort definition. <p>Note: Contained objects have to provide the given property in the form of a bean property, i.e. a getXXX method. @param source input source @param sortDefinition the parameters to sort by @throws java.lang.IllegalArgumentException in case of a missing propertyName
java
spring-beans/src/main/java/org/springframework/beans/support/PropertyComparator.java
149
[ "source", "sortDefinition" ]
void
true
2
6.72
spring-projects/spring-framework
59,386
javadoc
false
fill
public static int[] fill(final int[] a, final int val) { if (a != null) { Arrays.fill(a, val); } return a; }
Fills and returns the given array, assigning the given {@code int} value to each element of the array. @param a the array to be filled (may be null). @param val the value to be stored in all elements of the array. @return the given array. @see Arrays#fill(int[],int)
java
src/main/java/org/apache/commons/lang3/ArrayFill.java
116
[ "a", "val" ]
true
2
8.08
apache/commons-lang
2,896
javadoc
false
objects_to_datetime64
def objects_to_datetime64( data: np.ndarray, dayfirst, yearfirst, utc: bool = False, errors: DateTimeErrorChoices = "raise", allow_object: bool = False, out_unit: str | None = None, ) -> tuple[np.ndarray, tzinfo | None]: """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert/localize timestamps to UTC. errors : {'raise', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. out_unit : str or None, default None None indicates we should do resolution inference. Returns ------- result : ndarray np.datetime64[out_unit] if returned values represent wall times or UTC timestamps. object if mixed timezones inferred_tz : tzinfo or None If not None, then the datetime64 values in `result` denote UTC timestamps. Raises ------ ValueError : if data cannot be converted to datetimes TypeError : When a type cannot be converted to datetime """ assert errors in ["raise", "coerce"] # if str-dtype, convert data = np.asarray(data, dtype=np.object_) result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, creso=abbrev_to_npy_unit(out_unit), ) if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC return result, tz_parsed elif result.dtype.kind == "M": return result, tz_parsed elif result.dtype == object: # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. 
When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError("DatetimeIndex has mixed timezones") else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result)
Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert/localize timestamps to UTC. errors : {'raise', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. out_unit : str or None, default None None indicates we should do resolution inference. Returns ------- result : ndarray np.datetime64[out_unit] if returned values represent wall times or UTC timestamps. object if mixed timezones inferred_tz : tzinfo or None If not None, then the datetime64 values in `result` denote UTC timestamps. Raises ------ ValueError : if data cannot be converted to datetimes TypeError : When a type cannot be converted to datetime
python
pandas/core/arrays/datetimes.py
2,578
[ "data", "dayfirst", "yearfirst", "utc", "errors", "allow_object", "out_unit" ]
tuple[np.ndarray, tzinfo | None]
true
6
6.8
pandas-dev/pandas
47,362
numpy
false
deleteTopics
default DeleteTopicsResult deleteTopics(TopicCollection topics) { return deleteTopics(topics, new DeleteTopicsOptions()); }
This is a convenience method for {@link #deleteTopics(TopicCollection, DeleteTopicsOptions)} with default options. See the overload for more details. <p> When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher. @param topics The topics to delete. @return The DeleteTopicsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
240
[ "topics" ]
DeleteTopicsResult
true
1
6.32
apache/kafka
31,560
javadoc
false
parseTemplateMiddleOrTemplateTail
function parseTemplateMiddleOrTemplateTail(): TemplateMiddle | TemplateTail { const fragment = parseLiteralLikeNode(token()); Debug.assert(fragment.kind === SyntaxKind.TemplateMiddle || fragment.kind === SyntaxKind.TemplateTail, "Template fragment has wrong token kind"); return fragment as TemplateMiddle | TemplateTail; }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
3,748
[]
true
2
6.72
microsoft/TypeScript
107,154
jsdoc
false
metricsRegistry
@Override public Metrics metricsRegistry() { return metrics; }
This method can be used by cases where the caller has an event that needs to both block for completion but also process background events. For some events, in order to fully process the associated logic, the {@link ConsumerNetworkThread background thread} needs assistance from the application thread to complete. If the application thread simply blocked on the event after submitting it, the processing would deadlock. The logic herein is basically a loop that performs two tasks in each iteration: <ol> <li>Process background events, if any</li> <li><em>Briefly</em> wait for {@link CompletableApplicationEvent an event} to complete</li> </ol> <p/> Each iteration gives the application thread an opportunity to process background events, which may be necessary to complete the overall processing. @param future Event that contains a {@link CompletableFuture}; it is on this future that the application thread will wait for completion @param timer Overall timer that bounds how long to wait for the event to complete @param ignoreErrorEventException Predicate to ignore background errors. Any exceptions found while processing background events that match the predicate won't be propagated. @return {@code true} if the event completed within the timeout, {@code false} otherwise
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
1,346
[]
Metrics
true
1
6.48
apache/kafka
31,560
javadoc
false
wrapCanBeObjectOrArrayOfObjects
private static XContentParseException wrapCanBeObjectOrArrayOfObjects(ParseField field, XContentParser p) { return new XContentParseException( p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + "fields or an array where each entry is an object with a single field" ); }
Parses a Value from the given {@link XContentParser} @param parser the parser to build a value from @param value the value to fill from the parser @param context a context that is passed along to all declared field parsers @return the parsed value @throws IOException if an IOException occurs.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java
543
[ "field", "p" ]
XContentParseException
true
1
6.4
elastic/elasticsearch
75,680
javadoc
false
nextClean
public char nextClean() throws JSONException { int nextCleanInt = nextCleanInternal(); return nextCleanInt == -1 ? '\0' : (char) nextCleanInt; }
Returns the current position and the entire input string. @return the current position and the entire input string.
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
493
[]
true
2
8
spring-projects/spring-boot
79,428
javadoc
false
collectPartitions
private Set<TopicPartition> collectPartitions(Predicate<TopicPartitionState> filter) { Set<TopicPartition> result = new HashSet<>(); assignment.forEach((topicPartition, topicPartitionState) -> { if (filter.test(topicPartitionState)) { result.add(topicPartition); } }); return result; }
Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches. @param tp The topic partition @return the removed preferred read replica if set, Empty otherwise.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
841
[ "filter" ]
true
2
8.24
apache/kafka
31,560
javadoc
false
setAttribute
@Override public void setAttribute(Traceable traceable, String key, long value) { final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } }
Most of the examples of how to use the OTel API look something like this, where the span context is automatically propagated: <pre>{@code Span span = tracer.spanBuilder("parent").startSpan(); try (Scope scope = parentSpan.makeCurrent()) { // ...do some stuff, possibly creating further spans } finally { span.end(); } }</pre> This typically isn't useful in Elasticsearch, because a {@link Scope} can't be used across threads. However, if a scope is active, then the APM agent can capture additional information, so this method exists to make it possible to use scopes in the few situation where it makes sense. @param traceable provides the ID of a currently-open span for which to open a scope. @return a method to close the scope when you are finished with it.
java
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java
372
[ "traceable", "key", "value" ]
void
true
2
7.92
elastic/elasticsearch
75,680
javadoc
false
getRequiredScaleReductionToReduceBucketCountBy
int getRequiredScaleReductionToReduceBucketCountBy(int desiredCollapsedBucketCount) { assert desiredCollapsedBucketCount >= 0; if (desiredCollapsedBucketCount == 0) { return 0; } int totalCollapsed = 0; for (int i = 0; i < collapsedBucketCount.length; i++) { totalCollapsed += collapsedBucketCount[i]; if (totalCollapsed >= desiredCollapsedBucketCount) { return i + 1; } } throw new IllegalStateException("Cannot reduce the bucket count by " + desiredCollapsedBucketCount); }
Returns the required scale reduction to reduce the number of buckets by at least the given amount. @param desiredCollapsedBucketCount the target number of buckets to collapse @return the required scale reduction
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/DownscaleStats.java
102
[ "desiredCollapsedBucketCount" ]
true
4
7.6
elastic/elasticsearch
75,680
javadoc
false
nextInChain
boolean nextInChain() { if (nextEntry != null) { for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) { if (advanceTo(nextEntry)) { return true; } } } return false; }
Finds the next entry in the current chain. Returns true if an entry was found.
java
android/guava/src/com/google/common/cache/LocalCache.java
4,249
[]
true
4
6.72
google/guava
51,352
javadoc
false
originalBeanName
protected String originalBeanName(String name) { String beanName = transformedBeanName(name); if (!name.isEmpty() && name.charAt(0) == BeanFactory.FACTORY_BEAN_PREFIX_CHAR) { beanName = FACTORY_BEAN_PREFIX + beanName; } return beanName; }
Determine the original bean name, resolving locally defined aliases to canonical names. @param name the user-specified name @return the original bean name
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
1,280
[ "name" ]
String
true
3
7.6
spring-projects/spring-framework
59,386
javadoc
false
getName
public String getName() { String name = this.name; if (name == null) { int offset = ZipContent.this.nameOffsetLookups.get(this.lookupIndex); long pos = getCentralDirectoryFileHeaderRecordPos(this.lookupIndex) + ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET + offset; name = ZipString.readString(ZipContent.this.data, pos, this.centralRecord.fileNameLength() - offset); this.name = name; } return name; }
Return the name of this entry. @return the entry name
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
753
[]
String
true
2
8.08
spring-projects/spring-boot
79,428
javadoc
false
format
public static String format(final long millis, final String pattern, final TimeZone timeZone, final Locale locale) { return format(new Date(millis), pattern, timeZone, locale); }
Formats a date/time into a specific pattern in a time zone and locale. @param millis the date to format expressed in milliseconds. @param pattern the pattern to use to format the date, not null. @param timeZone the time zone to use, may be {@code null}. @param locale the locale to use, may be {@code null}. @return the formatted date.
java
src/main/java/org/apache/commons/lang3/time/DateFormatUtils.java
352
[ "millis", "pattern", "timeZone", "locale" ]
String
true
1
6.64
apache/commons-lang
2,896
javadoc
false
writeEndRaw
public void writeEndRaw() { assert base != null : "JsonGenerator should be of instance GeneratorBase but was: " + generator.getClass(); if (base != null) { JsonStreamContext context = base.getOutputContext(); assert (context instanceof JsonWriteContext) : "Expected an instance of JsonWriteContext but was: " + context.getClass(); ((JsonWriteContext) context).writeValue(); } }
Reference to filtering generator because writing an empty object '{}' when everything is filtered out needs a specific treatment
java
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java
447
[]
void
true
2
6.24
elastic/elasticsearch
75,680
javadoc
false
createLong
public static Long createLong(final String str) { if (str == null) { return null; } return Long.decode(str); }
Creates a {@link Long} from a {@link String}. Handles hexadecimal (0Xhhhh) and octal (0ddd) notations. A leading zero means octal; spaces are not trimmed. <p> Returns {@code null} if the string is {@code null}. </p> @param str a {@link String} to convert, may be null. @return converted {@link Long} (or null if the input is null). @throws NumberFormatException if the value cannot be converted. @since 3.1
java
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
283
[ "str" ]
Long
true
2
8.08
apache/commons-lang
2,896
javadoc
false
convertToClassName
private static String convertToClassName(String name, @Nullable String prefix) { name = name.replace('/', '.'); name = name.replace('\\', '.'); name = name.substring(0, name.length() - DOT_CLASS.length()); if (prefix != null) { name = name.substring(prefix.length()); } return name; }
Perform the given callback operation on all main classes from the given jar. @param <T> the result type @param jarFile the jar file to search @param classesLocation the location within the jar containing classes @param callback the callback @return the first callback result or {@code null} @throws IOException in case of I/O errors
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/MainClassFinder.java
238
[ "name", "prefix" ]
String
true
2
7.76
spring-projects/spring-boot
79,428
javadoc
false
getStringFieldValueInDottedNotation
private String getStringFieldValueInDottedNotation(IngestDocument ingestDocument) { String value = null; Object valueObject = ingestDocument.getCtxMap().get(fieldReference); if (valueObject instanceof String) { value = (String) valueObject; } else if (valueObject != null) { throw new IllegalArgumentException( "field [" + fieldReference + "] of type [" + valueObject.getClass().getName() + "] cannot be cast to [" + String.class.getName() + "]" ); } return value; }
Resolves the field reference from the provided ingest document or returns the static value if this value source doesn't represent a field reference. @param ingestDocument @return the resolved field reference or static value
java
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RerouteProcessor.java
289
[ "ingestDocument" ]
String
true
3
7.6
elastic/elasticsearch
75,680
javadoc
false
keySet
@Override public Set<K> keySet() { return new KeySet(); }
Returns {@code true} if this BiMap contains an entry whose value is equal to {@code value} (or, equivalently, if this inverse view contains a key that is equal to {@code value}). <p>Due to the property that values in a BiMap are unique, this will tend to execute in faster-than-linear time. @param value the object to search for in the values of this BiMap @return true if a mapping exists from a key to the specified value
java
guava/src/com/google/common/collect/HashBiMap.java
499
[]
true
1
6.64
google/guava
51,352
javadoc
false
download_file
def download_file( self, key: str, bucket_name: str | None = None, local_path: str | None = None, preserve_file_name: bool = False, use_autogenerated_subdir: bool = True, ) -> str: """ Download a file from the S3 location to the local file system. Note: This function shadows the 'download_file' method of S3 API, but it is not the same. If you want to use the original method from S3 API, please use 'S3Hook.get_conn().download_file()' .. seealso:: - :external+boto3:py:meth:`S3.Object.download_fileobj` :param key: The key path in S3. :param bucket_name: The specific bucket to use. :param local_path: The local path to the downloaded file. If no path is provided it will use the system's temporary directory. :param preserve_file_name: If you want the downloaded file name to be the same name as it is in S3, set this parameter to True. When set to False, a random filename will be generated. Default: False. :param use_autogenerated_subdir: Pairs with 'preserve_file_name = True' to download the file into a random generated folder inside the 'local_path', useful to avoid collisions between various tasks that might download the same file name. Set it to 'False' if you don't want it, and you want a predictable path. Default: True. :return: the file name. """ self.log.info("Downloading source S3 file from Bucket %s with path %s", bucket_name, key) try: s3_obj = self.get_key(key, bucket_name) except ClientError as e: if e.response.get("Error", {}).get("Code") == 404: raise AirflowNotFoundException( f"The source file in Bucket {bucket_name} with path {key} does not exist" ) raise e if preserve_file_name: local_dir = local_path or gettempdir() subdir = f"airflow_tmp_dir_{uuid4().hex[0:8]}" if use_autogenerated_subdir else "" filename_in_s3 = s3_obj.key.rsplit("/", 1)[-1] file_path = Path(local_dir, subdir, filename_in_s3) if file_path.is_file(): self.log.error( "file '%s' already exists. 
Failing the task and not overwriting it", file_path, ) raise FileExistsError file_path.parent.mkdir(exist_ok=True, parents=True) get_hook_lineage_collector().add_output_asset( context=self, scheme="file", asset_kwargs={ "path": str(file_path) if file_path.is_absolute() else str(file_path.absolute()) }, ) file = open(file_path, "wb") else: file = NamedTemporaryFile(dir=local_path, prefix="airflow_tmp_", delete=False) # type: ignore extra_args = {**self.extra_args} if self._requester_pays: extra_args["RequestPayer"] = "requester" s3_obj.download_fileobj( file, ExtraArgs=extra_args, Config=self.transfer_config, ) get_hook_lineage_collector().add_input_asset( context=self, scheme="s3", asset_kwargs={"bucket": bucket_name, "key": key} ) return file.name
Download a file from the S3 location to the local file system. Note: This function shadows the 'download_file' method of S3 API, but it is not the same. If you want to use the original method from S3 API, please use 'S3Hook.get_conn().download_file()' .. seealso:: - :external+boto3:py:meth:`S3.Object.download_fileobj` :param key: The key path in S3. :param bucket_name: The specific bucket to use. :param local_path: The local path to the downloaded file. If no path is provided it will use the system's temporary directory. :param preserve_file_name: If you want the downloaded file name to be the same name as it is in S3, set this parameter to True. When set to False, a random filename will be generated. Default: False. :param use_autogenerated_subdir: Pairs with 'preserve_file_name = True' to download the file into a random generated folder inside the 'local_path', useful to avoid collisions between various tasks that might download the same file name. Set it to 'False' if you don't want it, and you want a predictable path. Default: True. :return: the file name.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
1,528
[ "self", "key", "bucket_name", "local_path", "preserve_file_name", "use_autogenerated_subdir" ]
str
true
9
7.84
apache/airflow
43,597
sphinx
false
equals
@Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClientQuotaFilterComponent that = (ClientQuotaFilterComponent) o; return Objects.equals(that.entityType, entityType) && Objects.equals(that.match, match); }
@return the optional match string, where: if present, the name that's matched exactly if empty, matches the default name if null, matches any specified name
java
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaFilterComponent.java
92
[ "o" ]
true
5
6.08
apache/kafka
31,560
javadoc
false
setAsText
@Override public void setAsText(String text) throws IllegalArgumentException { if (StringUtils.hasText(text)) { String uri = text.trim(); if (this.classLoader != null && uri.startsWith(ResourceUtils.CLASSPATH_URL_PREFIX)) { ClassPathResource resource = new ClassPathResource( uri.substring(ResourceUtils.CLASSPATH_URL_PREFIX.length()), this.classLoader); try { setValue(resource.getURI()); } catch (IOException ex) { throw new IllegalArgumentException("Could not retrieve URI for " + resource + ": " + ex.getMessage()); } } else { try { setValue(createURI(uri)); } catch (URISyntaxException ex) { throw new IllegalArgumentException("Invalid URI syntax: " + ex.getMessage()); } } } else { setValue(null); } }
Create a new URIEditor, using the given ClassLoader to resolve "classpath:" locations into physical resource URLs. @param classLoader the ClassLoader to use for resolving "classpath:" locations (may be {@code null} to indicate the default ClassLoader) @param encode indicates whether Strings will be encoded or not @since 3.0
java
spring-beans/src/main/java/org/springframework/beans/propertyeditors/URIEditor.java
103
[ "text" ]
void
true
6
6.4
spring-projects/spring-framework
59,386
javadoc
false
nullToEmpty
public static Character[] nullToEmpty(final Character[] array) { return nullTo(array, EMPTY_CHARACTER_OBJECT_ARRAY); }
Defensive programming technique to change a {@code null} reference to an empty one. <p> This method returns an empty array for a {@code null} input array. </p> <p> As a memory optimizing technique an empty array passed in will be overridden with the empty {@code public static} references in this class. </p> @param array the array to check for {@code null} or empty. @return the same array, {@code public static} empty array if {@code null} or empty input. @since 2.5
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
4,375
[ "array" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
maybeCompleteSend
public NetworkSend maybeCompleteSend() { if (send != null && send.completed()) { midWrite = false; transportLayer.removeInterestOps(SelectionKey.OP_WRITE); NetworkSend result = send; send = null; return result; } return null; }
Returns the port to which this channel's socket is connected or 0 if the socket has never been connected. If the socket was connected prior to being closed, then this method will continue to return the connected port number after the socket is closed.
java
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
396
[]
NetworkSend
true
3
6.88
apache/kafka
31,560
javadoc
false
clearMetadataCache
public void clearMetadataCache() { this.mergedBeanDefinitions.forEach((beanName, bd) -> { if (!isBeanEligibleForMetadataCaching(beanName)) { bd.stale = true; } }); }
Clear the merged bean definition cache, removing entries for beans which are not considered eligible for full metadata caching yet. <p>Typically triggered after changes to the original bean definitions, for example, after applying a {@code BeanFactoryPostProcessor}. Note that metadata for beans which have already been created at this point will be kept around. @since 4.2
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
1,538
[]
void
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
registerDefaultFilters
@SuppressWarnings("unchecked") protected void registerDefaultFilters() { this.includeFilters.add(new AnnotationTypeFilter(Component.class)); ClassLoader cl = ClassPathScanningCandidateComponentProvider.class.getClassLoader(); try { this.includeFilters.add(new AnnotationTypeFilter( ((Class<? extends Annotation>) ClassUtils.forName("jakarta.inject.Named", cl)), false)); logger.trace("JSR-330 'jakarta.inject.Named' annotation found and supported for component scanning"); } catch (ClassNotFoundException ex) { // JSR-330 API (as included in Jakarta EE) not available - simply skip. } }
Register the default filter for {@link Component @Component}. <p>This will implicitly register all annotations that have the {@link Component @Component} meta-annotation including the {@link Repository @Repository}, {@link Service @Service}, and {@link Controller @Controller} stereotype annotations. <p>Also supports JSR-330's {@link jakarta.inject.Named} annotation if available.
java
spring-context/src/main/java/org/springframework/context/annotation/ClassPathScanningCandidateComponentProvider.java
215
[]
void
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
set_default_tensor_type
def set_default_tensor_type(t: type["torch.Tensor"] | str, /) -> None: r""" .. warning:: This function is deprecated as of PyTorch 2.1, please use :func:`torch.set_default_dtype()` and :func:`torch.set_default_device()` as alternatives. Sets the default ``torch.Tensor`` type to floating point tensor type ``t``. This type will also be used as default floating point type for type inference in :func:`torch.tensor`. The default floating point tensor type is initially ``torch.FloatTensor``. Args: t (type or string): the floating point tensor type or its name Example:: >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?") >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32 torch.float32 >>> torch.set_default_tensor_type(torch.DoubleTensor) >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor torch.float64 """ if isinstance(t, str): t = _import_dotted_name(t) _C._set_default_tensor_type(t)
r""" .. warning:: This function is deprecated as of PyTorch 2.1, please use :func:`torch.set_default_dtype()` and :func:`torch.set_default_device()` as alternatives. Sets the default ``torch.Tensor`` type to floating point tensor type ``t``. This type will also be used as default floating point type for type inference in :func:`torch.tensor`. The default floating point tensor type is initially ``torch.FloatTensor``. Args: t (type or string): the floating point tensor type or its name Example:: >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?") >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32 torch.float32 >>> torch.set_default_tensor_type(torch.DoubleTensor) >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor torch.float64
python
torch/__init__.py
1,286
[ "t" ]
None
true
2
8
pytorch/pytorch
96,034
google
false
_maybe_cast_slice_bound
def _maybe_cast_slice_bound(self, label, side: str): """ If label is a string, cast it to scalar type according to resolution. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ if isinstance(label, str): try: parsed, reso = self._parse_with_reso(label) except ValueError as err: # DTI -> parsing.DateParseError # TDI -> 'unit abbreviation w/o a number' # PI -> string cannot be parsed as datetime-like self._raise_invalid_indexer("slice", label, err) lower, upper = self._parsed_string_to_bounds(reso, parsed) return lower if side == "left" else upper elif not isinstance(label, self._data._recognized_scalars): self._raise_invalid_indexer("slice", label) return label
If label is a string, cast it to scalar type according to resolution. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller.
python
pandas/core/indexes/datetimelike.py
457
[ "self", "label", "side" ]
true
4
7.04
pandas-dev/pandas
47,362
numpy
false
endOrCacheInflater
private void endOrCacheInflater(Inflater inflater) { Deque<Inflater> inflaterCache = this.inflaterCache; if (inflaterCache != null) { synchronized (inflaterCache) { if (this.inflaterCache == inflaterCache && inflaterCache.size() < INFLATER_CACHE_LIMIT) { inflater.reset(); this.inflaterCache.add(inflater); return; } } } inflater.end(); }
Either release the given {@link Inflater} by calling {@link Inflater#end()} or add it to the cache for later reuse. @param inflater the inflater to end or cache
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFileResources.java
133
[ "inflater" ]
void
true
4
7.04
spring-projects/spring-boot
79,428
javadoc
false
drop_unused_symbols
def drop_unused_symbols( index: Union[int, sympy.Expr], var_names: list[sympy.Expr], sizes: list[sympy.Expr], ) -> None: """ Reduction has last (reduced) dim in its sizes, but downstream users won't. Normalize this away. """ if not isinstance(index, sympy.Expr): # index can be an int return free_symbols = index.free_symbols while var_names and var_names[-1] not in free_symbols: var_names.pop() sizes.pop()
Reduction has last (reduced) dim in its sizes, but downstream users won't. Normalize this away.
python
torch/_inductor/dependencies.py
519
[ "index", "var_names", "sizes" ]
None
true
4
6
pytorch/pytorch
96,034
unknown
false
negate
default FailablePredicate<T, E> negate() { return t -> !test(t); }
Returns a predicate that negates this predicate. @return a predicate that negates this predicate.
java
src/main/java/org/apache/commons/lang3/function/FailablePredicate.java
82
[]
true
1
6.48
apache/commons-lang
2,896
javadoc
false
format
def format(self, name: str, roffset=True) -> str: """ Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should rn_offset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)" """ f = V.kernel.index_to_str offsets = [*self.offsets] if not roffset: offsets = [self.remove_roffsets(offset) for offset in offsets] args = [ ( f"{name} + ({f(self.constant_offset)})" if self.constant_offset != 0 else name ), f"shape={f(self.shape)}", f"strides={f(self.strides)}", f"block_shape={f(self.block_shape)}", f"order={f(self.order)}", f"offsets={f(offsets)}", ] return f"tl.make_block_ptr({', '.join(args)})"
Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should rn_offset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)"
python
torch/_inductor/codegen/triton.py
693
[ "self", "name", "roffset" ]
str
true
3
7.28
pytorch/pytorch
96,034
google
false
succeeded
public boolean succeeded() { return isDone() && !failed(); }
Check if the request succeeded; @return true if the request completed and was successful
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
82
[]
true
2
7.52
apache/kafka
31,560
javadoc
false
value
public XContentBuilder value(Byte value) throws IOException { return (value == null) ? nullValue() : value(value.byteValue()); }
@return the value of the "human readable" flag. When the value is equal to true, some types of values are written in a format easier to read for a human.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
439
[ "value" ]
XContentBuilder
true
2
6.96
elastic/elasticsearch
75,680
javadoc
false
matchPackage
private static boolean matchPackage(String basePackage, String packageName) { if (pathMatcher.isPattern(basePackage)) { return pathMatcher.match(basePackage, packageName); } else { return packageName.equals(basePackage) || packageName.startsWith(basePackage + "."); } }
Return the candidate types that are associated with the specified stereotype. @param basePackage the package to check for candidates @param stereotype the stereotype to use @return the candidate types associated with the specified {@code stereotype} or an empty set if none has been found for the specified {@code basePackage}
java
spring-context/src/main/java/org/springframework/context/index/CandidateComponentsIndex.java
160
[ "basePackage", "packageName" ]
true
3
7.6
spring-projects/spring-framework
59,386
javadoc
false
isZip
private boolean isZip(InputStream inputStream) throws IOException { for (byte magicByte : ZIP_FILE_HEADER) { if (inputStream.read() != magicByte) { return false; } } return true; }
Writes a signature file if necessary for the given {@code writtenLibraries}. @param writtenLibraries the libraries @param writer the writer to use to write the signature file if necessary @throws IOException if a failure occurs when writing the signature file
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
293
[ "inputStream" ]
true
2
6.56
spring-projects/spring-boot
79,428
javadoc
false
bean
<T> T bean(String name, Class<T> beanClass) throws BeansException;
Return an instance, which may be shared or independent, of the specified bean. @param name the name of the bean to retrieve @param beanClass the type the bean must match; can be an interface or superclass @return an instance of the bean. @see BeanFactory#getBean(String, Class)
java
spring-beans/src/main/java/org/springframework/beans/factory/BeanRegistry.java
248
[ "name", "beanClass" ]
T
true
1
6.48
spring-projects/spring-framework
59,386
javadoc
false
getNodeDIResolutionPath
function getNodeDIResolutionPath(node: ComponentTreeNode): SerializedInjector[] | undefined { // Some nodes are not linked to HTMLElements, for example @defer blocks if (!node.nativeElement) { return undefined; } const nodeInjector = getInjectorFromElementNode(node.nativeElement); if (!nodeInjector) { return []; } // There are legit cases where an angular node will have non-ElementInjector injectors. // For example, components created with createComponent require the API consumer to // pass in an element injector, else it sets the element injector of the component // to the NullInjector if (!isElementInjector(nodeInjector)) { return []; } const element = getElementInjectorElement(nodeInjector); if (!nodeInjectorToResolutionPath.has(element)) { const resolutionPaths = getInjectorResolutionPath(nodeInjector); nodeInjectorToResolutionPath.set(element, serializeResolutionPath(resolutionPaths)); } const serializedPath = nodeInjectorToResolutionPath.get(element)!; for (const injector of serializedPath) { injectorsSeen.add(injector.id); } return serializedPath; }
Opens the source code of a component or a directive in the editor. @param constructName - The name of the class/function that represents a component, provider, guard or other callable to view source for. @param type - The type of the element to view source for component, provider, or directive. @returns - The element instance of the component, provider, or directive.
typescript
devtools/projects/ng-devtools-backend/src/lib/client-event-subscribers.ts
455
[ "node" ]
true
5
8.08
angular/angular
99,544
jsdoc
false
indexOfAny
public static int indexOfAny(final CharSequence str, final CharSequence... searchStrs) { if (str == null || searchStrs == null) { return INDEX_NOT_FOUND; } // String's can't have a MAX_VALUEth index. int ret = Integer.MAX_VALUE; int tmp; for (final CharSequence search : searchStrs) { if (search == null) { continue; } tmp = CharSequenceUtils.indexOf(str, search, 0); if (tmp == INDEX_NOT_FOUND) { continue; } if (tmp < ret) { ret = tmp; } } return ret == Integer.MAX_VALUE ? INDEX_NOT_FOUND : ret; }
Find the first index of any of a set of potential substrings. <p> A {@code null} CharSequence will return {@code -1}. A {@code null} or zero length search array will return {@code -1}. A {@code null} search array entry will be ignored, but a search array containing "" will return {@code 0} if {@code str} is not null. This method uses {@link String#indexOf(String)} if possible. </p> <pre> StringUtils.indexOfAny(null, *) = -1 StringUtils.indexOfAny(*, null) = -1 StringUtils.indexOfAny(*, []) = -1 StringUtils.indexOfAny("zzabyycdxx", ["ab", "cd"]) = 2 StringUtils.indexOfAny("zzabyycdxx", ["cd", "ab"]) = 2 StringUtils.indexOfAny("zzabyycdxx", ["mn", "op"]) = -1 StringUtils.indexOfAny("zzabyycdxx", ["zab", "aby"]) = 1 StringUtils.indexOfAny("zzabyycdxx", [""]) = 0 StringUtils.indexOfAny("", [""]) = 0 StringUtils.indexOfAny("", ["a"]) = -1 </pre> @param str the CharSequence to check, may be null. @param searchStrs the CharSequences to search for, may be null. @return the first index of any of the searchStrs in str, -1 if no match. @since 3.0 Changed signature from indexOfAny(String, String[]) to indexOfAny(CharSequence, CharSequence...)
java
src/main/java/org/apache/commons/lang3/StringUtils.java
2,741
[ "str" ]
true
7
7.92
apache/commons-lang
2,896
javadoc
false
concat
public static <T extends @Nullable Object> T[] concat(T[] array, @ParametricNullness T element) { T[] result = Arrays.copyOf(array, array.length + 1); result[array.length] = element; return result; }
Returns a new array that appends {@code element} to {@code array}. @param array the array of elements to prepend @param element the element to append to the end @return an array whose size is one larger than {@code array}, with the same contents as {@code array}, plus {@code element} occupying the last position.
java
android/guava/src/com/google/common/collect/ObjectArrays.java
104
[ "array", "element" ]
true
1
6.56
google/guava
51,352
javadoc
false
_default_formatter
def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any: """ Format the display of a value Parameters ---------- x : Any Input variable to be formatted precision : Int Floating point precision used if ``x`` is float or complex. thousands : bool, default False Whether to group digits with thousands separated with ",". Returns ------- value : Any Matches input type, or string if input is float or complex or int with sep. """ if is_float(x) or is_complex(x): return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}" elif is_integer(x): return f"{x:,}" if thousands else str(x) return x
Format the display of a value Parameters ---------- x : Any Input variable to be formatted precision : Int Floating point precision used if ``x`` is float or complex. thousands : bool, default False Whether to group digits with thousands separated with ",". Returns ------- value : Any Matches input type, or string if input is float or complex or int with sep.
python
pandas/io/formats/style_render.py
1,885
[ "x", "precision", "thousands" ]
Any
true
6
6.72
pandas-dev/pandas
47,362
numpy
false
reportTags
private void reportTags(StringBuilder report, ProjectType type) { Map<String, String> tags = type.getTags(); Iterator<Map.Entry<String, String>> iterator = tags.entrySet().iterator(); report.append(" ["); while (iterator.hasNext()) { Map.Entry<String, String> entry = iterator.next(); report.append(entry.getKey()).append(":").append(entry.getValue()); if (iterator.hasNext()) { report.append(", "); } } report.append("]"); }
Generate a report for the specified service. The report contains the available capabilities as advertised by the root endpoint. @param url the url of the service @return the report that describes the service @throws IOException if the report cannot be generated
java
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/ServiceCapabilitiesReportGenerator.java
117
[ "report", "type" ]
void
true
3
8.08
spring-projects/spring-boot
79,428
javadoc
false
parseObject
Object parseObject(String source) throws ParseException;
Parses text from a string to produce a Date. @param source A {@link String} whose beginning should be parsed. @return a {@link java.util.Date} object. @throws ParseException if the beginning of the specified string cannot be parsed. @see java.text.DateFormat#parseObject(String)
java
src/main/java/org/apache/commons/lang3/time/DateParser.java
113
[ "source" ]
Object
true
1
6.16
apache/commons-lang
2,896
javadoc
false
bindEachFunctionsFirst
function bindEachFunctionsFirst(nodes: NodeArray<Node> | undefined): void { bindEach(nodes, n => n.kind === SyntaxKind.FunctionDeclaration ? bind(n) : undefined); bindEach(nodes, n => n.kind !== SyntaxKind.FunctionDeclaration ? bind(n) : undefined); }
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names. @param symbolTable - The symbol table which node will be added to. @param parent - node's parent declaration. @param node - The declaration to be added to the symbol table @param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.) @param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
typescript
src/compiler/binder.ts
1,078
[ "nodes" ]
true
3
6.64
microsoft/TypeScript
107,154
jsdoc
false
slice
def slice(self, start=None, stop=None, step=None): """ Slice substrings from each element in the Series or Index. Slicing substrings from strings in a Series or Index helps extract specific portions of data, making it easier to analyze or manipulate text. This is useful for tasks like parsing structured text fields or isolating parts of strings with a consistent format. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "dog", "chameleon"]) >>> s 0 koala 1 dog 2 chameleon dtype: str >>> s.str.slice(start=1) 0 oala 1 og 2 hameleon dtype: str >>> s.str.slice(start=-1) 0 a 1 g 2 n dtype: str >>> s.str.slice(stop=2) 0 ko 1 do 2 ch dtype: str >>> s.str.slice(step=2) 0 kaa 1 dg 2 caeen dtype: str >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 d 2 cm dtype: str Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 d 2 cm dtype: str """ result = self._data.array._str_slice(start, stop, step) return self._wrap_result(result)
Slice substrings from each element in the Series or Index. Slicing substrings from strings in a Series or Index helps extract specific portions of data, making it easier to analyze or manipulate text. This is useful for tasks like parsing structured text fields or isolating parts of strings with a consistent format. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "dog", "chameleon"]) >>> s 0 koala 1 dog 2 chameleon dtype: str >>> s.str.slice(start=1) 0 oala 1 og 2 hameleon dtype: str >>> s.str.slice(start=-1) 0 a 1 g 2 n dtype: str >>> s.str.slice(stop=2) 0 ko 1 do 2 ch dtype: str >>> s.str.slice(step=2) 0 kaa 1 dg 2 caeen dtype: str >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 d 2 cm dtype: str Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 d 2 cm dtype: str
python
pandas/core/strings/accessor.py
1,959
[ "self", "start", "stop", "step" ]
false
1
6.4
pandas-dev/pandas
47,362
numpy
false
configs
public NewTopic configs(Map<String, String> configs) { this.configs = configs; return this; }
Set the configuration to use on the new topic. @param configs The configuration map. @return This NewTopic object.
java
clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java
113
[ "configs" ]
NewTopic
true
1
6.64
apache/kafka
31,560
javadoc
false
mapLookup
public static <V> StrLookup<V> mapLookup(final Map<String, V> map) { return new MapStrLookup<>(map); }
Returns a lookup which looks up values using a map. <p> If the map is null, then null will be returned from every lookup. The map result object is converted to a string using toString(). </p> @param <V> the type of the values supported by the lookup. @param map the map of keys to values, may be null. @return a lookup using the map, not null.
java
src/main/java/org/apache/commons/lang3/text/StrLookup.java
121
[ "map" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
hermval2d
def hermval2d(x, y, c): """ Evaluate a 2-D Hermite series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points ``(x, y)``, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. See Also -------- hermval, hermgrid2d, hermval3d, hermgrid3d Examples -------- >>> from numpy.polynomial.hermite import hermval2d >>> x = [1, 2] >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermval2d(x, y, c) array([1035., 2883.]) """ return pu._valnd(hermval, c, x, y)
Evaluate a 2-D Hermite series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) The parameters `x` and `y` are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either `x` and `y` or their elements must support multiplication and addition both with themselves and with the elements of `c`. If `c` is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points ``(x, y)``, where `x` and `y` must have the same shape. If `x` or `y` is a list or tuple, it is first converted to an ndarray, otherwise it is left unchanged and if it isn't an ndarray it is treated as a scalar. c : array_like Array of coefficients ordered so that the coefficient of the term of multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension greater than two the remaining indices enumerate multiple sets of coefficients. Returns ------- values : ndarray, compatible object The values of the two dimensional polynomial at points formed with pairs of corresponding values from `x` and `y`. See Also -------- hermval, hermgrid2d, hermval3d, hermgrid3d Examples -------- >>> from numpy.polynomial.hermite import hermval2d >>> x = [1, 2] >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermval2d(x, y, c) array([1035., 2883.])
python
numpy/polynomial/hermite.py
891
[ "x", "y", "c" ]
false
1
6.32
numpy/numpy
31,054
numpy
false
newConfinedBuffer
CloseableByteBuffer newConfinedBuffer(int len);
Creates a new {@link CloseableByteBuffer} using a confined arena. The buffer must be used within the same thread that it is created. @param len the number of bytes the buffer should allocate @return the buffer
java
libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java
104
[ "len" ]
CloseableByteBuffer
true
1
6.48
elastic/elasticsearch
75,680
javadoc
false
loadValue
private OriginTrackedValue loadValue(StringBuilder buffer, CharacterReader reader, boolean splitLists) throws IOException { buffer.setLength(0); while (reader.isWhiteSpace() && !reader.isEndOfLine()) { reader.read(); } Location location = reader.getLocation(); while (!reader.isEndOfLine() && !(splitLists && reader.isListDelimiter())) { buffer.append(reader.getCharacter()); reader.read(); } Origin origin = new TextResourceOrigin(this.resource, location); return OriginTrackedValue.of(buffer.toString(), origin); }
Load {@code .properties} data and return a map of {@code String} -> {@link OriginTrackedValue}. @param expandLists if list {@code name[]=a,b,c} shortcuts should be expanded @return the loaded properties @throws IOException on read error
java
core/spring-boot/src/main/java/org/springframework/boot/env/OriginTrackedPropertiesLoader.java
150
[ "buffer", "reader", "splitLists" ]
OriginTrackedValue
true
6
7.44
spring-projects/spring-boot
79,428
javadoc
false
orElse
String orElse(String extension) { return (this.matcher != null) ? toString() : extension; }
Return the extension from the hint or return the parameter if the hint is not {@link #isPresent() present}. @param extension the fallback extension @return the extension either from the hint or fallback
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/FileExtensionHint.java
55
[ "extension" ]
String
true
2
7.68
spring-projects/spring-boot
79,428
javadoc
false
createImgixUrl
function createImgixUrl(path: string, config: ImageLoaderConfig) { const url = new URL(`${path}/${config.src}`); // This setting ensures the smallest allowable format is set. url.searchParams.set('auto', 'format'); if (config.width) { url.searchParams.set('w', config.width.toString()); } // When requesting a placeholder image we ask a low quality image to reduce the load time. if (config.isPlaceholder) { url.searchParams.set('q', PLACEHOLDER_QUALITY); } return url.href; }
Function that generates an ImageLoader for Imgix and turns it into an Angular provider. @param path path to the desired Imgix origin, e.g. https://somepath.imgix.net or https://images.mysite.com @returns Set of providers to configure the Imgix loader. @publicApi
typescript
packages/common/src/directives/ng_optimized_image/image_loaders/imgix_loader.ts
43
[ "path", "config" ]
false
3
6.96
angular/angular
99,544
jsdoc
false
parseConditionalExpressionRest
function parseConditionalExpressionRest(leftOperand: Expression, pos: number, allowReturnTypeInArrowFunction: boolean): Expression { // Note: we are passed in an expression which was produced from parseBinaryExpressionOrHigher. const questionToken = parseOptionalToken(SyntaxKind.QuestionToken); if (!questionToken) { return leftOperand; } // Note: we explicitly 'allowIn' in the whenTrue part of the condition expression, and // we do not that for the 'whenFalse' part. let colonToken; return finishNode( factory.createConditionalExpression( leftOperand, questionToken, doOutsideOfContext(disallowInAndDecoratorContext, () => parseAssignmentExpressionOrHigher(/*allowReturnTypeInArrowFunction*/ false)), colonToken = parseExpectedToken(SyntaxKind.ColonToken), nodeIsPresent(colonToken) ? parseAssignmentExpressionOrHigher(allowReturnTypeInArrowFunction) : createMissingNode(SyntaxKind.Identifier, /*reportAtCurrentPosition*/ false, Diagnostics._0_expected, tokenToString(SyntaxKind.ColonToken)), ), pos, ); }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
5,574
[ "leftOperand", "pos", "allowReturnTypeInArrowFunction" ]
true
3
6.88
microsoft/TypeScript
107,154
jsdoc
false
apply_index
def apply_index( self, func: Callable, axis: AxisInt | str = 0, level: Level | list[Level] | None = None, **kwargs, ) -> Styler: """ Apply a CSS-styling function to the index or column headers, {wise}. Updates the HTML representation with the result. .. versionadded:: 2.1.0 Styler.applymap_index was deprecated and renamed to Styler.map_index. Parameters ---------- func : function ``func`` should {func}. axis : {{0, 1, "index", "columns"}} The headers over which to apply the function. level : int, str, list, optional If index is MultiIndex the level(s) over which to apply the function. **kwargs : dict Pass along to ``func``. Returns ------- Styler Instance of class with CSS applied to its HTML representation. See Also -------- Styler.{alt}_index: Apply a CSS-styling function to headers {altwise}. Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. Styler.map: Apply a CSS-styling function elementwise. Notes ----- Each input to ``func`` will be {input_note}. The output of ``func`` should be {output_note}, in the format 'attribute: value; attribute2: value2; ...' or, if nothing is to be applied to that element, an empty string or ``None``. Examples -------- Basic usage to conditionally highlight values in the index. >>> df = pd.DataFrame([[1, 2], [3, 4]], index=["A", "B"]) >>> def color_b({var}): ... return {ret} >>> df.style.{this}_index(color_b) # doctest: +SKIP .. figure:: ../../_static/style/appmaphead1.png Selectively applying to specific levels of MultiIndex columns. >>> midx = pd.MultiIndex.from_product([["ix", "jy"], [0, 1], ["x3", "z4"]]) >>> df = pd.DataFrame([np.arange(8)], columns=midx) >>> def highlight_x({var}): ... return {ret2} >>> df.style.{this}_index( ... highlight_x, axis="columns", level=[0, 2]) # doctest: +SKIP .. figure:: ../../_static/style/appmaphead2.png """ self._todo.append( ( lambda instance: instance._apply_index, (func, axis, level, "apply"), kwargs, ) ) return self
Apply a CSS-styling function to the index or column headers, {wise}. Updates the HTML representation with the result. .. versionadded:: 2.1.0 Styler.applymap_index was deprecated and renamed to Styler.map_index. Parameters ---------- func : function ``func`` should {func}. axis : {{0, 1, "index", "columns"}} The headers over which to apply the function. level : int, str, list, optional If index is MultiIndex the level(s) over which to apply the function. **kwargs : dict Pass along to ``func``. Returns ------- Styler Instance of class with CSS applied to its HTML representation. See Also -------- Styler.{alt}_index: Apply a CSS-styling function to headers {altwise}. Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise. Styler.map: Apply a CSS-styling function elementwise. Notes ----- Each input to ``func`` will be {input_note}. The output of ``func`` should be {output_note}, in the format 'attribute: value; attribute2: value2; ...' or, if nothing is to be applied to that element, an empty string or ``None``. Examples -------- Basic usage to conditionally highlight values in the index. >>> df = pd.DataFrame([[1, 2], [3, 4]], index=["A", "B"]) >>> def color_b({var}): ... return {ret} >>> df.style.{this}_index(color_b) # doctest: +SKIP .. figure:: ../../_static/style/appmaphead1.png Selectively applying to specific levels of MultiIndex columns. >>> midx = pd.MultiIndex.from_product([["ix", "jy"], [0, 1], ["x3", "z4"]]) >>> df = pd.DataFrame([np.arange(8)], columns=midx) >>> def highlight_x({var}): ... return {ret2} >>> df.style.{this}_index( ... highlight_x, axis="columns", level=[0, 2]) # doctest: +SKIP .. figure:: ../../_static/style/appmaphead2.png
python
pandas/io/formats/style.py
2,025
[ "self", "func", "axis", "level" ]
Styler
true
1
6.72
pandas-dev/pandas
47,362
numpy
false
bytes
public static long bytes(String value) { return BytesProcessor.apply(value); }
Uses {@link BytesProcessor} to return the number of bytes in a human-readable byte string such as <code>1kb</code>. @param value human-readable byte string @return number of bytes
java
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/Processors.java
29
[ "value" ]
true
1
6.48
elastic/elasticsearch
75,680
javadoc
false
falsePredicate
@SuppressWarnings("unchecked") static <E extends Throwable> FailableLongPredicate<E> falsePredicate() { return FALSE; }
Gets the FALSE singleton. @param <E> The kind of thrown exception or error. @return The NOP singleton.
java
src/main/java/org/apache/commons/lang3/function/FailableLongPredicate.java
46
[]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
open
def open(self, path, mode='r', encoding=None, newline=None): """ Open and return file-like object. If `path` is an URL, it will be downloaded, stored in the `DataSource` directory and opened from there. Parameters ---------- path : str or pathlib.Path Local file path or URL to open. mode : {'r', 'w', 'a'}, optional Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by `path`. Default is 'r'. encoding : {None, str}, optional Open text file with given encoding. The default encoding will be what `open` uses. newline : {None, str}, optional Newline to use when reading text file. Returns ------- out : file object File object. """ # TODO: There is no support for opening a file for writing which # doesn't exist yet (creating a file). Should there be? # TODO: Add a ``subdir`` parameter for specifying the subdirectory # used to store URLs in self._destpath. if self._isurl(path) and self._iswritemode(mode): raise ValueError("URLs are not writeable") # NOTE: _findfile will fail on a new file opened for writing. found = self._findfile(path) if found: _fname, ext = self._splitzipext(found) if ext == 'bz2': mode.replace("+", "") return _file_openers[ext](found, mode=mode, encoding=encoding, newline=newline) else: raise FileNotFoundError(f"{path} not found.")
Open and return file-like object. If `path` is an URL, it will be downloaded, stored in the `DataSource` directory and opened from there. Parameters ---------- path : str or pathlib.Path Local file path or URL to open. mode : {'r', 'w', 'a'}, optional Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to append. Available modes depend on the type of object specified by `path`. Default is 'r'. encoding : {None, str}, optional Open text file with given encoding. The default encoding will be what `open` uses. newline : {None, str}, optional Newline to use when reading text file. Returns ------- out : file object File object.
python
numpy/lib/_datasource.py
483
[ "self", "path", "mode", "encoding", "newline" ]
false
6
6.24
numpy/numpy
31,054
numpy
false
appendln
public StrBuilder appendln(final char[] chars, final int startIndex, final int length) { return append(chars, startIndex, length).appendNewLine(); }
Appends a char array followed by a new line to the string builder. Appending null will call {@link #appendNull()}. @param chars the char array to append @param startIndex the start index, inclusive, must be valid @param length the length to append, must be valid @return {@code this} instance. @since 2.3
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
971
[ "chars", "startIndex", "length" ]
StrBuilder
true
1
6.8
apache/commons-lang
2,896
javadoc
false
calculate
def calculate(cls, finished_upstreams: Iterator[TaskInstance]) -> _UpstreamTIStates: """ Calculate states for a task instance. ``counter`` is inclusive of ``setup_counter`` -- e.g. if there are 2 skipped upstreams, one of which is a setup, then counter will show 2 skipped and setup counter will show 1. :param finished_upstreams: all the finished upstreams of the dag_run """ counter: Counter[str] = Counter() setup_counter: Counter[str] = Counter() for ti in finished_upstreams: if TYPE_CHECKING: assert ti.task assert ti.state curr_state = {ti.state: 1} counter.update(curr_state) if ti.task.is_setup: setup_counter.update(curr_state) return _UpstreamTIStates( success=counter.get(TaskInstanceState.SUCCESS, 0), skipped=counter.get(TaskInstanceState.SKIPPED, 0), failed=counter.get(TaskInstanceState.FAILED, 0), upstream_failed=counter.get(TaskInstanceState.UPSTREAM_FAILED, 0), removed=counter.get(TaskInstanceState.REMOVED, 0), done=sum(counter.values()), success_setup=setup_counter.get(TaskInstanceState.SUCCESS, 0), skipped_setup=setup_counter.get(TaskInstanceState.SKIPPED, 0), )
Calculate states for a task instance. ``counter`` is inclusive of ``setup_counter`` -- e.g. if there are 2 skipped upstreams, one of which is a setup, then counter will show 2 skipped and setup counter will show 1. :param finished_upstreams: all the finished upstreams of the dag_run
python
airflow-core/src/airflow/ti_deps/deps/trigger_rule_dep.py
63
[ "cls", "finished_upstreams" ]
_UpstreamTIStates
true
4
7.04
apache/airflow
43,597
sphinx
false
configureDeliveryTimeout
private static int configureDeliveryTimeout(ProducerConfig config, Logger log) { int deliveryTimeoutMs = config.getInt(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG); int lingerMs = lingerMs(config); int requestTimeoutMs = config.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); int lingerAndRequestTimeoutMs = (int) Math.min((long) lingerMs + requestTimeoutMs, Integer.MAX_VALUE); if (deliveryTimeoutMs < lingerAndRequestTimeoutMs) { if (config.originals().containsKey(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG)) { // throw an exception if the user explicitly set an inconsistent value throw new ConfigException(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG + " should be equal to or larger than " + ProducerConfig.LINGER_MS_CONFIG + " + " + ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG); } else { // override deliveryTimeoutMs default value to lingerMs + requestTimeoutMs for backward compatibility deliveryTimeoutMs = lingerAndRequestTimeoutMs; log.warn("{} should be equal to or larger than {} + {}. Setting it to {}.", ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, ProducerConfig.LINGER_MS_CONFIG, ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, deliveryTimeoutMs); } } return deliveryTimeoutMs; }
A producer is instantiated by providing a set of key-value pairs as configuration, a key and a value {@link Serializer}. Valid configuration strings are documented <a href="http://kafka.apache.org/documentation.html#producerconfigs">here</a>. <p> Note: after creating a {@code KafkaProducer} you must always {@link #close()} it to avoid resource leaks. @param properties The producer configs @param keySerializer The serializer for key that implements {@link Serializer}. The configure() method won't be called in the producer when the serializer is passed in directly. @param valueSerializer The serializer for value that implements {@link Serializer}. The configure() method won't be called in the producer when the serializer is passed in directly.
java
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
584
[ "config", "log" ]
true
3
6.4
apache/kafka
31,560
javadoc
false
center
public static String center(String str, final int size, String padStr) { if (str == null || size <= 0) { return str; } if (isEmpty(padStr)) { padStr = SPACE; } final int strLen = str.length(); final int pads = size - strLen; if (pads <= 0) { return str; } str = leftPad(str, strLen + pads / 2, padStr); return rightPad(str, size, padStr); }
Centers a String in a larger String of size {@code size}. Uses a supplied String as the value to pad the String with. <p> If the size is less than the String length, the String is returned. A {@code null} String returns {@code null}. A negative size is treated as zero. </p> <pre> StringUtils.center(null, *, *) = null StringUtils.center("", 4, " ") = " " StringUtils.center("ab", -1, " ") = "ab" StringUtils.center("ab", 4, " ") = " ab " StringUtils.center("abcd", 2, " ") = "abcd" StringUtils.center("a", 4, " ") = " a " StringUtils.center("a", 4, "yz") = "yayz" StringUtils.center("abc", 7, null) = " abc " StringUtils.center("abc", 7, "") = " abc " </pre> @param str the String to center, may be null. @param size the int size of new String, negative treated as zero. @param padStr the String to pad the new String with, must not be null or empty. @return centered String, {@code null} if null String input. @throws IllegalArgumentException if padStr is {@code null} or empty.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
633
[ "str", "size", "padStr" ]
String
true
5
7.92
apache/commons-lang
2,896
javadoc
false
_can_use_libjoin
def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implemented in _libs.join. This is driven by whether (in monotonic increasing cases that are guaranteed not to have NAs) we can convert to an np.ndarray without making a copy. If we cannot, this negates the performance benefit of using libjoin. """ if not self.is_monotonic_increasing: # The libjoin functions all assume monotonicity. return False if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self._values, (ArrowExtensionArray, BaseMaskedArray)) or ( isinstance(self.dtype, StringDtype) and self.dtype.storage == "python" ) ) # Exclude index types where the conversion to numpy converts to object dtype, # which negates the performance benefit of libjoin # Subclasses should override to return False if _get_join_target is # not zero-copy. # TODO: exclude RangeIndex (which allocates memory)? # Doing so seems to break test_concat_datetime_timezone return not isinstance(self, (ABCIntervalIndex, ABCMultiIndex))
Whether we can use the fastpaths implemented in _libs.join. This is driven by whether (in monotonic increasing cases that are guaranteed not to have NAs) we can convert to an np.ndarray without making a copy. If we cannot, this negates the performance benefit of using libjoin.
python
pandas/core/indexes/base.py
4,943
[ "self" ]
bool
true
6
6
pandas-dev/pandas
47,362
unknown
false
registerLoggedException
protected void registerLoggedException(Throwable exception) { SpringBootExceptionHandler handler = getSpringBootExceptionHandler(); if (handler != null) { handler.registerLoggedException(exception); } }
Register that the given exception has been logged. By default, if the running in the main thread, this method will suppress additional printing of the stacktrace. @param exception the exception that was logged
java
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
874
[ "exception" ]
void
true
2
7.04
spring-projects/spring-boot
79,428
javadoc
false
elementDiffers
private boolean elementDiffers(Elements e1, Elements e2, int i) { ElementType type1 = e1.getType(i); ElementType type2 = e2.getType(i); if (type1.allowsFastEqualityCheck() && type2.allowsFastEqualityCheck()) { return !fastElementEquals(e1, e2, i); } if (type1.allowsDashIgnoringEqualityCheck() && type2.allowsDashIgnoringEqualityCheck()) { return !dashIgnoringElementEquals(e1, e2, i); } return !defaultElementEquals(e1, e2, i); }
Returns {@code true} if this element is an ancestor (immediate or nested parent) of the specified name. @param name the name to check @return {@code true} if this name is an ancestor
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
394
[ "e1", "e2", "i" ]
true
5
8
spring-projects/spring-boot
79,428
javadoc
false
drainPendingOffsetCommitRequests
public NetworkClientDelegate.PollResult drainPendingOffsetCommitRequests() { if (pendingRequests.unsentOffsetCommits.isEmpty()) return EMPTY; List<NetworkClientDelegate.UnsentRequest> requests = pendingRequests.drainPendingCommits(); return new NetworkClientDelegate.PollResult(Long.MAX_VALUE, requests); }
Drains the inflight offsetCommits during shutdown because we want to make sure all pending commits are sent before closing.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
636
[]
true
2
6.56
apache/kafka
31,560
javadoc
false
handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi
private Map<String, KafkaFuture<TopicDescription>> handleDescribeTopicsByNamesWithDescribeTopicPartitionsApi( final Collection<String> topicNames, DescribeTopicsOptions options ) { final Map<String, KafkaFutureImpl<TopicDescription>> topicFutures = new HashMap<>(topicNames.size()); final ArrayList<String> topicNamesList = new ArrayList<>(); for (String topicName : topicNames) { if (topicNameIsUnrepresentable(topicName)) { KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidTopicException("The given topic name '" + topicName + "' cannot be represented in a request.")); topicFutures.put(topicName, future); } else if (!topicFutures.containsKey(topicName)) { topicFutures.put(topicName, new KafkaFutureImpl<>()); topicNamesList.add(topicName); } } if (topicNamesList.isEmpty()) { return new HashMap<>(topicFutures); } // First, we need to retrieve the node info. DescribeClusterResult clusterResult = describeCluster(new DescribeClusterOptions().timeoutMs(options.timeoutMs())); clusterResult.nodes().whenComplete( (nodes, exception) -> { if (exception != null) { completeAllExceptionally(topicFutures.values(), exception); return; } final long now = time.milliseconds(); Map<Integer, Node> nodeIdMap = nodes.stream().collect(Collectors.toMap(Node::id, node -> node)); runnable.call( generateDescribeTopicsCallWithDescribeTopicPartitionsApi(topicNamesList, topicFutures, nodeIdMap, options, now), now ); }); return new HashMap<>(topicFutures); }
Fail futures in the given Map which were retried due to exceeding quota. We propagate the initial error back to the caller if the request timed out.
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
2,314
[ "topicNames", "options" ]
true
5
6
apache/kafka
31,560
javadoc
false
tryStatSync
function tryStatSync(fd, isUserFd) { const stats = binding.fstat(fd, false, undefined, true /* shouldNotThrow */); if (stats === undefined && !isUserFd) { fs.closeSync(fd); } return stats; }
Asynchronously reads the entire contents of a file. @param {string | Buffer | URL | number} path @param {{ encoding?: string | null; flag?: string; signal?: AbortSignal; } | string} [options] @param {( err?: Error, data?: string | Buffer ) => any} callback @returns {void}
javascript
lib/fs.js
385
[ "fd", "isUserFd" ]
false
3
6.08
nodejs/node
114,839
jsdoc
false
unravel_index
def unravel_index(indices, shape, order="C"): """ unravel_index(indices, shape, order='C') Converts a flat index or array of flat indices into a tuple of coordinate arrays. Parameters ---------- indices : array_like An integer array whose elements are indices into the flattened version of an array of dimensions ``shape``. Before version 1.6.0, this function accepted just one index value. shape : tuple of ints The shape of the array to use for unraveling ``indices``. order : {'C', 'F'}, optional Determines whether the indices should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. Returns ------- unraveled_coords : tuple of ndarray Each array in the tuple has the same shape as the ``indices`` array. See Also -------- ravel_multi_index Examples -------- >>> import numpy as np >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1) """ return (indices,)
unravel_index(indices, shape, order='C') Converts a flat index or array of flat indices into a tuple of coordinate arrays. Parameters ---------- indices : array_like An integer array whose elements are indices into the flattened version of an array of dimensions ``shape``. Before version 1.6.0, this function accepted just one index value. shape : tuple of ints The shape of the array to use for unraveling ``indices``. order : {'C', 'F'}, optional Determines whether the indices should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. Returns ------- unraveled_coords : tuple of ndarray Each array in the tuple has the same shape as the ``indices`` array. See Also -------- ravel_multi_index Examples -------- >>> import numpy as np >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1)
python
numpy/_core/multiarray.py
1,041
[ "indices", "shape", "order" ]
false
1
6.32
numpy/numpy
31,054
numpy
false
lazyTypes
function lazyTypes() { if (_TYPES !== null) { return _TYPES; } return _TYPES = require('internal/util/types'); }
Lazily loads and returns the internal/util/types module. @returns {object}
javascript
lib/internal/modules/helpers.js
358
[]
false
2
6.72
nodejs/node
114,839
jsdoc
false
vdot
def vdot(a, b, /): r""" vdot(a, b, /) Return the dot product of two vectors. The `vdot` function handles complex numbers differently than `dot`: if the first argument is complex, it is replaced by its complex conjugate in the dot product calculation. `vdot` also handles multidimensional arrays differently than `dot`: it does not perform a matrix product, but flattens the arguments to 1-D arrays before taking a vector dot product. Consequently, when the arguments are 2-D arrays of the same shape, this function effectively returns their `Frobenius inner product <https://en.wikipedia.org/wiki/Frobenius_inner_product>`_ (also known as the *trace inner product* or the *standard inner product* on a vector space of matrices). Parameters ---------- a : array_like If `a` is complex the complex conjugate is taken before calculation of the dot product. b : array_like Second argument to the dot product. Returns ------- output : ndarray Dot product of `a` and `b`. Can be an int, float, or complex depending on the types of `a` and `b`. See Also -------- dot : Return the dot product without using the complex conjugate of the first argument. Examples -------- >>> import numpy as np >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) (70-8j) >>> np.vdot(b, a) (70+8j) Note that higher-dimensional arrays are flattened! >>> a = np.array([[1, 4], [5, 6]]) >>> b = np.array([[4, 1], [2, 2]]) >>> np.vdot(a, b) 30 >>> np.vdot(b, a) 30 >>> 1*4 + 4*1 + 5*2 + 6*2 30 """ # noqa: E501 return (a, b)
vdot(a, b, /) Return the dot product of two vectors. The `vdot` function handles complex numbers differently than `dot`: if the first argument is complex, it is replaced by its complex conjugate in the dot product calculation. `vdot` also handles multidimensional arrays differently than `dot`: it does not perform a matrix product, but flattens the arguments to 1-D arrays before taking a vector dot product. Consequently, when the arguments are 2-D arrays of the same shape, this function effectively returns their `Frobenius inner product <https://en.wikipedia.org/wiki/Frobenius_inner_product>`_ (also known as the *trace inner product* or the *standard inner product* on a vector space of matrices). Parameters ---------- a : array_like If `a` is complex the complex conjugate is taken before calculation of the dot product. b : array_like Second argument to the dot product. Returns ------- output : ndarray Dot product of `a` and `b`. Can be an int, float, or complex depending on the types of `a` and `b`. See Also -------- dot : Return the dot product without using the complex conjugate of the first argument. Examples -------- >>> import numpy as np >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) (70-8j) >>> np.vdot(b, a) (70+8j) Note that higher-dimensional arrays are flattened! >>> a = np.array([[1, 4], [5, 6]]) >>> b = np.array([[4, 1], [2, 2]]) >>> np.vdot(a, b) 30 >>> np.vdot(b, a) 30 >>> 1*4 + 4*1 + 5*2 + 6*2 30
python
numpy/_core/multiarray.py
844
[ "a", "b" ]
false
1
6.32
numpy/numpy
31,054
numpy
false
createBeanInstance
protected BeanWrapper createBeanInstance(String beanName, RootBeanDefinition mbd, @Nullable Object @Nullable [] args) { // Make sure bean class is actually resolved at this point. Class<?> beanClass = resolveBeanClass(mbd, beanName); if (beanClass != null && !Modifier.isPublic(beanClass.getModifiers()) && !mbd.isNonPublicAccessAllowed()) { throw new BeanCreationException(mbd.getResourceDescription(), beanName, "Bean class isn't public, and non-public access not allowed: " + beanClass.getName()); } if (args == null) { Supplier<?> instanceSupplier = mbd.getInstanceSupplier(); if (instanceSupplier != null) { return obtainFromSupplier(instanceSupplier, beanName, mbd); } } if (mbd.getFactoryMethodName() != null) { return instantiateUsingFactoryMethod(beanName, mbd, args); } // Shortcut when re-creating the same bean... boolean resolved = false; boolean autowireNecessary = false; if (args == null) { synchronized (mbd.constructorArgumentLock) { if (mbd.resolvedConstructorOrFactoryMethod != null) { resolved = true; autowireNecessary = mbd.constructorArgumentsResolved; } } } if (resolved) { if (autowireNecessary) { return autowireConstructor(beanName, mbd, null, null); } else { return instantiateBean(beanName, mbd); } } // Candidate constructors for autowiring? Constructor<?>[] ctors = determineConstructorsFromBeanPostProcessors(beanClass, beanName); if (ctors != null || mbd.getResolvedAutowireMode() == AUTOWIRE_CONSTRUCTOR || mbd.hasConstructorArgumentValues() || !ObjectUtils.isEmpty(args)) { return autowireConstructor(beanName, mbd, ctors, args); } // Preferred constructors for default construction? ctors = mbd.getPreferredConstructors(); if (ctors != null) { return autowireConstructor(beanName, mbd, ctors, null); } // No special handling: simply use no-arg constructor. return instantiateBean(beanName, mbd); }
Create a new instance for the specified bean, using an appropriate instantiation strategy: factory method, constructor autowiring, or simple instantiation. @param beanName the name of the bean @param mbd the bean definition for the bean @param args explicit arguments to use for constructor or factory method invocation @return a BeanWrapper for the new instance @see #obtainFromSupplier @see #instantiateUsingFactoryMethod @see #autowireConstructor @see #instantiateBean
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
1,177
[ "beanName", "mbd", "args" ]
BeanWrapper
true
16
6.24
spring-projects/spring-framework
59,386
javadoc
false
find_class
def find_class(self, module: str, name: str) -> Any: """Resolve import for pickle. When the main runner uses a symbol `foo` from this file, it sees it as `worker.main.foo`. However the worker (called as a standalone file) sees the same symbol as `__main__.foo`. We have to help pickle understand that they refer to the same symbols. """ symbol_map = { # Only blessed interface Enums and dataclasses need to be mapped. "WorkerTimerArgs": WorkerTimerArgs, "WorkerOutput": WorkerOutput, "WorkerFailure": WorkerFailure, } if name in symbol_map: return symbol_map[name] return super().find_class(module, name)
Resolve import for pickle. When the main runner uses a symbol `foo` from this file, it sees it as `worker.main.foo`. However the worker (called as a standalone file) sees the same symbol as `__main__.foo`. We have to help pickle understand that they refer to the same symbols.
python
benchmarks/instruction_counts/worker/main.py
98
[ "self", "module", "name" ]
Any
true
2
6
pytorch/pytorch
96,034
unknown
false
isPipelineProcessorWithGeoIpProcessor
@SuppressWarnings("unchecked") private static boolean isPipelineProcessorWithGeoIpProcessor( Map<String, Object> processor, boolean downloadDatabaseOnPipelineCreation, Map<String, PipelineConfiguration> pipelineConfigById, Map<String, Boolean> pipelineHasGeoProcessorById ) { final Map<String, Object> processorConfig = (Map<String, Object>) processor.get("pipeline"); if (processorConfig != null) { String pipelineName = (String) processorConfig.get("name"); if (pipelineName != null) { if (pipelineHasGeoProcessorById.containsKey(pipelineName)) { if (pipelineHasGeoProcessorById.get(pipelineName) == null) { /* * If the value is null here, it indicates that this method has been called recursively with the same pipeline name. * This will cause a runtime error when the pipeline is executed, but we're avoiding changing existing behavior at * server startup time. Instead, we just bail out as quickly as possible. It is possible that this could lead to a * geo database not being downloaded for the pipeline, but it doesn't really matter since the pipeline was going to * fail anyway. */ pipelineHasGeoProcessorById.put(pipelineName, false); } } else { List<Map<String, Object>> childProcessors = null; PipelineConfiguration config = pipelineConfigById.get(pipelineName); if (config != null) { childProcessors = (List<Map<String, Object>>) config.getConfig().get(Pipeline.PROCESSORS_KEY); } // We initialize this to null so that we know it's in progress and can use it to avoid stack overflow errors: pipelineHasGeoProcessorById.put(pipelineName, null); pipelineHasGeoProcessorById.put( pipelineName, hasAtLeastOneGeoipProcessor( childProcessors, downloadDatabaseOnPipelineCreation, pipelineConfigById, pipelineHasGeoProcessorById ) ); } return pipelineHasGeoProcessorById.get(pipelineName); } } return false; }
Check if a processor is a pipeline processor containing at least a geoip processor. This method also updates pipelineHasGeoProcessorById with a result for any pipelines it looks at. @param processor Processor config. @param downloadDatabaseOnPipelineCreation Should the download_database_on_pipeline_creation of the geoip processor be true or false. @param pipelineConfigById A Map of pipeline id to PipelineConfiguration @param pipelineHasGeoProcessorById A Map of pipeline id to Boolean, indicating whether the pipeline references a geoip processor (true), does not reference a geoip processor (false), or we are currently trying to figure that out (null). @return true if a geoip processor is found in the processors of this processor if this processor is a pipeline processor.
java
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java
487
[ "processor", "downloadDatabaseOnPipelineCreation", "pipelineConfigById", "pipelineHasGeoProcessorById" ]
true
6
7.76
elastic/elasticsearch
75,680
javadoc
false
validate_rearrange_expressions
def validate_rearrange_expressions( left: ParsedExpression, right: ParsedExpression, axes_lengths: Mapping[str, int] ) -> None: """Perform expression validations that are specific to the `rearrange` operation. Args: left (ParsedExpression): left-hand side expression right (ParsedExpression): right-hand side expression axes_lengths (Mapping[str, int]): any additional length specifications for dimensions """ for length in axes_lengths.values(): if (length_type := type(length)) is not int: raise TypeError( f"rearrange axis lengths must be integers, got: {length_type}" ) if left.has_non_unitary_anonymous_axes or right.has_non_unitary_anonymous_axes: raise ValueError("rearrange only supports unnamed axes of size 1") difference = set.symmetric_difference(left.identifiers, right.identifiers) if len(difference) > 0: raise ValueError( f"Identifiers only on one side of rearrange expression (should be on both): {difference}" ) unmatched_axes = axes_lengths.keys() - left.identifiers if len(unmatched_axes) > 0: raise ValueError( f"Identifiers not found in rearrange expression: {unmatched_axes}" )
Perform expression validations that are specific to the `rearrange` operation. Args: left (ParsedExpression): left-hand side expression right (ParsedExpression): right-hand side expression axes_lengths (Mapping[str, int]): any additional length specifications for dimensions
python
functorch/einops/_parsing.py
249
[ "left", "right", "axes_lengths" ]
None
true
7
6.08
pytorch/pytorch
96,034
google
false
hasUndrained
public boolean hasUndrained() { for (TopicInfo topicInfo : topicInfoMap.values()) { for (Deque<ProducerBatch> deque : topicInfo.batches.values()) { synchronized (deque) { if (!deque.isEmpty()) return true; } } } return false; }
Check whether there are any batches which haven't been drained
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
784
[]
true
2
6.56
apache/kafka
31,560
javadoc
false
translate
def translate(self, table): """ Map all characters in the string through the given mapping table. This method is equivalent to the standard :meth:`str.translate` method for strings. It maps each character in the string to a new character according to the translation table provided. Unmapped characters are left unchanged, while characters mapped to None are removed. Parameters ---------- table : dict Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or None. Unmapped characters are left untouched. Characters mapped to None are deleted. :meth:`str.maketrans` is a helper function for making translation tables. Returns ------- Series or Index A new Series or Index with translated strings. See Also -------- Series.str.replace : Replace occurrences of pattern/regex in the Series with some other string. Index.str.replace : Replace occurrences of pattern/regex in the Index with some other string. Examples -------- >>> ser = pd.Series(["El niño", "Françoise"]) >>> mytable = str.maketrans({"ñ": "n", "ç": "c"}) >>> ser.str.translate(mytable) 0 El nino 1 Francoise dtype: str """ result = self._data.array._str_translate(table) dtype = object if self._data.dtype == "object" else None return self._wrap_result(result, dtype=dtype)
Map all characters in the string through the given mapping table. This method is equivalent to the standard :meth:`str.translate` method for strings. It maps each character in the string to a new character according to the translation table provided. Unmapped characters are left unchanged, while characters mapped to None are removed. Parameters ---------- table : dict Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or None. Unmapped characters are left untouched. Characters mapped to None are deleted. :meth:`str.maketrans` is a helper function for making translation tables. Returns ------- Series or Index A new Series or Index with translated strings. See Also -------- Series.str.replace : Replace occurrences of pattern/regex in the Series with some other string. Index.str.replace : Replace occurrences of pattern/regex in the Index with some other string. Examples -------- >>> ser = pd.Series(["El niño", "Françoise"]) >>> mytable = str.maketrans({"ñ": "n", "ç": "c"}) >>> ser.str.translate(mytable) 0 El nino 1 Francoise dtype: str
python
pandas/core/strings/accessor.py
2,577
[ "self", "table" ]
false
2
7.52
pandas-dev/pandas
47,362
numpy
false
getStartInstant
public Instant getStartInstant() { if (runningState == State.UNSTARTED) { throw new IllegalStateException("Stopwatch has not been started"); } return startInstant; }
Gets the Instant this StopWatch was started, between the current time and midnight, January 1, 1970 UTC. @return the Instant this StopWatch was started, between the current time and midnight, January 1, 1970 UTC. @throws IllegalStateException if this StopWatch has not been started. @since 3.16.0
java
src/main/java/org/apache/commons/lang3/time/StopWatch.java
477
[]
Instant
true
2
7.92
apache/commons-lang
2,896
javadoc
false
paramJavadocPattern
private Pattern paramJavadocPattern(String paramName) { String pattern = String.format("(?<=@param +%s).*?(?=([\r\n]+ *@)|$)", paramName); return Pattern.compile(pattern, Pattern.DOTALL); }
Return the {@link PrimitiveType} of the specified type or {@code null} if the type does not represent a valid wrapper type. @param typeMirror a type @return the primitive type or {@code null} if the type is not a wrapper type
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/TypeUtils.java
265
[ "paramName" ]
Pattern
true
1
6.64
spring-projects/spring-boot
79,428
javadoc
false
setBeanFactory
@Override public void setBeanFactory(BeanFactory beanFactory) throws BeansException { if (!(beanFactory instanceof ListableBeanFactory lbf)) { throw new FatalBeanException( "ServiceLocatorFactoryBean needs to run in a BeanFactory that is a ListableBeanFactory"); } this.beanFactory = lbf; }
Set mappings between service ids (passed into the service locator) and bean names (in the bean factory). Service ids that are not defined here will be treated as bean names as-is. <p>The empty string as service id key defines the mapping for {@code null} and empty string, and for factory methods without parameter. If not defined, a single matching bean will be retrieved from the bean factory. @param serviceMappings mappings between service ids and bean names, with service ids as keys as bean names as values
java
spring-beans/src/main/java/org/springframework/beans/factory/config/ServiceLocatorFactoryBean.java
247
[ "beanFactory" ]
void
true
2
6.88
spring-projects/spring-framework
59,386
javadoc
false
getMantissa
private static String getMantissa(final String str, final int stopPos) { final char firstChar = str.charAt(0); final boolean hasSign = isSign(firstChar); final int length = str.length(); if (length <= (hasSign ? 1 : 0) || length < stopPos) { throw new NumberFormatException(str + " is not a valid number."); } return hasSign ? str.substring(1, stopPos) : str.substring(0, stopPos); }
Utility method for {@link #createNumber(String)}. <p> Returns mantissa of the given number. </p> @param str the string representation of the number. @param stopPos the position of the exponent or decimal point. @return mantissa of the given number. @throws NumberFormatException if no mantissa can be retrieved.
java
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
498
[ "str", "stopPos" ]
String
true
5
7.92
apache/commons-lang
2,896
javadoc
false
doesnt_use_pandas_warnings
def doesnt_use_pandas_warnings(file_obj: IO[str]) -> Iterable[tuple[int, str]]: """ Checking that pandas-specific warnings are used for deprecations. Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of the warning. msg : str Explanation of the error. """ contents = file_obj.read() lines = contents.split("\n") tree = ast.parse(contents) for node in ast.walk(tree): if not isinstance(node, ast.Call): continue if isinstance(node.func, ast.Attribute) and isinstance( node.func.value, ast.Name ): # Check for `warnings.warn`. if node.func.value.id != "warnings" or node.func.attr != "warn": continue elif isinstance(node.func, ast.Name): # Check for just `warn` when using `from warnings import warn`. if node.func.id != "warn": continue if any( "# pdlint: ignore[warning_class]" in lines[k] for k in range(node.lineno - 1, node.end_lineno + 1) ): continue values = [arg.id for arg in node.args if isinstance(arg, ast.Name)] + [ kw.value.id for kw in node.keywords if kw.arg == "category" ] for value in values: matches = re.match(DEPRECATION_WARNINGS_PATTERN, value) if matches is not None: yield ( node.lineno, f"Don't use {matches[0]}, use a pandas-specific warning in " f"pd.errors instead. You can add " f"`# pdlint: ignore[warning_class]` to override.", )
Checking that pandas-specific warnings are used for deprecations. Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of the warning. msg : str Explanation of the error.
python
scripts/validate_unwanted_patterns.py
347
[ "file_obj" ]
Iterable[tuple[int, str]]
true
12
6.88
pandas-dev/pandas
47,362
numpy
false
invoke
@Nullable Object invoke() throws ThrowableWrapper;
Invoke the cache operation defined by this instance. Wraps any exception that is thrown during the invocation in a {@link ThrowableWrapper}. @return the result of the operation @throws ThrowableWrapper if an error occurred while invoking the operation
java
spring-context/src/main/java/org/springframework/cache/interceptor/CacheOperationInvoker.java
41
[]
Object
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
configureDefaultApiTimeoutMs
private int configureDefaultApiTimeoutMs(AdminClientConfig config) { int requestTimeoutMs = config.getInt(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG); int defaultApiTimeoutMs = config.getInt(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); if (defaultApiTimeoutMs < requestTimeoutMs) { if (config.originals().containsKey(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)) { throw new ConfigException("The specified value of " + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG + " must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + "."); } else { log.warn("Overriding the default value for {} ({}) with the explicitly configured request timeout {}", AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, this.defaultApiTimeoutMs, requestTimeoutMs); return requestTimeoutMs; } } return defaultApiTimeoutMs; }
If a default.api.timeout.ms has been explicitly specified, raise an error if it conflicts with request.timeout.ms. If no default.api.timeout.ms has been configured, then set its value as the max of the default and request.timeout.ms. Also we should probably log a warning. Otherwise, use the provided values for both configurations. @param config The configuration
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
649
[ "config" ]
true
3
6.72
apache/kafka
31,560
javadoc
false
getProviderName
function getProviderName(child: any): string[] { const providers = child?.providers || []; const names = providers.map((p: any) => p.name); return names || []; }
Gets the set of currently active Route configuration objects from the router state. This function synchronously reads the current router state without waiting for navigation events. @param router - The Angular Router instance @returns A Set containing all Route configuration objects that are currently active @example ```ts const activeRoutes = getActiveRouteConfigs(router); // activeRoutes is a Set<Route> containing all currently active route configurations ```
typescript
devtools/projects/ng-devtools-backend/src/lib/router-tree.ts
100
[ "child" ]
true
3
8.88
angular/angular
99,544
jsdoc
false
getReferencesForConfigName
private Deque<StandardConfigDataReference> getReferencesForConfigName(String name, ConfigDataLocation configDataLocation, String directory, @Nullable String profile) { Deque<StandardConfigDataReference> references = new ArrayDeque<>(); for (PropertySourceLoader propertySourceLoader : this.propertySourceLoaders) { for (String extension : propertySourceLoader.getFileExtensions()) { StandardConfigDataReference reference = new StandardConfigDataReference(configDataLocation, directory, directory + name, profile, extension, propertySourceLoader); if (!references.contains(reference)) { references.addFirst(reference); } } } return references; }
Create a new {@link StandardConfigDataLocationResolver} instance. @param logFactory the factory for loggers to use @param binder a binder backed by the initial {@link Environment} @param resourceLoader a {@link ResourceLoader} used to load resources
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataLocationResolver.java
200
[ "name", "configDataLocation", "directory", "profile" ]
true
2
6.08
spring-projects/spring-boot
79,428
javadoc
false
make_friedman2
def make_friedman2(n_samples=100, *, noise=0.0, random_state=None): """Generate the "Friedman #2" regression problem. This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, 4) The input samples. y : ndarray of shape (n_samples,) The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. Examples -------- >>> from sklearn.datasets import make_friedman2 >>> X, y = make_friedman2(random_state=42) >>> X.shape (100, 4) >>> y.shape (100,) >>> list(y[:3]) [np.float64(1229.4), np.float64(27.0), np.float64(65.6)] """ generator = check_random_state(random_state) X = generator.uniform(size=(n_samples, 4)) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = ( X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2 ) ** 0.5 + noise * generator.standard_normal(size=(n_samples)) return X, y
Generate the "Friedman #2" regression problem. This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Read more in the :ref:`User Guide <sample_generators>`. Parameters ---------- n_samples : int, default=100 The number of samples. noise : float, default=0.0 The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, default=None Determines random number generation for dataset noise. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : ndarray of shape (n_samples, 4) The input samples. y : ndarray of shape (n_samples,) The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. Examples -------- >>> from sklearn.datasets import make_friedman2 >>> X, y = make_friedman2(random_state=42) >>> X.shape (100, 4) >>> y.shape (100,) >>> list(y[:3]) [np.float64(1229.4), np.float64(27.0), np.float64(65.6)]
python
sklearn/datasets/_samples_generator.py
1,253
[ "n_samples", "noise", "random_state" ]
false
1
6.4
scikit-learn/scikit-learn
64,340
numpy
false
of
public static <E> Stream<E> of(final Iterator<E> iterator) { return iterator == null ? Stream.empty() : StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false); }
Creates a sequential stream on the given Iterator. @param <E> the type of elements in the Iterator. @param iterator the Iterator to stream or null. @return a new Stream or {@link Stream#empty()} if the Iterator is null. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/stream/Streams.java
711
[ "iterator" ]
true
2
8.16
apache/commons-lang
2,896
javadoc
false
preserve_node_ordering
def preserve_node_ordering( graph: fx.Graph, additional_deps_map: dict[fx.Node, OrderedSet[fx.Node]], verbose: bool = False, ) -> None: """ Preserve node ordering using control_deps HOP with subgraph. This function wraps operations with control_deps that: 1. Makes additional dependencies explicit (first argument) 2. Creates a subgraph internally to preserve the exact original operation 3. Preserves the original node names Args: graph: The FX graph to modify additional_deps_map: Mapping from dependent nodes to their dependencies verbose: If True, print debug information """ if not additional_deps_map: return # Track replacements so we can update dependencies replacements: dict[fx.Node, fx.Node] = {} # Process each node that needs additional dependencies for dependent_node, dep_nodes in additional_deps_map.items(): assert dependent_node.op == "call_function", dependent_node.op original_name = dependent_node.name original_args = dependent_node.args original_kwargs = dependent_node.kwargs original_meta = dependent_node.meta.copy() updated_dep_nodes = [replacements.get(dep, dep) for dep in dep_nodes] # Create a subgraph that preserves the exact original operation subgraph_module = _create_subgraph_for_node(graph, dependent_node) owning_mod = graph.owning_module assert owning_mod is not None subgraph_attr_name = get_subgraph_name(owning_mod, original_name) setattr(graph.owning_module, subgraph_attr_name, subgraph_module) # Create control_deps call with: # 1. Additional dependencies as first arg (explicit) # 2. Subgraph via get_attr (like b2b gemm pass) # 3. 
Original arguments (only fx.Node args and kwargs are passed) with graph.inserting_before(dependent_node): # Create get_attr node for the subgraph get_subgraph = graph.get_attr(subgraph_attr_name) # add additional args node_args = [a for a in original_args if isinstance(a, fx.Node)] for value in original_kwargs.values(): if isinstance(value, fx.Node): node_args.append(value) # Create with temporary name first ordered_node = graph.call_function( control_deps, args=( tuple(updated_dep_nodes), # additional_deps get_subgraph, # subgraph via get_attr (like b2b gemm) *node_args, # original node arguments (from both args and kwargs) ), kwargs={}, name=f"__temp_{original_name}", # Temporary name to avoid conflict ) # Copy metadata from original node ordered_node.meta = original_meta # this will be constrained on the target node in subgraph if it exists ordered_node.meta.pop("eager_input_vals", None) # Replace all uses of the original node with the ordered version dependent_node.replace_all_uses_with(ordered_node) # Remove the original node from the graph graph.erase_node(dependent_node) # Now rename the ordered node to the original name ordered_node.name = original_name # PRESERVE ORIGINAL NAME # Track the replacement for future dependencies replacements[dependent_node] = ordered_node
Preserve node ordering using control_deps HOP with subgraph. This function wraps operations with control_deps that: 1. Makes additional dependencies explicit (first argument) 2. Creates a subgraph internally to preserve the exact original operation 3. Preserves the original node names Args: graph: The FX graph to modify additional_deps_map: Mapping from dependent nodes to their dependencies verbose: If True, print debug information
python
torch/_inductor/fx_passes/control_dependencies.py
79
[ "graph", "additional_deps_map", "verbose" ]
None
true
5
6.32
pytorch/pytorch
96,034
google
false
reset
final void reset() { numBuckets = 0; cachedCountsSum = null; }
@return the position of the first bucket of this set of buckets within {@link #bucketCounts} and {@link #bucketIndices}.
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/FixedCapacityExponentialHistogram.java
270
[]
void
true
1
6.64
elastic/elasticsearch
75,680
javadoc
false
responseDataToLogString
private String responseDataToLogString(Set<TopicPartition> topicPartitions) { if (!log.isTraceEnabled()) { int implied = sessionPartitions.size() - topicPartitions.size(); if (implied > 0) { return String.format(" with %d response partition(s), %d implied partition(s)", topicPartitions.size(), implied); } else { return String.format(" with %d response partition(s)", topicPartitions.size()); } } StringBuilder bld = new StringBuilder(); bld.append(" with response=("). append(topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))). append(")"); String prefix = ", implied=("; String suffix = ""; for (TopicPartition partition : sessionPartitions.keySet()) { if (!topicPartitions.contains(partition)) { bld.append(prefix); bld.append(partition); prefix = ", "; suffix = ")"; } } bld.append(suffix); return bld.toString(); }
Create a string describing the partitions in a FetchResponse. @param topicPartitions The topicPartitions from the FetchResponse. @return The string to log.
java
clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
490
[ "topicPartitions" ]
String
true
4
7.6
apache/kafka
31,560
javadoc
false
visitCatchClause
function visitCatchClause(node: CatchClause): CatchClause { const ancestorFacts = enterSubtree(HierarchyFacts.BlockScopeExcludes, HierarchyFacts.BlockScopeIncludes); let updated: CatchClause; Debug.assert(!!node.variableDeclaration, "Catch clause variable should always be present when downleveling ES2015."); if (isBindingPattern(node.variableDeclaration.name)) { const temp = factory.createTempVariable(/*recordTempVariable*/ undefined); const newVariableDeclaration = factory.createVariableDeclaration(temp); setTextRange(newVariableDeclaration, node.variableDeclaration); const vars = flattenDestructuringBinding( node.variableDeclaration, visitor, context, FlattenLevel.All, temp, ); const list = factory.createVariableDeclarationList(vars); setTextRange(list, node.variableDeclaration); const destructure = factory.createVariableStatement(/*modifiers*/ undefined, list); updated = factory.updateCatchClause(node, newVariableDeclaration, addStatementToStartOfBlock(node.block, destructure)); } else { updated = visitEachChild(node, visitor, context); } exitSubtree(ancestorFacts, HierarchyFacts.None, HierarchyFacts.None); return updated; }
Transforms a MethodDeclaration of an ObjectLiteralExpression into an expression. @param node The ObjectLiteralExpression that contains the MethodDeclaration. @param method The MethodDeclaration node. @param receiver The receiver for the assignment.
typescript
src/compiler/transformers/es2015.ts
4,195
[ "node" ]
true
3
6.24
microsoft/TypeScript
107,154
jsdoc
false
initialize
@Override public void initialize(LoggingInitializationContext initializationContext, @Nullable String configLocation, @Nullable LogFile logFile) { LoggerContext loggerContext = getLoggerContext(); if (isAlreadyInitialized(loggerContext)) { return; } StatusConsoleListener listener = new StatusConsoleListener(Level.WARN); StatusLogger.getLogger().registerListener(listener); loggerContext.putObject(STATUS_LISTENER_KEY, listener); Environment environment = initializationContext.getEnvironment(); if (environment != null) { loggerContext.putObject(ENVIRONMENT_KEY, environment); Log4J2LoggingSystem.propertySource.setEnvironment(environment); PropertiesUtil.getProperties().addPropertySource(Log4J2LoggingSystem.propertySource); } loggerContext.getConfiguration().removeFilter(FILTER); super.initialize(initializationContext, configLocation, logFile); markAsInitialized(loggerContext); }
Return the configuration location. The result may be: <ul> <li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li> <li>A file path: if provided explicitly by the user</li> <li>A URI: if loaded from the classpath default or a custom location</li> </ul> @param configuration the source configuration @return the config location or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
237
[ "initializationContext", "configLocation", "logFile" ]
void
true
3
7.28
spring-projects/spring-boot
79,428
javadoc
false
_gotitem
def _gotitem(self, key, ndim: int, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ grouper = self._grouper if subset is None: subset = self.obj if key is not None: subset = subset[key] else: # reached via Apply.agg_dict_like with selection=None and ndim=1 assert subset.ndim == 1 if ndim == 1: assert subset.ndim == 1 grouped = get_groupby( subset, by=None, grouper=grouper, group_keys=self.group_keys ) return grouped
Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on
python
pandas/core/resample.py
502
[ "self", "key", "ndim", "subset" ]
true
5
6.88
pandas-dev/pandas
47,362
numpy
false
destinationBrokerId
default OptionalInt destinationBrokerId() { return OptionalInt.empty(); }
Get the target broker ID that a request is intended for or empty if the request can be sent to any broker. Note that if the destination broker ID is present in the {@link ApiRequestScope} returned by {@link AdminApiLookupStrategy#lookupScope(Object)}, then no lookup will be attempted. @return optional broker ID
java
clients/src/main/java/org/apache/kafka/clients/admin/internals/ApiRequestScope.java
42
[]
OptionalInt
true
1
6.48
apache/kafka
31,560
javadoc
false
describeLogDirs
DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options);
Query the information of all log directories on the given set of brokers <p> This operation is supported by brokers with version 1.0.0 or higher. @param brokers A list of brokers @param options The options to use when querying log dir info @return The DescribeLogDirsResult
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
594
[ "brokers", "options" ]
DescribeLogDirsResult
true
1
6.32
apache/kafka
31,560
javadoc
false
is_terminal_support_colors
def is_terminal_support_colors() -> bool: """Try to determine if the current terminal supports colors.""" if sys.platform == "win32": return False if not is_tty(): return False if "COLORTERM" in os.environ: return True term = os.environ.get("TERM", "dumb").lower() if term in ("xterm", "linux") or "color" in term: return True return False
Try to determine if the current terminal supports colors.
python
airflow-core/src/airflow/utils/platform.py
41
[]
bool
true
6
6.88
apache/airflow
43,597
unknown
false