function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
getAllInterfaces
/**
 * Collects all interfaces implemented by the given class and its superclasses,
 * directly or transitively, into the supplied set.
 *
 * @param cls the class to inspect, may be {@code null}
 * @param interfacesFound accumulator for every interface discovered
 */
private static void getAllInterfaces(Class<?> cls, final HashSet<Class<?>> interfacesFound) {
    // Walk up the superclass chain, recording each interface exactly once.
    for (Class<?> current = cls; current != null; current = current.getSuperclass()) {
        for (final Class<?> candidate : current.getInterfaces()) {
            // add() returns false for duplicates, so each interface is recursed into at most once.
            if (interfacesFound.add(candidate)) {
                getAllInterfaces(candidate, interfacesFound);
            }
        }
    }
}
Gets the interfaces for the specified class. @param cls the class to look up, may be {@code null}. @param interfacesFound the {@link Set} of interfaces for the class.
java
src/main/java/org/apache/commons/lang3/ClassUtils.java
386
[ "cls", "interfacesFound" ]
void
true
3
6.88
apache/commons-lang
2,896
javadoc
false
abbreviateMiddle
/**
 * Abbreviates a String to the given length by replacing its middle characters
 * with the supplied replacement String. Returns the input unchanged unless all
 * abbreviation preconditions hold.
 *
 * @param str the String to abbreviate, may be null
 * @param middle the String to replace the middle characters with, may be null
 * @param length the length to abbreviate {@code str} to
 * @return the abbreviated String, or the original when abbreviation does not apply
 */
public static String abbreviateMiddle(final String str, final String middle, final int length) {
    // Preconditions: non-empty inputs, target shorter than the source, and
    // room for the replacement plus at least one character on each side.
    final boolean applicable = !isAnyEmpty(str, middle)
            && length < str.length()
            && length >= middle.length() + 2;
    if (!applicable) {
        return str;
    }
    final int kept = length - middle.length();      // characters of str retained overall
    final int headLength = kept / 2 + kept % 2;     // the head keeps the odd extra character
    final int tailStart = str.length() - kept / 2;
    return str.substring(0, headLength) + middle + str.substring(tailStart);
}
Abbreviates a String to the length passed, replacing the middle characters with the supplied replacement String. <p> This abbreviation only occurs if the following criteria is met: </p> <ul> <li>Neither the String for abbreviation nor the replacement String are null or empty</li> <li>The length to truncate to is less than the length of the supplied String</li> <li>The length to truncate to is greater than 0</li> <li>The abbreviated String will have enough room for the length supplied replacement String and the first and last characters of the supplied String for abbreviation</li> </ul> <p> Otherwise, the returned String will be the same as the supplied String for abbreviation. </p> <pre> StringUtils.abbreviateMiddle(null, null, 0) = null StringUtils.abbreviateMiddle("abc", null, 0) = "abc" StringUtils.abbreviateMiddle("abc", ".", 0) = "abc" StringUtils.abbreviateMiddle("abc", ".", 3) = "abc" StringUtils.abbreviateMiddle("abcdef", ".", 4) = "ab.f" </pre> @param str the String to abbreviate, may be null. @param middle the String to replace the middle characters with, may be null. @param length the length to abbreviate {@code str} to. @return the abbreviated String if the above criteria is met, or the original String supplied for abbreviation. @since 2.5
java
src/main/java/org/apache/commons/lang3/StringUtils.java
419
[ "str", "middle", "length" ]
String
true
4
7.92
apache/commons-lang
2,896
javadoc
false
newConnections
/**
 * Returns a fresh {@link NetworkConnections} implementation matching this
 * network's directedness and parallel-edge policy.
 */
private NetworkConnections<N, E> newConnections() {
    if (isDirected()) {
        // Directed networks: multi-connections only when parallel edges are allowed.
        return allowsParallelEdges()
            ? DirectedMultiNetworkConnections.<N, E>of()
            : DirectedNetworkConnections.<N, E>of();
    }
    // Undirected networks follow the same parallel-edge split.
    return allowsParallelEdges()
        ? UndirectedMultiNetworkConnections.<N, E>of()
        : UndirectedNetworkConnections.<N, E>of();
}
Returns a new {@link NetworkConnections} instance appropriate for this network: directed vs. undirected, and multi-connection vs. single-connection depending on whether parallel edges are allowed.
java
android/guava/src/com/google/common/graph/StandardMutableNetwork.java
166
[]
true
4
6.08
google/guava
51,352
javadoc
false
masked_invalid
def masked_invalid(a, copy=True): """ Mask an array where invalid values occur (NaNs or infs). This function is a shortcut to ``masked_where``, with `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. Only applies to arrays with a dtype where NaNs or infs make sense (i.e. floating point types), but accepts any array_like object. See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a array([ 0., 1., nan, inf, 4.]) >>> ma.masked_invalid(a) masked_array(data=[0.0, 1.0, --, --, 4.0], mask=[False, False, True, True, False], fill_value=1e+20) """ a = np.array(a, copy=None, subok=True) res = masked_where(~(np.isfinite(a)), a, copy=copy) # masked_invalid previously never returned nomask as a mask and doing so # threw off matplotlib (gh-22842). So use shrink=False: if res._mask is nomask: res._mask = make_mask_none(res.shape, res.dtype) return res
Mask an array where invalid values occur (NaNs or infs). This function is a shortcut to ``masked_where``, with `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. Only applies to arrays with a dtype where NaNs or infs make sense (i.e. floating point types), but accepts any array_like object. See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5, dtype=np.float64) >>> a[2] = np.nan >>> a[3] = np.inf >>> a array([ 0., 1., nan, inf, 4.]) >>> ma.masked_invalid(a) masked_array(data=[0.0, 1.0, --, --, 4.0], mask=[False, False, True, True, False], fill_value=1e+20)
python
numpy/ma/core.py
2,389
[ "a", "copy" ]
false
2
7.84
numpy/numpy
31,054
unknown
false
getProxyImports
/**
 * Return the imports to use when proxy-based advice is selected, appending the
 * JCache configuration only when both the JSR-107 API and an implementation
 * are present on the classpath.
 */
private String[] getProxyImports() {
    final List<String> imports = new ArrayList<>(3);
    imports.add(AutoProxyRegistrar.class.getName());
    imports.add(ProxyCachingConfiguration.class.getName());
    // Optional JSR-107 support: both the API and an implementation must be present.
    if (JSR_107_PRESENT && JCACHE_IMPL_PRESENT) {
        imports.add(PROXY_JCACHE_CONFIGURATION_CLASS);
    }
    return StringUtils.toStringArray(imports);
}
Return the imports to use if the {@link AdviceMode} is set to {@link AdviceMode#PROXY}. <p>Take care of adding the necessary JSR-107 import if it is available.
java
spring-context/src/main/java/org/springframework/cache/annotation/CachingConfigurationSelector.java
81
[]
true
3
6.56
spring-projects/spring-framework
59,386
javadoc
false
get_versions
def get_versions() -> dict:
    """Get version information or return default if unable to do so."""
    # This file lives at ROOT/VERSIONFILE_SOURCE, so when __file__ is
    # available the repository root can be recovered by walking upward.
    # Frozen interpreters (py2exe/bbfreeze/non-CPython) may lack __file__,
    # in which case only expanded git keywords can be used.
    cfg = get_config()
    verbose = cfg.verbose

    def _unknown(error: str) -> dict:
        # Shared fallback payload for every failure mode.
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": error,
            "date": None,
        }

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is this file's path relative to the source root;
        # strip one directory per path component to locate the root.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return _unknown("unable to find root of source tree")

    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return _unknown("unable to compute version")
Get version information or return default if unable to do so.
python
pandas/_version.py
643
[]
dict
true
3
7.04
pandas-dev/pandas
47,362
unknown
false
getLevelConfiguration
/**
 * Return the level configuration, considering inherited loggers.
 *
 * @return the level configuration (never {@code null})
 * @since 2.7.13
 */
public LevelConfiguration getLevelConfiguration() {
    // Inherited-scope configuration must always resolve; a null here is an internal error.
    LevelConfiguration configuration = getLevelConfiguration(ConfigurationScope.INHERITED);
    Assert.state(configuration != null, "Inherited level configuration must not be null");
    return configuration;
}
Return the level configuration, considering inherited loggers. @return the level configuration @since 2.7.13
java
core/spring-boot/src/main/java/org/springframework/boot/logging/LoggerConfiguration.java
103
[]
LevelConfiguration
true
1
6.4
spring-projects/spring-boot
79,428
javadoc
false
toString
/**
 * Gets a string version of this parser for debugging, showing the pattern,
 * locale, and time zone it was configured with.
 *
 * @return a debugging string
 */
@Override
public String toString() {
    return new StringBuilder("FastDateParser[")
        .append(pattern)
        .append(", ")
        .append(locale)
        .append(", ")
        .append(timeZone.getID())
        .append(']')
        .toString();
}
Gets a string version of this formatter. @return a debugging string
java
src/main/java/org/apache/commons/lang3/time/FastDateParser.java
1,114
[]
String
true
1
6.96
apache/commons-lang
2,896
javadoc
false
resolveSecureSetting
/**
 * Resolves a secure (keystore-backed) setting value, falling back to the
 * supplied default when the setting is absent or empty.
 *
 * @param key the setting key, relative to this loader's prefix
 * @param defaultValue the value to return when the secure setting is unset or empty
 * @return the secure setting's characters, or {@code defaultValue}
 * @throws SslConfigException if reading the secure setting fails with a checked exception
 */
private char[] resolveSecureSetting(String key, char[] defaultValue) {
    try {
        char[] setting = getSecureSetting(expandSettingKey(key));
        // A missing value and an empty value are treated identically: use the default.
        if (setting == null || setting.length == 0) {
            return defaultValue;
        }
        return setting;
    } catch (RuntimeException e) {
        // Propagate unchecked failures untouched so callers see the original type.
        throw e;
    } catch (Exception e) {
        throw new SslConfigException("cannot retrieve secure setting [" + settingPrefix + key + "]", e);
    }
}
Resolve all necessary configuration settings, and load a {@link SslConfiguration}. @param basePath The base path to use for any settings that represent file paths. Typically points to the Elasticsearch configuration directory. @throws SslConfigException For any problems with the configuration, or with loading the required SSL classes.
java
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
462
[ "key", "defaultValue" ]
true
5
6.24
elastic/elasticsearch
75,680
javadoc
false
create_ingestion
def create_ingestion(
    self,
    data_set_id: str,
    ingestion_id: str,
    ingestion_type: str,
    wait_for_completion: bool = True,
    check_interval: int = 30,
    aws_account_id: str | None = None,
) -> dict:
    """
    Create and start a new SPICE ingestion for a dataset; refresh the SPICE datasets.

    .. seealso::
        - :external+boto3:py:meth:`QuickSight.Client.create_ingestion`

    :param data_set_id: ID of the dataset to ingest.
    :param ingestion_id: ID to assign to the new ingestion.
    :param ingestion_type: Type of ingestion: "INCREMENTAL_REFRESH"|"FULL_REFRESH"
    :param wait_for_completion: whether to block until the ingestion finishes.
    :param check_interval: seconds between status polls while waiting.
    :param aws_account_id: An AWS Account ID; when ``None``, the account
        associated with this hook is used.
    :return: the ``create_ingestion`` API response, describing the created
        ingestion (Ingestion ARN, HTTP status, ingestion ID and status).
    """
    if not aws_account_id:
        aws_account_id = self.account_id
    self.log.info("Creating QuickSight Ingestion for data set id %s.", data_set_id)
    try:
        response = self.conn.create_ingestion(
            DataSetId=data_set_id,
            IngestionId=ingestion_id,
            IngestionType=ingestion_type,
            AwsAccountId=aws_account_id,
        )
        if wait_for_completion:
            # Block until the ingestion reaches the COMPLETED state.
            self.wait_for_state(
                aws_account_id=aws_account_id,
                data_set_id=data_set_id,
                ingestion_id=ingestion_id,
                target_state={"COMPLETED"},
                check_interval=check_interval,
            )
        return response
    except Exception as general_error:
        self.log.error("Failed to run Amazon QuickSight create_ingestion API, error: %s", general_error)
        raise
Create and start a new SPICE ingestion for a dataset; refresh the SPICE datasets. .. seealso:: - :external+boto3:py:meth:`QuickSight.Client.create_ingestion` :param data_set_id: ID of the dataset used in the ingestion. :param ingestion_id: ID for the ingestion. :param ingestion_type: Type of ingestion: "INCREMENTAL_REFRESH"|"FULL_REFRESH" :param wait_for_completion: if the program should keep running until job finishes :param check_interval: the time interval in seconds which the operator will check the status of QuickSight Ingestion :param aws_account_id: An AWS Account ID, if set to ``None`` then use associated AWS Account ID. :return: Returns descriptive information about the created data ingestion having Ingestion ARN, HTTP status, ingestion ID and ingestion status.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/quicksight.py
47
[ "self", "data_set_id", "ingestion_id", "ingestion_type", "wait_for_completion", "check_interval", "aws_account_id" ]
dict
true
3
7.44
apache/airflow
43,597
sphinx
false
checkStrictModeDeleteExpression
function checkStrictModeDeleteExpression(node: DeleteExpression) { // Grammar checking if (inStrictMode && node.expression.kind === SyntaxKind.Identifier) { // When a delete operator occurs within strict mode code, a SyntaxError is thrown if its // UnaryExpression is a direct reference to a variable, function argument, or function name const span = getErrorSpanForNode(file, node.expression); file.bindDiagnostics.push(createFileDiagnostic(file, span.start, span.length, Diagnostics.delete_cannot_be_called_on_an_identifier_in_strict_mode)); } }
Performs grammar checking for a {@code delete} expression. When a delete operator occurs within strict mode code, a SyntaxError diagnostic is reported if its UnaryExpression is a direct reference to a variable, function argument, or function name. @param node - The delete expression to check.
typescript
src/compiler/binder.ts
2,633
[ "node" ]
false
3
6.08
microsoft/TypeScript
107,154
jsdoc
false
escapeJson
/**
 * Escapes the characters in a {@link String} using JSON String rules.
 *
 * <p>Deals with quotes and control characters (tab, backslash, CR, FF, etc.);
 * unlike Java string escaping, JSON also escapes the forward slash ({@code /}).
 * See RFC 4627 for details.</p>
 *
 * @param input String to escape values in, may be null
 * @return String with escaped values, {@code null} if null string input
 * @since 3.2
 */
public static final String escapeJson(final String input) {
    return ESCAPE_JSON.translate(input);
}
Escapes the characters in a {@link String} using Json String rules. <p>Escapes any values it finds into their Json String form. Deals correctly with quotes and control-chars (tab, backslash, cr, ff, etc.) </p> <p>So a tab becomes the characters {@code '\\'} and {@code 't'}.</p> <p>The only difference between Java strings and Json strings is that in Json, forward-slash (/) is escaped.</p> <p>See https://www.ietf.org/rfc/rfc4627.txt for further details.</p> <p>Example:</p> <pre> input string: He didn't say, "Stop!" output string: He didn't say, \"Stop!\" </pre> @param input String to escape values in, may be null @return String with escaped values, {@code null} if null string input @since 3.2
java
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
550
[ "input" ]
String
true
1
6.96
apache/commons-lang
2,896
javadoc
false
empirical_covariance
def empirical_covariance(X, *, assume_centered=False):
    """Compute the Maximum likelihood covariance estimator.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If `True`, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero. If `False`, data will be centered before computation.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        Empirical covariance (Maximum Likelihood Estimator).

    Examples
    --------
    >>> from sklearn.covariance import empirical_covariance
    >>> X = [[1,1,1],[1,1,1],[1,1,1],
    ...      [0,0,0],[0,0,0],[0,0,0]]
    >>> empirical_covariance(X)
    array([[0.25, 0.25, 0.25],
           [0.25, 0.25, 0.25],
           [0.25, 0.25, 0.25]])
    """
    X = check_array(X, ensure_2d=False, ensure_all_finite=False)
    # A 1-d input is treated as a single sample of n features.
    if X.ndim == 1:
        X = X.reshape(1, -1)
    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    if assume_centered:
        # MLE for centered data: X^T X / n.
        covariance = X.T @ X / X.shape[0]
    else:
        # bias=1 normalizes by n (the MLE) rather than n - 1.
        covariance = np.cov(X.T, bias=1)
    # np.cov collapses the single-feature case to a 0-d array; restore 2-d.
    return np.atleast_2d(covariance)
Compute the Maximum likelihood covariance estimator. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data from which to compute the covariance estimate. assume_centered : bool, default=False If `True`, data will not be centered before computation. Useful when working with data whose mean is almost, but not exactly zero. If `False`, data will be centered before computation. Returns ------- covariance : ndarray of shape (n_features, n_features) Empirical covariance (Maximum Likelihood Estimator). Examples -------- >>> from sklearn.covariance import empirical_covariance >>> X = [[1,1,1],[1,1,1],[1,1,1], ... [0,0,0],[0,0,0],[0,0,0]] >>> empirical_covariance(X) array([[0.25, 0.25, 0.25], [0.25, 0.25, 0.25], [0.25, 0.25, 0.25]])
python
sklearn/covariance/_empirical_covariance.py
65
[ "X", "assume_centered" ]
false
6
7.04
scikit-learn/scikit-learn
64,340
numpy
false
listClientMetricsResources
/**
 * Lists the client metrics configuration resources available in the cluster,
 * by issuing a {@code ListConfigResources} request restricted to the
 * CLIENT_METRICS resource type against the least-loaded node.
 *
 * @param options The options to use when listing the client metrics resources.
 * @return The result wrapping a future that completes with the listings.
 */
@SuppressWarnings({"deprecation", "removal"})
@Override
public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) {
    final long now = time.milliseconds();
    final KafkaFutureImpl<Collection<ClientMetricsResourceListing>> future = new KafkaFutureImpl<>();
    runnable.call(new Call("listClientMetricsResources", calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {

        @Override
        ListConfigResourcesRequest.Builder createRequest(int timeoutMs) {
            // Ask the broker only for CLIENT_METRICS config resources.
            return new ListConfigResourcesRequest.Builder(
                new ListConfigResourcesRequestData()
                    .setResourceTypes(List.of(ConfigResource.Type.CLIENT_METRICS.id()))
            );
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            ListConfigResourcesResponse response = (ListConfigResourcesResponse) abstractResponse;
            if (response.error().isFailure()) {
                future.completeExceptionally(response.error().exception());
            } else {
                // Defensively filter on resource type before mapping to listings.
                future.complete(response
                    .data()
                    .configResources()
                    .stream()
                    .filter(entry -> entry.resourceType() == ConfigResource.Type.CLIENT_METRICS.id())
                    .map(entry -> new ClientMetricsResourceListing(entry.resourceName()))
                    .collect(Collectors.toList()));
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    }, now);
    return new ListClientMetricsResourcesResult(future);
}
Lists the client metrics configuration resources available in the cluster. @param options The options to use when listing the client metrics resources. @return The {@link ListClientMetricsResourcesResult} wrapping a future that completes with the client metrics resource listings.
java
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
4,921
[ "options" ]
ListClientMetricsResourcesResult
true
2
7.44
apache/kafka
31,560
javadoc
false
_get_store_line
def _get_store_line( self, value: Union[str, CppCSEVariable], var: str, index: sympy.Expr, dtype: torch.dtype, accu_store: bool = False, ): """ Get a store line buffer that stores `value` into `var` at `index` of `dtype`. It handles both contiguous and non-contiguous store cases. :param value: Vectorized type templaterized on `dtype`. :param var: buffer to store into. :index: index into the `var`. """ # when value's type is str (e.g., welford reduction), caller should make sure # it is a vector assert isinstance(value, str) or ( isinstance(value, CppCSEVariable) and value.is_vec ), value tiling_var = self.itervars[self.tiling_idx] var_expr = f"{var} + {cexpr_index(index)}" stride = self._try_get_const_stride(index, tiling_var) code = IndentedBuffer() if stride == 1: if accu_store: load = ( f"{self._get_vec_type(dtype)}::loadu({var_expr})" if dtype == torch.float and self.tail_size is None else f"{self._get_vec_type(dtype)}::loadu({var_expr}, {cexpr_index(self.num_elems)})" ) value = f"({value} + {load})" if dtype == torch.float and self.tail_size is None: code.writeline(f"{value}.store({var_expr});") else: code.writeline( f"{value}.store({var_expr}, {cexpr_index(self.num_elems)});" ) else: self._load_or_store_non_contiguous( var, index, dtype, buffer=code, store_value=value, accu_store=accu_store ) return code
Get a store line buffer that stores `value` into `var` at `index` of `dtype`. It handles both contiguous and non-contiguous store cases. :param value: Vectorized type templaterized on `dtype`. :param var: buffer to store into. :index: index into the `var`.
python
torch/_inductor/codegen/cpp.py
2,906
[ "self", "value", "var", "index", "dtype", "accu_store" ]
true
11
7.2
pytorch/pytorch
96,034
sphinx
false
add_backward_reload_stream_ops
def add_backward_reload_stream_ops(graph: fx.Graph) -> None:
    """
    Add stream operations for backward pass GPU reloading.

    Pattern: fork → wait_stream → device_put → record_event → join → wait_event

    This ensures that:
    1. Reloading doesn't start prematurely (fork → wait_stream)
    2. Reloading happens on a separate stream (device_put)
    3. First use waits for reload completion (record_event → join → wait_event)

    NOTE: The pattern consists of two logical groups:
    - First group (fork → wait_stream → device_put → record_event → join):
      Performs asynchronous data transfer on a separate stream
    - Second group (wait_event): Data transfer completion check when the data
      is actually needed

    For prefetch optimization, the first group can be moved earlier in the
    graph to overlap computation with data transfer, while the wait_event must
    remain at its current position to prevent blocking computation
    unnecessarily.

    Args:
        graph: The backward graph to modify
    """
    # Find all GPU reload nodes (identified by a naming convention on the node).
    reload_nodes: list[fx.Node] = [
        node
        for node in graph.nodes
        if GPU_RELOAD_PREFIX in node.name and node.op == "call_function"
    ]
    if not reload_nodes:
        return

    # Get default stream id and offload stream id.
    # NOTE(review): assumes every reload node targets the same original device
    # (only the first node's device is consulted) — confirm with callers.
    current_stream_id: int = get_current_stream(
        reload_nodes[0].args[0].meta["original_device"]  # type: ignore[assignment]
    )
    reload_stream_id: int = new_stream()

    for reload_node in reload_nodes:
        # Each reload gets its own event so first-use waits are independent.
        event_id: int = new_event()
        with graph.inserting_before(reload_node):
            # Fork to reload stream
            graph.call_function(
                torch.ops.streams.fork.default,
                args=(current_stream_id, reload_stream_id),
                name=f"stream_in_{reload_node.name}",
            )
            # Wait for default stream to prevent premature reloading
            graph.call_function(
                torch.ops.streams.wait_stream.default,
                args=(reload_stream_id, current_stream_id),
            )
        with graph.inserting_after(reload_node):
            # Record event on reload stream after device_put
            record_event_node = graph.call_function(
                torch.ops.streams.record_event.default,
                args=(event_id, reload_stream_id),
            )
        with graph.inserting_after(record_event_node):
            # Join back to default stream
            join_node = graph.call_function(
                torch.ops.streams.join.default,
                args=(reload_stream_id, current_stream_id),
                name=f"stream_out_{reload_node.name}",
            )
        with graph.inserting_after(join_node):
            # Wait for the event on default stream
            graph.call_function(
                torch.ops.streams.wait_event.default,
                args=(event_id, current_stream_id),
            )
Add stream operations for backward pass GPU reloading. Pattern: fork → wait_stream → device_put → record_event → join → wait_event This ensures that: 1. Reloading doesn't start prematurely (fork → wait_stream) 2. Reloading happens on a separate stream (device_put) 3. First use waits for reload completion (record_event → join → wait_event) NOTE: The pattern consists of two logical groups: - First group (fork → wait_stream → device_put → record_event → join): Performs asynchronous data transfer on a separate stream - Second group (wait_event): Data transfer completion check when the data is actually needed For prefetch optimization, the first group can be moved earlier in the graph to overlap computation with data transfer, while the wait_event must remain at its current position to prevent blocking computation unnecessarily. Args: graph: The backward graph to modify
python
torch/_functorch/_activation_offloading/activation_offloading.py
434
[ "graph" ]
None
true
4
6.8
pytorch/pytorch
96,034
google
false
put
/**
 * Stores a key-value pair in the multimap.
 *
 * @param key key to store in the multimap
 * @param value value to store in the multimap
 * @return {@code true} always, since a list-backed multimap never rejects a new pair
 */
@CanIgnoreReturnValue
@Override
public boolean put(@ParametricNullness K key, @ParametricNullness V value) {
    // Pure delegation; this override exists to narrow documentation/annotations.
    return super.put(key, value);
}
Stores a key-value pair in the multimap. @param key key to store in the multimap @param value value to store in the multimap @return {@code true} always
java
android/guava/src/com/google/common/collect/AbstractListMultimap.java
118
[ "key", "value" ]
true
1
6.56
google/guava
51,352
javadoc
false
run
/**
 * Called to run a fully configured {@link SpringApplication}.
 *
 * @param application the application to run
 * @return the {@link WebApplicationContext}
 */
protected @Nullable WebApplicationContext run(SpringApplication application) {
    // Cast assumes the configured application produces a web context when
    // deployed as a servlet — the usual case for this initializer.
    return (WebApplicationContext) application.run();
}
Called to run a fully configured {@link SpringApplication}. @param application the application to run @return the {@link WebApplicationContext}
java
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/support/SpringBootServletInitializer.java
204
[ "application" ]
WebApplicationContext
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
chunk
/**
 * Creates an array of elements split into groups the length of `size`. If
 * `array` can't be split evenly, the final chunk holds the remaining elements.
 *
 * @param {Array} array The array to process.
 * @param {number} [size=1] The length of each chunk.
 * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
 * @returns {Array} Returns the new array of chunks.
 */
function chunk(array, size, guard) {
  // Normalize `size`: default to 1 when omitted or when invoked as an
  // iteratee (e.g. by `_.map`); otherwise clamp to a non-negative integer.
  if (guard ? isIterateeCall(array, size, guard) : size === undefined) {
    size = 1;
  } else {
    size = nativeMax(toInteger(size), 0);
  }
  var length = array == null ? 0 : array.length;
  if (!length || size < 1) {
    return [];
  }
  // Preallocate the result, then slice off `size` elements per iteration.
  var result = Array(nativeCeil(length / size));
  for (var index = 0, resIndex = 0; index < length; resIndex++) {
    result[resIndex] = baseSlice(array, index, (index += size));
  }
  return result;
}
Creates an array of elements split into groups the length of `size`. If `array` can't be split evenly, the final chunk will be the remaining elements. @static @memberOf _ @since 3.0.0 @category Array @param {Array} array The array to process. @param {number} [size=1] The length of each chunk @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. @returns {Array} Returns the new array of chunks. @example _.chunk(['a', 'b', 'c', 'd'], 2); // => [['a', 'b'], ['c', 'd']] _.chunk(['a', 'b', 'c', 'd'], 3); // => [['a', 'b', 'c'], ['d']]
javascript
lodash.js
6,942
[ "array", "size", "guard" ]
false
8
7.68
lodash/lodash
61,490
jsdoc
false
isWarnEnabled
/**
 * Return whether warn logging is enabled: always {@code true} while messages
 * are still being deferred (no destination yet), otherwise whatever the
 * switch-over destination reports.
 */
@Override
public boolean isWarnEnabled() {
    // Synchronize on the shared deferred-lines list, consistent with the
    // other level checks on this logger.
    synchronized (this.lines) {
        if (this.destination == null) {
            return true;
        }
        return this.destination.isWarnEnabled();
    }
}
Return whether warn-level logging is enabled. Before a switch-over destination has been set, all levels report enabled so that messages are deferred; afterwards this delegates to the destination logger's own level check.
java
core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLog.java
86
[]
true
2
6.4
spring-projects/spring-boot
79,428
javadoc
false
found
/**
 * Indicate that one or more results were found. For example
 * {@code found("bean", "beans").items("x", "y")} results in the message
 * "found beans x, y".
 *
 * @param singular the article found in singular form
 * @param plural the article found in plural form
 * @return an {@link ItemsBuilder}
 */
public ItemsBuilder found(String singular, String plural) {
    return new ItemsBuilder(this, "found", singular, plural);
}
Indicate that one or more results were found. For example {@code found("bean", "beans").items("x", "y")} results in the message "found beans x, y". @param singular the article found in singular form @param plural the article found in plural form @return an {@link ItemsBuilder}
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
238
[ "singular", "plural" ]
ItemsBuilder
true
1
6.8
spring-projects/spring-boot
79,428
javadoc
false
updateFetchPositions
/**
 * Update fetch positions for assigned partitions that do not have a position.
 * Completes immediately when all positions are already available; otherwise
 * triggers async position validation and offset fetches, completing the
 * returned future when positions have been updated (or exceptionally on error).
 *
 * @param deadlineMs time in milliseconds when the triggering application event
 *     expires; errors received after this are saved and used to complete the
 *     result exceptionally on the next call
 * @return future that completes once all assigned partitions have positions,
 *     or exceptionally on failure
 */
public CompletableFuture<Void> updateFetchPositions(long deadlineMs) {
    CompletableFuture<Void> result = new CompletableFuture<>();

    try {
        // Fail fast with any error saved from a previous (expired) attempt.
        if (maybeCompleteWithPreviousException(result)) {
            return result;
        }

        // Async check to detect log truncation on partitions that have positions.
        validatePositionsIfNeeded();

        if (subscriptionState.hasAllFetchPositions()) {
            // All positions are already available
            result.complete(null);
            return result;
        }

        // Some positions are missing, so trigger requests to fetch offsets and update them.
        updatePositionsWithOffsets(deadlineMs).whenComplete((__, error) -> {
            if (error != null) {
                result.completeExceptionally(error);
            } else {
                result.complete(null);
            }
        });
    } catch (Exception e) {
        result.completeExceptionally(maybeWrapAsKafkaException(e));
    }

    return result;
}
Update fetch positions for assigned partitions that do not have a position. This will: <ul> <li>check if all assigned partitions already have fetch positions and return right away if that's the case</li> <li>trigger an async request to validate positions (detect log truncation)</li> <li>fetch committed offsets if enabled, and use the response to update the positions</li> <li>fetch partition offsets for partitions that may still require a position, and use the response to update the positions</li> </ul> @param deadlineMs Time in milliseconds when the triggering application event expires. Any error received after this will be saved, and used to complete the result exceptionally on the next call to this function. @return Future that will complete with a boolean indicating if all assigned partitions have positions (based on {@link SubscriptionState#hasAllFetchPositions()}). It will complete immediately, with true, if all positions are already available. If some positions are missing, the future will complete once the offsets are retrieved and positions are updated.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
235
[ "deadlineMs" ]
true
5
7.92
apache/kafka
31,560
javadoc
false
dce_hop_extra_outputs
def dce_hop_extra_outputs(gm: torch.fx.GraphModule) -> bool:
    """
    Remove unused extra outputs from HOP calls recursively.

    Processes graphs top-down: first DCE the current graph's HOP outputs, then
    recursively process nested subgraphs. This ensures that when we process a
    nested subgraph, the parent has already removed unused getitems, so the
    nested subgraph sees the correct usage information.

    Args:
        gm: The GraphModule to optimize

    Returns:
        True if any modifications were made, False otherwise
    """
    modified = False

    # Group HOP nodes by subgraph name
    # Multiple invocations may share the same subgraph, so we need to check
    # which indices are used across ALL invocations before removing any
    subgraph_to_nodes: dict[str, list[torch.fx.Node]] = collections.defaultdict(list)

    for node in gm.graph.nodes:
        if node.op == "call_function" and node.target in _HOPS_WITH_EXTRA_OUTPUTS:
            # The first argument is expected to be a get_attr node referencing
            # the subgraph module; anything else is skipped.
            subgraph_attr = node.args[0]
            if (
                isinstance(subgraph_attr, torch.fx.Node)
                and subgraph_attr.op == "get_attr"
            ):
                subgraph_name = subgraph_attr.target
                assert isinstance(subgraph_name, str)
                subgraph_to_nodes[subgraph_name].append(node)

    # STEP 1: DCE this graph's HOP outputs first (top-down)
    for subgraph_name, hop_nodes in subgraph_to_nodes.items():
        if _dce_subgraph(gm, subgraph_name, hop_nodes):
            modified = True

    # Re-validate and recompile only when the graph actually changed.
    if modified:
        gm.graph.lint()
        gm.recompile()

    # STEP 2: Recursively process nested subgraphs
    # After we've removed unused getitems from this graph, nested subgraphs
    # will see the correct usage information
    for subgraph_name in subgraph_to_nodes:
        subgraph = getattr(gm, subgraph_name)
        if isinstance(subgraph, torch.fx.GraphModule):
            if dce_hop_extra_outputs(subgraph):
                modified = True

    return modified
Remove unused extra outputs from HOP calls recursively. Processes graphs top-down: first DCE the current graph's HOP outputs, then recursively process nested subgraphs. This ensures that when we process a nested subgraph, the parent has already removed unused getitems, so the nested subgraph sees the correct usage information. Args: gm: The GraphModule to optimize Returns: True if any modifications were made, False otherwise
python
torch/_dynamo/dce_extra_outputs.py
35
[ "gm" ]
bool
true
12
8.08
pytorch/pytorch
96,034
google
false
standardFirstEntry
/**
 * A sensible definition of {@code firstEntry()} in terms of
 * {@code entrySet().iterator()}: returns an immutable snapshot of the first
 * entry, or {@code null} when the multiset is empty.
 */
protected @Nullable Entry<E> standardFirstEntry() {
    // Iterate at most once: return a snapshot of the first entry so the
    // result is unaffected by later mutation of the backing multiset.
    for (Entry<E> entry : entrySet()) {
        return Multisets.immutableEntry(entry.getElement(), entry.getCount());
    }
    return null;
}
A sensible definition of {@link #firstEntry()} in terms of {@code entrySet().iterator()}. <p>If you override {@link #entrySet()}, you may wish to override {@link #firstEntry()} to forward to this implementation.
java
android/guava/src/com/google/common/collect/ForwardingSortedMultiset.java
122
[]
true
2
6.24
google/guava
51,352
javadoc
false
findSourceMap
function findSourceMap(sourceURL) { if (typeof sourceURL !== 'string') { return undefined; } // No source maps for builtin modules. if (sourceURL.startsWith('node:')) { return undefined; } if (!getSourceMapsSupport().nodeModules && isUnderNodeModules(sourceURL)) { return undefined; } SourceMap ??= require('internal/source_map/source_map').SourceMap; try { if (RegExpPrototypeExec(kLeadingProtocol, sourceURL) === null) { // If the sourceURL is an invalid path, this will throw an error. sourceURL = pathToFileURL(sourceURL).href; } const entry = getModuleSourceMapCache().get(sourceURL) ?? generatedSourceMapCache.get(sourceURL); if (entry?.data == null) { return undefined; } let sourceMap = entry.sourceMap; if (sourceMap === undefined) { sourceMap = new SourceMap(entry.data, { lineLengths: entry.lineLengths }); entry.sourceMap = sourceMap; } return sourceMap; } catch (err) { debug(err); return undefined; } }
Find a source map for a given actual source URL or path. This function may be invoked from user code or test runner, this must not throw any exceptions. @param {string} sourceURL - actual source URL or path @returns {import('internal/source_map/source_map').SourceMap | undefined} a source map or undefined if not found
javascript
lib/internal/source_map/source_map_cache.js
369
[ "sourceURL" ]
false
9
6.24
nodejs/node
114,839
jsdoc
false
endLoopBlock
function endLoopBlock(): void { Debug.assert(peekBlockKind() === CodeBlockKind.Loop); const block = endBlock() as SwitchBlock; const breakLabel = block.breakLabel; if (!block.isScript) { markLabel(breakLabel); } }
Ends a code block that supports `break` or `continue` statements that are defined in generated code or in the source tree.
typescript
src/compiler/transformers/generators.ts
2,340
[]
true
2
7.04
microsoft/TypeScript
107,154
jsdoc
false
newLinkedHashMap
@SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call public static <K extends @Nullable Object, V extends @Nullable Object> LinkedHashMap<K, V> newLinkedHashMap() { return new LinkedHashMap<>(); }
Creates a <i>mutable</i>, empty, insertion-ordered {@code LinkedHashMap} instance. <p><b>Note:</b> if mutability is not required, use {@link ImmutableMap#of()} instead. <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead, use the {@code LinkedHashMap} constructor directly, taking advantage of <a href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond" syntax</a>. @return a new, empty {@code LinkedHashMap}
java
android/guava/src/com/google/common/collect/Maps.java
293
[]
true
1
6.08
google/guava
51,352
javadoc
false
validate_inclusive
def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: """ Check that the `inclusive` argument is among {"both", "neither", "left", "right"}. Parameters ---------- inclusive : {"both", "neither", "left", "right"} Returns ------- left_right_inclusive : tuple[bool, bool] Raises ------ ValueError : if argument is not among valid values """ left_right_inclusive: tuple[bool, bool] | None = None if isinstance(inclusive, str): left_right_inclusive = { "both": (True, True), "left": (True, False), "right": (False, True), "neither": (False, False), }.get(inclusive) if left_right_inclusive is None: raise ValueError( "Inclusive has to be either 'both', 'neither', 'left' or 'right'" ) return left_right_inclusive
Check that the `inclusive` argument is among {"both", "neither", "left", "right"}. Parameters ---------- inclusive : {"both", "neither", "left", "right"} Returns ------- left_right_inclusive : tuple[bool, bool] Raises ------ ValueError : if argument is not among valid values
python
pandas/util/_validators.py
424
[ "inclusive" ]
tuple[bool, bool]
true
3
6.08
pandas-dev/pandas
47,362
numpy
false
next
public char next(char c) throws JSONException { char result = next(); if (result != c) { throw syntaxError("Expected " + c + " but was " + result); } return result; }
Returns the current position and the entire input string. @return the current position and the entire input string.
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
485
[ "c" ]
true
2
6.88
spring-projects/spring-boot
79,428
javadoc
false
toString
@Override public String toString() { return "(" + "groupId='" + groupId + '\'' + ", isSimpleConsumerGroup=" + isSimpleConsumerGroup + ", groupState=" + groupState + ", type=" + type + ')'; }
The type of the consumer group. @return An Optional containing the type, if available.
java
clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java
155
[]
String
true
1
6.88
apache/kafka
31,560
javadoc
false
containsBeanDefinition
protected abstract boolean containsBeanDefinition(String beanName);
Check if this bean factory contains a bean definition with the given name. Does not consider any hierarchy this factory may participate in. Invoked by {@code containsBean} when no cached singleton instance is found. <p>Depending on the nature of the concrete bean factory implementation, this operation might be expensive (for example, because of directory lookups in external registries). However, for listable bean factories, this usually just amounts to a local hash lookup: The operation is therefore part of the public interface there. The same implementation can serve for both this template method and the public interface method in that case. @param beanName the name of the bean to look for @return if this bean factory contains a bean definition with the given name @see #containsBean @see org.springframework.beans.factory.ListableBeanFactory#containsBeanDefinition
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
1,963
[ "beanName" ]
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
patch_environ
def patch_environ(new_env_variables: dict[str, str]) -> Generator[None, None, None]: """ Set environment variables in context. After leaving the context, it restores its original state. :param new_env_variables: Environment variables to set """ current_env_state = {key: os.environ.get(key) for key in new_env_variables} os.environ.update(new_env_variables) try: yield finally: for key, old_value in current_env_state.items(): if old_value is None: if key in os.environ: del os.environ[key] else: os.environ[key] = old_value
Set environment variables in context. After leaving the context, it restores its original state. :param new_env_variables: Environment variables to set
python
airflow-core/src/airflow/utils/process_utils.py
329
[ "new_env_variables" ]
Generator[None, None, None]
true
5
6.4
apache/airflow
43,597
sphinx
false
bytesToInetAddress
private static InetAddress bytesToInetAddress(byte[] addr, @Nullable String scope) { try { InetAddress address = InetAddress.getByAddress(addr); if (scope == null) { return address; } checkArgument( address instanceof Inet6Address, "Unexpected state, scope should only appear for ipv6"); Inet6Address v6Address = (Inet6Address) address; int interfaceIndex = tryParseDecimal(scope, 0, scope.length()); if (interfaceIndex != -1) { return Inet6Address.getByAddress( v6Address.getHostAddress(), v6Address.getAddress(), interfaceIndex); } try { NetworkInterface asInterface = NetworkInterface.getByName(scope); if (asInterface == null) { throw formatIllegalArgumentException("No such interface: '%s'", scope); } return Inet6Address.getByAddress( v6Address.getHostAddress(), v6Address.getAddress(), asInterface); } catch (SocketException | UnknownHostException e) { throw new IllegalArgumentException("No such interface: " + scope, e); } } catch (UnknownHostException e) { throw new AssertionError(e); } }
Convert a byte array into an InetAddress. <p>{@link InetAddress#getByAddress} is documented as throwing a checked exception "if IP address is of illegal length." We replace it with an unchecked exception, for use by callers who already know that addr is an array of length 4 or 16. @param addr the raw 4-byte or 16-byte IP address in big-endian order @return an InetAddress object created from the raw IP address
java
android/guava/src/com/google/common/net/InetAddresses.java
419
[ "addr", "scope" ]
InetAddress
true
6
7.92
google/guava
51,352
javadoc
false
fileTreeChildren
private static Iterable<Path> fileTreeChildren(Path dir) { if (Files.isDirectory(dir, NOFOLLOW_LINKS)) { try { return listFiles(dir); } catch (IOException e) { // the exception thrown when iterating a DirectoryStream if an I/O exception occurs throw new DirectoryIteratorException(e); } } return ImmutableList.of(); }
Returns a {@link Traverser} instance for the file and directory tree. The returned traverser starts from a {@link Path} and will return all files and directories it encounters. <p>The returned traverser attempts to avoid following symbolic links to directories. However, the traverser cannot guarantee that it will not follow symbolic links to directories as it is possible for a directory to be replaced with a symbolic link between checking if the file is a directory and actually reading the contents of that directory. <p>If the {@link Path} passed to one of the traversal methods does not exist or is not a directory, no exception will be thrown and the returned {@link Iterable} will contain a single element: that path. <p>{@link DirectoryIteratorException} may be thrown when iterating {@link Iterable} instances created by this traverser if an {@link IOException} is thrown by a call to {@link #listFiles(Path)}. <p>Example: {@code MoreFiles.fileTraverser().depthFirstPreOrder(Paths.get("/"))} may return the following paths: {@code ["/", "/etc", "/etc/config.txt", "/etc/fonts", "/home", "/home/alice", ...]} @since 23.5
java
android/guava/src/com/google/common/io/MoreFiles.java
301
[ "dir" ]
true
3
6.88
google/guava
51,352
javadoc
false
command_line_usage
def command_line_usage() -> None: """Entry point for the compiler bisector command-line interface.""" if len(sys.argv) < 2: print(HELP_TEXT) sys.exit(1) bisection_manager = CompilerBisector() command = sys.argv[1] if command == "end": bisection_manager.delete_bisect_status() sys.exit(0) if command == "start": bisection_manager.delete_bisect_status() bisection_manager.initialize_system() sys.exit(0) if command == "run": if len(sys.argv) < 3: print( "Usage: python -m torch._inductor.compiler_bisector run <command> [args...]" ) sys.exit(1) import subprocess run_cmd = sys.argv[2:] def test_function() -> bool: # Pass bisection state to subprocess via environment variables env = os.environ.copy() backend = bisection_manager.get_backend() subsystem = bisection_manager.get_subsystem() if backend: # For test script to select the right backend env["TORCH_COMPILE_BACKEND"] = backend # For bisector in subprocess to know which backend we're testing env["TORCH_BISECT_BACKEND"] = backend if subsystem: assert backend is not None # subsystem requires a backend env["TORCH_BISECT_SUBSYSTEM"] = subsystem # Get run_state to determine TORCH_BISECT_MAX run_state = bisection_manager.get_run_state(backend, subsystem) if run_state == "test_disable": # -1 means always disable (counter > -1 is always True) env["TORCH_BISECT_MAX"] = "-1" elif run_state == "find_max_bounds": # Subprocess can't report count back, so we estimate upper bound # Run without disabling, then set a reasonable upper bound bisection_manager.update_bisect_range(backend, subsystem, 0, 1000) # Don't set TORCH_BISECT_MAX - let it run normally elif run_state == "bisect": low, high = bisection_manager.get_bisect_range(backend, subsystem) midpoint = (low + high) // 2 env["TORCH_BISECT_MAX"] = str(midpoint) result = subprocess.run(run_cmd, env=env) return result.returncode == 0 bisection_manager.delete_bisect_status() bisection_manager.bisection_enabled = True result = bisection_manager.do_bisect(test_function, 
cli_interface=False) if result: print(f"\nBisection complete: {result}") else: print("\nBisection complete: no issue found") sys.exit(0) if command not in ["good", "bad"]: print(f"Invalid command: {command}") print("Must be 'good', 'bad', 'start', 'end', or 'run'.") sys.exit(1) def test_function() -> bool: return command == "good" if not bisection_manager.get_backend(): raise ValueError("Must call start prior to good or bad") bisection_manager.do_bisect(test_function, cli_interface=True)
Entry point for the compiler bisector command-line interface.
python
torch/_inductor/compiler_bisector.py
672
[]
None
true
15
6.48
pytorch/pytorch
96,034
unknown
false
getObjectType
@Override public @Nullable Class<?> getObjectType() { if (this.proxy != null) { return this.proxy.getClass(); } if (this.proxyInterfaces != null && this.proxyInterfaces.length == 1) { return this.proxyInterfaces[0]; } if (this.target instanceof TargetSource targetSource) { return targetSource.getTargetClass(); } if (this.target != null) { return this.target.getClass(); } return null; }
A hook for subclasses to post-process the {@link ProxyFactory} before creating the proxy instance with it. @param proxyFactory the AOP ProxyFactory about to be used @since 4.2
java
spring-aop/src/main/java/org/springframework/aop/framework/AbstractSingletonProxyFactoryBean.java
218
[]
true
6
6.4
spring-projects/spring-framework
59,386
javadoc
false
urlParamsToForm
function urlParamsToForm() { const regex = /(\w+)=(\w+)/g; const search = decodeURIComponent(location.search); let match: any[] | null; while ((match = regex.exec(search))) { const name = match[1]; const value = match[2]; const els = document.querySelectorAll('input[name="' + name + '"]'); let el: any; for (let i = 0; i < els.length; i++) { el = els[i]; if (el.type === 'radio' || el.type === 'checkbox') { el.checked = el.value === value; } else { el.value = value; } } } }
@license Copyright Google LLC All Rights Reserved. Use of this source code is governed by an MIT-style license that can be found in the LICENSE file at https://angular.dev/license
typescript
modules/benchmarks/src/util.ts
75
[]
false
6
6.24
angular/angular
99,544
jsdoc
false
getManifestInfo
private ManifestInfo getManifestInfo(ZipContent zipContent) { ZipContent.Entry contentEntry = zipContent.getEntry(MANIFEST_NAME); if (contentEntry == null) { return ManifestInfo.NONE; } try { try (InputStream inputStream = getInputStream(contentEntry)) { Manifest manifest = new Manifest(inputStream); return new ManifestInfo(manifest); } } catch (IOException ex) { throw new UncheckedIOException(ex); } }
Return if an entry with the given name exists. @param name the name to check @return if the entry exists
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
313
[ "zipContent" ]
ManifestInfo
true
3
8.08
spring-projects/spring-boot
79,428
javadoc
false
estimatedBucketCount
public ExponentialHistogramBuilder estimatedBucketCount(int totalBuckets) { estimatedBucketCount = totalBuckets; return this; }
If known, sets the estimated total number of buckets to minimize unnecessary allocations. Only has an effect if invoked before the first call to {@link #setPositiveBucket(long, long)} and {@link #setNegativeBucket(long, long)}. @param totalBuckets the total number of buckets expected to be added @return the builder
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramBuilder.java
87
[ "totalBuckets" ]
ExponentialHistogramBuilder
true
1
6
elastic/elasticsearch
75,680
javadoc
false
getJsonMapper
private JsonMapper getJsonMapper() { if (this.jsonMapper == null) { this.jsonMapper = new JsonMapper(); } return this.jsonMapper; }
Creates an instance with a default {@link JsonMapper} that is created lazily.
java
core/spring-boot/src/main/java/org/springframework/boot/json/JacksonJsonParser.java
65
[]
JsonMapper
true
2
6.56
spring-projects/spring-boot
79,428
javadoc
false
createFromDimensions
static CuVSIvfPqParams createFromDimensions( long nRows, long nFeatures, CagraIndexParams.CuvsDistanceType distanceType, int efConstruction ) { if (nRows <= 0 || nFeatures <= 0) { throw new IllegalArgumentException("Dataset dimensions must be positive: rows=" + nRows + ", features=" + nFeatures); } // Calculate PQ dimensions and bits based on feature count int pqDim; int pqBits; if (nFeatures <= 32) { pqDim = 16; pqBits = 8; } else { pqBits = 4; if (nFeatures <= 64) { pqDim = 32; } else if (nFeatures <= 128) { pqDim = 64; } else if (nFeatures <= 192) { pqDim = 96; } else { // Round up to nearest multiple of 128 pqDim = (int) roundUpSafe(nFeatures / 2, 128); } } // Calculate number of lists: approximately 1 cluster per 2000 rows int nLists = Math.max(1, (int) (nRows / 2000)); // Calculate kmeans training set fraction adaptively final double kMinPointsPerCluster = 32.0; final double minKmeansTrainsetPoints = kMinPointsPerCluster * nLists; final double maxKmeansTrainsetFraction = 1.0; final double minKmeansTrainsetFraction = Math.min(maxKmeansTrainsetFraction, minKmeansTrainsetPoints / nRows); double kmeansTrainsetFraction = Math.clamp(1.0 / Math.sqrt(nRows * 1e-5), minKmeansTrainsetFraction, maxKmeansTrainsetFraction); // Calculate number of probes based on number of lists and efConstruction int nProbes = Math.round((float) (2.0 + Math.sqrt(nLists) / 20.0 + efConstruction / 16.0)); // Build index parameters CuVSIvfPqIndexParams indexParams = new CuVSIvfPqIndexParams.Builder().withMetric(distanceType) .withPqDim(pqDim) .withPqBits(pqBits) .withNLists(nLists) .withKmeansNIters(10) .withKmeansTrainsetFraction(kmeansTrainsetFraction) .withCodebookKind(CagraIndexParams.CodebookGen.PER_SUBSPACE) .build(); // Build search parameters CuVSIvfPqSearchParams searchParams = new CuVSIvfPqSearchParams.Builder().withNProbes(nProbes) .withLutDtype(CagraIndexParams.CudaDataType.CUDA_R_16F) .withInternalDistanceDtype(CagraIndexParams.CudaDataType.CUDA_R_16F) .build(); // Build and 
return the complete IVF_PQ parameters return new CuVSIvfPqParams.Builder().withCuVSIvfPqIndexParams(indexParams) .withCuVSIvfPqSearchParams(searchParams) .withRefinementRate(1.0f) .build(); }
Creates {@link CuVSIvfPqParams} with automatically calculated parameters based on dataset dimensions and construction parameter. <p>This is a convenience method when you have the dataset dimensions but not the dataset object itself. The calculation logic is identical to {@link #create(int, int, CagraIndexParams.CuvsDistanceType, int)}. @param nRows the number of rows (vectors) in the dataset @param nFeatures the number of features (dimensions) per vector @param distanceType the distance metric to use (e.g., L2Expanded, Cosine) @param efConstruction the construction parameter for parameter calculation @return a {@link CuVSIvfPqParams} instance with calculated parameters @throws IllegalArgumentException if dimensions are invalid
java
libs/gpu-codec/src/main/java/org/elasticsearch/gpu/codec/CuVSIvfPqParamsFactory.java
70
[ "nRows", "nFeatures", "distanceType", "efConstruction" ]
CuVSIvfPqParams
true
7
7.52
elastic/elasticsearch
75,680
javadoc
false
array_split
def array_split(ary, indices_or_sections, axis=0): """ Split an array into multiple sub-arrays. Please refer to the ``split`` documentation. The only difference between these functions is that ``array_split`` allows `indices_or_sections` to be an integer that does *not* equally divide the axis. For an array of length l that should be split into n sections, it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n. See Also -------- split : Split array into multiple sub-arrays of equal size. Examples -------- >>> import numpy as np >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] >>> x = np.arange(9) >>> np.array_split(x, 4) [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] """ try: Ntotal = ary.shape[axis] except AttributeError: Ntotal = len(ary) try: # handle array case. Nsections = len(indices_or_sections) + 1 div_points = [0] + list(indices_or_sections) + [Ntotal] except TypeError: # indices_or_sections is a scalar, not an array. Nsections = int(indices_or_sections) if Nsections <= 0: raise ValueError('number sections must be larger than 0.') from None Neach_section, extras = divmod(Ntotal, Nsections) section_sizes = ([0] + extras * [Neach_section + 1] + (Nsections - extras) * [Neach_section]) div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() sub_arys = [] sary = _nx.swapaxes(ary, axis, 0) for i in range(Nsections): st = div_points[i] end = div_points[i + 1] sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) return sub_arys
Split an array into multiple sub-arrays. Please refer to the ``split`` documentation. The only difference between these functions is that ``array_split`` allows `indices_or_sections` to be an integer that does *not* equally divide the axis. For an array of length l that should be split into n sections, it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n. See Also -------- split : Split array into multiple sub-arrays of equal size. Examples -------- >>> import numpy as np >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] >>> x = np.arange(9) >>> np.array_split(x, 4) [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
python
numpy/lib/_shape_base_impl.py
720
[ "ary", "indices_or_sections", "axis" ]
false
3
8
numpy/numpy
31,054
unknown
false
bindModuleExportsAssignment
function bindModuleExportsAssignment(node: BindablePropertyAssignmentExpression) { // A common practice in node modules is to set 'export = module.exports = {}', this ensures that 'exports' // is still pointing to 'module.exports'. // We do not want to consider this as 'export=' since a module can have only one of these. // Similarly we do not want to treat 'module.exports = exports' as an 'export='. if (!setCommonJsModuleIndicator(node)) { return; } const assignedExpression = getRightMostAssignedExpression(node.right); if (isEmptyObjectLiteral(assignedExpression) || container === file && isExportsOrModuleExportsOrAlias(file, assignedExpression)) { return; } if (isObjectLiteralExpression(assignedExpression) && every(assignedExpression.properties, isShorthandPropertyAssignment)) { forEach(assignedExpression.properties, bindExportAssignedObjectMemberAlias); return; } // 'module.exports = expr' assignment const flags = exportAssignmentIsAlias(node) ? SymbolFlags.Alias // An export= with an EntityNameExpression or a ClassExpression exports all meanings of that identifier or class : SymbolFlags.Property | SymbolFlags.ExportValue | SymbolFlags.ValueModule; const symbol = declareSymbol(file.symbol.exports!, file.symbol, node, flags | SymbolFlags.Assignment, SymbolFlags.None); setValueDeclaration(symbol, node); }
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names. @param symbolTable - The symbol table which node will be added to. @param parent - node's parent declaration. @param node - The declaration to be added to the symbol table @param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.) @param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
typescript
src/compiler/binder.ts
3,236
[ "node" ]
false
8
6.08
microsoft/TypeScript
107,154
jsdoc
false
inner
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R: """Retrieve the cached result without calling the function. Args: *args: Positional arguments to generate the cache key. **kwargs: Keyword arguments to generate the cache key. Returns: The cached result (decoded if decoder is provided). Raises: KeyError: If no cached result exists for the given parameters. """ # Generate cache key from parameters cache_key = self._make_key(custom_params_encoder, *args, **kwargs) # Check if result is cached cached_hit = self._cache.get(cache_key) if cached_hit is None: raise KeyError(f"No cached result found for key: {cache_key}") # Extract the cached value cache_entry = cast(CacheEntry, cached_hit.value) # Decode and return the cached result if custom_result_decoder is not None: # Get the decoder function by calling the factory with params decoder_fn = custom_result_decoder(*args, **kwargs) return decoder_fn(cast(_EncodedR, cache_entry.encoded_result)) return cast(_R, cache_entry.encoded_result)
Retrieve the cached result without calling the function. Args: *args: Positional arguments to generate the cache key. **kwargs: Keyword arguments to generate the cache key. Returns: The cached result (decoded if decoder is provided). Raises: KeyError: If no cached result exists for the given parameters.
python
torch/_inductor/runtime/caching/interfaces.py
560
[]
_R
true
3
8.08
pytorch/pytorch
96,034
google
false
record
private void record(BufferedStartupStep step) { if (this.filter.test(step) && this.estimatedSize.get() < this.capacity) { this.estimatedSize.incrementAndGet(); this.events.add(new TimelineEvent(step, this.clock.instant())); } while (true) { BufferedStartupStep current = this.current.get(); BufferedStartupStep next = getLatestActive(current); if (this.current.compareAndSet(current, next)) { return; } } }
Add a predicate filter to the list of existing ones. <p> A {@link StartupStep step} that doesn't match all filters will not be recorded. @param filter the predicate filter to add.
java
core/spring-boot/src/main/java/org/springframework/boot/context/metrics/buffering/BufferingApplicationStartup.java
124
[ "step" ]
void
true
5
6.88
spring-projects/spring-boot
79,428
javadoc
false
check_for_wildcard_key
def check_for_wildcard_key( self, wildcard_key: str, bucket_name: str | None = None, delimiter: str = "" ) -> bool: """ Check that a key matching a wildcard expression exists in a bucket. :param wildcard_key: the path to the key :param bucket_name: the name of the bucket :param delimiter: the delimiter marks key hierarchy :return: True if a key exists and False if not. """ return ( self.get_wildcard_key(wildcard_key=wildcard_key, bucket_name=bucket_name, delimiter=delimiter) is not None )
Check that a key matching a wildcard expression exists in a bucket. :param wildcard_key: the path to the key :param bucket_name: the name of the bucket :param delimiter: the delimiter marks key hierarchy :return: True if a key exists and False if not.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
1,134
[ "self", "wildcard_key", "bucket_name", "delimiter" ]
bool
true
1
7.04
apache/airflow
43,597
sphinx
false
toWatchPath
Path toWatchPath(ResourceLoader resourceLoader) { try { Assert.state(!isPemContent(), "Value contains PEM content"); Assert.state(this.value != null, "Value must not be null"); Resource resource = resourceLoader.getResource(this.value); if (!resource.isFile()) { throw new BundleContentNotWatchableException(this); } return Path.of(resource.getFile().getAbsolutePath()); } catch (Exception ex) { if (ex instanceof BundleContentNotWatchableException bundleContentNotWatchableException) { throw bundleContentNotWatchableException; } throw new IllegalStateException("Unable to convert value of property '%s' to a path".formatted(this.name), ex); } }
Return if there is any property value present. @return if the value is present
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/BundleContentProperty.java
56
[ "resourceLoader" ]
Path
true
4
7.04
spring-projects/spring-boot
79,428
javadoc
false
start
public void start(int timeoutMs) { // start() is invoked internally instead of by the caller to avoid SpotBugs errors about starting a thread // in a constructor. start(); try { if (!initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)) { maybeSetInitializationError( new TimeoutException("Consumer network thread resource initialization timed out after " + timeoutMs + " ms") ); } } catch (InterruptedException e) { maybeSetInitializationError( new InterruptException("Consumer network thread resource initialization was interrupted", e) ); } KafkaException e = initializationError.get(); if (e != null) throw e; }
Start the network thread and let it complete its initialization before proceeding. The {@link ClassicKafkaConsumer} constructor blocks during creation of its {@link NetworkClient}, providing precedent for waiting here. In certain cases (e.g. an invalid {@link LoginModule} in {@link SaslConfigs#SASL_JAAS_CONFIG}), an error could be thrown during {@link #initializeResources()}. This would result in the {@link #run()} method exiting, no longer able to process events, which means that the consumer effectively hangs. @param timeoutMs Length of time, in milliseconds, to wait for the thread to start and complete initialization
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
119
[ "timeoutMs" ]
void
true
4
6.56
apache/kafka
31,560
javadoc
false
parseDependencies
private Map<String, Dependency> parseDependencies(JSONObject root) throws JSONException { Map<String, Dependency> result = new HashMap<>(); if (!root.has(DEPENDENCIES_EL)) { return result; } JSONObject dependencies = root.getJSONObject(DEPENDENCIES_EL); JSONArray array = dependencies.getJSONArray(VALUES_EL); for (int i = 0; i < array.length(); i++) { JSONObject group = array.getJSONObject(i); parseGroup(group, result); } return result; }
Returns the defaults applicable to the service. @return the defaults of the service
java
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrServiceMetadata.java
130
[ "root" ]
true
3
6.88
spring-projects/spring-boot
79,428
javadoc
false
lazyLoadRimraf
function lazyLoadRimraf() { if (rimraf === undefined) ({ rimraf } = require('internal/fs/rimraf')); }
Synchronously truncates the file descriptor. @param {number} fd @param {number} [len] @returns {void}
javascript
lib/fs.js
1,110
[]
false
2
7.04
nodejs/node
114,839
jsdoc
false
onLeaderElected
protected abstract Map<String, ByteBuffer> onLeaderElected(String leaderId, String protocol, List<JoinGroupResponseData.JoinGroupResponseMember> allMemberMetadata, boolean skipAssignment);
Invoked when the leader is elected. This is used by the leader to perform the assignment if necessary and to push state to all the members of the group (e.g. to push partition assignments in the case of the new consumer) @param leaderId The id of the leader (which is this member) @param protocol The protocol selected by the coordinator @param allMemberMetadata Metadata from all members of the group @param skipAssignment True if leader must skip running the assignor @return A map from each member to their state assignment
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
237
[ "leaderId", "protocol", "allMemberMetadata", "skipAssignment" ]
true
1
6.16
apache/kafka
31,560
javadoc
false
next
@Override public Calendar next() { if (spot.equals(endFinal)) { throw new NoSuchElementException(); } spot.add(Calendar.DATE, 1); return (Calendar) spot.clone(); }
Returns the next calendar in the iteration. @return Object calendar for the next date.
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
95
[]
Calendar
true
2
8.08
apache/commons-lang
2,896
javadoc
false
rand_score
def rand_score(labels_true, labels_pred):
    """Rand index.

    Computes a similarity measure between two clusterings by counting, over
    all sample pairs, those assigned consistently (same cluster in both, or
    different clusters in both):

        RI = (number of agreeing pairs) / (number of pairs)

    Read more in the :ref:`User Guide <rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    RI : float
        Similarity score between 0.0 and 1.0, inclusive; 1.0 stands for a
        perfect match.

    See Also
    --------
    adjusted_rand_score : Adjusted Rand Score.
    adjusted_mutual_info_score : Adjusted Mutual Information.

    Examples
    --------
    >>> from sklearn.metrics.cluster import rand_score
    >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0])
    1.0
    >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1])
    0.83...
    """
    contingency = pair_confusion_matrix(labels_true, labels_pred)
    agreeing = contingency.diagonal().sum()
    total = contingency.sum()
    # Degenerate clusterings (data not split at all, or every sample in its
    # own cluster) give total == 0 or total == agreeing; both are treated as
    # perfect matches.
    if total == 0 or agreeing == total:
        return 1.0
    return float(agreeing / total)
Rand index. The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings [1]_ [2]_. The raw RI score [3]_ is: .. code-block:: text RI = (number of agreeing pairs) / (number of pairs) Read more in the :ref:`User Guide <rand_score>`. Parameters ---------- labels_true : array-like of shape (n_samples,), dtype=integral Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,), dtype=integral Cluster labels to evaluate. Returns ------- RI : float Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for perfect match. See Also -------- adjusted_rand_score: Adjusted Rand Score. adjusted_mutual_info_score: Adjusted Mutual Information. References ---------- .. [1] :doi:`Hubert, L., Arabie, P. "Comparing partitions." Journal of Classification 2, 193–218 (1985). <10.1007/BF01908075>`. .. [2] `Wikipedia: Simple Matching Coefficient <https://en.wikipedia.org/wiki/Simple_matching_coefficient>`_ .. [3] `Wikipedia: Rand Index <https://en.wikipedia.org/wiki/Rand_index>`_ Examples -------- Perfectly matching labelings have a score of 1 even >>> from sklearn.metrics.cluster import rand_score >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0]) 1.0 Labelings that assign all classes members to the same clusters are complete but may not always be pure, hence penalized: >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1]) 0.83
python
sklearn/metrics/cluster/_supervised.py
280
[ "labels_true", "labels_pred" ]
false
3
7.28
scikit-learn/scikit-learn
64,340
numpy
false
_compute_lower_bound
def _compute_lower_bound(self, log_resp, log_prob_norm): """Estimate the lower bound of the model. The lower bound on the likelihood (of the training data with respect to the model) is used to detect the convergence and has to increase at each iteration. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. log_prob_norm : float Logarithm of the probability of each sample in X. Returns ------- lower_bound : float """ # Contrary to the original formula, we have done some simplification # and removed all the constant terms. (n_features,) = self.mean_prior_.shape # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)` # because the precision matrix is normalized. log_det_precisions_chol = _compute_log_det_cholesky( self.precisions_cholesky_, self.covariance_type, n_features ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) if self.covariance_type == "tied": log_wishart = self.n_components * np.float64( _log_wishart_norm( self.degrees_of_freedom_, log_det_precisions_chol, n_features ) ) else: log_wishart = np.sum( _log_wishart_norm( self.degrees_of_freedom_, log_det_precisions_chol, n_features ) ) if self.weight_concentration_prior_type == "dirichlet_process": log_norm_weight = -np.sum( betaln(self.weight_concentration_[0], self.weight_concentration_[1]) ) else: log_norm_weight = _log_dirichlet_norm(self.weight_concentration_) return ( -np.sum(np.exp(log_resp) * log_resp) - log_wishart - log_norm_weight - 0.5 * n_features * np.sum(np.log(self.mean_precision_)) )
Estimate the lower bound of the model. The lower bound on the likelihood (of the training data with respect to the model) is used to detect the convergence and has to increase at each iteration. Parameters ---------- X : array-like of shape (n_samples, n_features) log_resp : array, shape (n_samples, n_components) Logarithm of the posterior probabilities (or responsibilities) of the point of each sample in X. log_prob_norm : float Logarithm of the probability of each sample in X. Returns ------- lower_bound : float
python
sklearn/mixture/_bayesian_mixture.py
781
[ "self", "log_resp", "log_prob_norm" ]
false
5
6.08
scikit-learn/scikit-learn
64,340
numpy
false
collect
/**
 * Terminates this FailableStream and performs a mutable reduction on its
 * elements by delegating to the underlying stream's
 * {@code collect(Supplier, BiConsumer, BiConsumer)}.
 *
 * @param <A> type of the accumulator (unused here; kept for API symmetry with
 *        the collector-based overload).
 * @param <R> type of the result.
 * @param supplier a function that creates a new result container; for a
 *        parallel execution it may be called multiple times and must return a
 *        fresh value each time.
 * @param accumulator an associative, non-interfering, stateless function for
 *        incorporating an additional element into a result.
 * @param combiner an associative, non-interfering, stateless function for
 *        combining two partial results, compatible with the accumulator.
 * @return the result of the reduction.
 */
public <A, R> R collect(final Supplier<R> supplier, final BiConsumer<R, ? super O> accumulator, final BiConsumer<R, R> combiner) {
    // Marks the stream as terminated before running the terminal operation.
    makeTerminated();
    return stream().collect(supplier, accumulator, combiner);
}
Performs a mutable reduction operation on the elements of this FailableStream. A mutable reduction is one in which the reduced value is a mutable result container, such as an {@link ArrayList}, and elements are incorporated by updating the state of the result rather than by replacing the result. This produces a result equivalent to: <pre>{@code R result = supplier.get(); for (T element : this stream) accumulator.accept(result, element); return result; }</pre> <p> Like {@link #reduce(Object, BinaryOperator)}, {@code collect} operations can be parallelized without requiring additional synchronization. </p> <p> This is an intermediate operation. </p> <p> Note There are many existing classes in the JDK whose signatures are well-suited for use with method references as arguments to {@code collect()}. For example, the following will accumulate strings into an {@link ArrayList}: </p> <pre>{@code List<String> asList = stringStream.collect(ArrayList::new, ArrayList::add, ArrayList::addAll); }</pre> <p> The following will take a stream of strings and concatenates them into a single string: </p> <pre>{@code String concat = stringStream.collect(StringBuilder::new, StringBuilder::append, StringBuilder::append) .toString(); }</pre> @param <R> type of the result. @param <A> Type of the accumulator. @param supplier a function that creates a new result container. For a parallel execution, this function may be called multiple times and must return a fresh value each time. @param accumulator An associative, non-interfering, stateless function for incorporating an additional element into a result. @param combiner An associative, non-interfering, stateless function for combining two values, which must be compatible with the accumulator function. @return The result of the reduction.
java
src/main/java/org/apache/commons/lang3/Streams.java
318
[ "supplier", "accumulator", "combiner" ]
R
true
1
6.16
apache/commons-lang
2,896
javadoc
false
refreshProperties
/**
 * Refresh the PropertiesHolder for the given bundle filename.
 * <p>The holder can be {@code null} if not cached before, or a timed-out cache
 * entry (potentially getting re-validated against the current last-modified
 * timestamp).
 * @param filename the bundle filename (basename + Locale)
 * @param propHolder the current PropertiesHolder for the bundle, or {@code null}
 * @return the refreshed holder; never {@code null} — an empty holder is used to
 * represent "not found" or "not valid"
 * @see #resolveResource(String)
 */
protected PropertiesHolder refreshProperties(String filename, @Nullable PropertiesHolder propHolder) {
	// A negative cache timeout means "cache forever": no refresh timestamp needed.
	long refreshTimestamp = (getCacheMillis() < 0 ? -1 : System.currentTimeMillis());

	Resource resource = resolveResource(filename);
	if (resource != null) {
		long fileTimestamp = -1;
		if (getCacheMillis() >= 0) {
			// Last-modified timestamp of file will just be read if caching with timeout.
			try {
				fileTimestamp = resource.lastModified();
				if (propHolder != null && propHolder.getFileTimestamp() == fileTimestamp) {
					// File unchanged on disk: extend the cache lease on the existing holder.
					if (logger.isDebugEnabled()) {
						logger.debug("Re-caching properties for filename [" + filename + "] - file hasn't been modified");
					}
					propHolder.setRefreshTimestamp(refreshTimestamp);
					return propHolder;
				}
			}
			catch (IOException ex) {
				// Probably a class path resource: cache it forever.
				if (logger.isDebugEnabled()) {
					logger.debug(resource + " could not be resolved in the file system - assuming that it hasn't changed", ex);
				}
				fileTimestamp = -1;
			}
		}
		try {
			Properties props = loadProperties(resource, filename);
			propHolder = new PropertiesHolder(props, fileTimestamp);
		}
		catch (IOException ex) {
			if (logger.isWarnEnabled()) {
				logger.warn("Could not parse properties file [" + resource.getFilename() + "]", ex);
			}
			// Empty holder representing "not valid".
			propHolder = new PropertiesHolder();
		}
	}
	else {
		// Resource does not exist.
		if (logger.isDebugEnabled()) {
			logger.debug("No properties file found for [" + filename + "]");
		}
		// Empty holder representing "not found".
		propHolder = new PropertiesHolder();
	}

	propHolder.setRefreshTimestamp(refreshTimestamp);
	this.cachedProperties.put(filename, propHolder);
	return propHolder;
}
Refresh the PropertiesHolder for the given bundle filename. <p>The holder can be {@code null} if not cached before, or a timed-out cache entry (potentially getting re-validated against the current last-modified timestamp). @param filename the bundle filename (basename + Locale) @param propHolder the current PropertiesHolder for the bundle @see #resolveResource(String)
java
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
458
[ "filename", "propHolder" ]
PropertiesHolder
true
12
6.24
spring-projects/spring-framework
59,386
javadoc
false
_load_into_new_table
def _load_into_new_table(self, table_name: str, delete_on_error: bool) -> str:
    """
    Import S3 key or keys into a new DynamoDB table.

    :param table_name: Name of the table that shall be created
    :param delete_on_error: If set, the new DynamoDB table will be deleted in
        case of import errors
    :return: The Amazon resource number (ARN) of the import job
    :raises AirflowException: if the import request fails, the job creation is
        rejected, or (when waiting for completion) the import ends in failure
    """
    dynamodb_hook = DynamoDBHook(aws_conn_id=self.aws_conn_id)
    client = dynamodb_hook.client

    # Optional user-supplied overrides for the import call and for the
    # table-creation parameters.
    import_table_config = self.import_table_kwargs or {}
    import_table_creation_config = self.import_table_creation_kwargs or {}

    try:
        response = client.import_table(
            S3BucketSource={
                "S3Bucket": self.s3_bucket,
                "S3KeyPrefix": self.s3_key,
            },
            InputFormat=self.input_format,
            TableCreationParameters={
                "TableName": table_name,
                "AttributeDefinitions": self.dynamodb_attributes,
                "KeySchema": self.dynamodb_key_schema,
                "BillingMode": self.billing_mode,
                **import_table_creation_config,
            },
            **import_table_config,
        )
    except ClientError as e:
        self.log.error("Error: failed to load from S3 into DynamoDB table. Error: %s", str(e))
        raise AirflowException(f"S3 load into DynamoDB table failed with error: {e}")

    # The request itself can come back with a FAILED status even without an
    # exception being raised.
    if response["ImportTableDescription"]["ImportStatus"] == "FAILED":
        raise AirflowException(
            "S3 into Dynamodb job creation failed. Code: "
            f"{response['ImportTableDescription']['FailureCode']}. "
            f"Failure: {response['ImportTableDescription']['FailureMessage']}"
        )

    if self.wait_for_completion:
        self.log.info("Waiting for S3 into Dynamodb job to complete")
        waiter = dynamodb_hook.get_waiter("import_table")
        try:
            waiter.wait(
                ImportArn=response["ImportTableDescription"]["ImportArn"],
                WaiterConfig={"Delay": self.check_interval, "MaxAttempts": self.max_attempts},
            )
        except WaiterError:
            status, error_code, error_msg = dynamodb_hook.get_import_status(
                response["ImportTableDescription"]["ImportArn"]
            )
            if delete_on_error:
                # Clean up the partially imported table before surfacing the failure.
                client.delete_table(TableName=table_name)
            raise AirflowException(
                f"S3 import into Dynamodb job failed: Status: {status}. Error: {error_code}. "
                f"Error message: {error_msg}"
            )
    return response["ImportTableDescription"]["ImportArn"]
Import S3 key or keys into a new DynamoDB table. :param table_name: Name of the table that shall be created :param delete_on_error: If set, the new DynamoDB table will be deleted in case of import errors :return: The Amazon resource number (ARN)
python
providers/amazon/src/airflow/providers/amazon/aws/transfers/s3_to_dynamodb.py
147
[ "self", "table_name", "delete_on_error" ]
str
true
6
7.84
apache/airflow
43,597
sphinx
false
goodFastHash
public static HashFunction goodFastHash(int minimumBits) { int bits = checkPositiveAndMakeMultipleOf32(minimumBits); if (bits == 32) { return Murmur3_32HashFunction.GOOD_FAST_HASH_32; } if (bits <= 128) { return Murmur3_128HashFunction.GOOD_FAST_HASH_128; } // Otherwise, join together some 128-bit murmur3s int hashFunctionsNeeded = (bits + 127) / 128; HashFunction[] hashFunctions = new HashFunction[hashFunctionsNeeded]; hashFunctions[0] = Murmur3_128HashFunction.GOOD_FAST_HASH_128; int seed = GOOD_FAST_HASH_SEED; for (int i = 1; i < hashFunctionsNeeded; i++) { seed += 1500450271; // a prime; shouldn't matter hashFunctions[i] = murmur3_128(seed); } return new ConcatenatedHashFunction(hashFunctions); }
Returns a general-purpose, <b>temporary-use</b>, non-cryptographic hash function. The algorithm the returned function implements is unspecified and subject to change without notice. <p><b>Warning:</b> a new random seed for these functions is chosen each time the {@code Hashing} class is loaded. <b>Do not use this method</b> if hash codes may escape the current process in any way, for example being sent over RPC, or saved to disk. For a general-purpose, non-cryptographic hash function that will never change behavior, we suggest {@link #murmur3_128}. <p>Repeated calls to this method on the same loaded {@code Hashing} class, using the same value for {@code minimumBits}, will return identically-behaving {@link HashFunction} instances. @param minimumBits a positive integer. This can be arbitrarily large. The returned {@link HashFunction} instance may use memory proportional to this integer. @return a hash function, described above, that produces hash codes of length {@code minimumBits} or greater
java
android/guava/src/com/google/common/hash/Hashing.java
64
[ "minimumBits" ]
HashFunction
true
4
7.76
google/guava
51,352
javadoc
false
getJavaVersion
/**
 * Returns the {@link JavaVersion} of the current runtime by scanning the enum
 * constants from newest to oldest and picking the first one that is available.
 *
 * @return the detected {@link JavaVersion}, falling back to {@code SEVENTEEN}
 * if none report as available
 */
public static JavaVersion getJavaVersion() {
	JavaVersion[] versions = JavaVersion.values();
	for (int i = versions.length - 1; i >= 0; i--) {
		if (versions[i].available) {
			return versions[i];
		}
	}
	return SEVENTEEN;
}
Returns the {@link JavaVersion} of the current runtime. @return the {@link JavaVersion}
java
core/spring-boot/src/main/java/org/springframework/boot/system/JavaVersion.java
120
[]
JavaVersion
true
2
7.6
spring-projects/spring-boot
79,428
javadoc
false
instance
/**
 * Create a struct instance for the given field, which must resolve to a
 * container type (struct or array) in this struct's schema.
 *
 * @param field the field whose name is looked up in the schema
 * @return the new struct instance
 * @throws SchemaException if the resolved field is not a container type
 */
public Struct instance(Field field) {
    // Resolve the field by name in this struct's schema and delegate.
    return instance(schema.get(field.name));
}
Create a struct instance for the given field which must be a container type (struct or array) @param field The name of the field to create (field must be a schema type) @return The struct @throws SchemaException If the given field is not a container type
java
clients/src/main/java/org/apache/kafka/common/protocol/types/Struct.java
186
[ "field" ]
Struct
true
1
6
apache/kafka
31,560
javadoc
false
parse
/**
 * Parses the given text with the primary formatter, then with any configured
 * fallback patterns, and finally (only when no fallback patterns are set) with
 * ISO-based default parsing. The original parse exception is rethrown —
 * enriched with the configuration source, if available — when all attempts fail.
 *
 * @param text the text to parse
 * @param locale the locale to parse with
 * @return the parsed temporal accessor
 */
@Override
public TemporalAccessor parse(String text, Locale locale) throws ParseException {
	try {
		return doParse(text, locale, this.formatter);
	}
	catch (DateTimeParseException ex) {
		if (!ObjectUtils.isEmpty(this.fallbackPatterns)) {
			for (String pattern : this.fallbackPatterns) {
				try {
					DateTimeFormatter fallbackFormatter = DateTimeFormatterUtils.createStrictDateTimeFormatter(pattern);
					return doParse(text, locale, fallbackFormatter);
				}
				catch (DateTimeParseException ignoredException) {
					// Ignore fallback parsing exceptions since the exception thrown below
					// will include information from the "source" if available -- for example,
					// the toString() of a @DateTimeFormat annotation.
				}
			}
		}
		else {
			// Fallback to ISO-based default java.time type parsing
			try {
				return defaultParse(text);
			}
			catch (DateTimeParseException ignoredException) {
				// Ignore fallback parsing exception like above
			}
		}
		if (this.source != null) {
			// Wrap with the configuration source to aid diagnosis, preserving the
			// original error index and cause.
			throw new DateTimeParseException(
					String.format("Unable to parse date time value \"%s\" using configuration from %s", text, this.source),
					text, ex.getErrorIndex(), ex);
		}
		// else rethrow original exception
		throw ex;
	}
}
Create a new TemporalAccessorParser for the given TemporalAccessor type. @param temporalAccessorType the specific TemporalAccessor class (LocalDate, LocalTime, LocalDateTime, ZonedDateTime, OffsetDateTime, OffsetTime) @param formatter the base DateTimeFormatter instance
java
spring-context/src/main/java/org/springframework/format/datetime/standard/TemporalAccessorParser.java
88
[ "text", "locale" ]
TemporalAccessor
true
6
6.08
spring-projects/spring-framework
59,386
javadoc
false
argmin
def argmin(self, skipna: bool = True) -> int:
    """
    Return the index of minimum value.

    When the minimum occurs several times, the position of its first
    occurrence is returned.

    Parameters
    ----------
    skipna : bool, default True
        If False and the array contains NA values, a ValueError is raised.

    Returns
    -------
    int

    See Also
    --------
    ExtensionArray.argmax : Return the index of the maximum value.

    Examples
    --------
    >>> arr = pd.array([3, 1, 2, 5, 4])
    >>> arr.argmin()
    np.int64(1)
    """
    # Subclasses can customize this either via _values_for_argsort (the
    # values fed to nargminmax) or by overriding argmin entirely.
    validate_bool_kwarg(skipna, "skipna")
    if not skipna and self._hasna:
        raise ValueError("Encountered an NA value with skipna=False")
    return nargminmax(self, "argmin")
Return the index of minimum value. In case of multiple occurrences of the minimum value, the index corresponding to the first occurrence is returned. Parameters ---------- skipna : bool, default True Returns ------- int See Also -------- ExtensionArray.argmax : Return the index of the maximum value. Examples -------- >>> arr = pd.array([3, 1, 2, 5, 4]) >>> arr.argmin() np.int64(1)
python
pandas/core/arrays/base.py
968
[ "self", "skipna" ]
int
true
3
8.48
pandas-dev/pandas
47,362
numpy
false
getAllSuperclasses
/**
 * Gets a {@link List} of superclasses for the given class, ordered from the
 * immediate superclass up to {@link Object}.
 *
 * @param cls the class to look up, may be {@code null}.
 * @return the {@link List} of superclasses, or {@code null} for null input.
 */
public static List<Class<?>> getAllSuperclasses(final Class<?> cls) {
    if (cls == null) {
        return null;
    }
    final List<Class<?>> result = new ArrayList<>();
    // Walk the superclass chain; Object.getSuperclass() returns null, ending the loop.
    for (Class<?> current = cls.getSuperclass(); current != null; current = current.getSuperclass()) {
        result.add(current);
    }
    return result;
}
Gets a {@link List} of superclasses for the given class. @param cls the class to look up, may be {@code null}. @return the {@link List} of superclasses in order going up from this one {@code null} if null input.
java
src/main/java/org/apache/commons/lang3/ClassUtils.java
404
[ "cls" ]
true
3
8.24
apache/commons-lang
2,896
javadoc
false
of
/**
 * Create a new {@link CorrelationIdFormatter} instance from the given
 * specification, converting each element to a {@link Part}.
 * @param spec a pre-separated specification; an empty or {@code null} spec
 * yields the default formatter
 * @return a new {@link CorrelationIdFormatter} instance
 */
public static CorrelationIdFormatter of(Collection<String> spec) {
	return CollectionUtils.isEmpty(spec) ? DEFAULT
			: new CorrelationIdFormatter(spec.stream().map(Part::of).toList());
}
Create a new {@link CorrelationIdFormatter} instance from the given specification. @param spec a pre-separated specification @return a new {@link CorrelationIdFormatter} instance
java
core/spring-boot/src/main/java/org/springframework/boot/logging/CorrelationIdFormatter.java
156
[ "spec" ]
CorrelationIdFormatter
true
2
7.12
spring-projects/spring-boot
79,428
javadoc
false
maybeAutoCommitOffsetsAsync
/**
 * Triggers an asynchronous offset commit when auto-commit is enabled.
 *
 * @return the future tracking the commit, or {@code null} when auto-commit is disabled
 */
private RequestFuture<Void> maybeAutoCommitOffsetsAsync() {
    return autoCommitEnabled ? autoCommitOffsetsAsync() : null;
}
Commit offsets asynchronously if auto-commit is enabled. @return the request future tracking the commit if one was initiated, or null when auto-commit is disabled
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
1,248
[]
true
2
7.44
apache/kafka
31,560
javadoc
false
combine_first
def combine_first(self, other) -> Series:
    """
    Update null elements with value in the same location in 'other'.

    Combine two Series objects by filling null values in one Series with
    non-null values from the other Series. Result index will be the union
    of the two indexes.

    Parameters
    ----------
    other : Series
        The value(s) to be used for filling null values.

    Returns
    -------
    Series
        The result of combining the provided Series with the other object.

    See Also
    --------
    Series.combine : Perform element-wise operation on two Series using a given
        function.

    Examples
    --------
    >>> s1 = pd.Series([1, np.nan])
    >>> s2 = pd.Series([3, 4, 5])
    >>> s1.combine_first(s2)
    0    1.0
    1    4.0
    2    5.0
    dtype: float64

    Null values still persist if the location of that null value
    does not exist in `other`

    >>> s1 = pd.Series({"falcon": np.nan, "eagle": 160.0})
    >>> s2 = pd.Series({"eagle": 200.0, "duck": 30.0})
    >>> s1.combine_first(s2)
    duck       30.0
    eagle     160.0
    falcon      NaN
    dtype: float64
    """
    from pandas.core.reshape.concat import concat

    if self.dtype == other.dtype:
        if self.index.equals(other.index):
            # Fast path: aligned, same-dtype inputs just mask NA positions.
            return self.mask(self.isna(), other)

    new_index = self.index.union(other.index)

    this = self

    # identify the index subset to keep for each series
    keep_other = other.index.difference(this.index[notna(this)])
    keep_this = this.index.difference(keep_other)

    this = this.reindex(keep_this)
    other = other.reindex(keep_other)

    if this.dtype.kind == "M" and other.dtype.kind != "M":
        # TODO: try to match resos?
        other = to_datetime(other)
        warnings.warn(
            # GH#62931
            "Silently casting non-datetime 'other' to datetime in "
            "Series.combine_first is deprecated and will be removed "
            "in a future version. Explicitly cast before calling "
            "combine_first instead.",
            Pandas4Warning,
            stacklevel=find_stack_level(),
        )

    # Concatenate the disjoint pieces and restore the union index ordering.
    combined = concat([this, other])
    combined = combined.reindex(new_index)
    return combined.__finalize__(self, method="combine_first")
Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({"falcon": np.nan, "eagle": 160.0}) >>> s2 = pd.Series({"eagle": 200.0, "duck": 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64
python
pandas/core/series.py
3,269
[ "self", "other" ]
Series
true
5
8.56
pandas-dev/pandas
47,362
numpy
false
send_robust
def send_robust(self, sender, **named):
    """
    Send signal from sender to all connected receivers catching errors.

    If any receivers are asynchronous, they are called after all the
    synchronous receivers via a single call to async_to_sync(). They are
    also executed concurrently with asyncio.TaskGroup().

    Arguments:

        sender
            The sender of the signal. Can be any Python object (normally one
            registered with a connect if you actually want something to
            occur).

        named
            Named arguments which will be passed to receivers.

    Return a list of tuple pairs [(receiver, response), ... ]. If any
    receiver raises an error (specifically any subclass of Exception),
    return the error instance as the result for that receiver.
    """
    if (
        not self.receivers
        or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
    ):
        return []

    # Call each receiver with whatever arguments it can accept.
    # Return a list of tuple pairs [(receiver, response), ... ].
    responses = []
    sync_receivers, async_receivers = self._live_receivers(sender)
    for receiver in sync_receivers:
        try:
            response = receiver(signal=self, sender=sender, **named)
        except Exception as err:
            # Robust mode: record the exception instead of propagating it.
            self._log_robust_failure(receiver, err)
            responses.append((receiver, err))
        else:
            responses.append((receiver, response))
    if async_receivers:

        async def asend_and_wrap_exception(receiver):
            # Mirror of the sync error handling: swallow and return the error.
            try:
                response = await receiver(signal=self, sender=sender, **named)
            except Exception as err:
                self._log_robust_failure(receiver, err)
                return err
            return response

        async def asend():
            # Run all async receivers concurrently and pair each with its result.
            async_responses = await _gather(
                *(
                    asend_and_wrap_exception(receiver)
                    for receiver in async_receivers
                )
            )
            return zip(async_receivers, async_responses)

        responses.extend(async_to_sync(asend)())
    return responses
Send signal from sender to all connected receivers catching errors. If any receivers are asynchronous, they are called after all the synchronous receivers via a single call to async_to_sync(). They are also executed concurrently with asyncio.TaskGroup(). Arguments: sender The sender of the signal. Can be any Python object (normally one registered with a connect if you actually want something to occur). named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. If any receiver raises an error (specifically any subclass of Exception), return the error instance as the result for that receiver.
python
django/dispatch/dispatcher.py
313
[ "self", "sender" ]
false
6
6
django/django
86,204
google
false
_maybe_mark
def _maybe_mark(
    estimator,
    check,
    expected_failed_checks: dict[str, str] | None = None,
    mark: Literal["xfail", "skip", None] = None,
    pytest=None,
    xfail_strict: bool | None = None,
):
    """Mark the test as xfail or skip if needed.

    Parameters
    ----------
    estimator : estimator object
        Estimator instance for which to generate checks.
    check : partial or callable
        Check to be marked.
    expected_failed_checks : dict[str, str], default=None
        Dictionary of the form {check_name: reason} for checks that are
        expected to fail.
    mark : "xfail" or "skip" or None
        Whether to mark the check as xfail or skip.
    pytest : pytest module, default=None
        Pytest module to use to mark the check. This is only needed if ``mark``
        is `"xfail"`. Note that one can run `check_estimator` without having
        `pytest` installed. This is used in combination with
        `parametrize_with_checks` only.
    xfail_strict : bool, default=None
        Whether to run checks in xfail strict mode. This option is ignored
        unless `mark="xfail"`. If True, checks that are expected to fail but
        actually pass will lead to a test failure. If False, unexpectedly
        passing tests will be marked as xpass. If None, the default pytest
        behavior is used.

        .. versionadded:: 1.8
    """
    should_be_marked, reason = _should_be_skipped_or_marked(
        estimator, check, expected_failed_checks
    )
    if not should_be_marked or mark is None:
        # Nothing to do: run the check as-is.
        return estimator, check

    estimator_name = estimator.__class__.__name__
    if mark == "xfail":
        # With xfail_strict=None we want the value from the pytest config to
        # take precedence and that means not passing strict to the xfail
        # mark at all.
        if xfail_strict is None:
            mark = pytest.mark.xfail(reason=reason)
        else:
            mark = pytest.mark.xfail(reason=reason, strict=xfail_strict)
        return pytest.param(estimator, check, marks=mark)
    else:
        # "skip" path: wrap the check so it raises SkipTest when invoked,
        # keeping the original check's metadata via @wraps.
        @wraps(check)
        def wrapped(*args, **kwargs):
            raise SkipTest(
                f"Skipping {_check_name(check)} for {estimator_name}: {reason}"
            )

        return estimator, wrapped
Mark the test as xfail or skip if needed. Parameters ---------- estimator : estimator object Estimator instance for which to generate checks. check : partial or callable Check to be marked. expected_failed_checks : dict[str, str], default=None Dictionary of the form {check_name: reason} for checks that are expected to fail. mark : "xfail" or "skip" or None Whether to mark the check as xfail or skip. pytest : pytest module, default=None Pytest module to use to mark the check. This is only needed if ``mark`` is `"xfail"`. Note that one can run `check_estimator` without having `pytest` installed. This is used in combination with `parametrize_with_checks` only. xfail_strict : bool, default=None Whether to run checks in xfail strict mode. This option is ignored unless `mark="xfail"`. If True, checks that are expected to fail but actually pass will lead to a test failure. If False, unexpectedly passing tests will be marked as xpass. If None, the default pytest behavior is used. .. versionadded:: 1.8
python
sklearn/utils/estimator_checks.py
421
[ "estimator", "check", "expected_failed_checks", "mark", "pytest", "xfail_strict" ]
true
7
6.96
scikit-learn/scikit-learn
64,340
numpy
false
ohlc
def ohlc(self):
    """
    Compute open, high, low and close values of a group, excluding missing
    values.

    Returns
    -------
    DataFrame
        Open, high, low and close values within each group.

    See Also
    --------
    DataFrame.agg : Aggregate using one or more operations over
        the specified axis.
    DataFrame.resample : Resample time-series data.
    DataFrame.groupby : Group DataFrame using a mapper or by a Series of
        columns.

    Examples
    --------
    >>> ser = pd.Series(
    ...     [1, 3, 2, 4, 3, 5],
    ...     index=pd.DatetimeIndex(
    ...         [
    ...             "2023-01-01",
    ...             "2023-01-10",
    ...             "2023-01-15",
    ...             "2023-02-01",
    ...             "2023-02-10",
    ...             "2023-02-15",
    ...         ]
    ...     ),
    ... )
    >>> ser.resample("MS").ohlc()
                open  high  low  close
    2023-01-01     1     3    1      2
    2023-02-01     4     5    3      5
    """
    ax = self.ax
    obj = self._obj_with_exclusions
    if len(ax) == 0:
        # GH#42902: empty axis — build an empty frame with the resampled
        # frequency applied and the ohlc columns in place.
        obj = obj.copy()
        obj.index = _asfreq_compat(obj.index, self.freq)
        if obj.ndim == 1:
            obj = obj.to_frame()
            obj = obj.reindex(["open", "high", "low", "close"], axis=1)
        else:
            # DataFrame input: one (column, ohlc-field) pair per column.
            mi = MultiIndex.from_product(
                [obj.columns, ["open", "high", "low", "close"]]
            )
            obj = obj.reindex(mi, axis=1)
        return obj

    return self._downsample("ohlc")
Compute open, high, low and close values of a group, excluding missing values. Returns ------- DataFrame Open, high, low and close values within each group. See Also -------- DataFrame.agg : Aggregate using one or more operations over the specified axis. DataFrame.resample : Resample time-series data. DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 5], ... index=pd.DatetimeIndex( ... [ ... "2023-01-01", ... "2023-01-10", ... "2023-01-15", ... "2023-02-01", ... "2023-02-10", ... "2023-02-15", ... ] ... ), ... ) >>> ser.resample("MS").ohlc() open high low close 2023-01-01 1 3 1 2 2023-02-01 4 5 3 5
python
pandas/core/resample.py
1,707
[ "self" ]
false
4
6.64
pandas-dev/pandas
47,362
unknown
false
deleteConsumerGroupOffsets
/**
 * Delete committed offsets for a set of partitions in a consumer group. This
 * will succeed at the partition level only if the group is not actively
 * subscribed to the corresponding topic.
 *
 * @param groupId the id of the consumer group
 * @param partitions the topic partitions whose committed offsets should be deleted
 * @param options the options to use when deleting offsets in a consumer group
 * @return the DeleteConsumerGroupOffsetsResult
 */
DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions, DeleteConsumerGroupOffsetsOptions options);
Delete committed offsets for a set of partitions in a consumer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic. @param options The options to use when deleting offsets in a consumer group. @return The DeleteConsumerGroupOffsetsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
1,022
[ "groupId", "partitions", "options" ]
DeleteConsumerGroupOffsetsResult
true
1
6.64
apache/kafka
31,560
javadoc
false
toString
@Override public String toString() { return "QuorumInfo(" + "leaderId=" + leaderId + ", leaderEpoch=" + leaderEpoch + ", highWatermark=" + highWatermark + ", voters=" + voters + ", observers=" + observers + ", nodes=" + nodes + ')'; }
@return The voter nodes in the Raft cluster, or an empty map if KIP-853 is not enabled.
java
clients/src/main/java/org/apache/kafka/clients/admin/QuorumInfo.java
98
[]
String
true
1
7.04
apache/kafka
31,560
javadoc
false
chebline
def chebline(off, scl): """ Chebyshev series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``off + scl*x``. Returns ------- y : ndarray This module's representation of the Chebyshev series for ``off + scl*x``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.legendre.legline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> import numpy.polynomial.chebyshev as C >>> C.chebline(3,2) array([3, 2]) >>> C.chebval(-3, C.chebline(3,2)) # should be -3 -3.0 """ if scl != 0: return np.array([off, scl]) else: return np.array([off])
Chebyshev series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``off + scl*x``. Returns ------- y : ndarray This module's representation of the Chebyshev series for ``off + scl*x``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.legendre.legline numpy.polynomial.laguerre.lagline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> import numpy.polynomial.chebyshev as C >>> C.chebline(3,2) array([3, 2]) >>> C.chebval(-3, C.chebline(3,2)) # should be -3 -3.0
python
numpy/polynomial/chebyshev.py
474
[ "off", "scl" ]
false
3
7.04
numpy/numpy
31,054
numpy
false
bulkSeparator
byte bulkSeparator();
@return a {@link byte} that separates items in a bulk request that uses this {@link XContent}. @throws RuntimeException if this {@link XContent} does not support a delimited bulk format. See {@link #hasBulkSeparator()}.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java
42
[]
true
1
6.48
elastic/elasticsearch
75,680
javadoc
false
exclusiveBetween
public static void exclusiveBetween(final double start, final double end, final double value, final String message) { // TODO when breaking BC, consider returning value if (value <= start || value >= end) { throw new IllegalArgumentException(message); } }
Validate that the specified primitive value falls between the two exclusive values specified; otherwise, throws an exception with the specified message. <pre>Validate.exclusiveBetween(0.1, 2.1, 1.1, "Not in range");</pre> @param start the exclusive start value. @param end the exclusive end value. @param value the value to validate. @param message the exception message if invalid, not null. @throws IllegalArgumentException if the value falls outside the boundaries. @since 3.3
java
src/main/java/org/apache/commons/lang3/Validate.java
114
[ "start", "end", "value", "message" ]
void
true
3
6.56
apache/commons-lang
2,896
javadoc
false
listCacheDelete
function listCacheDelete(key) { var data = this.__data__, index = assocIndexOf(data, key); if (index < 0) { return false; } var lastIndex = data.length - 1; if (index == lastIndex) { data.pop(); } else { splice.call(data, index, 1); } --this.size; return true; }
Removes `key` and its value from the list cache. @private @name delete @memberOf ListCache @param {string} key The key of the value to remove. @returns {boolean} Returns `true` if the entry was removed, else `false`.
javascript
lodash.js
2,082
[ "key" ]
false
4
6.08
lodash/lodash
61,490
jsdoc
false
equals
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } Ansi8BitColor other = (Ansi8BitColor) obj; return this.prefix.equals(other.prefix) && this.code == other.code; }
Create a new {@link Ansi8BitColor} instance. @param prefix the prefix escape chars @param code color code (must be 0-255) @throws IllegalArgumentException if color code is not between 0 and 255.
java
core/spring-boot/src/main/java/org/springframework/boot/ansi/Ansi8BitColor.java
48
[ "obj" ]
true
5
6.72
spring-projects/spring-boot
79,428
javadoc
false
processEvent
public void processEvent(ApplicationEvent event) { @Nullable Object[] args = resolveArguments(event); if (shouldHandle(event, args)) { Object result = doInvoke(args); if (result != null) { handleResult(result); } else { logger.trace("No result object given - no result to handle"); } } }
Process the specified {@link ApplicationEvent}, checking if the condition matches and handling a non-null result, if any. @param event the event to process through the listener method
java
spring-context/src/main/java/org/springframework/context/event/ApplicationListenerMethodAdapter.java
249
[ "event" ]
void
true
3
6.56
spring-projects/spring-framework
59,386
javadoc
false
value_counts
def value_counts(self, dropna: bool = True) -> Series: """ Return a Series containing counts of each unique value. Parameters ---------- dropna : bool, default True Don't include counts of missing values. Returns ------- counts : Series See Also -------- Series.value_counts """ from pandas import ( Index, Series, ) data = self._pa_array vc = data.value_counts() values = vc.field(0) counts = vc.field(1) if dropna and data.null_count > 0: mask = values.is_valid() values = values.filter(mask) counts = counts.filter(mask) counts = ArrowExtensionArray(counts) index = Index(self._from_pyarrow_array(values)) return Series(counts, index=index, name="count", copy=False)
Return a Series containing counts of each unique value. Parameters ---------- dropna : bool, default True Don't include counts of missing values. Returns ------- counts : Series See Also -------- Series.value_counts
python
pandas/core/arrays/arrow/array.py
1,773
[ "self", "dropna" ]
Series
true
3
6.72
pandas-dev/pandas
47,362
numpy
false
nullToEmpty
public static <T> T[] nullToEmpty(final T[] array, final Class<T[]> type) { if (type == null) { throw new IllegalArgumentException("The type must not be null"); } if (array == null) { return type.cast(Array.newInstance(type.getComponentType(), 0)); } return array; }
Defensive programming technique to change a {@code null} reference to an empty one. <p> This method returns an empty array for a {@code null} input array. </p> @param array the array to check for {@code null} or empty. @param type the class representation of the desired array. @param <T> the class type. @return the same array, {@code public static} empty array if {@code null}. @throws IllegalArgumentException if the type argument is null. @since 3.5
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
4,640
[ "array", "type" ]
true
3
8.08
apache/commons-lang
2,896
javadoc
false
textOrNull
String textOrNull() throws IOException;
Returns an instance of {@link Map} holding parsed map. Serves as a replacement for the "map", "mapOrdered" and "mapStrings" methods above. @param mapFactory factory for creating new {@link Map} objects @param mapValueParser parser for parsing a single map value @param <T> map value type @return {@link Map} object
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentParser.java
110
[]
String
true
1
6.32
elastic/elasticsearch
75,680
javadoc
false
calculate_tensor_size
def calculate_tensor_size(tensor: torch.Tensor) -> float: """ Calculate the size of a PyTorch tensor in megabytes (MB). Args: tensor (torch.Tensor): Input tensor Returns: float: Memory size in MB """ # Get number of elements and size per element num_elements = tensor.numel() element_size = tensor.element_size() return (num_elements * element_size) / (1024 * 1024)
Calculate the size of a PyTorch tensor in megabytes (MB). Args: tensor (torch.Tensor): Input tensor Returns: float: Memory size in MB
python
torch/_functorch/partitioners.py
536
[ "tensor" ]
float
true
1
6.72
pytorch/pytorch
96,034
google
false
appendPossibility
static void appendPossibility(StringBuilder description) { if (!description.toString().endsWith(System.lineSeparator())) { description.append("%n".formatted()); } description.append("%n%s".formatted(POSSIBILITY)); }
Analyze the given failure for missing parameter name exceptions. @param failure the failure to analyze @return a failure analysis or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/diagnostics/analyzer/MissingParameterNamesFailureAnalyzer.java
111
[ "description" ]
void
true
2
7.92
spring-projects/spring-boot
79,428
javadoc
false
on_pre_execution
def on_pre_execution(**kwargs): """ Call callbacks before execution. Note that any exception from callback will be logged but won't be propagated. :param kwargs: :return: None """ logger.debug("Calling callbacks: %s", __pre_exec_callbacks) for callback in __pre_exec_callbacks: try: callback(**kwargs) except Exception: logger.exception("Failed on pre-execution callback using %s", callback)
Call callbacks before execution. Note that any exception from callback will be logged but won't be propagated. :param kwargs: :return: None
python
airflow-core/src/airflow/utils/cli_action_loggers.py
70
[]
false
2
7.44
apache/airflow
43,597
sphinx
false
to_coo
def to_coo(self) -> spmatrix: """ Return the contents of the frame as a sparse SciPy COO matrix. Returns ------- scipy.sparse.spmatrix If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.sparse.to_dense : Convert a DataFrame with sparse values to dense. Notes ----- The dtype will be the lowest-common-denominator type (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. By numpy.find_common_type convention, mixing int64 and and uint64 will result in a float64 dtype. Examples -------- >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])}) >>> df.sparse.to_coo() <COOrdinate sparse matrix of dtype 'int64' with 2 stored elements and shape (4, 1)> """ import_optional_dependency("scipy") from scipy.sparse import coo_matrix dtype = find_common_type(self._parent.dtypes.to_list()) if isinstance(dtype, SparseDtype): dtype = dtype.subtype cols, rows, data = [], [], [] for col, (_, ser) in enumerate(self._parent.items()): sp_arr = ser.array row = sp_arr.sp_index.indices cols.append(np.repeat(col, len(row))) rows.append(row) data.append(sp_arr.sp_values.astype(dtype, copy=False)) cols_arr = np.concatenate(cols) rows_arr = np.concatenate(rows) data_arr = np.concatenate(data) return coo_matrix((data_arr, (rows_arr, cols_arr)), shape=self._parent.shape)
Return the contents of the frame as a sparse SciPy COO matrix. Returns ------- scipy.sparse.spmatrix If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.sparse.to_dense : Convert a DataFrame with sparse values to dense. Notes ----- The dtype will be the lowest-common-denominator type (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. By numpy.find_common_type convention, mixing int64 and and uint64 will result in a float64 dtype. Examples -------- >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])}) >>> df.sparse.to_coo() <COOrdinate sparse matrix of dtype 'int64' with 2 stored elements and shape (4, 1)>
python
pandas/core/arrays/sparse/accessor.py
396
[ "self" ]
spmatrix
true
3
7.28
pandas-dev/pandas
47,362
unknown
false
_is_valid_na_for
def _is_valid_na_for(self, dtype: DtypeObj) -> bool: """ Check that we are all-NA of a type/dtype that is compatible with this dtype. Augments `self.is_na` with an additional check of the type of NA values. """ if not self.is_na: return False blk = self.block if blk.dtype.kind == "V": return True if blk.dtype == object: values = blk.values return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K")) na_value = blk.fill_value if na_value is NaT and blk.dtype != dtype: # e.g. we are dt64 and other is td64 # fill_values match but we should not cast blk.values to dtype # TODO: this will need updating if we ever have non-nano dt64/td64 return False if na_value is NA and needs_i8_conversion(dtype): # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat # e.g. blk.dtype == "Int64" and dtype is td64, we dont want # to consider these as matching return False # TODO: better to use can_hold_element? return is_valid_na_for_dtype(na_value, dtype)
Check that we are all-NA of a type/dtype that is compatible with this dtype. Augments `self.is_na` with an additional check of the type of NA values.
python
pandas/core/internals/concat.py
308
[ "self", "dtype" ]
bool
true
8
6
pandas-dev/pandas
47,362
unknown
false
atleast_nd
def atleast_nd(x: Array, /, *, ndim: int, xp: ModuleType | None = None) -> Array: """ Recursively expand the dimension of an array to at least `ndim`. Parameters ---------- x : array Input array. ndim : int The minimum number of dimensions for the result. xp : array_namespace, optional The standard-compatible namespace for `x`. Default: infer. Returns ------- array An array with ``res.ndim`` >= `ndim`. If ``x.ndim`` >= `ndim`, `x` is returned. If ``x.ndim`` < `ndim`, `x` is expanded by prepending new axes until ``res.ndim`` equals `ndim`. Examples -------- >>> import array_api_strict as xp >>> import array_api_extra as xpx >>> x = xp.asarray([1]) >>> xpx.atleast_nd(x, ndim=3, xp=xp) Array([[[1]]], dtype=array_api_strict.int64) >>> x = xp.asarray([[[1, 2], ... [3, 4]]]) >>> xpx.atleast_nd(x, ndim=1, xp=xp) is x True """ if xp is None: xp = array_namespace(x) if x.ndim < ndim: x = xp.expand_dims(x, axis=0) x = atleast_nd(x, ndim=ndim, xp=xp) return x
Recursively expand the dimension of an array to at least `ndim`. Parameters ---------- x : array Input array. ndim : int The minimum number of dimensions for the result. xp : array_namespace, optional The standard-compatible namespace for `x`. Default: infer. Returns ------- array An array with ``res.ndim`` >= `ndim`. If ``x.ndim`` >= `ndim`, `x` is returned. If ``x.ndim`` < `ndim`, `x` is expanded by prepending new axes until ``res.ndim`` equals `ndim`. Examples -------- >>> import array_api_strict as xp >>> import array_api_extra as xpx >>> x = xp.asarray([1]) >>> xpx.atleast_nd(x, ndim=3, xp=xp) Array([[[1]]], dtype=array_api_strict.int64) >>> x = xp.asarray([[[1, 2], ... [3, 4]]]) >>> xpx.atleast_nd(x, ndim=1, xp=xp) is x True
python
sklearn/externals/array_api_extra/_lib/_funcs.py
178
[ "x", "ndim", "xp" ]
Array
true
3
8.32
scikit-learn/scikit-learn
64,340
numpy
false
parseBuckets
private static void parseBuckets(Map<String, List<Number>> serializedBuckets, BiConsumer<Long, Long> bucketSetter) { List<Number> indices = serializedBuckets.getOrDefault(BUCKET_INDICES_FIELD, Collections.emptyList()); List<Number> counts = serializedBuckets.getOrDefault(BUCKET_COUNTS_FIELD, Collections.emptyList()); assert indices.size() == counts.size(); for (int i = 0; i < indices.size(); i++) { bucketSetter.accept(indices.get(i).longValue(), counts.get(i).longValue()); } }
Parses an {@link ExponentialHistogram} from a {@link Map}. This method is neither optimized, nor does it do any validation of the parsed content. No estimation for missing sum/min/max is done. Therefore only intended for testing! @param xContent the serialized histogram as a map @return the deserialized histogram
java
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ExponentialHistogramXContent.java
169
[ "serializedBuckets", "bucketSetter" ]
void
true
2
7.92
elastic/elasticsearch
75,680
javadoc
false
forCurrentThread
static SpringBootExceptionHandler forCurrentThread() { return handler.get(); }
Check if the exception is a log configuration message, i.e. the log call might not have actually output anything. @param ex the source exception @return {@code true} if the exception contains a log configuration message
java
core/spring-boot/src/main/java/org/springframework/boot/SpringBootExceptionHandler.java
121
[]
SpringBootExceptionHandler
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
onMemberEpochUpdated
@Override public void onMemberEpochUpdated(Optional<Integer> memberEpoch, String memberId) { if (memberEpoch.isEmpty() && memberInfo.memberEpoch.isPresent()) { log.info("Member {} won't include epoch in following offset " + "commit/fetch requests because it has left the group.", memberInfo.memberId); } else if (memberEpoch.isPresent()) { log.debug("Member {} will include new member epoch {} in following offset commit/fetch requests.", memberId, memberEpoch); } memberInfo.memberId = memberId; memberInfo.memberEpoch = memberEpoch; }
Update latest member epoch used by the member. @param memberEpoch New member epoch received. To be included in the new request. @param memberId Current member ID. To be included in the new request.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
596
[ "memberEpoch", "memberId" ]
void
true
4
6.88
apache/kafka
31,560
javadoc
false
create
static Admin create(Map<String, Object> conf) { return KafkaAdminClient.createInternal(new AdminClientConfig(conf, true), null, null); }
Create a new Admin with the given configuration. @param conf The configuration. @return The new KafkaAdminClient.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
143
[ "conf" ]
Admin
true
1
6.32
apache/kafka
31,560
javadoc
false
ofEntries
@SafeVarargs @SuppressWarnings("unchecked") public static <K,V> ManagedMap<K,V> ofEntries(Entry<? extends K, ? extends V>... entries) { ManagedMap<K,V > map = new ManagedMap<>(); for (Entry<? extends K, ? extends V> entry : entries) { map.put(entry.getKey(), entry.getValue()); } return map; }
Return a new instance containing keys and values extracted from the given entries. The entries themselves are not stored in the map. @param entries {@code Map.Entry}s containing the keys and values from which the map is populated @param <K> the {@code Map}'s key type @param <V> the {@code Map}'s value type @return a {@code Map} containing the specified mappings @since 5.3.16
java
spring-beans/src/main/java/org/springframework/beans/factory/support/ManagedMap.java
68
[]
true
1
7.04
spring-projects/spring-framework
59,386
javadoc
false
containsBeanDefinition
boolean containsBeanDefinition(String beanName);
Check if this bean factory contains a bean definition with the given name. <p>Does not consider any hierarchy this factory may participate in, and ignores any singleton beans that have been registered by other means than bean definitions. @param beanName the name of the bean to look for @return if this bean factory contains a bean definition with the given name @see #containsBean
java
spring-beans/src/main/java/org/springframework/beans/factory/ListableBeanFactory.java
71
[ "beanName" ]
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
pin_min_versions_to_ci_deps
def pin_min_versions_to_ci_deps() -> int: """ Pin minimum versions to CI dependencies. Pip dependencies are not pinned. """ all_yaml_files = list(YAML_PATH.iterdir()) all_yaml_files.append(ENV_PATH) toml_dependencies = {} with open(SETUP_PATH, "rb") as toml_f: toml_dependencies = tomllib.load(toml_f) ret = 0 for curr_file in all_yaml_files: with open(curr_file, encoding="utf-8") as yaml_f: yaml_start_data = yaml_f.read() yaml_file = yaml.safe_load(yaml_start_data) yaml_dependencies = yaml_file["dependencies"] yaml_map = get_yaml_map_from(yaml_dependencies) toml_map = get_toml_map_from(toml_dependencies) yaml_result_data = pin_min_versions_to_yaml_file( yaml_map, toml_map, yaml_start_data ) if yaml_result_data != yaml_start_data: with open(curr_file, "w", encoding="utf-8") as f: f.write(yaml_result_data) ret |= 1 return ret
Pin minimum versions to CI dependencies. Pip dependencies are not pinned.
python
scripts/validate_min_versions_in_sync.py
49
[]
int
true
3
6.88
pandas-dev/pandas
47,362
unknown
false
sortedUniq
function sortedUniq(array) { return (array && array.length) ? baseSortedUniq(array) : []; }
This method is like `_.uniq` except that it's designed and optimized for sorted arrays. @static @memberOf _ @since 4.0.0 @category Array @param {Array} array The array to inspect. @returns {Array} Returns the new duplicate free array. @example _.sortedUniq([1, 1, 2]); // => [1, 2]
javascript
lodash.js
8,200
[ "array" ]
false
3
7.52
lodash/lodash
61,490
jsdoc
false
parseIso8601
private static Duration parseIso8601(String value) { try { return Duration.parse(value); } catch (Exception ex) { throw new IllegalArgumentException("'" + value + "' is not a valid ISO-8601 duration", ex); } }
Detect the style then parse the value to return a duration. @param value the value to parse @param unit the duration unit to use if the value doesn't specify one ({@code null} will default to ms) @return the parsed duration @throws IllegalArgumentException if the value is not a known style or cannot be parsed
java
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatterUtils.java
151
[ "value" ]
Duration
true
2
7.76
spring-projects/spring-framework
59,386
javadoc
false
existsSync
function existsSync(path) { try { path = getValidatedPath(path); } catch (err) { if (showExistsDeprecation && err?.code === 'ERR_INVALID_ARG_TYPE') { process.emitWarning( 'Passing invalid argument types to fs.existsSync is deprecated', 'DeprecationWarning', 'DEP0187', ); showExistsDeprecation = false; } return false; } return binding.existsSync(path); }
Synchronously tests whether or not the given path exists. @param {string | Buffer | URL} path @returns {boolean}
javascript
lib/fs.js
273
[ "path" ]
false
4
6.24
nodejs/node
114,839
jsdoc
false
packAsBinary
public static void packAsBinary(int[] vector, byte[] packed) { if (packed.length * Byte.SIZE < vector.length) { throw new IllegalArgumentException("packed array is too small: " + packed.length * Byte.SIZE + " < " + vector.length); } IMPL.packAsBinary(vector, packed); }
Packs the provided int array populated with "0" and "1" values into a byte array. @param vector the int array to pack, must contain only "0" and "1" values. @param packed the byte array to store the packed result, must be large enough to hold the packed data.
java
libs/simdvec/src/main/java/org/elasticsearch/simdvec/ESVectorUtil.java
389
[ "vector", "packed" ]
void
true
2
7.04
elastic/elasticsearch
75,680
javadoc
false
addAndGet
public double addAndGet(final Number operand) { this.value += operand.doubleValue(); return value; }
Increments this instance's value by {@code operand}; this method returns the value associated with the instance immediately after the addition operation. This method is not thread safe. @param operand the quantity to add, not null. @throws NullPointerException if {@code operand} is null. @return the value associated with this instance after adding the operand. @since 3.5
java
src/main/java/org/apache/commons/lang3/mutable/MutableDouble.java
127
[ "operand" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
getStrategy
static Strategy getStrategy(final int tokenLen) { switch (tokenLen) { case 1: return ISO_8601_1_STRATEGY; case 2: return ISO_8601_2_STRATEGY; case 3: return ISO_8601_3_STRATEGY; default: throw new IllegalArgumentException("invalid number of X"); } }
Factory method for ISO8601TimeZoneStrategies. @param tokenLen a token indicating the length of the TimeZone String to be formatted. @return a ISO8601TimeZoneStrategy that can format TimeZone String of length {@code tokenLen}. If no such strategy exists, an IllegalArgumentException will be thrown.
java
src/main/java/org/apache/commons/lang3/time/FastDateParser.java
212
[ "tokenLen" ]
Strategy
true
1
6.4
apache/commons-lang
2,896
javadoc
false
valueOf
public static String valueOf(final char[] value) { return value == null ? null : String.valueOf(value); }
Returns the string representation of the {@code char} array or null. @param value the character array. @return a String or null. @see String#valueOf(char[]) @since 3.9
java
src/main/java/org/apache/commons/lang3/StringUtils.java
9,049
[ "value" ]
String
true
2
8
apache/commons-lang
2,896
javadoc
false