Dataset schema. Each record carries fifteen fields, listed below in the order they appear in every sample record. String and list types report the observed length range; class types report the number of distinct values. Records with an empty return_type omit that field entirely.

  function_name    string   length 1–57
  function_code    string   length 20–4.99k
  documentation    string   length 50–2k
  language         string   5 classes
  file_path        string   length 8–166
  line_number      int32    range 4–16.7k
  parameters       list     length 0–20
  return_type      string   length 0–131
  has_type_hints   bool     2 classes
  complexity       int32    range 1–51
  quality_score    float32  range 6–9.68
  repo_name        string   34 classes
  repo_stars       int32    range 2.9k–242k
  docstring_style  string   7 classes
  is_async         bool     2 classes
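A dataset with this schema can be loaded and sliced with the Hugging Face datasets library. The sketch below is illustrative only: this preview does not state the dataset's hub ID or split name, so "example/code-functions" and "train" are hypothetical placeholders, not real names.

    # Minimal sketch, assuming a hypothetical hub ID and split name;
    # "example/code-functions" and "train" are placeholders.
    from datasets import load_dataset

    ds = load_dataset("example/code-functions", split="train")

    # Select typed, non-async Python functions scoring at least 7,
    # using only columns declared in the schema above.
    subset = ds.filter(
        lambda row: row["language"] == "python"
        and row["has_type_hints"]
        and not row["is_async"]
        and row["quality_score"] >= 7.0
    )
    print(len(subset))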
calculate_range
def calculate_range(dtype: torch.dtype) -> tuple:
    """
    Calculate the range of values for a given torch.dtype.

    Args:
        dtype (torch.dtype): The input dtype.

    Returns:
        tuple: A tuple containing the minimum and maximum values.
    """
    info = torch.finfo(dtype)
    return info.min, info.max
Calculate the range of values for a given torch.dtype. Args: dtype (torch.dtype): The input dtype. Returns: tuple: A tuple containing the minimum and maximum values.
python
torch/_functorch/partitioners.py
597
[ "dtype" ]
tuple
true
1
6.72
pytorch/pytorch
96,034
google
false
createEntrySet
@Override
ImmutableSet<Entry<K, V>> createEntrySet() {
  final class EntrySet extends ImmutableMapEntrySet<K, V> {
    @Override
    public UnmodifiableIterator<Entry<K, V>> iterator() {
      return asList().iterator();
    }

    @Override
    ImmutableList<Entry<K, V>> createAsList() {
      return new ImmutableList<Entry<K, V>>() {
        @Override
        public Entry<K, V> get(int index) {
          return new AbstractMap.SimpleImmutableEntry<>(
              keySet.asList().get(index), valueList.get(index));
        }

        @Override
        boolean isPartialView() {
          return true;
        }

        @Override
        public int size() {
          return ImmutableSortedMap.this.size();
        }

        // redeclare to help optimizers with b/310253115
        @SuppressWarnings("RedundantOverride")
        @Override
        @J2ktIncompatible
        @GwtIncompatible
        Object writeReplace() {
          return super.writeReplace();
        }
      };
    }

    @Override
    ImmutableMap<K, V> map() {
      return ImmutableSortedMap.this;
    }

    // redeclare to help optimizers with b/310253115
    @SuppressWarnings("RedundantOverride")
    @Override
    @J2ktIncompatible
    @GwtIncompatible
    Object writeReplace() {
      return super.writeReplace();
    }
  }
  return isEmpty() ? ImmutableSet.of() : new EntrySet();
}
Returns an immutable set of the mappings in this map, sorted by the key ordering.
java
android/guava/src/com/google/common/collect/ImmutableSortedMap.java
850
[]
true
2
7.2
google/guava
51,352
javadoc
false
hashDelete
function hashDelete(key) {
  var result = this.has(key) && delete this.__data__[key];
  this.size -= result ? 1 : 0;
  return result;
}
Removes `key` and its value from the hash. @private @name delete @memberOf Hash @param {Object} hash The hash to modify. @param {string} key The key of the value to remove. @returns {boolean} Returns `true` if the entry was removed, else `false`.
javascript
lodash.js
1,979
[ "key" ]
false
3
6.08
lodash/lodash
61,490
jsdoc
false
Core
explicit Core(Try<T>&& t) : CoreBase(State::OnlyResult, 1) {
  new (&this->result_) Result(std::move(t));
}
This can not be called concurrently with setResult().
cpp
folly/futures/detail/Core.h
688
[]
true
2
6.64
facebook/folly
30,157
doxygen
false
parse_arguments
def parse_arguments() -> argparse.Namespace:
    """
    Parses command-line arguments using argparse.

    Returns:
        argparse.Namespace: The parsed arguments containing the PR number,
        optional target directory, and strip count.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Download and apply a Pull Request (PR) patch from the PyTorch GitHub repository "
            "to your local PyTorch installation.\n\n"
            "Best Practice: Since this script involves hot-patching PyTorch, it's recommended to use "
            "a disposable environment like a Docker container or a dedicated Python virtual environment (venv). "
            "This ensures that if the patching fails, you can easily recover by resetting the environment."
        ),
        epilog=(
            "Example:\n"
            " python nightly_hotpatch.py 12345\n"
            " python nightly_hotpatch.py 12345 --directory /path/to/pytorch --strip 1\n\n"
            "These commands will download the patch for PR #12345 and apply it to your local "
            "PyTorch installation."
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "PR_NUMBER",
        type=int,
        help="The number of the Pull Request (PR) from the PyTorch GitHub repository to download and apply as a patch.",
    )
    parser.add_argument(
        "--directory",
        "-d",
        type=str,
        default=None,
        help="Optional. Specify the target directory to apply the patch. "
        "If not provided, the script will use the PyTorch installation path.",
    )
    parser.add_argument(
        "--strip",
        "-p",
        type=int,
        default=1,
        help="Optional. Specify the strip count to remove leading directories from file paths in the patch. Default is 1.",
    )
    return parser.parse_args()
Parses command-line arguments using argparse. Returns: argparse.Namespace: The parsed arguments containing the PR number, optional target directory, and strip count.
python
tools/nightly_hotpatch.py
13
[]
argparse.Namespace
true
1
6.24
pytorch/pytorch
96,034
unknown
false
_filter_header
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.
    """
    import tokenize
    from io import StringIO

    tokens = []
    last_token_was_number = False
    for token in tokenize.generate_tokens(StringIO(s).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens)
Clean up 'L' in npz header ints. Cleans up the 'L' in strings representing integers. Needed to allow npz headers produced in Python2 to be read in Python3. Parameters ---------- s : string Npy file header. Returns ------- header : str Cleaned up header.
python
numpy/lib/_format_impl.py
586
[ "s" ]
false
6
6.08
numpy/numpy
31,054
numpy
false
toBoolean
public Boolean toBoolean() {
    return Boolean.valueOf(booleanValue());
}
Gets this mutable as an instance of Boolean. @return a Boolean instance containing the value from this mutable, never null @since 2.5
java
src/main/java/org/apache/commons/lang3/mutable/MutableBoolean.java
198
[]
Boolean
true
1
6.8
apache/commons-lang
2,896
javadoc
false
combine
def combine(
    self,
    other: Series | Hashable,
    func: Callable[[Hashable, Hashable], Hashable],
    fill_value: Hashable | None = None,
) -> Series:
    """
    Combine the Series with a Series or scalar according to `func`.

    Combine the Series and `other` using `func` to perform elementwise
    selection for combined Series.
    `fill_value` is assumed when value is not present at some index
    from one of the two Series being combined.

    Parameters
    ----------
    other : Series or scalar
        The value(s) to be combined with the `Series`.
    func : function
        Function that takes two scalars as inputs and returns an element.
    fill_value : scalar, optional
        The value to assume when an index is missing from
        one Series or the other. The default specifies to use the
        appropriate NaN value for the underlying dtype of the Series.

    Returns
    -------
    Series
        The result of combining the Series with the other object.

    See Also
    --------
    Series.combine_first : Combine Series values, choosing the calling
        Series' values first.

    Examples
    --------
    Consider 2 Datasets ``s1`` and ``s2`` containing
    highest clocked speeds of different birds.

    >>> s1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
    >>> s1
    falcon    330.0
    eagle     160.0
    dtype: float64
    >>> s2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
    >>> s2
    falcon    345.0
    eagle     200.0
    duck       30.0
    dtype: float64

    Now, to combine the two datasets and view the highest speeds
    of the birds across the two datasets

    >>> s1.combine(s2, max)
    duck        NaN
    eagle     200.0
    falcon    345.0
    dtype: float64

    In the previous example, the resulting value for duck is missing,
    because the maximum of a NaN and a float is a NaN.
    So, in the example, we set ``fill_value=0``,
    so the maximum value returned will be the value from some dataset.

    >>> s1.combine(s2, max, fill_value=0)
    duck       30.0
    eagle     200.0
    falcon    345.0
    dtype: float64
    """
    if fill_value is None:
        fill_value = na_value_for_dtype(self.dtype, compat=False)

    if isinstance(other, Series):
        # If other is a Series, result is based on union of Series,
        # so do this element by element
        new_index = self.index.union(other.index)
        new_name = ops.get_op_result_name(self, other)
        new_values = np.empty(len(new_index), dtype=object)
        with np.errstate(all="ignore"):
            for i, idx in enumerate(new_index):
                lv = self.get(idx, fill_value)
                rv = other.get(idx, fill_value)
                new_values[i] = func(lv, rv)
    else:
        # Assume that other is a scalar, so apply the function for
        # each element in the Series
        new_index = self.index
        new_values = np.empty(len(new_index), dtype=object)
        with np.errstate(all="ignore"):
            new_values[:] = [func(lv, other) for lv in self._values]
        new_name = self.name

    res_values = self.array._cast_pointwise_result(new_values)
    return self._constructor(
        res_values,
        dtype=res_values.dtype,
        index=new_index,
        name=new_name,
        copy=False,
    )
Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is not present at some index from one of the two Series being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({"falcon": 330.0, "eagle": 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. >>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64
python
pandas/core/series.py
3,165
[ "self", "other", "func", "fill_value" ]
Series
true
5
8.56
pandas-dev/pandas
47,362
numpy
false
getPointcut
private @Nullable AspectJExpressionPointcut getPointcut(Method candidateAdviceMethod, Class<?> candidateAspectClass) {
    AspectJAnnotation aspectJAnnotation =
            AbstractAspectJAdvisorFactory.findAspectJAnnotationOnMethod(candidateAdviceMethod);
    if (aspectJAnnotation == null) {
        return null;
    }
    AspectJExpressionPointcut ajexp =
            new AspectJExpressionPointcut(candidateAspectClass, new String[0], new Class<?>[0]);
    ajexp.setExpression(aspectJAnnotation.getPointcutExpression());
    if (this.beanFactory != null) {
        ajexp.setBeanFactory(this.beanFactory);
    }
    return ajexp;
}
Build a {@link org.springframework.aop.aspectj.DeclareParentsAdvisor} for the given introduction field. <p>Resulting Advisors will need to be evaluated for targets. @param introductionField the field to introspect @return the Advisor instance, or {@code null} if not an Advisor
java
spring-aop/src/main/java/org/springframework/aop/aspectj/annotation/ReflectiveAspectJAdvisorFactory.java
224
[ "candidateAdviceMethod", "candidateAspectClass" ]
AspectJExpressionPointcut
true
3
7.44
spring-projects/spring-framework
59,386
javadoc
false
isposinf
def isposinf(x, out=None):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    out : array_like, optional
        A location into which the result is stored. If provided, it must have a
        shape that the input broadcasts to. If not provided or None, a
        freshly-allocated boolean array is returned.

    Returns
    -------
    out : ndarray
        A boolean array with the same dimensions as the input.
        If second argument is not supplied then a boolean array is returned
        with values True where the corresponding element of the input is
        positive infinity and values False where the element of the input is
        not positive infinity.

        If a second argument is supplied the result is stored there. If the
        type of that array is a numeric type the result is represented as zeros
        and ones, if the type is boolean then as False and True.
        The return value `out` is then a reference to that array.

    See Also
    --------
    isinf, isneginf, isfinite, isnan

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).

    Errors result if the second argument is also supplied when x is a scalar
    input, if first and second arguments have different shapes, or if the
    first argument has complex values

    Examples
    --------
    >>> import numpy as np
    >>> np.isposinf(np.inf)
    True
    >>> np.isposinf(-np.inf)
    False
    >>> np.isposinf([-np.inf, 0., np.inf])
    array([False, False,  True])

    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([2, 2, 2])
    >>> np.isposinf(x, y)
    array([0, 0, 1])
    >>> y
    array([0, 0, 1])
    """
    is_inf = nx.isinf(x)
    try:
        signbit = ~nx.signbit(x)
    except TypeError as e:
        dtype = nx.asanyarray(x).dtype
        raise TypeError(f'This operation is not supported for {dtype} values '
                        'because it would be ambiguous.') from e
    else:
        return nx.logical_and(is_inf, signbit, out)
Test element-wise for positive infinity, return result as bool array. Parameters ---------- x : array_like The input array. out : array_like, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a freshly-allocated boolean array is returned. Returns ------- out : ndarray A boolean array with the same dimensions as the input. If second argument is not supplied then a boolean array is returned with values True where the corresponding element of the input is positive infinity and values False where the element of the input is not positive infinity. If a second argument is supplied the result is stored there. If the type of that array is a numeric type the result is represented as zeros and ones, if the type is boolean then as False and True. The return value `out` is then a reference to that array. See Also -------- isinf, isneginf, isfinite, isnan Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). Errors result if the second argument is also supplied when x is a scalar input, if first and second arguments have different shapes, or if the first argument has complex values Examples -------- >>> import numpy as np >>> np.isposinf(np.inf) True >>> np.isposinf(-np.inf) False >>> np.isposinf([-np.inf, 0., np.inf]) array([False, False, True]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([2, 2, 2]) >>> np.isposinf(x, y) array([0, 0, 1]) >>> y array([0, 0, 1])
python
numpy/lib/_ufunclike_impl.py
63
[ "x", "out" ]
false
2
7.44
numpy/numpy
31,054
numpy
false
filter
def filter(
    self,
    items=None,
    like: str | None = None,
    regex: str | None = None,
    axis: Axis | None = None,
) -> Self:
    """
    Subset the DataFrame or Series according to the specified index labels.

    For DataFrame, filter rows or columns depending on ``axis`` argument.
    Note that this routine does not filter based on content.
    The filter is applied to the labels of the index.

    Parameters
    ----------
    items : list-like
        Keep labels from axis which are in items.
    like : str
        Keep labels from axis for which "like in label == True".
    regex : str (regular expression)
        Keep labels from axis for which re.search(regex, label) == True.
    axis : {0 or 'index', 1 or 'columns', None}, default None
        The axis to filter on, expressed either as an index (int)
        or axis name (str). By default this is the info axis, 'columns' for
        ``DataFrame``. For ``Series`` this parameter is unused and defaults
        to ``None``.

    Returns
    -------
    Same type as caller
        The filtered subset of the DataFrame or Series.

    See Also
    --------
    DataFrame.loc : Access a group of rows and columns
        by label(s) or a boolean array.

    Notes
    -----
    The ``items``, ``like``, and ``regex`` parameters are
    enforced to be mutually exclusive.

    ``axis`` defaults to the info axis that is used when indexing with ``[]``.

    Examples
    --------
    >>> df = pd.DataFrame(
    ...     np.array(([1, 2, 3], [4, 5, 6])),
    ...     index=["mouse", "rabbit"],
    ...     columns=["one", "two", "three"],
    ... )
    >>> df
            one  two  three
    mouse     1    2      3
    rabbit    4    5      6

    >>> # select columns by name
    >>> df.filter(items=["one", "three"])
            one  three
    mouse     1      3
    rabbit    4      6

    >>> # select columns by regular expression
    >>> df.filter(regex="e$", axis=1)
            one  three
    mouse     1      3
    rabbit    4      6

    >>> # select rows containing 'bbi'
    >>> df.filter(like="bbi", axis=0)
            one  two  three
    rabbit    4    5      6
    """
    nkw = common.count_not_none(items, like, regex)
    if nkw > 1:
        raise TypeError(
            "Keyword arguments `items`, `like`, or `regex` are mutually exclusive"
        )

    if axis is None:
        axis = self._info_axis_name
    labels = self._get_axis(axis)

    if items is not None:
        name = self._get_axis_name(axis)
        items = Index(items).intersection(labels)
        if len(items) == 0:
            # Keep the dtype of labels when we are empty
            items = items.astype(labels.dtype)
        # error: Keywords must be strings
        return self.reindex(**{name: items})  # type: ignore[misc]
    elif like:

        def f(x) -> bool:
            assert like is not None  # needed for mypy
            return like in ensure_str(x)

        values = labels.map(f)
        return self.loc(axis=axis)[values]
    elif regex:

        def f(x) -> bool:
            return matcher.search(ensure_str(x)) is not None

        matcher = re.compile(regex)
        values = labels.map(f)
        return self.loc(axis=axis)[values]
    else:
        raise TypeError("Must pass either `items`, `like`, or `regex`")
Subset the DataFrame or Series according to the specified index labels. For DataFrame, filter rows or columns depending on ``axis`` argument. Note that this routine does not filter based on content. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or 'index', 1 or 'columns', None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for ``DataFrame``. For ``Series`` this parameter is unused and defaults to ``None``. Returns ------- Same type as caller The filtered subset of the DataFrame or Series. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame( ... np.array(([1, 2, 3], [4, 5, 6])), ... index=["mouse", "rabbit"], ... columns=["one", "two", "three"], ... ) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=["one", "three"]) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex="e$", axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like="bbi", axis=0) one two three rabbit 4 5 6
python
pandas/core/generic.py
5,518
[ "self", "items", "like", "regex", "axis" ]
Self
true
8
8.56
pandas-dev/pandas
47,362
numpy
false
get_partition_cudagraph_metadata
def get_partition_cudagraph_metadata(
    partition_map: GraphPartitionMap,
    metadata: CudagraphMetadata,
) -> CudagraphMetadata:
    """
    Convert the cudagraph metadata at the graph level to the graph partition
    level, given the graph partition info (i.e., mapping from partition
    input/output index to graph input/output index).
    """
    partition_placeholders = []
    partition_static_input_idxs: OrderedSet[int] = OrderedSet()
    partition_mutated_input_idxs: OrderedSet[int] = OrderedSet()
    for partition_input_idx, graph_input_idx in enumerate(
        partition_map.input_index_mapping
    ):
        if graph_input_idx in metadata.static_input_idxs:
            partition_static_input_idxs.add(partition_input_idx)

        if graph_input_idx in metadata.mutated_input_idxs:
            partition_mutated_input_idxs.add(partition_input_idx)

        if graph_input_idx is not None:
            placeholder = metadata.placeholders[graph_input_idx]
        else:
            # create a dummy placeholder info since this partition input is not a graph input
            placeholder = PlaceholderInfo(
                name=f"partition_{partition_map.id}_placeholder_{partition_input_idx}",
                stack_trace=None,
                users=[],
                mutating_use_stack_trace=None,
            )
        partition_placeholders.append(placeholder)

    partition_stack_traces = []
    for graph_output_idx in partition_map.output_index_mapping:
        if graph_output_idx is not None:
            partition_stack_traces.append(metadata.stack_traces[graph_output_idx])
        else:
            partition_stack_traces.append(None)

    partition_constants = {
        name: metadata.constants[name] for name in partition_map.constant_names
    }

    return CudagraphMetadata(
        partition_placeholders,
        partition_static_input_idxs,
        partition_mutated_input_idxs,
        partition_stack_traces,
        partition_constants,
    )
Convert the cudagraph metadata at the graph level to the graph partition level, given the graph partition info (i.e., mapping from partition input/output index to graph input/output index).
python
torch/_inductor/cudagraph_utils.py
373
[ "partition_map", "metadata" ]
CudagraphMetadata
true
9
6
pytorch/pytorch
96,034
unknown
false
open
JSONStringer open(Scope empty, String openBracket) throws JSONException {
    if (this.stack.isEmpty() && !this.out.isEmpty()) {
        throw new JSONException("Nesting problem: multiple top-level roots");
    }
    beforeValue();
    this.stack.add(empty);
    this.out.append(openBracket);
    return this;
}
Enters a new scope by appending any necessary whitespace and the given bracket. @param empty any necessary whitespace @param openBracket the open bracket @return this object @throws JSONException if processing of json failed
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONStringer.java
175
[ "empty", "openBracket" ]
JSONStringer
true
3
7.6
spring-projects/spring-boot
79,428
javadoc
false
batches
Iterable<? extends RecordBatch> batches();
Get the record batches. Note that the signature allows subclasses to return a more specific batch type. This enables optimizations such as in-place offset assignment (see for example {@link DefaultRecordBatch}), and partial reading of record data, see {@link FileLogInputStream.FileChannelRecordBatch#magic()}. @return An iterator over the record batches of the log
java
clients/src/main/java/org/apache/kafka/common/record/Records.java
64
[]
true
1
6
apache/kafka
31,560
javadoc
false
sendJoinGroupRequest
RequestFuture<ByteBuffer> sendJoinGroupRequest() {
    if (coordinatorUnknown())
        return RequestFuture.coordinatorNotAvailable();

    // send a join group request to the coordinator
    log.info("(Re-)joining group");
    JoinGroupRequest.Builder requestBuilder = new JoinGroupRequest.Builder(
            new JoinGroupRequestData()
                    .setGroupId(rebalanceConfig.groupId)
                    .setSessionTimeoutMs(this.rebalanceConfig.sessionTimeoutMs)
                    .setMemberId(this.generation.memberId)
                    .setGroupInstanceId(this.rebalanceConfig.groupInstanceId.orElse(null))
                    .setProtocolType(protocolType())
                    .setProtocols(metadata())
                    .setRebalanceTimeoutMs(this.rebalanceConfig.rebalanceTimeoutMs)
                    .setReason(JoinGroupRequest.maybeTruncateReason(this.rejoinReason))
    );

    log.debug("Sending JoinGroup ({}) to coordinator {}", requestBuilder, this.coordinator);

    // Note that we override the request timeout using the rebalance timeout since that is the
    // maximum time that it may block on the coordinator. We add an extra 5 seconds for small delays.
    int joinGroupTimeoutMs = Math.max(
            client.defaultRequestTimeoutMs(),
            Math.max(
                    rebalanceConfig.rebalanceTimeoutMs + JOIN_GROUP_TIMEOUT_LAPSE,
                    rebalanceConfig.rebalanceTimeoutMs) // guard against overflow since rebalance timeout can be MAX_VALUE
    );
    return client.send(coordinator, requestBuilder, joinGroupTimeoutMs)
            .compose(new JoinGroupResponseHandler(generation));
}
Join the group and return the assignment for the next generation. This function handles both JoinGroup and SyncGroup, delegating to {@link #onLeaderElected(String, String, List, boolean)} if elected leader by the coordinator. NOTE: This is visible only for testing @return A request future which wraps the assignment returned from the group leader
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
606
[]
true
2
7.92
apache/kafka
31,560
javadoc
false
getSharedInstance
public static ConversionService getSharedInstance() {
    ApplicationConversionService sharedInstance = ApplicationConversionService.sharedInstance;
    if (sharedInstance == null) {
        synchronized (ApplicationConversionService.class) {
            sharedInstance = ApplicationConversionService.sharedInstance;
            if (sharedInstance == null) {
                sharedInstance = new ApplicationConversionService(null, true);
                ApplicationConversionService.sharedInstance = sharedInstance;
            }
        }
    }
    return sharedInstance;
}
Return a shared default application {@code ConversionService} instance, lazily building it once needed. <p> Note: This method actually returns an {@link ApplicationConversionService} instance. However, the {@code ConversionService} signature has been preserved for binary compatibility. @return the shared {@code ApplicationConversionService} instance (never {@code null})
java
core/spring-boot/src/main/java/org/springframework/boot/convert/ApplicationConversionService.java
202
[]
ConversionService
true
3
7.44
spring-projects/spring-boot
79,428
javadoc
false
loadManifest
private Object loadManifest() throws IOException {
    File file = new File(this.rootDirectory, "META-INF/MANIFEST.MF");
    if (!file.exists()) {
        return NO_MANIFEST;
    }
    try (FileInputStream inputStream = new FileInputStream(file)) {
        return new Manifest(inputStream);
    }
}
Create a new {@link ExplodedArchive} instance. @param rootDirectory the root directory
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/ExplodedArchive.java
76
[]
Object
true
2
6.08
spring-projects/spring-boot
79,428
javadoc
false
copyIncludes
private EnumSet<Include> copyIncludes() {
    return (this.includes.isEmpty()) ? EnumSet.noneOf(Include.class) : EnumSet.copyOf(this.includes);
}
Remove elements from the given map if they are not included in this set of options. @param map the map to update @since 3.2.7
java
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java
98
[]
true
2
6.96
spring-projects/spring-boot
79,428
javadoc
false
build
@Override
public ImmutableSortedMultiset<E> build() {
  dedupAndCoalesceAndDeleteEmpty();
  if (length == 0) {
    return emptyMultiset(comparator);
  }
  RegularImmutableSortedSet<E> elementSet =
      (RegularImmutableSortedSet<E>) ImmutableSortedSet.construct(comparator, length, elements);
  long[] cumulativeCounts = new long[length + 1];
  for (int i = 0; i < length; i++) {
    cumulativeCounts[i + 1] = cumulativeCounts[i] + counts[i];
  }
  forceCopyElements = true;
  return new RegularImmutableSortedMultiset<E>(elementSet, cumulativeCounts, 0, length);
}
Returns a newly-created {@code ImmutableSortedMultiset} based on the contents of the {@code Builder}.
java
android/guava/src/com/google/common/collect/ImmutableSortedMultiset.java
689
[]
true
3
6.08
google/guava
51,352
javadoc
false
getNestedJarEntry
private NestedJarEntry getNestedJarEntry(String name) {
    Objects.requireNonNull(name, "name");
    NestedJarEntry lastEntry = this.lastEntry;
    if (lastEntry != null && name.equals(lastEntry.getName())) {
        return lastEntry;
    }
    ZipContent.Entry entry = getVersionedContentEntry(name);
    entry = (entry != null) ? entry : getContentEntry(null, name);
    if (entry == null) {
        return null;
    }
    NestedJarEntry nestedJarEntry = new NestedJarEntry(entry, name);
    this.lastEntry = nestedJarEntry;
    return nestedJarEntry;
}
Return if an entry with the given name exists. @param name the name to check @return if the entry exists
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFile.java
257
[ "name" ]
NestedJarEntry
true
5
8.08
spring-projects/spring-boot
79,428
javadoc
false
_unbox_scalar
def _unbox_scalar(
    self, value: DTScalarOrNaT
) -> np.int64 | np.datetime64 | np.timedelta64:
    """
    Unbox the integer value of a scalar `value`.

    Parameters
    ----------
    value : Period, Timestamp, Timedelta, or NaT
        Depending on subclass.

    Returns
    -------
    int

    Examples
    --------
    >>> arr = pd.array(np.array(["1970-01-01"], "datetime64[ns]"))
    >>> arr._unbox_scalar(arr[0])
    np.datetime64('1970-01-01T00:00:00.000000000')
    """
    raise AbstractMethodError(self)
Unbox the integer value of a scalar `value`. Parameters ---------- value : Period, Timestamp, Timedelta, or NaT Depending on subclass. Returns ------- int Examples -------- >>> arr = pd.array(np.array(["1970-01-01"], "datetime64[ns]")) >>> arr._unbox_scalar(arr[0]) np.datetime64('1970-01-01T00:00:00.000000000')
python
pandas/core/arrays/datetimelike.py
258
[ "self", "value" ]
np.int64 | np.datetime64 | np.timedelta64
true
1
6.64
pandas-dev/pandas
47,362
numpy
false
is_keys_unchanged_async
async def is_keys_unchanged_async(
    self,
    client: AioBaseClient,
    bucket_name: str,
    prefix: str,
    inactivity_period: float = 60 * 60,
    min_objects: int = 1,
    previous_objects: set[str] | None = None,
    inactivity_seconds: int = 0,
    allow_delete: bool = True,
    last_activity_time: datetime | None = None,
) -> dict[str, Any]:
    """
    Check if new objects have been uploaded and the period has passed; update sensor state accordingly.

    :param client: aiobotocore client
    :param bucket_name: the name of the bucket
    :param prefix: a key prefix
    :param inactivity_period: the total seconds of inactivity to designate
        keys unchanged. Note, this mechanism is not real time and
        this operator may not return until a poke_interval after this period
        has passed with no additional objects sensed.
    :param min_objects: the minimum number of objects needed for keys unchanged
        sensor to be considered valid.
    :param previous_objects: the set of object ids found during the last poke.
    :param inactivity_seconds: number of inactive seconds
    :param allow_delete: Should this sensor consider objects being deleted
        between pokes valid behavior. If true a warning message will be logged
        when this happens. If false an error will be raised.
    :param last_activity_time: last activity datetime.
    """
    if not previous_objects:
        previous_objects = set()
    list_keys = await self._list_keys_async(client=client, bucket_name=bucket_name, prefix=prefix)
    current_objects = set(list_keys)
    current_num_objects = len(current_objects)
    if current_num_objects > len(previous_objects):
        # When new objects arrived, reset the inactivity_seconds
        # and update previous_objects for the next poke.
        self.log.info(
            "New objects found at %s, resetting last_activity_time.",
            os.path.join(bucket_name, prefix),
        )
        self.log.debug("New objects: %s", current_objects - previous_objects)
        last_activity_time = datetime.now()
        inactivity_seconds = 0
        previous_objects = current_objects
        return {
            "status": "pending",
            "previous_objects": previous_objects,
            "last_activity_time": last_activity_time,
            "inactivity_seconds": inactivity_seconds,
        }

    if len(previous_objects) - len(current_objects):
        # During the last poke interval objects were deleted.
        if allow_delete:
            deleted_objects = previous_objects - current_objects
            previous_objects = current_objects
            last_activity_time = datetime.now()
            self.log.info(
                "Objects were deleted during the last poke interval. Updating the "
                "file counter and resetting last_activity_time:\n%s",
                deleted_objects,
            )
            return {
                "status": "pending",
                "previous_objects": previous_objects,
                "last_activity_time": last_activity_time,
                "inactivity_seconds": inactivity_seconds,
            }

        return {
            "status": "error",
            "message": f"{os.path.join(bucket_name, prefix)} between pokes.",
        }

    if last_activity_time:
        inactivity_seconds = int(
            (datetime.now(last_activity_time.tzinfo) - last_activity_time).total_seconds()
        )
    else:
        # Handles the first poke where last inactivity time is None.
        last_activity_time = datetime.now()
        inactivity_seconds = 0

    if inactivity_seconds >= inactivity_period:
        path = os.path.join(bucket_name, prefix)
        if current_num_objects >= min_objects:
            success_message = (
                f"SUCCESS: Sensor found {current_num_objects} objects at {path}. "
                "Waited at least {inactivity_period} seconds, with no new objects uploaded."
            )
            self.log.info(success_message)
            return {
                "status": "success",
                "message": success_message,
            }

        self.log.error(
            "FAILURE: Inactivity Period passed, not enough objects found in %s",
            path,
        )
    return {
        "status": "pending",
        "previous_objects": previous_objects,
        "last_activity_time": last_activity_time,
        "inactivity_seconds": inactivity_seconds,
    }
Check if new objects have been uploaded and the period has passed; update sensor state accordingly. :param client: aiobotocore client :param bucket_name: the name of the bucket :param prefix: a key prefix :param inactivity_period: the total seconds of inactivity to designate keys unchanged. Note, this mechanism is not real time and this operator may not return until a poke_interval after this period has passed with no additional objects sensed. :param min_objects: the minimum number of objects needed for keys unchanged sensor to be considered valid. :param previous_objects: the set of object ids found during the last poke. :param inactivity_seconds: number of inactive seconds :param allow_delete: Should this sensor consider objects being deleted between pokes valid behavior. If true a warning message will be logged when this happens. If false an error will be raised. :param last_activity_time: last activity datetime.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
724
[ "self", "client", "bucket_name", "prefix", "inactivity_period", "min_objects", "previous_objects", "inactivity_seconds", "allow_delete", "last_activity_time" ]
dict[str, Any]
true
9
6.64
apache/airflow
43,597
sphinx
false
newSample
@Override
protected HistogramSample newSample(long timeMs) {
    return new HistogramSample(binScheme, timeMs);
}
Return the computed frequency describing the number of occurrences of the values in the bucket for the given center point, relative to the total number of occurrences in the samples. @param config the metric configuration @param now the current time in milliseconds @param centerValue the value corresponding to the center point of the bucket @return the frequency of the values in the bucket relative to the total number of samples
java
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java
161
[ "timeMs" ]
HistogramSample
true
1
6.32
apache/kafka
31,560
javadoc
false
lastIndexOf
public int lastIndexOf(final String str) {
    return lastIndexOf(str, size - 1);
}
Searches the string builder to find the last reference to the specified string. <p> Note that a null input string will return -1, whereas the JDK throws an exception. </p> @param str the string to find, null returns -1 @return the last index of the string, or -1 if not found
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
2,345
[ "str" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
hermfromroots
def hermfromroots(roots):
    """
    Generate a Hermite series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Hermite form, where the :math:`r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    For instance, if 2 is a root of multiplicity three and 3 is a root of
    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
    roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in Hermite form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients. If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    numpy.polynomial.polynomial.polyfromroots
    numpy.polynomial.legendre.legfromroots
    numpy.polynomial.laguerre.lagfromroots
    numpy.polynomial.chebyshev.chebfromroots
    numpy.polynomial.hermite_e.hermefromroots

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfromroots, hermval
    >>> coef = hermfromroots((-1, 0, 1))
    >>> hermval((-1, 0, 1), coef)
    array([0., 0., 0.])
    >>> coef = hermfromroots((-1j, 1j))
    >>> hermval((-1j, 1j), coef)
    array([0.+0.j, 0.+0.j])
    """
    return pu._fromroots(hermline, hermmul, roots)
Generate a Hermite series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in Hermite form, where the :math:`r_n` are the roots specified in `roots`. If a zero has multiplicity n, then it must appear in `roots` n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are `c`, then .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) The coefficient of the last term is not generally 1 for monic polynomials in Hermite form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. If all roots are real then `out` is a real array, if some of the roots are complex, then `out` is complex even if all the coefficients in the result are real (see Examples below). See Also -------- numpy.polynomial.polynomial.polyfromroots numpy.polynomial.legendre.legfromroots numpy.polynomial.laguerre.lagfromroots numpy.polynomial.chebyshev.chebfromroots numpy.polynomial.hermite_e.hermefromroots Examples -------- >>> from numpy.polynomial.hermite import hermfromroots, hermval >>> coef = hermfromroots((-1, 0, 1)) >>> hermval((-1, 0, 1), coef) array([0., 0., 0.]) >>> coef = hermfromroots((-1j, 1j)) >>> hermval((-1j, 1j), coef) array([0.+0.j, 0.+0.j])
python
numpy/polynomial/hermite.py
256
[ "roots" ]
false
1
6.32
numpy/numpy
31,054
numpy
false
fallback_to_default_project_endpoint
def fallback_to_default_project_endpoint(func: Callable[..., RT]) -> Callable[..., RT]:
    """
    Provide fallback for MaxCompute project and endpoint to be used as a decorator.

    If the project or endpoint is None it will be replaced with the project from the
    connection extra definition.

    :param func: function to wrap
    :return: result of the function call
    """

    @functools.wraps(func)
    def inner_wrapper(self, **kwargs) -> RT:
        required_args = ("project", "endpoint")
        for arg_name in required_args:
            # Use the value from kwargs if it is provided and value is not None, otherwise use the
            # value from the connection extra property.
            kwargs[arg_name] = getattr(self, arg_name) if kwargs.get(arg_name) is None else kwargs[arg_name]
            if not kwargs[arg_name]:
                raise MaxComputeConfigurationException(
                    f'"{arg_name}" must be passed either as '
                    "keyword parameter or as extra "
                    "in the MaxCompute connection definition. Both are not set!"
                )

        return func(self, **kwargs)

    return inner_wrapper
Provide fallback for MaxCompute project and endpoint to be used as a decorator. If the project or endpoint is None it will be replaced with the project from the connection extra definition. :param func: function to wrap :return: result of the function call
python
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/maxcompute.py
34
[ "func" ]
Callable[..., RT]
true
4
8.08
apache/airflow
43,597
sphinx
false
iterator
@Override
public Iterator<ConditionAndOutcome> iterator() {
    return Collections.unmodifiableSet(this.outcomes).iterator();
}
Return a {@link Stream} of the {@link ConditionAndOutcome} items. @return a stream of the {@link ConditionAndOutcome} items. @since 3.5.0
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReport.java
253
[]
true
1
6.32
spring-projects/spring-boot
79,428
javadoc
false
start_go_pipeline_with_binary
def start_go_pipeline_with_binary(
    self,
    variables: dict,
    launcher_binary: str,
    worker_binary: str,
    process_line_callback: Callable[[str], None] | None = None,
) -> None:
    """
    Start Apache Beam Go pipeline with an executable binary.

    :param variables: Variables passed to the job.
    :param launcher_binary: Path to the binary compiled for the launching platform.
    :param worker_binary: Path to the binary compiled for the worker platform.
    :param process_line_callback: (optional) Callback that can be used to process
        each line of the stdout and stderr file descriptors.
    """
    job_variables = copy.deepcopy(variables)

    if "labels" in job_variables:
        job_variables["labels"] = json.dumps(job_variables["labels"], separators=(",", ":"))

    job_variables["worker_binary"] = worker_binary

    command_prefix = [launcher_binary]

    self._start_pipeline(
        variables=job_variables,
        command_prefix=command_prefix,
        process_line_callback=process_line_callback,
    )
Start Apache Beam Go pipeline with an executable binary. :param variables: Variables passed to the job. :param launcher_binary: Path to the binary compiled for the launching platform. :param worker_binary: Path to the binary compiled for the worker platform. :param process_line_callback: (optional) Callback that can be used to process each line of the stdout and stderr file descriptors.
python
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
406
[ "self", "variables", "launcher_binary", "worker_binary", "process_line_callback" ]
None
true
2
6.72
apache/airflow
43,597
sphinx
false
onStart
default <T> @Nullable Bindable<T> onStart(ConfigurationPropertyName name, Bindable<T> target, BindContext context) {
    return target;
}
Called when binding of an element starts but before any result has been determined. @param <T> the bindable source type @param name the name of the element being bound @param target the item being bound @param context the bind context @return the actual item that should be used for binding (may be {@code null})
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindHandler.java
48
[ "name", "target", "context" ]
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
ensure_python_int
def ensure_python_int(value: int | np.integer) -> int:
    """
    Ensure that a value is a python int.

    Parameters
    ----------
    value: int or numpy.integer

    Returns
    -------
    int

    Raises
    ------
    TypeError: if the value isn't an int or can't be converted to one.
    """
    if not (is_integer(value) or is_float(value)):
        if not is_scalar(value):
            raise TypeError(
                f"Value needs to be a scalar value, was type {type(value).__name__}"
            )
        raise TypeError(f"Wrong type {type(value)} for value {value}")
    try:
        new_value = int(value)
        assert new_value == value
    except (TypeError, ValueError, AssertionError) as err:
        raise TypeError(f"Wrong type {type(value)} for value {value}") from err
    return new_value
Ensure that a value is a python int. Parameters ---------- value: int or numpy.integer Returns ------- int Raises ------ TypeError: if the value isn't an int or can't be converted to one.
python
pandas/core/dtypes/common.py
96
[ "value" ]
int
true
4
6.72
pandas-dev/pandas
47,362
numpy
false
_write_dump_to_disk
def _write_dump_to_disk(self, dump: CacheDump) -> None:
    """Write the cache dump to disk.

    Writes the provided dump to the shared cache JSON file and logs the result.

    Args:
        dump: The cache dump to write.
    """
    try:
        with open(self._shared_cache_filepath, "w") as f:
            json.dump(dump, f, indent=2)

        # Log the filepath
        if self._sub_key:
            logger.log(
                logging.INFO,
                "Memoizer cache (sub_key=%s) dumped to: %s",
                self._sub_key,
                self._shared_cache_filepath,
            )
        else:
            logger.log(
                logging.INFO,
                "Memoizer cache dumped to: %s",
                self._shared_cache_filepath,
            )
    except Exception as e:
        # If dumping fails, just log it and don't crash the program
        logger.log(
            logging.WARNING,
            "Warning: Failed to dump memoizer cache: %s",
            e,
        )
Write the cache dump to disk. Writes the provided dump to the shared cache JSON file and logs the result. Args: dump: The cache dump to write.
python
torch/_inductor/runtime/caching/interfaces.py
305
[ "self", "dump" ]
None
true
3
6.72
pytorch/pytorch
96,034
google
false
_gotitem
def _gotitem(self, key, ndim: int, subset=None):
    """
    sub-classes to define
    return a sliced object

    Parameters
    ----------
    key : string / list of selections
    ndim : {1, 2}
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    if ndim == 2:
        if subset is None:
            subset = self.obj
        return DataFrameGroupBy(
            subset,
            self.keys,
            level=self.level,
            grouper=self._grouper,
            exclusions=self.exclusions,
            selection=key,
            as_index=self.as_index,
            sort=self.sort,
            group_keys=self.group_keys,
            observed=self.observed,
            dropna=self.dropna,
        )
    elif ndim == 1:
        if subset is None:
            subset = self.obj[key]
        return SeriesGroupBy(
            subset,
            self.keys,
            level=self.level,
            grouper=self._grouper,
            exclusions=self.exclusions,
            selection=key,
            as_index=self.as_index,
            sort=self.sort,
            group_keys=self.group_keys,
            observed=self.observed,
            dropna=self.dropna,
        )

    raise AssertionError("invalid ndim for _gotitem")
sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on
python
pandas/core/groupby/generic.py
2,849
[ "self", "key", "ndim", "subset" ]
true
5
6.08
pandas-dev/pandas
47,362
numpy
false
reduce
function reduce(collection, iteratee, accumulator) {
  var func = isArray(collection) ? arrayReduce : baseReduce,
      initAccum = arguments.length < 3;

  return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEach);
}
Reduces `collection` to a value which is the accumulated result of running each element in `collection` thru `iteratee`, where each successive invocation is supplied the return value of the previous. If `accumulator` is not given, the first element of `collection` is used as the initial value. The iteratee is invoked with four arguments: (accumulator, value, index|key, collection). Many lodash methods are guarded to work as iteratees for methods like `_.reduce`, `_.reduceRight`, and `_.transform`. The guarded methods are: `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`, and `sortBy` @static @memberOf _ @since 0.1.0 @category Collection @param {Array|Object} collection The collection to iterate over. @param {Function} [iteratee=_.identity] The function invoked per iteration. @param {*} [accumulator] The initial value. @returns {*} Returns the accumulated value. @see _.reduceRight @example _.reduce([1, 2], function(sum, n) { return sum + n; }, 0); // => 3 _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { (result[value] || (result[value] = [])).push(key); return result; }, {}); // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed)
javascript
lodash.js
9,784
[ "collection", "iteratee", "accumulator" ]
false
2
7.04
lodash/lodash
61,490
jsdoc
false
hasCycle
public static boolean hasCycle(Network<?, ?> network) {
  // In a directed graph, parallel edges cannot introduce a cycle in an acyclic graph.
  // However, in an undirected graph, any parallel edge induces a cycle in the graph.
  if (!network.isDirected()
      && network.allowsParallelEdges()
      && network.edges().size() > network.asGraph().edges().size()) {
    return true;
  }
  return hasCycle(network.asGraph());
}
Returns true if {@code network} has at least one cycle. A cycle is defined as a non-empty subset of edges in a graph arranged to form a path (a sequence of adjacent outgoing edges) starting and ending with the same node. <p>This method will detect any non-empty cycle, including self-loops (a cycle of length 1).
java
android/guava/src/com/google/common/graph/Graphs.java
86
[ "network" ]
true
4
6
google/guava
51,352
javadoc
false
createCellSet
@Override abstract ImmutableSet<Cell<R, C, V>> createCellSet();
A builder for creating immutable table instances, especially {@code public static final} tables ("constant tables"). Example: {@snippet : static final ImmutableTable<Integer, Character, String> SPREADSHEET = new ImmutableTable.Builder<Integer, Character, String>() .put(1, 'A', "foo") .put(1, 'B', "bar") .put(2, 'A', "baz") .buildOrThrow(); } <p>By default, the order in which cells are added to the builder determines the iteration ordering of all views in the returned table, with {@link #putAll} following the {@link Table#cellSet()} iteration order. However, if {@link #orderRowsBy} or {@link #orderColumnsBy} is called, the views are sorted by the supplied comparators. <p>For empty or single-cell immutable tables, {@link #of()} and {@link #of(Object, Object, Object)} are even more convenient. <p>Builder instances can be reused - it is safe to call {@link #buildOrThrow} multiple times to build multiple tables in series. Each table is a superset of the tables created before it. @since 11.0
java
android/guava/src/com/google/common/collect/ImmutableTable.java
306
[]
true
1
6.56
google/guava
51,352
javadoc
false
from_constructor
def from_constructor(
    cls,
    aws_conn_id: str | None,
    region_name: str | None,
    verify: bool | str | None,
    botocore_config: dict[str, Any] | None,
    additional_params: dict,
):
    """
    Resolve generic AWS Hooks parameters in class constructor.

    Examples:
        .. code-block:: python

            class AwsFooBarOperator(BaseOperator):
                def __init__(
                    self,
                    *,
                    aws_conn_id: str | None = "aws_default",
                    region_name: str | None = None,
                    verify: bool | str | None = None,
                    botocore_config: dict | None = None,
                    foo: str = "bar",
                    **kwargs,
                ):
                    params = AwsHookParams.from_constructor(
                        aws_conn_id, region_name, verify, botocore_config, additional_params=kwargs
                    )
                    super().__init__(**kwargs)
                    self.aws_conn_id = params.aws_conn_id
                    self.region_name = params.region_name
                    self.verify = params.verify
                    self.botocore_config = params.botocore_config
                    self.foo = foo
    """
    return cls(aws_conn_id, region_name, verify, botocore_config)
Resolve generic AWS Hooks parameters in class constructor. Examples: .. code-block:: python class AwsFooBarOperator(BaseOperator): def __init__( self, *, aws_conn_id: str | None = "aws_default", region_name: str | None = None, verify: bool | str | None = None, botocore_config: dict | None = None, foo: str = "bar", **kwargs, ): params = AwsHookParams.from_constructor( aws_conn_id, region_name, verify, botocore_config, additional_params=kwargs ) super().__init__(**kwargs) self.aws_conn_id = params.aws_conn_id self.region_name = params.region_name self.verify = params.verify self.botocore_config = params.botocore_config self.foo = foo
python
providers/amazon/src/airflow/providers/amazon/aws/utils/mixins.py
53
[ "cls", "aws_conn_id", "region_name", "verify", "botocore_config", "additional_params" ]
true
1
6.48
apache/airflow
43,597
unknown
false
of
static EnvironmentPostProcessorsFactory of(String... classNames) {
    return of(null, classNames);
}
Return a {@link EnvironmentPostProcessorsFactory} that reflectively creates post processors from the given class names. @param classNames the post processor class names @return an {@link EnvironmentPostProcessorsFactory} instance
java
core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorsFactory.java
74
[]
EnvironmentPostProcessorsFactory
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
deleteHorizonMs
OptionalLong deleteHorizonMs();
Get the delete horizon, returns OptionalLong.EMPTY if the first timestamp is not the delete horizon @return timestamp of the delete horizon
java
clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java
220
[]
OptionalLong
true
1
6
apache/kafka
31,560
javadoc
false
toStringBase
protected String toStringBase() {
    return "owner='" + owner + '\'' +
            ", exponentialBackoff=" + exponentialBackoff +
            ", lastSentMs=" + lastSentMs +
            ", lastReceivedMs=" + lastReceivedMs +
            ", numAttempts=" + numAttempts +
            ", backoffMs=" + backoffMs +
            ", requestInFlight=" + requestInFlight;
}
This method appends the instance variables together in a simple String of comma-separated key value pairs. This allows subclasses to include these values and not have to duplicate each variable, helping to prevent any variables from being omitted when new ones are added. @return String version of instance variables.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestState.java
149
[]
String
true
1
6.88
apache/kafka
31,560
javadoc
false
check_increasing
def check_increasing(x, y):
    """Determine whether y is monotonically correlated with x.

    y is found increasing or decreasing with respect to x
    based on a Spearman correlation test.

    Parameters
    ----------
    x : array-like of shape (n_samples,)
        Training data.

    y : array-like of shape (n_samples,)
        Training target.

    Returns
    -------
    increasing_bool : boolean
        Whether the relationship is increasing or decreasing.

    Notes
    -----
    The Spearman correlation coefficient is estimated from the data, and the
    sign of the resulting estimate is used as the result.

    In the event that the 95% confidence interval based on Fisher transform
    spans zero, a warning is raised.

    References
    ----------
    Fisher transformation. Wikipedia.
    https://en.wikipedia.org/wiki/Fisher_transformation

    Examples
    --------
    >>> from sklearn.isotonic import check_increasing
    >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10]
    >>> check_increasing(x, y)
    np.True_
    >>> y = [10, 8, 6, 4, 2]
    >>> check_increasing(x, y)
    np.False_
    """
    # Calculate Spearman rho estimate and set return accordingly.
    rho, _ = spearmanr(x, y)
    increasing_bool = rho >= 0

    # Run Fisher transform to get the rho CI, but handle rho=+/-1
    if rho not in [-1.0, 1.0] and len(x) > 3:
        F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))
        F_se = 1 / math.sqrt(len(x) - 3)

        # Use a 95% CI, i.e., +/-1.96 S.E.
        # https://en.wikipedia.org/wiki/Fisher_transformation
        rho_0 = math.tanh(F - 1.96 * F_se)
        rho_1 = math.tanh(F + 1.96 * F_se)

        # Warn if the CI spans zero.
        if np.sign(rho_0) != np.sign(rho_1):
            warnings.warn(
                "Confidence interval of the Spearman "
                "correlation coefficient spans zero. "
                "Determination of ``increasing`` may be "
                "suspect."
            )

    return increasing_bool
Determine whether y is monotonically correlated with x. y is found increasing or decreasing with respect to x based on a Spearman correlation test. Parameters ---------- x : array-like of shape (n_samples,) Training data. y : array-like of shape (n_samples,) Training target. Returns ------- increasing_bool : boolean Whether the relationship is increasing or decreasing. Notes ----- The Spearman correlation coefficient is estimated from the data, and the sign of the resulting estimate is used as the result. In the event that the 95% confidence interval based on Fisher transform spans zero, a warning is raised. References ---------- Fisher transformation. Wikipedia. https://en.wikipedia.org/wiki/Fisher_transformation Examples -------- >>> from sklearn.isotonic import check_increasing >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10] >>> check_increasing(x, y) np.True_ >>> y = [10, 8, 6, 4, 2] >>> check_increasing(x, y) np.False_
python
sklearn/isotonic.py
31
[ "x", "y" ]
false
4
7.6
scikit-learn/scikit-learn
64,340
numpy
false
pollLast
@CanIgnoreReturnValue
public @Nullable E pollLast() {
  return isEmpty() ? null : removeAndGet(getMaxElementIndex());
}
Removes and returns the greatest element of this queue, or returns {@code null} if the queue is empty.
java
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
367
[]
E
true
2
6.96
google/guava
51,352
javadoc
false
format_tags
def format_tags(source: Any, *, key_label: str = "Key", value_label: str = "Value"):
    """
    Format tags for boto call which expect a given format.

    If given a dictionary, formats it as an array of objects with a key and a value field
    to be passed to boto calls that expect this format.

    Else, assumes that it's already in the right format and returns it as is. We do not
    validate the format here since it's done by boto anyway, and the error would not be
    clearer if thrown from here.

    :param source: a dict from which keys and values are read
    :param key_label: optional, the label to use for keys if not "Key"
    :param value_label: optional, the label to use for values if not "Value"
    """
    if source is None:
        return []
    if isinstance(source, dict):
        return [{key_label: kvp[0], value_label: kvp[1]} for kvp in source.items()]
    return source
Format tags for boto call which expect a given format. If given a dictionary, formats it as an array of objects with a key and a value field to be passed to boto calls that expect this format. Else, assumes that it's already in the right format and returns it as is. We do not validate the format here since it's done by boto anyway, and the error would not be clearer if thrown from here. :param source: a dict from which keys and values are read :param key_label: optional, the label to use for keys if not "Key" :param value_label: optional, the label to use for values if not "Value"
python
providers/amazon/src/airflow/providers/amazon/aws/utils/tags.py
22
[ "source", "key_label", "value_label" ]
true
3
7.04
apache/airflow
43,597
sphinx
false
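
A minimal usage sketch for format_tags; the tag names and values are invented.

from airflow.providers.amazon.aws.utils.tags import format_tags

source = {"env": "prod", "team": "data"}

format_tags(source)
# -> [{'Key': 'env', 'Value': 'prod'}, {'Key': 'team', 'Value': 'data'}]

# Custom labels, e.g. for APIs that expect "TagKey"/"TagValue":
format_tags(source, key_label="TagKey", value_label="TagValue")
# -> [{'TagKey': 'env', 'TagValue': 'prod'}, {'TagKey': 'team', 'TagValue': 'data'}]

format_tags(None)
# -> []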
compressionType
@Override public CompressionType compressionType() { return CompressionType.forId(attributes() & COMPRESSION_CODEC_MASK); }
Gets the compression type of this record batch, decoded from the batch attributes. @return the compression type
java
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
216
[]
CompressionType
true
1
6.8
apache/kafka
31,560
javadoc
false
visitYieldExpression
function visitYieldExpression(node: YieldExpression) { if (enclosingFunctionFlags & FunctionFlags.Async && enclosingFunctionFlags & FunctionFlags.Generator) { if (node.asteriskToken) { const expression = visitNode(Debug.checkDefined(node.expression), visitor, isExpression); return setOriginalNode( setTextRange( factory.createYieldExpression( /*asteriskToken*/ undefined, emitHelpers().createAwaitHelper( factory.updateYieldExpression( node, node.asteriskToken, setTextRange( emitHelpers().createAsyncDelegatorHelper( setTextRange( emitHelpers().createAsyncValuesHelper(expression), expression, ), ), expression, ), ), ), ), node, ), node, ); } return setOriginalNode( setTextRange( factory.createYieldExpression( /*asteriskToken*/ undefined, createDownlevelAwait( node.expression ? visitNode(node.expression, visitor, isExpression) : factory.createVoidZero(), ), ), node, ), node, ); } return visitEachChild(node, visitor, context); }
Visits a YieldExpression node within an async generator function, rewriting `yield*` delegation through the async-delegator and async-values helpers and wrapping plain `yield` operands in a downlevel await. @param node The node to visit.
typescript
src/compiler/transformers/es2018.ts
408
[ "node" ]
false
5
6.08
microsoft/TypeScript
107,154
jsdoc
false
excluded_combos
def excluded_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:
    """
    Return the list of combinations that should be excluded from the full matrix of the two lists
    if what's left should be a representative list of combos (i.e. each item from both lists
    has to be present at least once in the combos).

    :param list_1: first list
    :param list_2: second list
    :return: list of exclusions = list 1 x list 2 - representative_combos
    """
    all_combos: list[tuple[str, str]] = list(itertools.product(list_1, list_2))
    return [item for item in all_combos if item not in set(representative_combos(list_1, list_2))]
Return the list of combinations that should be excluded from the full matrix of the two lists if what's left should be a representative list of combos (i.e. each item from both lists has to be present at least once in the combos). :param list_1: first list :param list_2: second list :return: list of exclusions = list 1 x list 2 - representative_combos
python
dev/breeze/src/airflow_breeze/utils/exclude_from_matrix.py
36
[ "list_1", "list_2" ]
list[tuple[str, str]]
true
1
7.04
apache/airflow
43,597
sphinx
false
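
excluded_combos relies on a sibling representative_combos helper that is not shown in this record; the sketch below assumes it cycles the shorter list against the longer one so that every item appears at least once, which matches the contract in the docstring but may differ from the real helper in detail.

import itertools


def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:
    # Assumed behaviour for illustration only; the real helper lives next to
    # excluded_combos in airflow_breeze and may pair items differently.
    return [
        (list_1[i % len(list_1)], list_2[i % len(list_2)])
        for i in range(max(len(list_1), len(list_2)))
    ]


list_1 = ["3.9", "3.10", "3.11"]
list_2 = ["postgres", "mysql"]
kept = representative_combos(list_1, list_2)
excluded = [c for c in itertools.product(list_1, list_2) if c not in set(kept)]
# kept:     [('3.9', 'postgres'), ('3.10', 'mysql'), ('3.11', 'postgres')]
# excluded: the other 3 of the 6 possible combinations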
peek
ShareCompletedFetch peek() { lock.lock(); try { return completedFetches.peek(); } finally { lock.unlock(); } }
Returns, but does not remove, the next completed fetch in the buffer, or {@code null} if the buffer is empty. @return the head of the completed-fetch queue, or {@code null} if there is none
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchBuffer.java
104
[]
ShareCompletedFetch
true
1
7.04
apache/kafka
31,560
javadoc
false
upgrade
def upgrade():
    """Apply change of the ``value`` column type to LONGBLOB in the ``xcom`` table for MySQL."""
    conn = op.get_bind()
    if conn.dialect.name == "mysql":
        with op.batch_alter_table("xcom", schema=None) as batch_op:
            batch_op.alter_column("value", type_=sa.LargeBinary().with_variant(LONGBLOB, "mysql"))
Apply change of the ``value`` column type to LONGBLOB in the ``xcom`` table for MySQL.
python
airflow-core/src/airflow/migrations/versions/0013_2_9_0_make_xcom_value_to_longblob_for_mysql.py
42
[]
false
2
6.24
apache/airflow
43,597
unknown
false
regionMatches
static boolean regionMatches(final CharSequence cs, final boolean ignoreCase, final int thisStart, final CharSequence substring, final int start, final int length) { if (cs instanceof String && substring instanceof String) { return ((String) cs).regionMatches(ignoreCase, thisStart, (String) substring, start, length); } int index1 = thisStart; int index2 = start; int tmpLen = length; // Extract these first so we detect NPEs the same as the java.lang.String version final int srcLen = cs.length() - thisStart; final int otherLen = substring.length() - start; // Check for invalid parameters if (thisStart < 0 || start < 0 || length < 0) { return false; } // Check that the regions are long enough if (srcLen < length || otherLen < length) { return false; } while (tmpLen-- > 0) { final char c1 = cs.charAt(index1++); final char c2 = substring.charAt(index2++); if (c1 == c2) { continue; } if (!ignoreCase) { return false; } // The real same check as in String#regionMatches(boolean, int, String, int, int): final char u1 = Character.toUpperCase(c1); final char u2 = Character.toUpperCase(c2); if (u1 != u2 && Character.toLowerCase(u1) != Character.toLowerCase(u2)) { return false; } } return true; }
Green implementation of regionMatches. @param cs the {@link CharSequence} to be processed. @param ignoreCase whether or not to be case-insensitive. @param thisStart the index to start on the {@code cs} CharSequence. @param substring the {@link CharSequence} to be looked for. @param start the index to start on the {@code substring} CharSequence. @param length character length of the region. @return whether the region matched. @see String#regionMatches(boolean, int, String, int, int)
java
src/main/java/org/apache/commons/lang3/CharSequenceUtils.java
295
[ "cs", "ignoreCase", "thisStart", "substring", "start", "length" ]
true
13
7.92
apache/commons-lang
2,896
javadoc
false
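
The interesting part of regionMatches is the double case-fold borrowed from String#regionMatches: characters are compared uppercased, and then lowercased as well, to handle alphabets where the two foldings disagree (e.g. Georgian). A rough Python rendering of that inner comparison, for illustration only — Python's str.upper()/str.lower() only approximate Java's per-character semantics.

def chars_match(c1: str, c2: str, ignore_case: bool) -> bool:
    if c1 == c2:
        return True
    if not ignore_case:
        return False
    u1, u2 = c1.upper(), c2.upper()
    # Same two-step check as in the Java code above.
    return u1 == u2 or u1.lower() == u2.lower()


assert chars_match("a", "A", ignore_case=True)
assert not chars_match("a", "b", ignore_case=True)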
completeProxiedInterfaces
public static Class<?>[] completeProxiedInterfaces(AdvisedSupport advised) { return completeProxiedInterfaces(advised, false); }
Determine the complete set of interfaces to proxy for the given AOP configuration. <p>This will always add the {@link Advised} interface unless the AdvisedSupport's {@link AdvisedSupport#setOpaque "opaque"} flag is on. Always adds the {@link org.springframework.aop.SpringProxy} marker interface. @param advised the proxy config @return the complete set of interfaces to proxy @see SpringProxy @see Advised
java
spring-aop/src/main/java/org/springframework/aop/framework/AopProxyUtils.java
146
[ "advised" ]
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
read_log_chunks
def read_log_chunks(
        self,
        ti: TaskInstance | TaskInstanceHistory,
        try_number: int | None,
        metadata: LogMetadata,
    ) -> tuple[LogHandlerOutputStream, LogMetadata]:
        """
        Read chunks of Task Instance logs.

        :param ti: the task instance
        :param try_number: the try number of the task instance to read logs for
        :param metadata: a dictionary containing information about how to read the task log

        The following is an example of how to use this method to read log:

        .. code-block:: python

            logs, metadata = task_log_reader.read_log_chunks(ti, try_number, metadata)
            logs = logs[0] if try_number is not None else logs

        where task_log_reader is an instance of TaskLogReader. The metadata will always
        contain information about the task log which can enable you to read logs to the end.
        """
        if try_number == 0:
            msg = self.get_no_log_state_message(ti)  # returns StructuredLogMessage
            # one message + tell the caller it's the end so stream stops
            return msg, {"end_of_log": True}
        return self.log_handler.read(ti, try_number, metadata=metadata)
Read chunks of Task Instance logs. :param ti: the task instance :param try_number: the try number of the task instance to read logs for :param metadata: a dictionary containing information about how to read the task log The following is an example of how to use this method to read log: .. code-block:: python logs, metadata = task_log_reader.read_log_chunks(ti, try_number, metadata) logs = logs[0] if try_number is not None else logs where task_log_reader is an instance of TaskLogReader. The metadata will always contain information about the task log which can enable you to read logs to the end.
python
airflow-core/src/airflow/utils/log/log_reader.py
77
[ "self", "ti", "try_number", "metadata" ]
tuple[LogHandlerOutputStream, LogMetadata]
true
2
6.88
apache/airflow
43,597
sphinx
false
visitNewExpression
function visitNewExpression(node: NewExpression): LeftHandSideExpression { if (some(node.arguments, isSpreadElement)) { // We are here because we contain a SpreadElementExpression. // [source] // new C(...a) // // [output] // new ((_a = C).bind.apply(_a, [void 0].concat(a)))() const { target, thisArg } = factory.createCallBinding(factory.createPropertyAccessExpression(node.expression, "bind"), hoistVariableDeclaration); return factory.createNewExpression( factory.createFunctionApplyCall( Debug.checkDefined(visitNode(target, visitor, isExpression)), thisArg, transformAndSpreadElements(factory.createNodeArray([factory.createVoidZero(), ...node.arguments!]), /*isArgumentList*/ true, /*multiLine*/ false, /*hasTrailingComma*/ false), ), /*typeArguments*/ undefined, [], ); } return visitEachChild(node, visitor, context); }
Visits a NewExpression that contains a spread element. @param node A NewExpression node.
typescript
src/compiler/transformers/es2015.ts
4,601
[ "node" ]
true
2
6.72
microsoft/TypeScript
107,154
jsdoc
false
leastOf
public <E extends T> List<E> leastOf(Iterable<E> iterable, int k) { if (iterable instanceof Collection) { Collection<E> collection = (Collection<E>) iterable; if (collection.size() <= 2L * k) { // In this case, just dumping the collection to an array and sorting is // faster than using the implementation for Iterator, which is // specialized for k much smaller than n. @SuppressWarnings("unchecked") // c only contains E's and doesn't escape E[] array = (E[]) collection.toArray(); sort(array, this); if (array.length > k) { array = Arrays.copyOf(array, k); } return unmodifiableList(asList(array)); } } return leastOf(iterable.iterator(), k); }
Returns the {@code k} least elements of the given iterable according to this ordering, in order from least to greatest. If there are fewer than {@code k} elements present, all will be included. <p>The implementation does not necessarily use a <i>stable</i> sorting algorithm; when multiple elements are equivalent, it is undefined which will come first. <p><b>Java 8+ users:</b> Use {@code Streams.stream(iterable).collect(Comparators.least(k, thisComparator))} instead. @return an immutable {@code RandomAccess} list of the {@code k} least elements in ascending order @throws IllegalArgumentException if {@code k} is negative @since 8.0
java
android/guava/src/com/google/common/collect/Ordering.java
745
[ "iterable", "k" ]
true
4
6.4
google/guava
51,352
javadoc
false
compare
@Override public int compare(LoggerConfiguration o1, LoggerConfiguration o2) { if (this.rootLoggerName.equals(o1.getName())) { return -1; } if (this.rootLoggerName.equals(o2.getName())) { return 1; } return o1.getName().compareTo(o2.getName()); }
Compare two {@link LoggerConfiguration} instances, ordering the "root" logger before all others and sorting the remaining loggers alphabetically by name.
java
core/spring-boot/src/main/java/org/springframework/boot/logging/LoggerConfigurationComparator.java
42
[ "o1", "o2" ]
true
3
6.08
spring-projects/spring-boot
79,428
javadoc
false
joinA
public <A extends Appendable> A joinA(final A appendable, final Iterable<T> elements) throws IOException { return joinIterable(appendable, prefix, suffix, delimiter, appender, elements); }
Joins stringified objects from the given Iterable into an Appendable. @param <A> the Appendable type. @param appendable The target. @param elements The source. @return The given Appendable. @throws IOException If an I/O error occurs
java
src/main/java/org/apache/commons/lang3/AppendableJoiner.java
296
[ "appendable", "elements" ]
A
true
1
6.64
apache/commons-lang
2,896
javadoc
false
random
@Deprecated public static String random(final int count, final char... chars) { return secure().next(count, chars); }
Creates a random string whose length is the number of characters specified. <p> Characters will be chosen from the set of characters specified. </p> @param count the length of random string to create. @param chars the character array containing the set of characters to use, may be null. @return the random string. @throws IllegalArgumentException if {@code count} &lt; 0. @deprecated Use {@link #next(int, char...)} from {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
java
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
171
[ "count" ]
String
true
1
6.8
apache/commons-lang
2,896
javadoc
false
isTraceEnabled
@Override public boolean isTraceEnabled() { synchronized (this.lines) { return (this.destination == null) || this.destination.isTraceEnabled(); } }
Return whether trace logging is enabled. While no switch-over destination has been set, all levels are enabled so that lines can be deferred; afterwards the call delegates to the destination log.
java
core/spring-boot/src/main/java/org/springframework/boot/logging/DeferredLog.java
65
[]
true
2
6.4
spring-projects/spring-boot
79,428
javadoc
false
ffill
def ffill(self, limit: int | None = None): """ Forward fill the values. This method fills missing values by propagating the last valid observation forward, up to the next valid observation. It is commonly used in time series analysis when resampling data to a higher frequency (upsampling) and filling gaps in the resampled output. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series The resampled data with missing values filled forward. See Also -------- Series.fillna: Fill NA/NaN values using the specified method. DataFrame.fillna: Fill NA/NaN values using the specified method. Examples -------- Here we only create a ``Series``. >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 Example for ``ffill`` with downsampling (we have fewer dates after resampling): >>> ser.resample("MS").ffill() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64 Example for ``ffill`` with upsampling (fill the new dates with the previous value): >>> ser.resample("W").ffill() 2023-01-01 1 2023-01-08 1 2023-01-15 2 2023-01-22 2 2023-01-29 2 2023-02-05 3 2023-02-12 3 2023-02-19 4 Freq: W-SUN, dtype: int64 With upsampling and limiting (only fill the first new date with the previous value): >>> ser.resample("W").ffill(limit=1) 2023-01-01 1.0 2023-01-08 1.0 2023-01-15 2.0 2023-01-22 2.0 2023-01-29 NaN 2023-02-05 3.0 2023-02-12 NaN 2023-02-19 4.0 Freq: W-SUN, dtype: float64 """ return self._upsample("ffill", limit=limit)
Forward fill the values. This method fills missing values by propagating the last valid observation forward, up to the next valid observation. It is commonly used in time series analysis when resampling data to a higher frequency (upsampling) and filling gaps in the resampled output. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series The resampled data with missing values filled forward. See Also -------- Series.fillna: Fill NA/NaN values using the specified method. DataFrame.fillna: Fill NA/NaN values using the specified method. Examples -------- Here we only create a ``Series``. >>> ser = pd.Series( ... [1, 2, 3, 4], ... index=pd.DatetimeIndex( ... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"] ... ), ... ) >>> ser 2023-01-01 1 2023-01-15 2 2023-02-01 3 2023-02-15 4 dtype: int64 Example for ``ffill`` with downsampling (we have fewer dates after resampling): >>> ser.resample("MS").ffill() 2023-01-01 1 2023-02-01 3 Freq: MS, dtype: int64 Example for ``ffill`` with upsampling (fill the new dates with the previous value): >>> ser.resample("W").ffill() 2023-01-01 1 2023-01-08 1 2023-01-15 2 2023-01-22 2 2023-01-29 2 2023-02-05 3 2023-02-12 3 2023-02-19 4 Freq: W-SUN, dtype: int64 With upsampling and limiting (only fill the first new date with the previous value): >>> ser.resample("W").ffill(limit=1) 2023-01-01 1.0 2023-01-08 1.0 2023-01-15 2.0 2023-01-22 2.0 2023-01-29 NaN 2023-02-05 3.0 2023-02-12 NaN 2023-02-19 4.0 Freq: W-SUN, dtype: float64
python
pandas/core/resample.py
608
[ "self", "limit" ]
true
1
7.2
pandas-dev/pandas
47,362
numpy
false
masked_all
def masked_all(shape, dtype=float): """ Empty masked array with all elements masked. Return an empty masked array of the given shape and dtype, where all the data are masked. Parameters ---------- shape : int or tuple of ints Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. dtype : dtype, optional Data type of the output. Returns ------- a : MaskedArray A masked array with all data masked. See Also -------- masked_all_like : Empty masked array modelled on an existing array. Notes ----- Unlike other masked array creation functions (e.g. `numpy.ma.zeros`, `numpy.ma.ones`, `numpy.ma.full`), `masked_all` does not initialize the values of the array, and may therefore be marginally faster. However, the values stored in the newly allocated array are arbitrary. For reproducible behavior, be sure to set each element of the array before reading. Examples -------- >>> import numpy as np >>> np.ma.masked_all((3, 3)) masked_array( data=[[--, --, --], [--, --, --], [--, --, --]], mask=[[ True, True, True], [ True, True, True], [ True, True, True]], fill_value=1e+20, dtype=float64) The `dtype` parameter defines the underlying data type. >>> a = np.ma.masked_all((3, 3)) >>> a.dtype dtype('float64') >>> a = np.ma.masked_all((3, 3), dtype=np.int32) >>> a.dtype dtype('int32') """ a = masked_array(np.empty(shape, dtype), mask=np.ones(shape, make_mask_descr(dtype))) return a
Empty masked array with all elements masked. Return an empty masked array of the given shape and dtype, where all the data are masked. Parameters ---------- shape : int or tuple of ints Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. dtype : dtype, optional Data type of the output. Returns ------- a : MaskedArray A masked array with all data masked. See Also -------- masked_all_like : Empty masked array modelled on an existing array. Notes ----- Unlike other masked array creation functions (e.g. `numpy.ma.zeros`, `numpy.ma.ones`, `numpy.ma.full`), `masked_all` does not initialize the values of the array, and may therefore be marginally faster. However, the values stored in the newly allocated array are arbitrary. For reproducible behavior, be sure to set each element of the array before reading. Examples -------- >>> import numpy as np >>> np.ma.masked_all((3, 3)) masked_array( data=[[--, --, --], [--, --, --], [--, --, --]], mask=[[ True, True, True], [ True, True, True], [ True, True, True]], fill_value=1e+20, dtype=float64) The `dtype` parameter defines the underlying data type. >>> a = np.ma.masked_all((3, 3)) >>> a.dtype dtype('float64') >>> a = np.ma.masked_all((3, 3), dtype=np.int32) >>> a.dtype dtype('int32')
python
numpy/ma/extras.py
120
[ "shape", "dtype" ]
false
1
6.32
numpy/numpy
31,054
numpy
false
equals
@Override public boolean equals(@Nullable Object other) { return (this == other || (other instanceof AbstractRegexpMethodPointcut otherPointcut && Arrays.equals(this.patterns, otherPointcut.patterns) && Arrays.equals(this.excludedPatterns, otherPointcut.excludedPatterns))); }
Compare this pointcut with another object for equality, based on the configured matching and exclusion pattern arrays. @param other the object to compare with @return {@code true} if both pointcuts have equal pattern arrays, {@code false} otherwise
java
spring-aop/src/main/java/org/springframework/aop/support/AbstractRegexpMethodPointcut.java
198
[ "other" ]
true
4
7.92
spring-projects/spring-framework
59,386
javadoc
false
set_context
def set_context( self, ti: TaskInstance | TaskInstanceHistory, *, identifier: str | None = None ) -> None | SetContextPropagate: """ Provide task_instance context to airflow task handler. Generally speaking returns None. But if attr `maintain_propagate` has been set to propagate, then returns sentinel MAINTAIN_PROPAGATE. This has the effect of overriding the default behavior to set `propagate` to False whenever set_context is called. At time of writing, this functionality is only used in unit testing. :param ti: task instance object :param identifier: if set, adds suffix to log file. For use when relaying exceptional messages to task logs from a context other than task or trigger run """ local_loc = self._init_file(ti, identifier=identifier) self.handler = NonCachingRotatingFileHandler( local_loc, encoding="utf-8", maxBytes=self.max_bytes, backupCount=self.backup_count, delay=self.delay, ) if self.formatter: self.handler.setFormatter(self.formatter) self.handler.setLevel(self.level) return SetContextPropagate.MAINTAIN_PROPAGATE if self.maintain_propagate else None
Provide task_instance context to airflow task handler. Generally speaking returns None. But if attr `maintain_propagate` has been set to propagate, then returns sentinel MAINTAIN_PROPAGATE. This has the effect of overriding the default behavior to set `propagate` to False whenever set_context is called. At time of writing, this functionality is only used in unit testing. :param ti: task instance object :param identifier: if set, adds suffix to log file. For use when relaying exceptional messages to task logs from a context other than task or trigger run
python
airflow-core/src/airflow/utils/log/file_task_handler.py
462
[ "self", "ti", "identifier" ]
None | SetContextPropagate
true
3
6.72
apache/airflow
43,597
sphinx
false
chebdiv
def chebdiv(c1, c2): """ Divide one Chebyshev series by another. Returns the quotient-with-remainder of two Chebyshev series `c1` / `c2`. The arguments are sequences of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Chebyshev series coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of Chebyshev series coefficients representing the quotient and remainder. See Also -------- chebadd, chebsub, chebmulx, chebmul, chebpow Notes ----- In general, the (polynomial) division of one C-series by another results in quotient and remainder terms that are not in the Chebyshev polynomial basis set. Thus, to express these results as C-series, it is typically necessary to "reproject" the results onto said basis set, which typically produces "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial import chebyshev as C >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not (array([3.]), array([-8., -4.])) >>> c2 = (0,1,2,3) >>> C.chebdiv(c2,c1) # neither "intuitive" (array([0., 2.]), array([-2., -4.])) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: raise ZeroDivisionError # FIXME: add message with details to exception # note: this is more efficient than `pu._div(chebmul, c1, c2)` lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: return c1[:1] * 0, c1 elif lc2 == 1: return c1 / c2[-1], c1[:1] * 0 else: z1 = _cseries_to_zseries(c1) z2 = _cseries_to_zseries(c2) quo, rem = _zseries_div(z1, z2) quo = pu.trimseq(_zseries_to_cseries(quo)) rem = pu.trimseq(_zseries_to_cseries(rem)) return quo, rem
Divide one Chebyshev series by another. Returns the quotient-with-remainder of two Chebyshev series `c1` / `c2`. The arguments are sequences of coefficients from lowest order "term" to highest, e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Chebyshev series coefficients ordered from low to high. Returns ------- [quo, rem] : ndarrays Of Chebyshev series coefficients representing the quotient and remainder. See Also -------- chebadd, chebsub, chebmulx, chebmul, chebpow Notes ----- In general, the (polynomial) division of one C-series by another results in quotient and remainder terms that are not in the Chebyshev polynomial basis set. Thus, to express these results as C-series, it is typically necessary to "reproject" the results onto said basis set, which typically produces "unintuitive" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial import chebyshev as C >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not (array([3.]), array([-8., -4.])) >>> c2 = (0,1,2,3) >>> C.chebdiv(c2,c1) # neither "intuitive" (array([0., 2.]), array([-2., -4.]))
python
numpy/polynomial/chebyshev.py
747
[ "c1", "c2" ]
false
5
6.24
numpy/numpy
31,054
numpy
false
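
Because the reprojected quotient and remainder look "unintuitive", it can help to verify the division identity c1 == quo * c2 + rem directly; the coefficients below are the docstring's own example.

import numpy as np
from numpy.polynomial import chebyshev as C

c1, c2 = (1, 2, 3), (3, 2, 1)
quo, rem = C.chebdiv(c1, c2)
# Reconstruct c1 in the Chebyshev basis from the quotient and remainder.
reconstructed = C.chebadd(C.chebmul(quo, c2), rem)
assert np.allclose(reconstructed, c1)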
toLong
public @Nullable Long toLong() { return this.pid; }
Return the application PID as a {@link Long}. @return the application PID or {@code null} @since 3.4.0
java
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationPid.java
76
[]
Long
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
drainRecencyQueue
@GuardedBy("this") void drainRecencyQueue() { ReferenceEntry<K, V> e; while ((e = recencyQueue.poll()) != null) { // An entry may be in the recency queue despite it being removed from // the map . This can occur when the entry was concurrently read while a // writer is removing it from the segment or after a clear has removed // all the segment's entries. if (accessQueue.contains(e)) { accessQueue.add(e); } } }
Drains the recency queue, updating eviction metadata that the entries therein were read in the specified relative order. This currently amounts to adding them to relevant eviction lists (accounting for the fact that they could have been removed from the map since being added to the recency queue).
java
android/guava/src/com/google/common/cache/LocalCache.java
2,492
[]
void
true
3
7.04
google/guava
51,352
javadoc
false
postProcessBeforeInitialization
default @Nullable Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { return bean; }
Apply this {@code BeanPostProcessor} to the given new bean instance <i>before</i> any bean initialization callbacks (like InitializingBean's {@code afterPropertiesSet} or a custom init-method). The bean will already be populated with property values. The returned bean instance may be a wrapper around the original. <p>The default implementation returns the given {@code bean} as-is. @param bean the new bean instance @param beanName the name of the bean @return the bean instance to use, either the original or a wrapped one; if {@code null}, no subsequent BeanPostProcessors will be invoked @throws org.springframework.beans.BeansException in case of errors @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet
java
spring-beans/src/main/java/org/springframework/beans/factory/config/BeanPostProcessor.java
74
[ "bean", "beanName" ]
Object
true
1
6.16
spring-projects/spring-framework
59,386
javadoc
false
moduleResolve
function moduleResolve(specifier, base, conditions, preserveSymlinks) { const protocol = typeof base === 'string' ? StringPrototypeSlice(base, 0, StringPrototypeIndexOf(base, ':') + 1) : base.protocol; const isData = protocol === 'data:'; // Order swapped from spec for minor perf gain. // Ok since relative URLs cannot parse as URLs. let resolved; if (shouldBeTreatedAsRelativeOrAbsolutePath(specifier)) { try { resolved = new URL(specifier, base); } catch (cause) { const error = new ERR_UNSUPPORTED_RESOLVE_REQUEST(specifier, base); setOwnProperty(error, 'cause', cause); throw error; } } else if (protocol === 'file:' && specifier[0] === '#') { resolved = packageImportsResolve(specifier, base, conditions); } else { try { resolved = new URL(specifier); } catch (cause) { if (isData && !BuiltinModule.canBeRequiredWithoutScheme(specifier)) { const error = new ERR_UNSUPPORTED_RESOLVE_REQUEST(specifier, base); setOwnProperty(error, 'cause', cause); throw error; } resolved = packageResolve(specifier, base, conditions); } } if (resolved.protocol !== 'file:') { return resolved; } return finalizeResolution(resolved, base, preserveSymlinks); }
Resolves a module specifier to a URL. @param {string} specifier - The module specifier to resolve. @param {string | URL | undefined} base - The base URL to resolve against. @param {Set<string>} conditions - An object containing environment conditions. @param {boolean} preserveSymlinks - Whether to preserve symlinks in the resolved URL. @returns {URL}
javascript
lib/internal/modules/esm/resolve.js
830
[ "specifier", "base", "conditions", "preserveSymlinks" ]
false
12
6.08
nodejs/node
114,839
jsdoc
false
oss_write
def oss_write(self, log, remote_log_location, append=True) -> bool: """ Write the log to remote_log_location and return `True`; fails silently and returns `False` on error. :param log: the log to write to the remote_log_location :param remote_log_location: the log's location in remote storage :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :return: whether the log is successfully written to remote location or not. """ oss_remote_log_location = f"{self.base_folder}/{remote_log_location}" pos = 0 if append and self.oss_log_exists(remote_log_location): head = self.hook.head_key(self.bucket_name, oss_remote_log_location) pos = head.content_length self.log.info("log write pos is: %s", pos) try: self.log.info("writing remote log: %s", oss_remote_log_location) self.hook.append_string(self.bucket_name, log, oss_remote_log_location, pos) except Exception: self.log.exception( "Could not write logs to %s, log write pos is: %s, Append is %s", oss_remote_log_location, pos, append, ) return False return True
Write the log to remote_log_location and return `True`; fails silently and returns `False` on error. :param log: the log to write to the remote_log_location :param remote_log_location: the log's location in remote storage :param append: if False, any existing log file is overwritten. If True, the new log is appended to any existing logs. :return: whether the log is successfully written to remote location or not.
python
providers/alibaba/src/airflow/providers/alibaba/cloud/log/oss_task_handler.py
129
[ "self", "log", "remote_log_location", "append" ]
bool
true
3
7.92
apache/airflow
43,597
sphinx
false
parseDsaDer
private static DSAPrivateKeySpec parseDsaDer(byte[] keyBytes) throws IOException { DerParser parser = new DerParser(keyBytes); DerParser.Asn1Object sequence = parser.readAsn1Object(); parser = sequence.getParser(); parser.readAsn1Object().getInteger(); // (version) We don't need it but must read to get to p BigInteger p = parser.readAsn1Object().getInteger(); BigInteger q = parser.readAsn1Object().getInteger(); BigInteger g = parser.readAsn1Object().getInteger(); parser.readAsn1Object().getInteger(); // we don't need x BigInteger x = parser.readAsn1Object().getInteger(); return new DSAPrivateKeySpec(x, p, q, g); }
Parses a DER encoded DSA key to a {@link DSAPrivateKeySpec} using a minimal {@link DerParser} @param keyBytes the private key raw bytes @return {@link DSAPrivateKeySpec} @throws IOException if the DER encoded key can't be parsed
java
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
646
[ "keyBytes" ]
DSAPrivateKeySpec
true
1
6.08
elastic/elasticsearch
75,680
javadoc
false
genericArrayType
public static GenericArrayType genericArrayType(final Type componentType) { return new GenericArrayTypeImpl(Objects.requireNonNull(componentType, "componentType")); }
Creates a generic array type instance. @param componentType the type of the elements of the array. For example the component type of {@code boolean[]} is {@code boolean}. @return {@link GenericArrayType}. @since 3.2
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
573
[ "componentType" ]
GenericArrayType
true
1
6.64
apache/commons-lang
2,896
javadoc
false
setFileExtensions
public void setFileExtensions(List<String> fileExtensions) { Assert.isTrue(!CollectionUtils.isEmpty(fileExtensions), "At least one file extension is required"); for (String extension : fileExtensions) { if (!extension.startsWith(".")) { throw new IllegalArgumentException("File extension '" + extension + "' should start with '.'"); } } this.fileExtensions = Collections.unmodifiableList(fileExtensions); }
Set the list of supported file extensions. <p>The default is a list containing {@code .properties} and {@code .xml}. @param fileExtensions the file extensions (starts with a dot) @since 6.1
java
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
125
[ "fileExtensions" ]
void
true
2
6.88
spring-projects/spring-framework
59,386
javadoc
false
getCommands
async function getCommands(options: ExecOptionsWithStringEncoding, existingCommands?: Set<string>): Promise<ICompletionResource[]> { const output = await execHelper('Get-Command -All | Select-Object Name, CommandType, Definition, ModuleName, @{Name="Version";Expression={$_.Version.ToString()}} | ConvertTo-Json', { ...options, maxBuffer: 1024 * 1024 * 100 // This is a lot of content, increase buffer size }); let json: unknown; try { json = JSON.parse(output); } catch (e) { console.error('Error parsing pwsh output:', e); return []; } if (!Array.isArray(json)) { return []; } return ( (json as unknown[]) .filter(isPwshGetCommandEntry) .filter(e => e.CommandType !== PwshCommandType.Alias) .map(e => { const detailParts: string[] = []; if (e.Definition) { detailParts.push(e.Definition.trim()); } if (e.ModuleName && e.Version) { detailParts.push(`${e.ModuleName} v${e.Version}`); } return { label: e.Name, detail: detailParts.join('\n\n'), kind: pwshCommandTypeToCompletionKind.get(e.CommandType) }; }) ); }
Queries pwsh via `Get-Command -All` for available commands and maps each entry to a completion resource, filtering out aliases; the detail string combines the command definition with its module name and version. @param options Exec options used to run pwsh. @param existingCommands Optional set of already-known command names. @returns The completion resources for the discovered commands.
typescript
extensions/terminal-suggest/src/shell/pwsh.ts
103
[ "options", "existingCommands?" ]
true
6
6.96
microsoft/vscode
179,840
jsdoc
true
findPropertySource
private String findPropertySource(MutablePropertySources sources) { if (ClassUtils.isPresent(SERVLET_ENVIRONMENT_CLASS, null)) { PropertySource<?> servletPropertySource = sources.stream() .filter((source) -> SERVLET_ENVIRONMENT_PROPERTY_SOURCES.contains(source.getName())) .findFirst() .orElse(null); if (servletPropertySource != null) { return servletPropertySource.getName(); } } return StandardEnvironment.SYSTEM_PROPERTIES_PROPERTY_SOURCE_NAME; }
Determine the name of the property source before which the JSON property source should be inserted: the first servlet-related property source when running in a servlet environment, otherwise the system properties property source. @param sources the mutable property sources @return the name of the property source
java
core/spring-boot/src/main/java/org/springframework/boot/support/SpringApplicationJsonEnvironmentPostProcessor.java
166
[ "sources" ]
String
true
3
8.24
spring-projects/spring-boot
79,428
javadoc
false
from_range
def from_range(cls, data: range, name=None, dtype: Dtype | None = None) -> Self: """ Create :class:`pandas.RangeIndex` from a ``range`` object. This method provides a way to create a :class:`pandas.RangeIndex` directly from a Python ``range`` object. The resulting :class:`RangeIndex` will have the same start, stop, and step values as the input ``range`` object. It is particularly useful for constructing indices in an efficient and memory-friendly manner. Parameters ---------- data : range The range object to be converted into a RangeIndex. name : str, default None Name to be stored in the index. dtype : Dtype or None Data type for the RangeIndex. If None, the default integer type will be used. Returns ------- RangeIndex See Also -------- RangeIndex : Immutable Index implementing a monotonic integer range. Index : Immutable sequence used for indexing and alignment. Examples -------- >>> pd.RangeIndex.from_range(range(5)) RangeIndex(start=0, stop=5, step=1) >>> pd.RangeIndex.from_range(range(2, -10, -3)) RangeIndex(start=2, stop=-10, step=-3) """ if not isinstance(data, range): raise TypeError( f"{cls.__name__}(...) must be called with object coercible to a " f"range, {data!r} was passed" ) cls._validate_dtype(dtype) return cls._simple_new(data, name=name)
Create :class:`pandas.RangeIndex` from a ``range`` object. This method provides a way to create a :class:`pandas.RangeIndex` directly from a Python ``range`` object. The resulting :class:`RangeIndex` will have the same start, stop, and step values as the input ``range`` object. It is particularly useful for constructing indices in an efficient and memory-friendly manner. Parameters ---------- data : range The range object to be converted into a RangeIndex. name : str, default None Name to be stored in the index. dtype : Dtype or None Data type for the RangeIndex. If None, the default integer type will be used. Returns ------- RangeIndex See Also -------- RangeIndex : Immutable Index implementing a monotonic integer range. Index : Immutable sequence used for indexing and alignment. Examples -------- >>> pd.RangeIndex.from_range(range(5)) RangeIndex(start=0, stop=5, step=1) >>> pd.RangeIndex.from_range(range(2, -10, -3)) RangeIndex(start=2, stop=-10, step=-3)
python
pandas/core/indexes/range.py
188
[ "cls", "data", "name", "dtype" ]
Self
true
2
8
pandas-dev/pandas
47,362
numpy
false
availableMemory
long availableMemory();
Returns the amount of memory available for allocation by this pool. NOTE: result may be negative (pools may over allocate to avoid starvation issues) @return bytes available
java
clients/src/main/java/org/apache/kafka/common/memory/MemoryPool.java
84
[]
true
1
6.48
apache/kafka
31,560
javadoc
false
throwableOfThrowable
public static <T extends Throwable> T throwableOfThrowable(final Throwable throwable, final Class<T> clazz, final int fromIndex) { return throwableOf(throwable, clazz, fromIndex, false); }
Returns the first {@link Throwable} that matches the specified type in the exception chain from a specified index. Subclasses of the specified class do not match - see {@link #throwableOfType(Throwable, Class, int)} for the opposite. <p> A {@code null} throwable returns {@code null}. A {@code null} type returns {@code null}. No match in the chain returns {@code null}. A negative start index is treated as zero. A start index greater than the number of throwables returns {@code null}. </p> @param <T> the type of Throwable you are searching. @param throwable the throwable to inspect, may be null. @param clazz the class to search for, subclasses do not match, null returns null. @param fromIndex the (zero-based) index of the starting position, negative treated as zero, larger than chain size returns null. @return the first matching throwable from the throwable chain, null if no match or null input. @since 3.10
java
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
974
[ "throwable", "clazz", "fromIndex" ]
T
true
1
6.8
apache/commons-lang
2,896
javadoc
false
mean_pinball_loss
def mean_pinball_loss( y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average" ): """Pinball loss for quantile regression. Read more in the :ref:`User Guide <pinball_loss>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. alpha : float, slope of the pinball loss, default=0.5, This loss is equivalent to :ref:`mean_absolute_error` when `alpha=0.5`, `alpha=0.95` is minimized by estimators of the 95th percentile. multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. The pinball loss output is a non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_pinball_loss >>> y_true = [1, 2, 3] >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1) 0.03... >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1) 0.3... >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9) 0.3... >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9) 0.03... >>> mean_pinball_loss(y_true, y_true, alpha=0.1) 0.0 >>> mean_pinball_loss(y_true, y_true, alpha=0.9) 0.0 """ xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput) _, y_true, y_pred, sample_weight, multioutput = ( _check_reg_targets_with_floating_dtype( y_true, y_pred, sample_weight, multioutput, xp=xp ) ) diff = y_true - y_pred sign = xp.astype(diff >= 0, diff.dtype) loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff output_errors = _average(loss, weights=sample_weight, axis=0, xp=xp) if isinstance(multioutput, str) and multioutput == "raw_values": return output_errors if isinstance(multioutput, str) and multioutput == "uniform_average": # pass None as weights to _average: uniform mean multioutput = None # Average across the outputs (if needed). # The second call to `_average` should always return # a scalar array that we convert to a Python float to # consistently return the same eager evaluated value. # Therefore, `axis=None`. return float(_average(output_errors, weights=multioutput, xp=xp))
Pinball loss for quantile regression. Read more in the :ref:`User Guide <pinball_loss>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. alpha : float, slope of the pinball loss, default=0.5, This loss is equivalent to :ref:`mean_absolute_error` when `alpha=0.5`, `alpha=0.95` is minimized by estimators of the 95th percentile. multioutput : {'raw_values', 'uniform_average'} or array-like of shape \ (n_outputs,), default='uniform_average' Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. The pinball loss output is a non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_pinball_loss >>> y_true = [1, 2, 3] >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1) 0.03... >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1) 0.3... >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9) 0.3... >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9) 0.03... >>> mean_pinball_loss(y_true, y_true, alpha=0.1) 0.0 >>> mean_pinball_loss(y_true, y_true, alpha=0.9) 0.0
python
sklearn/metrics/_regression.py
318
[ "y_true", "y_pred", "sample_weight", "alpha", "multioutput" ]
false
5
7.28
scikit-learn/scikit-learn
64,340
numpy
false
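
Recomputing the pinball loss by hand makes the formula concrete; the values mirror the docstring's first example.

import numpy as np
from sklearn.metrics import mean_pinball_loss

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([0.0, 2.0, 3.0])
alpha = 0.1

# loss_i = alpha * diff_i        if diff_i >= 0
#        = (alpha - 1) * diff_i  otherwise
diff = y_true - y_pred
manual = np.mean(np.where(diff >= 0, alpha * diff, (alpha - 1) * diff))
assert np.isclose(manual, mean_pinball_loss(y_true, y_pred, alpha=alpha))  # ~0.0333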
outputKeys
public Set<String> outputKeys() { Set<String> result = new LinkedHashSet<>(matchPairs.size()); for (DissectPair matchPair : matchPairs) { if (matchPair.key.getModifier() != DissectKey.Modifier.NAMED_SKIP) { result.add(matchPair.key.getName()); } } return result; }
Returns the output keys produced by the instance (excluding named skip keys), e.g. for the pattern <code>"%{a} %{b} %{?c}"</code> the result is <code>[a, b]</code>. <p> The result is an ordered set, where the entries are in the same order as they appear in the pattern. <p> The reference keys are returned with the name they have in the pattern, e.g. for <code>"%{*x} %{&amp;x}"</code> the result is <code>[x]</code>. @return the output keys produced by the instance.
java
libs/dissect/src/main/java/org/elasticsearch/dissect/DissectParser.java
310
[]
true
2
8.24
elastic/elasticsearch
75,680
javadoc
false
removeAll
@CanIgnoreReturnValue public static boolean removeAll(Iterator<?> removeFrom, Collection<?> elementsToRemove) { checkNotNull(elementsToRemove); boolean result = false; while (removeFrom.hasNext()) { if (elementsToRemove.contains(removeFrom.next())) { removeFrom.remove(); result = true; } } return result; }
Traverses an iterator and removes every element that belongs to the provided collection. The iterator will be left exhausted: its {@code hasNext()} method will return {@code false}. @param removeFrom the iterator to (potentially) remove elements from @param elementsToRemove the elements to remove @return {@code true} if any element was removed from {@code iterator}
java
android/guava/src/com/google/common/collect/Iterators.java
205
[ "removeFrom", "elementsToRemove" ]
true
3
7.6
google/guava
51,352
javadoc
false
wrapHook
function wrapHook(index, userHookOrDefault, next) { return function nextStep(arg0, context) { lastRunIndex = index; if (context && context !== mergedContext) { ObjectAssign(mergedContext, context); } const hookResult = userHookOrDefault(arg0, mergedContext, next); if (lastRunIndex > 0 && lastRunIndex === index && !hookResult.shortCircuit) { throw new ERR_INVALID_RETURN_PROPERTY_VALUE('true', name, 'shortCircuit', hookResult.shortCircuit); } return validate(arg0, mergedContext, hookResult); }; }
Helper function to wrap around invocation of user hook or the default step in order to fill in missing arguments or check returned results. Due to the merging of the context, this must be a closure. @param {number} index Index in the chain. Default step is 0, last added hook is 1, and so on. @param {Function} userHookOrDefault Either the user hook or the default step to invoke. @param {Function|undefined} next The next wrapped step. If this is the default step, it's undefined. @returns {Function} Wrapped hook or default step.
javascript
lib/internal/modules/customization_hooks.js
183
[ "index", "userHookOrDefault", "next" ]
false
6
6.08
nodejs/node
114,839
jsdoc
false
bindOrCreate
public <T> T bindOrCreate(String name, Bindable<T> target) { return bindOrCreate(ConfigurationPropertyName.of(name), target, null); }
Bind the specified target {@link Bindable} using this binder's {@link ConfigurationPropertySource property sources} or create a new instance using the type of the {@link Bindable} if the result of the binding is {@code null}. @param name the configuration property name to bind @param target the target bindable @param <T> the bound type @return the bound or created object @since 2.2.0 @see #bindOrCreate(ConfigurationPropertyName, Bindable, BindHandler)
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
317
[ "name", "target" ]
T
true
1
6.16
spring-projects/spring-boot
79,428
javadoc
false
predict
def predict(self, X): """Predict classes for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- y : ndarray of shape (n_samples,) The predicted classes. """ pred = self.decision_function(X) if self.n_classes_ == 2: return self.classes_.take(pred > 0, axis=0) return self.classes_.take(np.argmax(pred, axis=1), axis=0)
Predict classes for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- y : ndarray of shape (n_samples,) The predicted classes.
python
sklearn/ensemble/_weight_boosting.py
572
[ "self", "X" ]
false
2
6.08
scikit-learn/scikit-learn
64,340
numpy
false
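
An end-to-end usage sketch for the boosted-ensemble predict above; the toy dataset is generated only for illustration.

from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(n_samples=100, n_features=4, random_state=0)
clf = AdaBoostClassifier(n_estimators=50, random_state=0).fit(X, y)
clf.predict(X[:5])            # class labels from the weighted vote
clf.decision_function(X[:5])  # the raw scores that predict() thresholds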
run_node
def run_node( tracer: Any, node: torch.fx.Node, args: Any, kwargs: Any, nnmodule: Any ) -> Any: """ Runs a given node, with the given args and kwargs. Behavior is dictated by a node's op. run_node is useful for extracting real values out of nodes. See get_real_value for more info on common usage. Note: The tracer arg is only used for 'get_attr' ops Note: The nnmodule arg is only used for 'call_module' ops Nodes that are not call_function, call_method, call_module, or get_attr will raise an AssertionError. """ op = node.op with set_current_node(node): def make_error_message(e: Any) -> str: return ( f"Dynamo failed to run FX node with fake tensors: {op} {node.target}(*{args}, **{kwargs}): got " + repr(e) ) from .exc import Unsupported try: if op == "call_function": return node.target(*args, **kwargs) # type: ignore[operator] elif op == "call_method": if not hasattr(args[0], node.target): # type: ignore[arg-type] from .exc import unimplemented unimplemented( gb_type="Missing attribute when running call_method node", context="", explanation=make_error_message("attribute not defined"), hints=[], ) return getattr(args[0], node.target)(*args[1:], **kwargs) # type: ignore[arg-type] elif op == "call_module": assert nnmodule is not None return nnmodule(*args, **kwargs) elif op == "get_attr": return tracer.output_graph.get_submodule(node.target) elif op == "placeholder": assert "example_value" in node.meta return node.meta["example_value"] except (NotImplementedError, UnsupportedFakeTensorException) as e: # NB: mimic how wrap_fake_exception does it from .exc import unimplemented hints = [] if isinstance(e, NotImplementedError): hints = [ "If the op is a PyTorch op, please file an issue to PyTorch.", ] unimplemented( gb_type="NotImplementedError/UnsupportedFakeTensorException when running FX node", context="", explanation=make_error_message(e), hints=hints, from_exc=e, ) except Unsupported: raise except Exception as e: raise RuntimeError(make_error_message(e)).with_traceback( e.__traceback__ ) from e raise AssertionError(op)
Runs a given node, with the given args and kwargs. Behavior is dictated by a node's op. run_node is useful for extracting real values out of nodes. See get_real_value for more info on common usage. Note: The tracer arg is only used for 'get_attr' ops Note: The nnmodule arg is only used for 'call_module' ops Nodes that are not call_function, call_method, call_module, or get_attr will raise an AssertionError.
python
torch/_dynamo/utils.py
3,689
[ "tracer", "node", "args", "kwargs", "nnmodule" ]
Any
true
8
6.96
pytorch/pytorch
96,034
unknown
false
indexOf
public static int indexOf(final Object[] array, final Object objectToFind, int startIndex) { if (array == null) { return INDEX_NOT_FOUND; } startIndex = max0(startIndex); if (objectToFind == null) { for (int i = startIndex; i < array.length; i++) { if (array[i] == null) { return i; } } } else { for (int i = startIndex; i < array.length; i++) { if (objectToFind.equals(array[i])) { return i; } } } return INDEX_NOT_FOUND; }
Finds the index of the given object in the array starting at the given index. <p> This method returns {@link #INDEX_NOT_FOUND} ({@code -1}) for a {@code null} input array. </p> <p> A negative startIndex is treated as zero. A startIndex larger than the array length will return {@link #INDEX_NOT_FOUND} ({@code -1}). </p> @param array the array to search for the object, may be {@code null}. @param objectToFind the object to find, may be {@code null}. @param startIndex the index to start searching. @return the index of the object within the array starting at the index, {@link #INDEX_NOT_FOUND} ({@code -1}) if not found or {@code null} array input.
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
2,719
[ "array", "objectToFind", "startIndex" ]
true
7
8.08
apache/commons-lang
2,896
javadoc
false
accepts
boolean accepts(String mode);
Returns if this accepts and can run the given mode. @param mode the mode to check @return if this instance accepts the mode
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jarmode/JarMode.java
33
[ "mode" ]
true
1
6.64
spring-projects/spring-boot
79,428
javadoc
false
inner
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R: """Retrieve the cached result without calling the function. Checks memory cache first, then disk cache. Populates memory cache from disk on a disk hit. Args: *args: Positional arguments to generate the cache key. **kwargs: Keyword arguments to generate the cache key. Returns: The cached result (decoded if decoder is provided). Raises: KeyError: If no cached result exists for the given parameters. """ # Try memory cache first via memoizer try: return memory_replay_fn(*args, **kwargs) except KeyError: pass # Memory miss, check disk # Memory miss - check disk cache cache_key = self._make_key(custom_params_encoder, *args, **kwargs) disk_hit = self._disk_cache.get(cache_key) if disk_hit is not None: # Disk cache hit - unpickle the CacheEntry pickled_value = disk_hit.value cache_entry = cast(CacheEntry, pickle.loads(pickled_value)) # Populate memory cache for future access self._memoizer._cache.insert(cache_key, cache_entry) # Decode and return if custom_result_decoder is not None: decoder_fn = custom_result_decoder(*args, **kwargs) return decoder_fn(cast(_EncodedR, cache_entry.encoded_result)) return cast(_R, cache_entry.encoded_result) # Complete miss raise KeyError(f"No cached result found for key: {cache_key}")
Retrieve the cached result without calling the function. Checks memory cache first, then disk cache. Populates memory cache from disk on a disk hit. Args: *args: Positional arguments to generate the cache key. **kwargs: Keyword arguments to generate the cache key. Returns: The cached result (decoded if decoder is provided). Raises: KeyError: If no cached result exists for the given parameters.
python
torch/_inductor/runtime/caching/interfaces.py
765
[]
_R
true
3
8.08
pytorch/pytorch
96,034
google
false
get_log_events
def get_log_events( self, log_group: str, log_stream_name: str, start_time: int = 0, skip: int = 0, start_from_head: bool | None = None, continuation_token: ContinuationToken | None = None, end_time: int | None = None, ) -> Generator[CloudWatchLogEvent, None, None]: """ Return a generator for log items in a single stream; yields all items available at the current moment. .. seealso:: - :external+boto3:py:meth:`CloudWatchLogs.Client.get_log_events` :param log_group: The name of the log group. :param log_stream_name: The name of the specific stream. :param start_time: The timestamp value in ms to start reading the logs from (default: 0). :param skip: The number of log entries to skip at the start (default: 0). This is for when there are multiple entries at the same timestamp. :param continuation_token: a token indicating where to read logs from. Will be updated as this method reads new logs, to be reused in subsequent calls. :param end_time: The timestamp value in ms to stop reading the logs from (default: None). If None is provided, reads it until the end of the log stream :return: | A CloudWatch log event with the following key-value pairs: | 'timestamp' (int): The time in milliseconds of the event. | 'message' (str): The log event data. | 'ingestionTime' (int): The time in milliseconds the event was ingested. """ if continuation_token is None: continuation_token = AwsLogsHook.ContinuationToken() num_consecutive_empty_response = 0 while True: if continuation_token.value is not None: token_arg: dict[str, str] = {"nextToken": continuation_token.value} else: token_arg = {} response = self.conn.get_log_events( **prune_dict( { "logGroupName": log_group, "logStreamName": log_stream_name, "startTime": start_time, "endTime": end_time, "startFromHead": True, **token_arg, } ) ) events = response["events"] event_count = len(events) if event_count > skip: events = events[skip:] skip = 0 else: skip -= event_count events = [] yield from events if continuation_token.value == response["nextForwardToken"]: return if not event_count: num_consecutive_empty_response += 1 if num_consecutive_empty_response >= NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD: # Exit if there are more than NUM_CONSECUTIVE_EMPTY_RESPONSE_EXIT_THRESHOLD consecutive # empty responses return else: num_consecutive_empty_response = 0 continuation_token.value = response["nextForwardToken"]
Return a generator for log items in a single stream; yields all items available at the current moment. .. seealso:: - :external+boto3:py:meth:`CloudWatchLogs.Client.get_log_events` :param log_group: The name of the log group. :param log_stream_name: The name of the specific stream. :param start_time: The timestamp value in ms to start reading the logs from (default: 0). :param skip: The number of log entries to skip at the start (default: 0). This is for when there are multiple entries at the same timestamp. :param continuation_token: a token indicating where to read logs from. Will be updated as this method reads new logs, to be reused in subsequent calls. :param end_time: The timestamp value in ms to stop reading the logs from (default: None). If None is provided, reads it until the end of the log stream :return: | A CloudWatch log event with the following key-value pairs: | 'timestamp' (int): The time in milliseconds of the event. | 'message' (str): The log event data. | 'ingestionTime' (int): The time in milliseconds the event was ingested.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/logs.py
69
[ "self", "log_group", "log_stream_name", "start_time", "skip", "start_from_head", "continuation_token", "end_time" ]
Generator[CloudWatchLogEvent, None, None]
true
11
8.16
apache/airflow
43,597
sphinx
false
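The termination condition in get_log_events is the part most easily missed: CloudWatch signals exhaustion by returning the same nextForwardToken you passed in, not by returning an empty token. A runnable sketch of that loop against a stubbed client (a hypothetical stand-in, not the boto3 API):

class StubLogsClient:
    """Hypothetical stand-in for a paginated log API."""

    def __init__(self, pages):
        self._pages = pages  # list of pages, each a list of events

    def get_log_events(self, next_token=None):
        i = 0 if next_token is None else int(next_token)
        page = self._pages[i] if i < len(self._pages) else []
        # Like CloudWatch: the token stops advancing at the end of the stream.
        forward = str(min(i + 1, len(self._pages)))
        return {"events": page, "nextForwardToken": forward}

def iter_events(client):
    token = None
    while True:
        resp = client.get_log_events(next_token=token)
        yield from resp["events"]
        if token == resp["nextForwardToken"]:
            return  # same token back means no more data
        token = resp["nextForwardToken"]

client = StubLogsClient([[{"message": "a"}], [{"message": "b"}], []])
assert [e["message"] for e in iter_events(client)] == ["a", "b"]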
findAnnotationOnBean
@Override public <A extends Annotation> @Nullable A findAnnotationOnBean( String beanName, Class<A> annotationType, boolean allowFactoryBeanInit) throws NoSuchBeanDefinitionException { Class<?> beanType = getType(beanName, allowFactoryBeanInit); return (beanType != null ? AnnotatedElementUtils.findMergedAnnotation(beanType, annotationType) : null); }
Find a merged annotation of {@code annotationType} on the type of the named bean. @param beanName the name of the bean to look for annotations on @param annotationType the type of annotation to look for @param allowFactoryBeanInit whether a {@code FactoryBean} may get initialized just for the purpose of determining its object type @return the annotation of the given type if found, or {@code null} otherwise @throws NoSuchBeanDefinitionException if there is no bean with the given name
java
spring-beans/src/main/java/org/springframework/beans/factory/support/StaticListableBeanFactory.java
472
[ "beanName", "annotationType", "allowFactoryBeanInit" ]
A
true
2
6.88
spring-projects/spring-framework
59,386
javadoc
false
poke
def poke(self, context: Context) -> bool: """ Pokes until the QuickSight Ingestion has successfully finished. :param context: The task context during execution. :return: True if it COMPLETED and False if not. """ self.log.info("Poking for Amazon QuickSight Ingestion ID: %s", self.ingestion_id) quicksight_ingestion_state = self.hook.get_status(None, self.data_set_id, self.ingestion_id) self.log.info("QuickSight Status: %s", quicksight_ingestion_state) if quicksight_ingestion_state in self.errored_statuses: error = self.hook.get_error_info(None, self.data_set_id, self.ingestion_id) raise AirflowException(f"The QuickSight Ingestion failed. Error info: {error}") return quicksight_ingestion_state == self.success_status
Pokes until the QuickSight Ingestion has successfully finished. :param context: The task context during execution. :return: True if it COMPLETED and False if not.
python
providers/amazon/src/airflow/providers/amazon/aws/sensors/quicksight.py
63
[ "self", "context" ]
bool
true
2
7.92
apache/airflow
43,597
sphinx
false
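A sensor's poke contract is simple: return True to finish, False to be tried again, raise to fail. A minimal sketch of the surrounding poll loop (hypothetical, not Airflow's actual scheduling logic):

import time

def poll(poke, interval_s=1.0, timeout_s=10.0):
    """Call poke() until it returns True, raising on timeout."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if poke():
            return
        time.sleep(interval_s)
    raise TimeoutError("sensor did not succeed before the timeout")

states = iter(["INITIALIZED", "RUNNING", "COMPLETED"])
poll(lambda: next(states) == "COMPLETED", interval_s=0.01)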
_get_field_choices
def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = [*parent_path, field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) )
Yield all allowed field paths in breadth-first search order.
python
django/db/models/sql/compiler.py
1,452
[]
false
5
6.08
django/django
86,204
unknown
false
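The deque-based traversal in _get_field_choices is ordinary breadth-first search with the path accumulated alongside each node. The same pattern on a plain nested dict (a sketch, not Django's compiler internals; "__" mirrors Django's LOOKUP_SEP):

import collections

def iter_paths(tree, sep="__"):
    """Yield every joined path in a nested dict, breadth-first."""
    queue = collections.deque([((), tree)])
    while queue:
        path, node = queue.popleft()
        yield sep.join(path) if path else "self"
        if isinstance(node, dict):
            queue.extend(((*path, key), child) for key, child in node.items())

tree = {"author": {"profile": {}}, "editor": {}}
assert list(iter_paths(tree)) == ["self", "author", "editor", "author__profile"]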
principalSerde
@Override public Optional<KafkaPrincipalSerde> principalSerde() { return Optional.of(principalBuilder); }
Returns the principal serde, backed by the configured principal builder. @return an {@code Optional} containing the {@code KafkaPrincipalSerde}
java
clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java
163
[]
true
1
6
apache/kafka
31,560
javadoc
false
sanitize_conn_id
def sanitize_conn_id(conn_id: str | None, max_length=CONN_ID_MAX_LEN) -> str | None: r""" Sanitizes the connection id and allows only specific characters to be within. Namely, it allows alphanumeric characters plus the symbols #,!,-,_,.,:,\,/ and () from 1 and up to 250 consecutive matches. If desired, the max length can be adjusted by setting `max_length`. You can try to play with the regex here: https://regex101.com/r/69033B/1 The character selection is such that it prevents the injection of javascript or executable bits to avoid any awkward behaviour in the front-end. :param conn_id: The connection id to sanitize. :param max_length: The max length of the connection ID, by default it is 250. :return: the sanitized string, `None` otherwise. """ # check if `conn_id` or our match group is `None` and the `conn_id` is within the specified length. if (not isinstance(conn_id, str) or len(conn_id) > max_length) or ( res := re.match(RE_SANITIZE_CONN_ID, conn_id) ) is None: return None # if we reach here, then we matched something, return the first match return res.group(0)
r""" Sanitizes the connection id and allows only specific characters to be within. Namely, it allows alphanumeric characters plus the symbols #,!,-,_,.,:,\,/ and () from 1 and up to 250 consecutive matches. If desired, the max length can be adjusted by setting `max_length`. You can try to play with the regex here: https://regex101.com/r/69033B/1 The character selection is such that it prevents the injection of javascript or executable bits to avoid any awkward behaviour in the front-end. :param conn_id: The connection id to sanitize. :param max_length: The max length of the connection ID, by default it is 250. :return: the sanitized string, `None` otherwise.
python
airflow-core/src/airflow/models/connection.py
55
[ "conn_id", "max_length" ]
str | None
true
4
8.4
apache/airflow
43,597
sphinx
false
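The actual RE_SANITIZE_CONN_ID pattern is not shown in this record, so the sketch below assumes a pattern built from the character class the docstring describes; treat the regex as illustrative, not Airflow's exact one.

import re

# Assumed pattern: alphanumerics plus # ! ( ) - _ . : \ / up to 250 chars.
RE_SANITIZE = re.compile(r"^[\w#!()\-.:/\\]{1,250}")

def sanitize(conn_id, max_length=250):
    if not isinstance(conn_id, str) or len(conn_id) > max_length:
        return None
    match = RE_SANITIZE.match(conn_id)
    return match.group(0) if match else None

assert sanitize("my-conn_1") == "my-conn_1"
assert sanitize("<script>") is None
assert sanitize("x" * 251) is None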
readDeclaredField
public static Object readDeclaredField(final Object target, final String fieldName) throws IllegalAccessException { return readDeclaredField(target, fieldName, false); }
Reads the named {@code public} {@link Field}. Only the class of the specified object will be considered. @param target the object to reflect, must not be {@code null}. @param fieldName the field name to obtain. @return the value of the field. @throws NullPointerException if {@code target} is {@code null}. @throws IllegalArgumentException if {@code fieldName} is {@code null}, blank or empty, or could not be found. @throws IllegalAccessException if the named field is not {@code public} @throws SecurityException if an underlying accessible object's method denies the request. @see SecurityManager#checkPermission
java
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
277
[ "target", "fieldName" ]
Object
true
1
6.32
apache/commons-lang
2,896
javadoc
false
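The "declared only" restriction in readDeclaredField — look at the object's own class, not its superclasses — has a direct Python analogue via the class __dict__. A sketch of the distinction (a hypothetical helper, not commons-lang):

class Base:
    x = 1

class Child(Base):
    y = 2

def read_declared(obj, name):
    """Read an attribute declared directly on obj's class, ignoring bases."""
    cls = type(obj)
    if name not in vars(cls):  # vars(cls) is cls.__dict__: declared names only
        raise AttributeError(f"{name!r} not declared on {cls.__name__}")
    return vars(cls)[name]

c = Child()
assert read_declared(c, "y") == 2
assert getattr(c, "x") == 1   # normal lookup walks the MRO
try:
    read_declared(c, "x")     # declared-only lookup does not
except AttributeError:
    pass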
containsDescendantOf
@Override public ConfigurationPropertyState containsDescendantOf(ConfigurationPropertyName name) { PropertySource<?> source = getPropertySource(); Object underlyingSource = source.getSource(); if (underlyingSource instanceof Random) { return containsDescendantOfForRandom("random", name); } if (underlyingSource instanceof PropertySource<?> underlyingPropertySource && underlyingPropertySource.getSource() instanceof Random) { // Assume wrapped random sources use the source name as the prefix return containsDescendantOfForRandom(source.getName(), name); } return ConfigurationPropertyState.UNKNOWN; }
Determine whether the underlying property source contains any descendants of the given name, special-casing {@code Random}-backed sources. @param name the configuration property name to check @return the containment state, or {@code UNKNOWN} if it cannot be determined
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/SpringConfigurationPropertySource.java
119
[ "name" ]
ConfigurationPropertyState
true
4
6.24
spring-projects/spring-boot
79,428
javadoc
false
getExistingKeys
function getExistingKeys(keys: Array<string | symbol>, keysToLayerMap: Map<string | symbol, CompositeProxyLayer>) { return keys.filter((key) => { const layer = keysToLayerMap.get(key) return layer?.has?.(key) ?? true }) }
Filters the given keys, keeping only those whose owning layer still reports them as present; keys whose layer does not implement `has` are kept by default. @param keys candidate keys exposed by the proxy @param keysToLayerMap mapping from each key to the layer that defined it @returns the keys that currently exist
typescript
packages/client/src/runtime/core/compositeProxy/createCompositeProxy.ts
141
[ "keys", "keysToLayerMap" ]
false
1
6.08
prisma/prisma
44,834
jsdoc
false
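The optional-chaining default in getExistingKeys (layer?.has?.(key) ?? true) means a key is kept unless its layer explicitly reports it missing. The same defaulting logic in Python, with hypothetical layer objects:

def existing_keys(keys, key_to_layer):
    """Keep a key unless its layer defines has() and reports it missing."""
    kept = []
    for key in keys:
        layer = key_to_layer.get(key)
        has = getattr(layer, "has", None)
        if has is None or has(key):  # no layer / no has() -> keep by default
            kept.append(key)
    return kept

class Layer:
    def __init__(self, present):
        self._present = set(present)
    def has(self, key):
        return key in self._present

layers = {"a": Layer({"a"}), "b": Layer(set())}
assert existing_keys(["a", "b", "c"], layers) == ["a", "c"]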
read
@Override public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { if ((offset < 0) || (length < 0) || (offset > dsts.length - length)) throw new IndexOutOfBoundsException(); int totalRead = 0; int i = offset; while (i < offset + length) { if (dsts[i].hasRemaining()) { int read = read(dsts[i]); if (read > 0) totalRead += read; else break; } if (!dsts[i].hasRemaining()) { i++; } } return totalRead; }
Reads a sequence of bytes from this channel into a subsequence of the given buffers. @param dsts - The buffers into which bytes are to be transferred @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length. @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. @throws IOException if some other I/O error occurs
java
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
679
[ "dsts", "offset", "length" ]
true
8
8.24
apache/kafka
31,560
javadoc
false
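The read(ByteBuffer[], int, int) loop above is a classic scatter read: fill each buffer in turn and stop early when the source runs dry. A Python sketch of the same algorithm over memoryview slices:

import io

def scatter_read(source, buffers):
    """Fill each writable buffer in turn; return total bytes read."""
    total = 0
    for buf in buffers:
        view, filled = memoryview(buf), 0
        while filled < len(view):
            n = source.readinto(view[filled:])
            if not n:          # end of stream: stop scattering entirely
                return total
            filled += n
            total += n
    return total

src = io.BytesIO(b"abcdefgh")
bufs = [bytearray(3), bytearray(3), bytearray(5)]
assert scatter_read(src, bufs) == 8
assert bytes(bufs[0]) == b"abc" and bytes(bufs[2]) == b"gh\x00\x00\x00"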
destroy
@Override public void destroy() throws Exception { if (this.pool != null) { logger.debug("Closing Commons ObjectPool"); this.pool.close(); } }
Closes the underlying {@code ObjectPool} when destroying this object.
java
spring-aop/src/main/java/org/springframework/aop/target/CommonsPool2TargetSource.java
259
[]
void
true
2
6.08
spring-projects/spring-framework
59,386
javadoc
false
nullToEmpty
public static Short[] nullToEmpty(final Short[] array) { return nullTo(array, EMPTY_SHORT_OBJECT_ARRAY); }
Defensive programming technique to change a {@code null} reference to an empty one. <p> This method returns an empty array for a {@code null} input array. </p> <p> As a memory optimizing technique an empty array passed in will be overridden with the empty {@code public static} references in this class. </p> @param array the array to check for {@code null} or empty. @return the same array, {@code public static} empty array if {@code null} or empty input. @since 2.5
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
4,603
[ "array" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
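The memory optimization behind nullToEmpty — funnel every null or empty input onto one shared immutable constant — translates directly. A small Python sketch using a shared empty tuple:

EMPTY: tuple = ()  # one shared immutable constant for every empty result

def null_to_empty(array):
    """Return the input unchanged, or the shared EMPTY for None/empty."""
    return EMPTY if not array else array

assert null_to_empty(None) is EMPTY
assert null_to_empty(()) is EMPTY
assert null_to_empty((1, 2)) == (1, 2)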
getBeans
private static <T> Map<String, T> getBeans(ListableBeanFactory beanFactory, Class<T> type, @Nullable String qualifier) { return (!StringUtils.hasLength(qualifier)) ? beanFactory.getBeansOfType(type) : BeanFactoryAnnotationUtils.qualifiedBeansOfType(beanFactory, type, qualifier); }
Get the beans of the given type from the bean factory, restricted to those carrying the given qualifier when one is specified. @param beanFactory the bean factory to get the beans from @param type the type of beans to return @param qualifier the qualifier required on the beans or {@code null} @return the matching beans
java
core/spring-boot/src/main/java/org/springframework/boot/convert/ApplicationConversionService.java
344
[ "beanFactory", "type", "qualifier" ]
true
2
7.76
spring-projects/spring-boot
79,428
javadoc
false
duplicated
def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] A numpy array of boolean values indicating duplicate index values. See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(["llama", "cow", "llama", "beetle", "llama"]) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep="first") array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep="last") array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep)
Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] A numpy array of boolean values indicating duplicate index values. See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(["llama", "cow", "llama", "beetle", "llama"]) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep="first") array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep="last") array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True])
python
pandas/core/indexes/base.py
2,907
[ "self", "keep" ]
npt.NDArray[np.bool_]
true
2
7.28
pandas-dev/pandas
47,362
numpy
false
to_timestamp
def to_timestamp(self, freq=None, how: str = "start") -> DatetimeArray: """ Cast to DatetimeArray/Index. Parameters ---------- freq : str or DateOffset, optional Target frequency. The default is 'D' for week or longer, 's' otherwise. how : {'s', 'e', 'start', 'end'} Whether to use the start or end of the time period being converted. Returns ------- DatetimeArray/Index Timestamp representation of given Period-like object. See Also -------- PeriodIndex.day : The days of the period. PeriodIndex.from_fields : Construct a PeriodIndex from fields (year, month, day, etc.). PeriodIndex.from_ordinals : Construct a PeriodIndex from ordinals. PeriodIndex.hour : The hour of the period. PeriodIndex.minute : The minute of the period. PeriodIndex.month : The month as January=1, December=12. PeriodIndex.second : The second of the period. PeriodIndex.year : The year of the period. Examples -------- >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") >>> idx.to_timestamp() DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'], dtype='datetime64[ns]', freq='MS') The frequency will not be inferred if the index contains less than three elements, or if the values of index are not strictly monotonic: >>> idx = pd.PeriodIndex(["2023-01", "2023-02"], freq="M") >>> idx.to_timestamp() DatetimeIndex(['2023-01-01', '2023-02-01'], dtype='datetime64[ns]', freq=None) >>> idx = pd.PeriodIndex( ... ["2023-01", "2023-02", "2023-02", "2023-03"], freq="2M" ... ) >>> idx.to_timestamp() DatetimeIndex(['2023-01-01', '2023-02-01', '2023-02-01', '2023-03-01'], dtype='datetime64[ns]', freq=None) """ from pandas.core.arrays import DatetimeArray how = libperiod.validate_end_alias(how) end = how == "E" if end: if freq == "B" or self.freq == "B": # roll forward to ensure we land on B date adjust = Timedelta(1, "D") - Timedelta(1, "ns") return self.to_timestamp(how="start") + adjust else: adjust = Timedelta(1, "ns") return (self + self.freq).to_timestamp(how="start") - adjust if freq is None: freq_code = self._dtype._get_to_timestamp_base() dtype = PeriodDtypeBase(freq_code, 1) freq = dtype._freqstr base = freq_code else: freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code new_parr = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) dta = DatetimeArray._from_sequence(new_data, dtype=np.dtype("M8[ns]")) if self.freq.name == "B": # See if we can retain BDay instead of Day in cases where # len(self) is too small for infer_freq to distinguish between them diffs = libalgos.unique_deltas(self.asi8) if len(diffs) == 1: diff = diffs[0] if diff == self.dtype._n: dta._freq = self.freq elif diff == 1: dta._freq = self.freq.base # TODO: other cases? return dta else: dta = dta._with_freq("infer") if freq is not None: freq = to_offset(freq) if ( isinstance(dta.freq, Day) and not isinstance(freq, Day) and Timedelta(freq) == Timedelta(days=dta.freq.n) ): dta._freq = freq return dta
Cast to DatetimeArray/Index. Parameters ---------- freq : str or DateOffset, optional Target frequency. The default is 'D' for week or longer, 's' otherwise. how : {'s', 'e', 'start', 'end'} Whether to use the start or end of the time period being converted. Returns ------- DatetimeArray/Index Timestamp representation of given Period-like object. See Also -------- PeriodIndex.day : The days of the period. PeriodIndex.from_fields : Construct a PeriodIndex from fields (year, month, day, etc.). PeriodIndex.from_ordinals : Construct a PeriodIndex from ordinals. PeriodIndex.hour : The hour of the period. PeriodIndex.minute : The minute of the period. PeriodIndex.month : The month as January=1, December=12. PeriodIndex.second : The second of the period. PeriodIndex.year : The year of the period. Examples -------- >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M") >>> idx.to_timestamp() DatetimeIndex(['2023-01-01', '2023-02-01', '2023-03-01'], dtype='datetime64[ns]', freq='MS') The frequency will not be inferred if the index contains less than three elements, or if the values of index are not strictly monotonic: >>> idx = pd.PeriodIndex(["2023-01", "2023-02"], freq="M") >>> idx.to_timestamp() DatetimeIndex(['2023-01-01', '2023-02-01'], dtype='datetime64[ns]', freq=None) >>> idx = pd.PeriodIndex( ... ["2023-01", "2023-02", "2023-02", "2023-03"], freq="2M" ... ) >>> idx.to_timestamp() DatetimeIndex(['2023-01-01', '2023-02-01', '2023-02-01', '2023-03-01'], dtype='datetime64[ns]', freq=None)
python
pandas/core/arrays/period.py
758
[ "self", "freq", "how" ]
DatetimeArray
true
16
6.72
pandas-dev/pandas
47,362
numpy
false
getEntry
public Entry getEntry(CharSequence namePrefix, CharSequence name) { int nameHash = nameHash(namePrefix, name); int lookupIndex = getFirstLookupIndex(nameHash); int size = size(); while (lookupIndex >= 0 && lookupIndex < size && this.nameHashLookups[lookupIndex] == nameHash) { long pos = getCentralDirectoryFileHeaderRecordPos(lookupIndex); ZipCentralDirectoryFileHeaderRecord centralRecord = loadZipCentralDirectoryFileHeaderRecord(pos); if (hasName(lookupIndex, centralRecord, pos, namePrefix, name)) { return new Entry(lookupIndex, centralRecord); } lookupIndex++; } return null; }
Return the entry with the given name, if any. @param namePrefix an optional prefix for the name @param name the name of the entry to find @return the entry or {@code null}
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
210
[ "namePrefix", "name" ]
Entry
true
5
7.92
spring-projects/spring-boot
79,428
javadoc
false
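getEntry's lookup strategy — jump to the first index for the name's hash, then linearly scan while the stored hash still matches, confirming the name on each collision — works because the lookup table is sorted by hash. A sketch of the same scheme in Python (hypothetical and far simpler than the zip central directory):

import bisect

class HashIndex:
    """Entries sorted by hash; lookup scans the run of matching hashes."""

    def __init__(self, names):
        self._rows = sorted((hash(n), n) for n in names)
        self._hashes = [h for h, _ in self._rows]

    def get(self, name):
        h = hash(name)
        i = bisect.bisect_left(self._hashes, h)  # first index with this hash
        while i < len(self._rows) and self._rows[i][0] == h:
            if self._rows[i][1] == name:         # hashes can collide; confirm
                return self._rows[i][1]
            i += 1
        return None

idx = HashIndex(["META-INF/MANIFEST.MF", "BOOT-INF/classes/app.yml"])
assert idx.get("META-INF/MANIFEST.MF") == "META-INF/MANIFEST.MF"
assert idx.get("missing.txt") is None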