function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
getOptionsHelp
public Collection<OptionHelp> getOptionsHelp() { if (this.optionHelp == null) { OptionHelpFormatter formatter = new OptionHelpFormatter(); getParser().formatHelpWith(formatter); try { getParser().printHelpOn(new ByteArrayOutputStream()); } catch (Exception ex) { // Ignore and provide no hints } this.optionHelp = formatter.getOptionHelp(); } return this.optionHelp; }
Run the command using the specified parsed {@link OptionSet}. @param options the parsed option set @return an ExitStatus @throws Exception in case of errors
java
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/options/OptionHandler.java
134
[]
true
3
7.92
spring-projects/spring-boot
79,428
javadoc
false
_extract_task_identifiers
def _extract_task_identifiers( self, entity: str | BulkTaskInstanceBody ) -> tuple[str, str, str, int | None]: """ Extract task identifiers from an id or entity object. :param entity: Task identifier as string or BulkTaskInstanceBody object :return: tuple of (dag_id, dag_run_id, task_id, map_index) """ if isinstance(entity, str): dag_id = self.dag_id dag_run_id = self.dag_run_id task_id = entity map_index = None else: dag_id = entity.dag_id if entity.dag_id else self.dag_id dag_run_id = entity.dag_run_id if entity.dag_run_id else self.dag_run_id task_id = entity.task_id map_index = entity.map_index return dag_id, dag_run_id, task_id, map_index
Extract task identifiers from an id or entity object. :param entity: Task identifier as string or BulkTaskInstanceBody object :return: tuple of (dag_id, dag_run_id, task_id, map_index)
python
airflow-core/src/airflow/api_fastapi/core_api/services/public/task_instances.py
172
[ "self", "entity" ]
tuple[str, str, str, int | None]
true
5
7.44
apache/airflow
43,597
sphinx
false
asend_message
async def asend_message( self, queue_url: str, message_body: str, delay_seconds: int = 0, message_attributes: dict | None = None, message_group_id: str | None = None, message_deduplication_id: str | None = None, ) -> dict: """ Send message to the queue (async). .. seealso:: - :external+boto3:py:meth:`SQS.Client.send_message` :param queue_url: queue url :param message_body: the contents of the message :param delay_seconds: seconds to delay the message :param message_attributes: additional attributes for the message (default: None) :param message_group_id: This applies only to FIFO (first-in-first-out) queues. (default: None) :param message_deduplication_id: This applies only to FIFO (first-in-first-out) queues. :return: dict with the information about the message sent """ params = self._build_msg_params( queue_url=queue_url, message_body=message_body, delay_seconds=delay_seconds, message_attributes=message_attributes, message_group_id=message_group_id, message_deduplication_id=message_deduplication_id, ) async with await self.get_async_conn() as async_conn: return await async_conn.send_message(**params)
Send message to the queue (async). .. seealso:: - :external+boto3:py:meth:`SQS.Client.send_message` :param queue_url: queue url :param message_body: the contents of the message :param delay_seconds: seconds to delay the message :param message_attributes: additional attributes for the message (default: None) :param message_group_id: This applies only to FIFO (first-in-first-out) queues. (default: None) :param message_deduplication_id: This applies only to FIFO (first-in-first-out) queues. :return: dict with the information about the message sent
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/sqs.py
109
[ "self", "queue_url", "message_body", "delay_seconds", "message_attributes", "message_group_id", "message_deduplication_id" ]
dict
true
1
6.4
apache/airflow
43,597
sphinx
false
def_kernel
def def_kernel( self, inputs: list[IRNode], outputs: list[IRNode], names_str: str = "", input_reorder: Optional[list[int]] = None, ) -> str: """ Hook called from template code to generate function definition and needed args. Args: inputs: List of input IRNodes outputs: List of output IRNodes names_str: Comma separated list of input + output argument names. input_reorder: The actual order of input nodes. e.g. The template might have input argument defined as [X, W, Bias], and the actual input passed into this template could be [Bias, X, W]. In this case, the `input_reorder` would be [2, 0, 1]. additional_size_args: Additional size arguments for epilogue inputs """ # NB: name order matters here, it's used to match up offsets names = [x.strip() for x in names_str.strip().split(",")] if len(inputs) + len(outputs) != len(names): raise RuntimeError( f"{len(inputs) + len(outputs)=} != {len(names)=}, {inputs=}, {outputs=}, {names=}" ) if input_reorder is not None: assert len(inputs) == len(input_reorder) else: input_reorder = list(range(len(inputs))) for idx in input_reorder: name = names[idx] node = inputs[idx] if node is not None: self.named_nodes[name] = node self.args.input_buffers[node.get_name()] = name free_symbols: OrderedSet[Expr] = OrderedSet() for name, node in zip(names[len(inputs) : len(inputs) + len(outputs)], outputs): if node is not None: # NB: named nodes must be populated in the order of names self.named_nodes[name] = node self.args.output_buffers[node.get_name()] = name if name not in ( "X", "W", "Bias", "Y", ): # we handle these symbolic shapes explicitly for expr in itertools.chain(node.get_size(), node.get_stride()): if isinstance(expr, Expr): for s in expr.free_symbols: free_symbols.add(s) # type: ignore[arg-type] arg_defs, *_ = self.args.cpp_argdefs(DTYPE_TO_CUTLASS_TYPE) self.init_layout_args() size_vars = ["M", "N", "K", "B", "lda", "ldb", "ldc", "ldd"] size_vars.extend(str(s) for s in free_symbols) self.size_args.extend(free_symbols) size_args = 
[f"const int {s}" for s in size_vars] offset_args = [f"const int {name}_offset" for name in self.named_nodes] runtime_arg_decls = ",".join( [f"{arg.ty} {arg.name}" for arg in self.runtime_arg_info] ) if runtime_arg_decls: runtime_arg_decls += ", " signature = ( f"int {self.kernel_name}({', '.join(arg_defs + size_args + offset_args)},\ {runtime_arg_decls}{self._EXTRA_CPP_ARGS})" ) self.signature = signature return signature
Hook called from template code to generate function definition and needed args. Args: inputs: List of input IRNodes outputs: List of output IRNodes names_str: Comma separated list of input + output argument names. input_reorder: The actual order of input nodes. e.g. The template might have input argument defined as [X, W, Bias], and the actual input passed into this template could be [Bias, X, W]. In this case, the `input_reorder` would be [2, 0, 1]. additional_size_args: Additional size arguments for epilogue inputs
python
torch/_inductor/codegen/cuda/cuda_kernel.py
249
[ "self", "inputs", "outputs", "names_str", "input_reorder" ]
str
true
13
6.8
pytorch/pytorch
96,034
google
false
afterPropertiesSet
@Override public void afterPropertiesSet() { this.cache = (this.store != null ? new ConcurrentMapCache(this.name, this.store, this.allowNullValues) : new ConcurrentMapCache(this.name, this.allowNullValues)); }
Set whether to allow {@code null} values (adapting them to an internal null holder value). <p>Default is "true".
java
spring-context/src/main/java/org/springframework/cache/concurrent/ConcurrentMapCacheFactoryBean.java
86
[]
void
true
2
7.04
spring-projects/spring-framework
59,386
javadoc
false
getExitingScheduledExecutorService
@J2ktIncompatible @GwtIncompatible // TODO public static ScheduledExecutorService getExitingScheduledExecutorService( ScheduledThreadPoolExecutor executor) { return new Application().getExitingScheduledExecutorService(executor); }
Converts the given ScheduledThreadPoolExecutor into a ScheduledExecutorService that exits when the application is complete. It does so by using daemon threads and adding a shutdown hook to wait for their completion. <p>This method waits 120 seconds before continuing with JVM termination, even if the executor has not finished its work. <p>This is mainly for fixed thread pools. See {@link Executors#newScheduledThreadPool(int)}. @param executor the executor to modify to make sure it exits when the application is finished @return an unmodifiable version of the input which will not hang the JVM
java
android/guava/src/com/google/common/util/concurrent/MoreExecutors.java
192
[ "executor" ]
ScheduledExecutorService
true
1
6.72
google/guava
51,352
javadoc
false
linearize
def linearize(func: Callable, *primals) -> tuple[Any, Callable]: """ Returns the value of ``func`` at ``primals`` and linear approximation at ``primals``. Args: func (Callable): A Python function that takes one or more arguments. primals (Tensors): Positional arguments to ``func`` that must all be Tensors. These are the values at which the function is linearly approximated. Returns: Returns a ``(output, jvp_fn)`` tuple containing the output of ``func`` applied to ``primals`` and a function that computes the jvp of ``func`` evaluated at ``primals``. linearize is useful if jvp is to be computed multiple times at ``primals``. However, to achieve this, linearize saves intermediate computation and has higher memory requirements than directly applying `jvp`. So, if all the ``tangents`` are known, it maybe more efficient to compute vmap(jvp) instead of using linearize. .. note:: linearize evaluates ``func`` twice. Please file an issue for an implementation with a single evaluation. Example:: >>> import torch >>> from torch.func import linearize >>> def fn(x): ... return x.sin() ... >>> output, jvp_fn = linearize(fn, torch.zeros(3, 3)) >>> jvp_fn(torch.ones(3, 3)) tensor([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) >>> """ # Note: We evaluate `fn` twice. # Once for returning the output and other while # tracing the graph. # If this becomes a bottle-neck, we should update # make_fx such that it also returns the output. 
output = func(*primals) _, output_spec = tree_flatten(output) flat_primals, primals_argspec = tree_flatten(primals) # tangents for tracing flat_tangents = tuple(p.new_empty(()).expand_as(p) for p in flat_primals) # function to trace def trace_fn(flat_tangents): with fwAD.dual_level(): flat_duals = tuple( fwAD.make_dual(p, t) for p, t in zip(flat_primals, flat_tangents) ) duals = tree_unflatten(flat_duals, primals_argspec) output = func(*duals) tangents = tree_map_only( torch.Tensor, lambda dual: safe_unpack_dual(dual, False)[1], output ) return tangents jvp_graph = lazy_dynamo_disallow(make_fx)(trace_fn)(flat_tangents) const_folded_jvp_graph = lazy_dynamo_disallow(const_fold.split_const_subgraphs)( jvp_graph ) # Hold only the meta-data regarding the primals. flat_primals_shape = tuple(p.shape for p in flat_primals) flat_primals_device = tuple(p.device for p in flat_primals) flat_primals_dtype = tuple(p.dtype for p in flat_primals) def forward_ad_checks(flat_tangents): for idx, t in enumerate(flat_tangents): if t.shape != flat_primals_shape[idx]: msg = ( f"tangent:{idx} with shape {t.shape} in flattened " f"pytree doesn't match the shape {flat_primals_shape[idx]} " "of the corresponding primal." ) raise RuntimeError(msg) if t.device != flat_primals_device[idx]: msg = ( f"tangent:{idx} with device {t.device} in flattened " f"pytree doesn't match the device {flat_primals_device[idx]} " "of the corresponding primal." ) raise RuntimeError(msg) if t.dtype != flat_primals_dtype[idx]: msg = ( f"tangent:{idx} with dtype {t.dtype} in flattened " f"pytree doesn't match the dtype {flat_primals_dtype[idx]} " "of the corresponding primal." 
) raise RuntimeError(msg) # jvp_fn : callable to return # It takes care of checking the argspec of tangents, # calling the folded fx graph and unflattening fx graph output def jvp_fn(*tangents): flat_tangents, tangent_argspec = tree_flatten(tangents) if tangent_argspec != primals_argspec: raise RuntimeError( f"Expected the tangents {tangent_argspec} to have " f"the same argspec as the primals {primals_argspec}" ) forward_ad_checks(flat_tangents) flat_output = const_folded_jvp_graph(*flat_tangents) # const folded graph can return flat output, # so transform output. return tree_unflatten(flat_output, output_spec) return output, jvp_fn
Returns the value of ``func`` at ``primals`` and linear approximation at ``primals``. Args: func (Callable): A Python function that takes one or more arguments. primals (Tensors): Positional arguments to ``func`` that must all be Tensors. These are the values at which the function is linearly approximated. Returns: Returns a ``(output, jvp_fn)`` tuple containing the output of ``func`` applied to ``primals`` and a function that computes the jvp of ``func`` evaluated at ``primals``. linearize is useful if jvp is to be computed multiple times at ``primals``. However, to achieve this, linearize saves intermediate computation and has higher memory requirements than directly applying `jvp`. So, if all the ``tangents`` are known, it maybe more efficient to compute vmap(jvp) instead of using linearize. .. note:: linearize evaluates ``func`` twice. Please file an issue for an implementation with a single evaluation. Example:: >>> import torch >>> from torch.func import linearize >>> def fn(x): ... return x.sin() ... >>> output, jvp_fn = linearize(fn, torch.zeros(3, 3)) >>> jvp_fn(torch.ones(3, 3)) tensor([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) >>>
python
torch/_functorch/eager_transforms.py
1,680
[ "func" ]
tuple[Any, Callable]
true
6
9.44
pytorch/pytorch
96,034
google
false
findAnnotationOnBean
@Override public <A extends Annotation> @Nullable A findAnnotationOnBean(String beanName, Class<A> annotationType) throws NoSuchBeanDefinitionException { return findAnnotationOnBean(beanName, annotationType, true); }
Check whether the specified bean would need to be eagerly initialized in order to determine its type. @param factoryBeanName a factory-bean reference that the bean definition defines a factory method for @return whether eager initialization is necessary
java
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
799
[ "beanName", "annotationType" ]
A
true
1
6.4
spring-projects/spring-framework
59,386
javadoc
false
writeReplace
@Override @J2ktIncompatible @GwtIncompatible Object writeReplace() { return new SerializedForm(toArray()); }
Returns a view of this immutable list in reverse order. For example, {@code ImmutableList.of(1, 2, 3).reverse()} is equivalent to {@code ImmutableList.of(3, 2, 1)}. @return a view of this immutable list in reverse order @since 7.0
java
android/guava/src/com/google/common/collect/ImmutableList.java
714
[]
Object
true
1
6.88
google/guava
51,352
javadoc
false
columnMap
Map<C, Map<R, V>> columnMap();
Returns a view that associates each column key with the corresponding map from row keys to values. Changes to the returned map will update this table. The returned map does not support {@code put()} or {@code putAll()}, or {@code setValue()} on its entries. <p>In contrast, the maps returned by {@code columnMap().get()} have the same behavior as those returned by {@link #column}. Those maps may support {@code setValue()}, {@code put()}, and {@code putAll()}. @return a map view from each column key to a secondary map from row keys to values
java
android/guava/src/com/google/common/collect/Table.java
258
[]
true
1
6.64
google/guava
51,352
javadoc
false
isna
def isna(self) -> Self: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values. Returns ------- Series/DataFrame Mask of bool values for each element in Series/DataFrame that indicates whether an element is an NA value. See Also -------- Series.isnull : Alias of isna. DataFrame.isnull : Alias of isna. Series.notna : Boolean inverse of isna. DataFrame.notna : Boolean inverse of isna. Series.dropna : Omit axes labels with missing values. DataFrame.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame( ... dict( ... age=[5, 6, np.nan], ... born=[ ... pd.NaT, ... pd.Timestamp("1939-05-27"), ... pd.Timestamp("1940-04-25"), ... ], ... name=["Alfred", "Batman", ""], ... toy=[None, "Batmobile", "Joker"], ... ) ... ) >>> df age born name toy 0 5.0 NaT Alfred NaN 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.nan]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna")
Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values. Returns ------- Series/DataFrame Mask of bool values for each element in Series/DataFrame that indicates whether an element is an NA value. See Also -------- Series.isnull : Alias of isna. DataFrame.isnull : Alias of isna. Series.notna : Boolean inverse of isna. DataFrame.notna : Boolean inverse of isna. Series.dropna : Omit axes labels with missing values. DataFrame.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame( ... dict( ... age=[5, 6, np.nan], ... born=[ ... pd.NaT, ... pd.Timestamp("1939-05-27"), ... pd.Timestamp("1940-04-25"), ... ], ... name=["Alfred", "Batman", ""], ... toy=[None, "Batmobile", "Joker"], ... ) ... ) >>> df age born name toy 0 5.0 NaT Alfred NaN 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.nan]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool
python
pandas/core/generic.py
7,981
[ "self" ]
Self
true
1
7.2
pandas-dev/pandas
47,362
unknown
false
shortToBinary
public static boolean[] shortToBinary(final short src, final int srcPos, final boolean[] dst, final int dstPos, final int nBools) { if (0 == nBools) { return dst; } if (nBools - 1 + srcPos >= Short.SIZE) { throw new IllegalArgumentException("nBools - 1 + srcPos >= 16"); } assert nBools - 1 < Short.SIZE - srcPos; for (int i = 0; i < nBools; i++) { final int shift = i + srcPos; dst[dstPos + i] = (0x1 & src >> shift) != 0; } return dst; }
Converts a short into an array of boolean using the default (little-endian, LSB0) byte and bit ordering. @param src the short to convert. @param srcPos the position in {@code src}, in bits, from where to start the conversion. @param dst the destination array. @param dstPos the position in {@code dst} where to copy the result. @param nBools the number of booleans to copy to {@code dst}, must be smaller or equal to the width of the input (from srcPos to MSB). @return {@code dst}. @throws NullPointerException if {@code dst} is {@code null}. @throws IllegalArgumentException if {@code nBools - 1 + srcPos >= 16}. @throws ArrayIndexOutOfBoundsException if {@code dstPos + nBools > dst.length}.
java
src/main/java/org/apache/commons/lang3/Conversion.java
1,286
[ "src", "srcPos", "dst", "dstPos", "nBools" ]
true
4
8.08
apache/commons-lang
2,896
javadoc
false
intersection
def intersection(self, other, sort: bool = False): # default sort keyword is different here from other setops intentionally # done in GH#25063 """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like An Index or an array-like object containing elements to form the intersection with the original Index. sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Returns a new Index object with elements common to both the original Index and the `other` Index. See Also -------- Index.union : Form the union of two Index objects. Index.difference : Return a new Index with elements of index not in other. Index.isin : Return a boolean array where the index values are in values. 
Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if self.dtype != other.dtype: self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if not self.is_unique: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if self.dtype == dtype: # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif self.dtype != other.dtype: dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result)
Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like An Index or an array-like object containing elements to form the intersection with the original Index. sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Returns a new Index object with elements common to both the original Index and the `other` Index. See Also -------- Index.union : Form the union of two Index objects. Index.difference : Return a new Index with elements of index not in other. Index.isin : Return a boolean array where the index values are in values. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64')
python
pandas/core/indexes/base.py
3,245
[ "self", "other", "sort" ]
true
16
7.2
pandas-dev/pandas
47,362
numpy
false
doDoubleValue
@Override public double doDoubleValue() throws IOException { try { return parser.getDoubleValue(); } catch (IOException e) { throw handleParserException(e); } }
Handle parser exception depending on type. This converts known exceptions to XContentParseException and rethrows them.
java
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java
290
[]
true
2
6.08
elastic/elasticsearch
75,680
javadoc
false
parseResponse
public static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader) { try { return AbstractResponse.parseResponse(responseBuffer, requestHeader); } catch (BufferUnderflowException e) { throw new SchemaException("Buffer underflow while parsing response for request with header " + requestHeader, e); } catch (CorrelationIdMismatchException e) { if (SaslClientAuthenticator.isReserved(requestHeader.correlationId()) && !SaslClientAuthenticator.isReserved(e.responseCorrelationId())) throw new SchemaException("The response is unrelated to Sasl request since its correlation id is " + e.responseCorrelationId() + " and the reserved range for Sasl request is [ " + SaslClientAuthenticator.MIN_RESERVED_CORRELATION_ID + "," + SaslClientAuthenticator.MAX_RESERVED_CORRELATION_ID + "]"); else { throw e; } } }
Choose the node with the fewest outstanding requests which is at least eligible for connection. This method will prefer a node with an existing connection, but will potentially choose a node for which we don't yet have a connection if all existing connections are in use. If no connection exists, this method will prefer a node with least recent connection attempts. This method will never choose a node for which there is no existing connection and from which we have disconnected within the reconnect backoff period, or an active connection which is being throttled. @return The node with the fewest in-flight requests.
java
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
824
[ "responseBuffer", "requestHeader" ]
AbstractResponse
true
5
7.04
apache/kafka
31,560
javadoc
false
isin
def isin(element, test_elements, assume_unique=False, invert=False): """ Calculates `element in test_elements`, broadcasting over `element` only. The output is always a masked array of the same shape as `element`. See `numpy.isin` for more details. See Also -------- in1d : Flattened version of this function. numpy.isin : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) >>> test_elements = [0, 2] >>> np.ma.isin(element, test_elements) masked_array(data=[False, True, False, False, False, False], mask=False, fill_value=True) """ element = ma.asarray(element) return in1d(element, test_elements, assume_unique=assume_unique, invert=invert).reshape(element.shape)
Calculates `element in test_elements`, broadcasting over `element` only. The output is always a masked array of the same shape as `element`. See `numpy.isin` for more details. See Also -------- in1d : Flattened version of this function. numpy.isin : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) >>> test_elements = [0, 2] >>> np.ma.isin(element, test_elements) masked_array(data=[False, True, False, False, False, False], mask=False, fill_value=True)
python
numpy/ma/extras.py
1,434
[ "element", "test_elements", "assume_unique", "invert" ]
false
1
6
numpy/numpy
31,054
unknown
false
applyAsLong
long applyAsLong(long operand) throws E;
Applies this operator to the given operand. @param operand the operand @return the operator result @throws E Thrown when a consumer fails.
java
src/main/java/org/apache/commons/lang3/function/FailableLongUnaryOperator.java
76
[ "operand" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
times
function times(n, iteratee) { n = toInteger(n); if (n < 1 || n > MAX_SAFE_INTEGER) { return []; } var index = MAX_ARRAY_LENGTH, length = nativeMin(n, MAX_ARRAY_LENGTH); iteratee = getIteratee(iteratee); n -= MAX_ARRAY_LENGTH; var result = baseTimes(length, iteratee); while (++index < n) { iteratee(index); } return result; }
Invokes the iteratee `n` times, returning an array of the results of each invocation. The iteratee is invoked with one argument; (index). @static @since 0.1.0 @memberOf _ @category Util @param {number} n The number of times to invoke `iteratee`. @param {Function} [iteratee=_.identity] The function invoked per iteration. @returns {Array} Returns the array of results. @example _.times(3, String); // => ['0', '1', '2'] _.times(4, _.constant(0)); // => [0, 0, 0, 0]
javascript
lodash.js
16,247
[ "n", "iteratee" ]
false
4
7.52
lodash/lodash
61,490
jsdoc
false
addAotGeneratedInitializerIfNecessary
private void addAotGeneratedInitializerIfNecessary(List<ApplicationContextInitializer<?>> initializers) { if (NativeDetector.inNativeImage()) { NativeImageRequirementsException.throwIfNotMet(); } if (AotDetector.useGeneratedArtifacts()) { List<ApplicationContextInitializer<?>> aotInitializers = new ArrayList<>( initializers.stream().filter(AotApplicationContextInitializer.class::isInstance).toList()); if (aotInitializers.isEmpty()) { Assert.state(this.mainApplicationClass != null, "No application main class found"); String initializerClassName = this.mainApplicationClass.getName() + "__ApplicationContextInitializer"; if (!ClassUtils.isPresent(initializerClassName, getClassLoader())) { throw new AotInitializerNotFoundException(this.mainApplicationClass, initializerClassName); } aotInitializers.add(AotApplicationContextInitializer.forInitializerClasses(initializerClassName)); } initializers.removeAll(aotInitializers); initializers.addAll(0, aotInitializers); } }
Run the Spring application, creating and refreshing a new {@link ApplicationContext}. @param args the application arguments (usually passed from a Java main method) @return a running {@link ApplicationContext}
java
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
421
[ "initializers" ]
void
true
5
7.28
spring-projects/spring-boot
79,428
javadoc
false
generate
def generate( # type: ignore[override] self, name: str, description: str, input_key: str, layout_repr: str, input_tensor_meta: Union[TensorMeta, list[TensorMeta]], output_tensor_meta: Union[TensorMeta, list[TensorMeta]], **kwargs, ) -> CUDATemplateCaller: """ Generates the CUDA template caller object for the given GEMM template and operation. This CUDATemplateCaller may be used to call and benchmark the generated CUDA kernel in a standalone manner to enable Autotuning. Args: description: op name followed by swizzle. kwargs: Additional keyword arguments. Returns: A CUDATemplateCaller object representing the generated CUDA template caller. """ code, extra_args = self.generate_code_and_args( name=name, input_key=input_key, layout_repr=layout_repr, **kwargs, ) # not caching since kernel name is needed below kernel_hash = hashlib.sha256(code.encode("utf-8")).hexdigest()[:8] kernel_name = f"cutlass_{kernel_hash}" code = code.replace(self.name, kernel_name) # create the BenchmarkRequest bmreq = CUDABenchmarkRequest( kernel_name=kernel_name, input_tensor_meta=input_tensor_meta, output_tensor_meta=output_tensor_meta, extra_args=extra_args, source_code=code, ) # kwargs has "op" argument in case of CUTLASSGemmTemplate op = kwargs["op"] if not op: supports_epilogue_fusion = False else: # epilogue fusion is only supported for TMA kernels supports_epilogue_fusion = self.supports_epilogue_fusion(op) def make_kernel_render( template_node: CUDATemplateBuffer, epilogue_nodes: Optional[list[BaseSchedulerNode]] = None, ) -> tuple[CUDATemplateKernel, functools.partial[str]]: assert supports_epilogue_fusion or not epilogue_nodes, ( "epilogue fusion is not supported for this kernel" ) kernel = CUDATemplateKernel( kernel_name=str(Placeholder.KERNEL_NAME), runtime_arg_info=self.get_runtime_arg_info(), runtime_arg_values=self.get_runtime_arg_values(**kwargs), ) render = functools.partial( self.render, kernel=kernel, template_buffer_node=template_node, epilogue_nodes=epilogue_nodes, 
**kwargs, # includes "op" argument in case of CUTLASSGemmTemplate ) return kernel, render return CUDATemplateCaller( kernel_name, "cutlass_gemm", self.input_nodes, self.output_node.get_layout(), make_kernel_render, bmreq, supports_epilogue_fusion, self, kwargs, description, )
Generates the CUDA template caller object for the given GEMM template and operation. This CUDATemplateCaller may be used to call and benchmark the generated CUDA kernel in a standalone manner to enable Autotuning. Args: description: op name followed by swizzle. kwargs: Additional keyword arguments. Returns: A CUDATemplateCaller object representing the generated CUDA template caller.
python
torch/_inductor/codegen/cuda/cuda_template.py
168
[ "self", "name", "description", "input_key", "layout_repr", "input_tensor_meta", "output_tensor_meta" ]
CUDATemplateCaller
true
4
7.52
pytorch/pytorch
96,034
google
false
next
public char next() { return this.pos < this.in.length() ? this.in.charAt(this.pos++) : '\0'; }
Returns the character at the current position and advances the position by one, or the NUL character ({@code '\0'}) if the end of the input has been reached. @return the next character, or {@code '\0'} at end of input.
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
481
[]
true
2
8
spring-projects/spring-boot
79,428
javadoc
false
LoadBufferFromGCS
static int64_t LoadBufferFromGCS(const std::string& path, size_t offset, size_t buffer_size, char* buffer, tf_gcs_filesystem::GCSFile* gcs_file, TF_Status* status) { std::string bucket, object; ParseGCSPath(path, false, &bucket, &object, status); if (TF_GetCode(status) != TF_OK) return -1; auto stream = gcs_file->gcs_client.ReadObject( bucket, object, gcs::ReadRange(offset, offset + buffer_size)); TF_SetStatusFromGCSStatus(stream.status(), status); if ((TF_GetCode(status) != TF_OK) && (TF_GetCode(status) != TF_OUT_OF_RANGE)) { return -1; } int64_t read; auto content_length = stream.headers().find("content-length"); if (content_length == stream.headers().end()) { // When we read a file with offset that is bigger than the actual file size. // GCS will return an empty header (e.g no `content-length` header). In this // case, we will set read to `0` and continue. read = 0; } else if (!absl::SimpleAtoi(content_length->second, &read)) { TF_SetStatus(status, TF_UNKNOWN, "Could not get content-length header"); return -1; } // `TF_OUT_OF_RANGE` isn't considered as an error. So we clear it here. TF_SetStatus(status, TF_OK, ""); VLOG(1) << absl::StrFormat("Successful read of %s @ %u of size: %u", path, offset, read); stream.read(buffer, read); read = stream.gcount(); if (read < buffer_size) { // Check stat cache to see if we encountered an interrupted read. tf_gcs_filesystem::GcsFileStat stat; if (gcs_file->stat_cache->Lookup(path, &stat)) { if (offset + read < stat.base.length) { TF_SetStatus(status, TF_INTERNAL, absl::StrCat("File contents are inconsistent for file: ", path, " @ ", offset) .c_str()); } VLOG(2) << absl::StrFormat("Successful integrity check for: %s @ %u", path, offset); } } return read; }
Reads up to {@code buffer_size} bytes of the GCS object at {@code path}, starting at {@code offset}, into {@code buffer}. Returns the number of bytes read, or -1 on error (setting {@code status}); a read past the end of the object is treated as a zero-length read rather than an error, and a short read is cross-checked against the stat cache to detect an interrupted read.
cpp
tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.cc
128
[ "offset", "buffer_size" ]
true
10
6
tensorflow/tensorflow
192,880
doxygen
false
reorder_levels
def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- Series Type of caller with index as MultiIndex (new object). See Also -------- DataFrame.reorder_levels : Rearrange index or column levels using input ``order``. Examples -------- >>> arrays = [ ... np.array(["dog", "dog", "cat", "cat", "bird", "bird"]), ... np.array(["white", "black", "white", "black", "white", "black"]), ... ] >>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays) >>> s dog white 1 black 2 cat white 3 black 3 bird white 5 black 2 dtype: int64 >>> s.reorder_levels([1, 0]) white dog 1 black dog 2 white cat 3 black cat 3 white bird 5 black bird 2 dtype: int64 """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=False) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result
Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- Series Type of caller with index as MultiIndex (new object). See Also -------- DataFrame.reorder_levels : Rearrange index or column levels using input ``order``. Examples -------- >>> arrays = [ ... np.array(["dog", "dog", "cat", "cat", "bird", "bird"]), ... np.array(["white", "black", "white", "black", "white", "black"]), ... ] >>> s = pd.Series([1, 2, 3, 3, 5, 2], index=arrays) >>> s dog white 1 black 2 cat white 3 black 3 bird white 5 black 2 dtype: int64 >>> s.reorder_levels([1, 0]) white dog 1 black dog 2 white cat 3 black cat 3 white bird 5 black bird 2 dtype: int64
python
pandas/core/series.py
4,225
[ "self", "order" ]
Series
true
2
8.48
pandas-dev/pandas
47,362
numpy
false
parseCustomElement
public @Nullable BeanDefinition parseCustomElement(Element ele, @Nullable BeanDefinition containingBd) { String namespaceUri = getNamespaceURI(ele); if (namespaceUri == null) { return null; } NamespaceHandler handler = this.readerContext.getNamespaceHandlerResolver().resolve(namespaceUri); if (handler == null) { error("Unable to locate Spring NamespaceHandler for XML schema namespace [" + namespaceUri + "]", ele); return null; } return handler.parse(ele, new ParserContext(this.readerContext, this, containingBd)); }
Parse a custom element (outside the default namespace). @param ele the element to parse @param containingBd the containing bean definition (if any) @return the resulting bean definition
java
spring-beans/src/main/java/org/springframework/beans/factory/xml/BeanDefinitionParserDelegate.java
1,369
[ "ele", "containingBd" ]
BeanDefinition
true
3
7.6
spring-projects/spring-framework
59,386
javadoc
false
createCollection
@Override SortedSet<V> createCollection() { return new TreeSet<>(valueComparator); }
{@inheritDoc} <p>Creates an empty {@code TreeSet} for a collection of values for one key. @return a new {@code TreeSet} containing a collection of values for one key
java
android/guava/src/com/google/common/collect/TreeMultimap.java
139
[]
true
1
6.64
google/guava
51,352
javadoc
false
registrySuffixIndex
private int registrySuffixIndex() { int registrySuffixIndexLocal = registrySuffixIndexCache; if (registrySuffixIndexLocal == SUFFIX_NOT_INITIALIZED) { registrySuffixIndexCache = registrySuffixIndexLocal = findSuffixOfType(Optional.of(PublicSuffixType.REGISTRY)); } return registrySuffixIndexLocal; }
The index in the {@link #parts()} list at which the registry suffix begins. For example, for the domain name {@code myblog.blogspot.co.uk}, the value would be 2 (the index of the {@code co} part). The value is negative (specifically, {@link #NO_SUFFIX_FOUND}) if no registry suffix was found.
java
android/guava/src/com/google/common/net/InternetDomainName.java
195
[]
true
2
7.04
google/guava
51,352
javadoc
false
requestRejoinIfNecessary
public synchronized void requestRejoinIfNecessary(final String shortReason, final String fullReason) { if (!this.rejoinNeeded) { requestRejoin(shortReason, fullReason); } }
Request a rejoin of the group if a rejoin has not already been requested. @param shortReason brief reason for the rejoin, suitable for metrics/logging @param fullReason detailed reason for the rejoin
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
1,089
[ "shortReason", "fullReason" ]
void
true
2
6.4
apache/kafka
31,560
javadoc
false
findLastIndex
function findLastIndex(array, predicate, fromIndex) { var length = array == null ? 0 : array.length; if (!length) { return -1; } var index = length - 1; if (fromIndex !== undefined) { index = toInteger(fromIndex); index = fromIndex < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1); } return baseFindIndex(array, getIteratee(predicate, 3), index, true); }
This method is like `_.findIndex` except that it iterates over elements of `collection` from right to left. @static @memberOf _ @since 2.0.0 @category Array @param {Array} array The array to inspect. @param {Function} [predicate=_.identity] The function invoked per iteration. @param {number} [fromIndex=array.length-1] The index to search from. @returns {number} Returns the index of the found element, else `-1`. @example var users = [ { 'user': 'barney', 'active': true }, { 'user': 'fred', 'active': false }, { 'user': 'pebbles', 'active': false } ]; _.findLastIndex(users, function(o) { return o.user == 'pebbles'; }); // => 2 // The `_.matches` iteratee shorthand. _.findLastIndex(users, { 'user': 'barney', 'active': true }); // => 0 // The `_.matchesProperty` iteratee shorthand. _.findLastIndex(users, ['active', false]); // => 2 // The `_.property` iteratee shorthand. _.findLastIndex(users, 'active'); // => 0
javascript
lodash.js
7,399
[ "array", "predicate", "fromIndex" ]
false
5
7.2
lodash/lodash
61,490
jsdoc
false
callWithTimeout
@ParametricNullness private <T extends @Nullable Object> T callWithTimeout( Callable<T> callable, long timeoutDuration, TimeUnit timeoutUnit, boolean amInterruptible) throws Exception { checkNotNull(callable); checkNotNull(timeoutUnit); checkPositiveTimeout(timeoutDuration); Future<T> future = executor.submit(callable); try { return amInterruptible ? future.get(timeoutDuration, timeoutUnit) : getUninterruptibly(future, timeoutDuration, timeoutUnit); } catch (InterruptedException e) { future.cancel(true); throw e; } catch (ExecutionException e) { throw throwCause(e, true /* combineStackTraces */); } catch (TimeoutException e) { future.cancel(true); throw new UncheckedTimeoutException(e); } }
Creates a TimeLimiter instance using the given executor service to execute method calls. <p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills up, any time callers spend waiting for a thread may count toward their time limit, and in this case the call may even time out before the target method is ever invoked. @param executor the ExecutorService that will execute the method calls on the target objects; for example, a {@link Executors#newCachedThreadPool()}. @since 22.0
java
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
110
[ "callable", "timeoutDuration", "timeoutUnit", "amInterruptible" ]
T
true
5
6.72
google/guava
51,352
javadoc
false
join
public StringBuilder join(final StringBuilder stringBuilder, final Iterable<T> elements) { return joinI(stringBuilder, prefix, suffix, delimiter, appender, elements); }
Joins stringified objects from the given Iterable into a StringBuilder. @param stringBuilder The target. @param elements The source. @return The given StringBuilder.
java
src/main/java/org/apache/commons/lang3/AppendableJoiner.java
272
[ "stringBuilder", "elements" ]
StringBuilder
true
1
6.48
apache/commons-lang
2,896
javadoc
false
getTemplateLoaderForPath
protected TemplateLoader getTemplateLoaderForPath(String templateLoaderPath) { if (isPreferFileSystemAccess()) { // Try to load via the file system, fall back to SpringTemplateLoader // (for hot detection of template changes, if possible). try { Resource path = getResourceLoader().getResource(templateLoaderPath); File file = path.getFile(); // will fail if not resolvable in the file system if (logger.isDebugEnabled()) { logger.debug( "Template loader path [" + path + "] resolved to file path [" + file.getAbsolutePath() + "]"); } return new FileTemplateLoader(file); } catch (Exception ex) { if (logger.isDebugEnabled()) { logger.debug("Cannot resolve template loader path [" + templateLoaderPath + "] to [java.io.File]: using SpringTemplateLoader as fallback", ex); } return new SpringTemplateLoader(getResourceLoader(), templateLoaderPath); } } else { // Always load via SpringTemplateLoader (without hot detection of template changes). logger.debug("File system access not preferred: using SpringTemplateLoader"); return new SpringTemplateLoader(getResourceLoader(), templateLoaderPath); } }
Determine a FreeMarker {@link TemplateLoader} for the given path. <p>Default implementation creates either a {@link FileTemplateLoader} or a {@link SpringTemplateLoader}. @param templateLoaderPath the path to load templates from @return an appropriate {@code TemplateLoader} @see freemarker.cache.FileTemplateLoader @see SpringTemplateLoader
java
spring-context-support/src/main/java/org/springframework/ui/freemarker/FreeMarkerConfigurationFactory.java
363
[ "templateLoaderPath" ]
TemplateLoader
true
5
7.6
spring-projects/spring-framework
59,386
javadoc
false
readFile
private static String readFile(final String envVarFile, final String key) { try { final byte[] bytes = Files.readAllBytes(Paths.get(envVarFile)); final String content = new String(bytes, Charset.defaultCharset()); // Split by null byte character final String[] lines = content.split(String.valueOf(CharUtils.NUL)); final String prefix = key + "="; // @formatter:off return Arrays.stream(lines) .filter(line -> line.startsWith(prefix)) .map(line -> line.split("=", 2)) .map(keyValue -> keyValue[1]) .findFirst() .orElse(null); // @formatter:on } catch (final IOException e) { return null; } }
Tests whether the {@code /proc/N/environ} file at the given path string contains a specific line prefix. @param envVarFile The path to a /proc/N/environ file. @param key The env var key to find. @return value The env var value or null.
java
src/main/java/org/apache/commons/lang3/RuntimeEnvironment.java
118
[ "envVarFile", "key" ]
String
true
2
8.24
apache/commons-lang
2,896
javadoc
false
maybe_box_native
def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: """ If passed a scalar cast the scalar to a python native type. Parameters ---------- value : scalar or Series Returns ------- scalar or Series """ if is_float(value): value = float(value) elif is_integer(value): value = int(value) elif is_bool(value): value = bool(value) elif isinstance(value, (np.datetime64, np.timedelta64)): value = maybe_box_datetimelike(value) elif value is NA: value = None return value
If passed a scalar cast the scalar to a python native type. Parameters ---------- value : scalar or Series Returns ------- scalar or Series
python
pandas/core/dtypes/cast.py
186
[ "value" ]
Scalar | None | NAType
true
6
6.88
pandas-dev/pandas
47,362
numpy
false
logInvalidating
private void logInvalidating(CacheOperationContext context, CacheEvictOperation operation, @Nullable Object key) { if (logger.isTraceEnabled()) { logger.trace("Invalidating " + (key != null ? "cache key [" + key + "]" : "entire cache") + " for operation " + operation + " on method " + context.metadata.method); } }
Log, at trace level, the invalidation performed for the given cache-evict operation. @param context the context of the operation being invoked @param operation the cache evict operation @param key the cache key being invalidated, or {@code null} if the entire cache is cleared
java
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
709
[ "context", "operation", "key" ]
void
true
3
7.92
spring-projects/spring-framework
59,386
javadoc
false
describe_endpoint_config
def describe_endpoint_config(self, name: str) -> dict: """ Get the endpoint config info associated with the name. .. seealso:: - :external+boto3:py:meth:`SageMaker.Client.describe_endpoint_config` :param name: the name of the endpoint config :return: A dict contains all the endpoint config info """ return self.get_conn().describe_endpoint_config(EndpointConfigName=name)
Get the endpoint config info associated with the name. .. seealso:: - :external+boto3:py:meth:`SageMaker.Client.describe_endpoint_config` :param name: the name of the endpoint config :return: A dict contains all the endpoint config info
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
688
[ "self", "name" ]
dict
true
1
6.24
apache/airflow
43,597
sphinx
false
perform_heartbeat
def perform_heartbeat( job: Job, heartbeat_callback: Callable[[Session], None], only_if_necessary: bool ) -> None: """ Perform heartbeat for the Job passed to it,optionally checking if it is necessary. :param job: job to perform heartbeat for :param heartbeat_callback: callback to run by the heartbeat :param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for triggerer for example) """ seconds_remaining: float = 0.0 if job.latest_heartbeat and job.heartrate: seconds_remaining = job.heartrate - (timezone.utcnow() - job.latest_heartbeat).total_seconds() if seconds_remaining > 0 and only_if_necessary: return job.heartbeat(heartbeat_callback=heartbeat_callback)
Perform heartbeat for the Job passed to it,optionally checking if it is necessary. :param job: job to perform heartbeat for :param heartbeat_callback: callback to run by the heartbeat :param only_if_necessary: only heartbeat if it is necessary (i.e. if there are things to run for triggerer for example)
python
airflow-core/src/airflow/jobs/job.py
418
[ "job", "heartbeat_callback", "only_if_necessary" ]
None
true
5
6.72
apache/airflow
43,597
sphinx
false
BASIC_UNESCAPE
public static String[][] BASIC_UNESCAPE() { return BASIC_UNESCAPE.clone(); }
Reverse of {@link #BASIC_ESCAPE()} for unescaping purposes. @return the mapping table.
java
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
391
[]
true
1
6.48
apache/commons-lang
2,896
javadoc
false
registerMetric
synchronized KafkaMetric registerMetric(KafkaMetric metric) { MetricName metricName = metric.metricName(); KafkaMetric existingMetric = this.metrics.putIfAbsent(metricName, metric); if (existingMetric != null) { return existingMetric; } // newly added metric for (MetricsReporter reporter : reporters) { try { reporter.metricChange(metric); } catch (Exception e) { log.error("Error when registering metric on {}", reporter.getClass().getName(), e); } } log.trace("Registered metric named {}", metricName); return null; }
Register a metric if not present or return the already existing metric with the same name. When a metric is newly registered, this method returns null @param metric The KafkaMetric to register @return the existing metric with the same name or null
java
clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
588
[ "metric" ]
KafkaMetric
true
3
7.92
apache/kafka
31,560
javadoc
false
argwhere
def argwhere(a): """ Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. This array will have shape ``(N, a.ndim)`` where ``N`` is the number of non-zero items. See Also -------- where, nonzero Notes ----- ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, but produces a result of the correct shape for a 0D array. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``nonzero(a)`` instead. Examples -------- >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.argwhere(x>1) array([[0, 2], [1, 0], [1, 1], [1, 2]]) """ # nonzero does not behave well on 0d, so promote to 1d if np.ndim(a) == 0: a = shape_base.atleast_1d(a) # then remove the added dimension return argwhere(a)[:, :0] return transpose(nonzero(a))
Find the indices of array elements that are non-zero, grouped by element. Parameters ---------- a : array_like Input data. Returns ------- index_array : (N, a.ndim) ndarray Indices of elements that are non-zero. Indices are grouped by element. This array will have shape ``(N, a.ndim)`` where ``N`` is the number of non-zero items. See Also -------- where, nonzero Notes ----- ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, but produces a result of the correct shape for a 0D array. The output of ``argwhere`` is not suitable for indexing arrays. For this purpose use ``nonzero(a)`` instead. Examples -------- >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], [3, 4, 5]]) >>> np.argwhere(x>1) array([[0, 2], [1, 0], [1, 1], [1, 2]])
python
numpy/_core/numeric.py
625
[ "a" ]
false
2
7.84
numpy/numpy
31,054
numpy
false
withSuppliedValue
public Bindable<T> withSuppliedValue(@Nullable Supplier<T> suppliedValue) { return new Bindable<>(this.type, this.boxedType, suppliedValue, this.annotations, this.bindRestrictions, this.bindMethod); }
Create an updated {@link Bindable} instance with a value supplier. @param suppliedValue the supplier for the value @return an updated {@link Bindable}
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
214
[ "suppliedValue" ]
true
1
6.48
spring-projects/spring-boot
79,428
javadoc
false
visitGetAccessor
function visitGetAccessor(node: GetAccessorDeclaration, parent: ClassLikeDeclaration | ObjectLiteralExpression) { if (!(node.transformFlags & TransformFlags.ContainsTypeScript)) { return node; } if (!shouldEmitAccessorDeclaration(node)) { return undefined; } let modifiers = isClassLike(parent) ? visitNodes(node.modifiers, visitor, isModifierLike) : visitNodes(node.modifiers, decoratorElidingVisitor, isModifierLike); modifiers = injectClassElementTypeMetadata(modifiers, node, parent); return factory.updateGetAccessorDeclaration( node, modifiers, visitPropertyNameOfClassElement(node), visitParameterList(node.parameters, visitor, context), /*type*/ undefined, visitFunctionBody(node.body, visitor, context) || factory.createBlock([]), ); }
Visits a get accessor declaration of a class or object literal. The accessor is elided if it should not be emitted; otherwise its modifiers, name, parameters, and body are visited and its type annotation is removed. @param node The get accessor declaration node. @param parent The containing class-like declaration or object literal expression.
typescript
src/compiler/transformers/ts.ts
1,502
[ "node", "parent" ]
false
5
6.08
microsoft/TypeScript
107,154
jsdoc
false
concat
public static char[] concat(char[]... arrays) { long length = 0; for (char[] array : arrays) { length += array.length; } char[] result = new char[checkNoOverflow(length)]; int pos = 0; for (char[] array : arrays) { System.arraycopy(array, 0, result, pos, array.length); pos += array.length; } return result; }
Returns the values from each provided array combined into a single array. For example, {@code concat(new char[] {a, b}, new char[] {}, new char[] {c}} returns the array {@code {a, b, c}}. @param arrays zero or more {@code char} arrays @return a single array containing all the values from the source arrays, in order @throws IllegalArgumentException if the total number of elements in {@code arrays} does not fit in an {@code int}
java
android/guava/src/com/google/common/primitives/Chars.java
280
[]
true
1
6.56
google/guava
51,352
javadoc
false
mode
@SafeVarargs public static <T> T mode(final T... items) { if (ArrayUtils.isNotEmpty(items)) { final HashMap<T, MutableInt> occurrences = new HashMap<>(items.length); for (final T t : items) { ArrayUtils.increment(occurrences, t); } T result = null; int max = 0; for (final Map.Entry<T, MutableInt> e : occurrences.entrySet()) { final int cmp = e.getValue().intValue(); if (cmp == max) { result = null; } else if (cmp > max) { max = cmp; result = e.getKey(); } } return result; } return null; }
Finds the most frequently occurring item. @param <T> type of values processed by this method. @param items to check. @return most populous T, {@code null} if non-unique or no items supplied. @since 3.0.1
java
src/main/java/org/apache/commons/lang3/ObjectUtils.java
1,122
[]
T
true
4
8.24
apache/commons-lang
2,896
javadoc
false
isConditionPassing
private boolean isConditionPassing(CacheOperationContext context, @Nullable Object result) { boolean passing = context.isConditionPassing(result); if (!passing && logger.isTraceEnabled()) { logger.trace("Cache condition failed on method " + context.metadata.method + " for operation " + context.metadata.operation); } return passing; }
Determine whether the condition of the cache operation passes for the given result, logging at trace level when it does not. @param context the context of the operation being invoked @param result the result of the method invocation, or {@code null} @return {@code true} if the condition passes
java
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
733
[ "context", "result" ]
true
3
6.08
spring-projects/spring-framework
59,386
javadoc
false
createBind
function createBind(func, bitmask, thisArg) { var isBind = bitmask & WRAP_BIND_FLAG, Ctor = createCtor(func); function wrapper() { var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; return fn.apply(isBind ? thisArg : this, arguments); } return wrapper; }
Creates a function that wraps `func` to invoke it with the optional `this` binding of `thisArg`. @private @param {Function} func The function to wrap. @param {number} bitmask The bitmask flags. See `createWrap` for more details. @param {*} [thisArg] The `this` binding of `func`. @returns {Function} Returns the new wrapped function.
javascript
lodash.js
5,024
[ "func", "bitmask", "thisArg" ]
false
5
6.08
lodash/lodash
61,490
jsdoc
false
reduction_prefix_array
def reduction_prefix_array( acc_var: Union[str, CSEVariable], acc_type: str, reduction_type: str, dtype: torch.dtype, len: Union[str, int], init_fn, ): """ MSVC don't support dynamic array(VLA). So we use std::unique_ptr here. Ref: https://stackoverflow.com/questions/56555406/creating-dynamic-sized-array-using-msvc-c-compiler MSVC is the only one compiler without VLA. support. Since MSVC can't get good performance here. We just use unique_ptr make it works on MSVC. For other compilers, we continue to use VLA to get best performance. """ code_buffer = IndentedBuffer() acc_decl = ( f"auto {acc_var}_arr = std::make_unique<{acc_type}[]>({len});" if cpp_builder.is_msvc_cl() else f"{acc_type} {acc_var}_arr[{len}];" ) code_buffer.writeline(f"{acc_decl}") code_buffer.writelines( [ f"for (int i = 0; i < {len}; i++)", "{", f" {acc_var}_arr[i] = {init_fn(reduction_type, dtype)};", "}", ], ) return code_buffer
MSVC don't support dynamic array(VLA). So we use std::unique_ptr here. Ref: https://stackoverflow.com/questions/56555406/creating-dynamic-sized-array-using-msvc-c-compiler MSVC is the only one compiler without VLA. support. Since MSVC can't get good performance here. We just use unique_ptr make it works on MSVC. For other compilers, we continue to use VLA to get best performance.
python
torch/_inductor/codegen/cpp.py
316
[ "acc_var", "acc_type", "reduction_type", "dtype", "len", "init_fn" ]
true
2
6.72
pytorch/pytorch
96,034
unknown
false
of
static AutowiredArguments of(@Nullable Object[] arguments) { Assert.notNull(arguments, "'arguments' must not be null"); return () -> arguments; }
Factory method to create a new {@link AutowiredArguments} instance from the given object array. @param arguments the arguments @return a new {@link AutowiredArguments} instance
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredArguments.java
85
[ "arguments" ]
AutowiredArguments
true
1
6
spring-projects/spring-framework
59,386
javadoc
false
unmodifiableListMultimap
public static <K extends @Nullable Object, V extends @Nullable Object> ListMultimap<K, V> unmodifiableListMultimap(ListMultimap<K, V> delegate) { if (delegate instanceof UnmodifiableListMultimap || delegate instanceof ImmutableListMultimap) { return delegate; } return new UnmodifiableListMultimap<>(delegate); }
Returns an unmodifiable view of the specified {@code ListMultimap}. Query operations on the returned multimap "read through" to the specified multimap, and attempts to modify the returned multimap, either directly or through the multimap's views, result in an {@code UnsupportedOperationException}. <p>The returned multimap will be serializable if the specified multimap is serializable. @param delegate the multimap for which an unmodifiable view is to be returned @return an unmodifiable view of the specified multimap
java
android/guava/src/com/google/common/collect/Multimaps.java
1,000
[ "delegate" ]
true
3
7.44
google/guava
51,352
javadoc
false
bindExportDeclaration
function bindExportDeclaration(node: ExportDeclaration) { if (!container.symbol || !container.symbol.exports) { // Export * in some sort of block construct bindAnonymousDeclaration(node, SymbolFlags.ExportStar, getDeclarationName(node)!); } else if (!node.exportClause) { // All export * declarations are collected in an __export symbol declareSymbol(container.symbol.exports, container.symbol, node, SymbolFlags.ExportStar, SymbolFlags.None); } else if (isNamespaceExport(node.exportClause)) { // declareSymbol walks up parents to find name text, parent _must_ be set // but won't be set by the normal binder walk until `bindChildren` later on. setParent(node.exportClause, node); declareSymbol(container.symbol.exports, container.symbol, node.exportClause, SymbolFlags.Alias, SymbolFlags.AliasExcludes); } }
Binds an export declaration. An `export *` inside a block construct gets an anonymous declaration; otherwise a bare `export *` is collected under the container's `__export` symbol, and a namespace export (`export * as ns`) is declared as an alias on the container's exports. @param node - The export declaration to bind.
typescript
src/compiler/binder.ts
3,164
[ "node" ]
false
7
6.08
microsoft/TypeScript
107,154
jsdoc
false
invocableClone
MethodInvocation invocableClone(@Nullable Object... arguments);
Create a clone of this object. If cloning is done before {@code proceed()} is invoked on this object, {@code proceed()} can be invoked once per clone to invoke the joinpoint (and the rest of the advice chain) more than once. @param arguments the arguments that the cloned invocation is supposed to use, overriding the original arguments @return an invocable clone of this invocation. {@code proceed()} can be called once per clone.
java
spring-aop/src/main/java/org/springframework/aop/ProxyMethodInvocation.java
61
[]
MethodInvocation
true
1
6.8
spring-projects/spring-framework
59,386
javadoc
false
toString
@Override public String toString() { StringBuilder builder = new StringBuilder(getOrDeduceName(this)); if (this.servletNames.isEmpty() && this.urlPatterns.isEmpty()) { builder.append(" urls=").append(Arrays.toString(DEFAULT_URL_MAPPINGS)); } else { if (!this.servletNames.isEmpty()) { builder.append(" servlets=").append(this.servletNames); } if (!this.urlPatterns.isEmpty()) { builder.append(" urls=").append(this.urlPatterns); } } builder.append(" order=").append(getOrder()); return builder.toString(); }
Returns the filter name that will be registered. @return the filter name @since 3.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/web/servlet/AbstractFilterRegistrationBean.java
280
[]
String
true
5
8.24
spring-projects/spring-boot
79,428
javadoc
false
createAcls
default CreateAclsResult createAcls(Collection<AclBinding> acls) { return createAcls(acls, new CreateAclsOptions()); }
This is a convenience method for {@link #createAcls(Collection, CreateAclsOptions)} with default options. See the overload for more details. <p> This operation is supported by brokers with version 0.11.0.0 or higher. @param acls The ACLs to create @return The CreateAclsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
393
[ "acls" ]
CreateAclsResult
true
1
6.32
apache/kafka
31,560
javadoc
false
equals
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if ((obj == null) || (getClass() != obj.getClass())) { return false; } StandardConfigDataReference other = (StandardConfigDataReference) obj; return this.resourceLocation.equals(other.resourceLocation); }
Create a new {@link StandardConfigDataReference} instance. @param configDataLocation the original location passed to the resolver @param directory the directory of the resource or {@code null} if the reference is to a file @param root the root of the resource location @param profile the profile being loaded @param extension the file extension for the resource @param propertySourceLoader the property source loader that should be used for this reference
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataReference.java
91
[ "obj" ]
true
4
6.4
spring-projects/spring-boot
79,428
javadoc
false
unsatisfiedNonSimpleProperties
protected String[] unsatisfiedNonSimpleProperties(AbstractBeanDefinition mbd, BeanWrapper bw) { Set<String> result = new TreeSet<>(); PropertyValues pvs = mbd.getPropertyValues(); PropertyDescriptor[] pds = bw.getPropertyDescriptors(); for (PropertyDescriptor pd : pds) { if (pd.getWriteMethod() != null && !isExcludedFromDependencyCheck(pd) && !pvs.contains(pd.getName()) && !BeanUtils.isSimpleProperty(pd.getPropertyType())) { result.add(pd.getName()); } } return StringUtils.toStringArray(result); }
Return an array of non-simple bean properties that are unsatisfied. These are probably unsatisfied references to other beans in the factory. Does not include simple properties like primitives or Strings. @param mbd the merged bean definition the bean was created with @param bw the BeanWrapper the bean was created with @return an array of bean property names @see org.springframework.beans.BeanUtils#isSimpleProperty
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
1,558
[ "mbd", "bw" ]
true
5
7.44
spring-projects/spring-framework
59,386
javadoc
false
erase
@CanIgnoreReturnValue public @Nullable V erase(@Nullable Object rowKey, @Nullable Object columnKey) { Integer rowIndex = rowKeyToIndex.get(rowKey); Integer columnIndex = columnKeyToIndex.get(columnKey); if (rowIndex == null || columnIndex == null) { return null; } return set(rowIndex, columnIndex, null); }
Associates the value {@code null} with the specified keys, assuming both keys are valid. If either key is null or isn't among the keys provided during construction, this method has no effect. <p>This method is equivalent to {@code put(rowKey, columnKey, null)} when both provided keys are valid. @param rowKey row key of mapping to be erased @param columnKey column key of mapping to be erased @return the value previously associated with the keys, or {@code null} if no mapping existed for the keys
java
android/guava/src/com/google/common/collect/ArrayTable.java
512
[ "rowKey", "columnKey" ]
V
true
3
7.92
google/guava
51,352
javadoc
false
randomBoolean
public boolean randomBoolean() { return random().nextBoolean(); }
Generates a random boolean value. @return the random boolean. @since 3.16.0
java
src/main/java/org/apache/commons/lang3/RandomUtils.java
303
[]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
shift
def shift( self, periods: int | Sequence[int] = 1, freq=None, fill_value=lib.no_default, suffix: str | None = None, ): """ Shift each group by periods observations. If freq is passed, the index will be increased using the periods and the freq. Parameters ---------- periods : int | Sequence[int], default 1 Number of periods to shift. If a list of values, shift each group by each period. freq : str, optional Frequency string. fill_value : optional The scalar value to use for newly introduced missing values. .. versionchanged:: 2.1.0 Will raise a ``ValueError`` if ``freq`` is provided too. suffix : str, optional A string to add to each shifted column if there are multiple periods. Ignored otherwise. Returns ------- Series or DataFrame Object shifted within each group. See Also -------- Index.shift : Shift values of Index. Examples -------- For SeriesGroupBy: >>> lst = ["a", "a", "b", "b"] >>> ser = pd.Series([1, 2, 3, 4], index=lst) >>> ser a 1 a 2 b 3 b 4 dtype: int64 >>> ser.groupby(level=0).shift(1) a NaN a 1.0 b NaN b 3.0 dtype: float64 For DataFrameGroupBy: >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] >>> df = pd.DataFrame( ... data, ... columns=["a", "b", "c"], ... index=["tuna", "salmon", "catfish", "goldfish"], ... ) >>> df a b c tuna 1 2 3 salmon 1 5 6 catfish 2 5 8 goldfish 2 6 9 >>> df.groupby("a").shift(1) b c tuna NaN NaN salmon 2.0 3.0 catfish NaN NaN goldfish 5.0 8.0 """ if is_list_like(periods): periods = cast(Sequence, periods) if len(periods) == 0: raise ValueError("If `periods` is an iterable, it cannot be empty.") from pandas.core.reshape.concat import concat add_suffix = True else: if not is_integer(periods): raise TypeError( f"Periods must be integer, but {periods} is {type(periods)}." 
) if suffix: raise ValueError("Cannot specify `suffix` if `periods` is an int.") periods = [cast(int, periods)] add_suffix = False shifted_dataframes = [] for period in periods: if not is_integer(period): raise TypeError( f"Periods must be integer, but {period} is {type(period)}." ) period = cast(int, period) if freq is not None: f = lambda x: x.shift( period, freq, 0, # axis fill_value, ) shifted = self._python_apply_general( f, self._selected_obj, is_transform=True ) else: if fill_value is lib.no_default: fill_value = None ids = self._grouper.ids ngroups = self._grouper.ngroups res_indexer = np.zeros(len(ids), dtype=np.int64) libgroupby.group_shift_indexer(res_indexer, ids, ngroups, period) obj = self._obj_with_exclusions shifted = obj._reindex_with_indexers( {0: (obj.index, res_indexer)}, fill_value=fill_value, allow_dups=True, ) if add_suffix: if isinstance(shifted, Series): shifted = cast(NDFrameT, shifted.to_frame()) shifted = shifted.add_suffix( f"{suffix}_{period}" if suffix else f"_{period}" ) shifted_dataframes.append(cast(Union[Series, DataFrame], shifted)) return ( shifted_dataframes[0] if len(shifted_dataframes) == 1 else concat(shifted_dataframes, axis=1, sort=False) )
Shift each group by periods observations. If freq is passed, the index will be increased using the periods and the freq. Parameters ---------- periods : int | Sequence[int], default 1 Number of periods to shift. If a list of values, shift each group by each period. freq : str, optional Frequency string. fill_value : optional The scalar value to use for newly introduced missing values. .. versionchanged:: 2.1.0 Will raise a ``ValueError`` if ``freq`` is provided too. suffix : str, optional A string to add to each shifted column if there are multiple periods. Ignored otherwise. Returns ------- Series or DataFrame Object shifted within each group. See Also -------- Index.shift : Shift values of Index. Examples -------- For SeriesGroupBy: >>> lst = ["a", "a", "b", "b"] >>> ser = pd.Series([1, 2, 3, 4], index=lst) >>> ser a 1 a 2 b 3 b 4 dtype: int64 >>> ser.groupby(level=0).shift(1) a NaN a 1.0 b NaN b 3.0 dtype: float64 For DataFrameGroupBy: >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] >>> df = pd.DataFrame( ... data, ... columns=["a", "b", "c"], ... index=["tuna", "salmon", "catfish", "goldfish"], ... ) >>> df a b c tuna 1 2 3 salmon 1 5 6 catfish 2 5 8 goldfish 2 6 9 >>> df.groupby("a").shift(1) b c tuna NaN NaN salmon 2.0 3.0 catfish NaN NaN goldfish 5.0 8.0
python
pandas/core/groupby/groupby.py
5,116
[ "self", "periods", "freq", "fill_value", "suffix" ]
true
15
8.24
pandas-dev/pandas
47,362
numpy
false
setException
@CanIgnoreReturnValue protected boolean setException(Throwable throwable) { Object valueToSet = new Failure(checkNotNull(throwable)); if (casValue(this, null, valueToSet)) { complete(this, /* callInterruptTask= */ false); return true; } return false; }
Sets the failed result of this {@code Future} unless this {@code Future} has already been cancelled or set (including {@linkplain #setFuture set asynchronously}). When a call to this method returns, the {@code Future} is guaranteed to be {@linkplain #isDone done} <b>only if</b> the call was accepted (in which case it returns {@code true}). If it returns {@code false}, the {@code Future} may have previously been set asynchronously, in which case its result may not be known yet. That result, though not yet known, cannot be overridden by a call to a {@code set*} method, only by a call to {@link #cancel}. <p>Beware of completing a future while holding a lock. Its listeners may do slow work or acquire other locks, risking deadlocks. @param throwable the exception to be used as the failed result @return true if the attempt was accepted, completing the {@code Future}
java
android/guava/src/com/google/common/util/concurrent/AbstractFuture.java
512
[ "throwable" ]
true
2
8.08
google/guava
51,352
javadoc
false
skip
static <T> boolean skip(@Nullable T extracted) { return extracted == SKIP; }
Return if the extracted value should be skipped. @param <T> the value type @param extracted the value to test @return if the value is to be skipped
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
762
[ "extracted" ]
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
filled
def filled(a, fill_value=None): """ Return input as an `~numpy.ndarray`, with masked values replaced by `fill_value`. If `a` is not a `MaskedArray`, `a` itself is returned. If `a` is a `MaskedArray` with no masked values, then ``a.data`` is returned. If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to ``a.fill_value``. Parameters ---------- a : MaskedArray or array_like An input object. fill_value : array_like, optional. Can be scalar or non-scalar. If non-scalar, the resulting filled array should be broadcastable over input array. Default is None. Returns ------- a : ndarray The filled array. See Also -------- compressed Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> x.filled() array([[999999, 1, 2], [999999, 4, 5], [ 6, 7, 8]]) >>> x.filled(fill_value=333) array([[333, 1, 2], [333, 4, 5], [ 6, 7, 8]]) >>> x.filled(fill_value=np.arange(3)) array([[0, 1, 2], [0, 4, 5], [6, 7, 8]]) """ if hasattr(a, 'filled'): return a.filled(fill_value) elif isinstance(a, ndarray): # Should we check for contiguity ? and a.flags['CONTIGUOUS']: return a elif isinstance(a, dict): return np.array(a, 'O') else: return np.array(a)
Return input as an `~numpy.ndarray`, with masked values replaced by `fill_value`. If `a` is not a `MaskedArray`, `a` itself is returned. If `a` is a `MaskedArray` with no masked values, then ``a.data`` is returned. If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to ``a.fill_value``. Parameters ---------- a : MaskedArray or array_like An input object. fill_value : array_like, optional. Can be scalar or non-scalar. If non-scalar, the resulting filled array should be broadcastable over input array. Default is None. Returns ------- a : ndarray The filled array. See Also -------- compressed Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> x.filled() array([[999999, 1, 2], [999999, 4, 5], [ 6, 7, 8]]) >>> x.filled(fill_value=333) array([[333, 1, 2], [333, 4, 5], [ 6, 7, 8]]) >>> x.filled(fill_value=np.arange(3)) array([[0, 1, 2], [0, 4, 5], [6, 7, 8]])
python
numpy/ma/core.py
619
[ "a", "fill_value" ]
false
5
7.76
numpy/numpy
31,054
numpy
false
contains
public boolean contains(CompletableEvent<?> event) { return event != null && tracked.contains(event); }
It is possible for the {@link AsyncKafkaConsumer#close() consumer to close} before completing the processing of all the events in the queue. In this case, we need to {@link CompletableFuture#completeExceptionally(Throwable) expire} any remaining events. <p/> Check each of the {@link #add(CompletableEvent) previously-added} {@link CompletableEvent completable events}, and for any that are incomplete, expire them. Also check the core event queue for any incomplete events and likewise expire them. <p/> <em>Note</em>: because this is called in the context of {@link AsyncKafkaConsumer#close() closing consumer}, don't take the deadline into consideration, just close it regardless. @param events Events from a queue that have not yet been tracked that also need to be reviewed @return The number of events that were expired
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java
159
[ "event" ]
true
2
7.52
apache/kafka
31,560
javadoc
false
tryLockMemory
@Override public void tryLockMemory() { Handle process = kernel.GetCurrentProcess(); // By default, Windows limits the number of pages that can be locked. // Thus, we need to first increase the working set size of the JVM by // the amount of memory we wish to lock, plus a small overhead (1MB). long size = getMemoryMXBean().getHeapMemoryUsage().getInit() + (1024 * 1024); if (kernel.SetProcessWorkingSetSize(process, size, size) == false) { logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", kernel.GetLastError()); } else { var memInfo = kernel.newMemoryBasicInformation(); var address = memInfo.BaseAddress(); while (kernel.VirtualQueryEx(process, address, memInfo) != 0) { boolean lockable = memInfo.State() == MEM_COMMIT && (memInfo.Protect() & PAGE_NOACCESS) != PAGE_NOACCESS && (memInfo.Protect() & PAGE_GUARD) != PAGE_GUARD; if (lockable) { kernel.VirtualLock(memInfo.BaseAddress(), memInfo.RegionSize()); } // Move to the next region address = address.add(memInfo.RegionSize()); } isMemoryLocked = true; } // note: no need to close the process handle because GetCurrentProcess returns a pseudo handle }
Constant for LimitFlags, indicating a process limit has been set
java
libs/native/src/main/java/org/elasticsearch/nativeaccess/WindowsNativeAccess.java
61
[]
void
true
6
6.24
elastic/elasticsearch
75,680
javadoc
false
tryComputeIndentationForListItem
function tryComputeIndentationForListItem(startPos: number, endPos: number, parentStartLine: number, range: TextRange, inheritedIndentation: number): number { if ( rangeOverlapsWithStartEnd(range, startPos, endPos) || rangeContainsStartEnd(range, startPos, endPos) /* Not to miss zero-range nodes e.g. JsxText */ ) { if (inheritedIndentation !== Constants.Unknown) { return inheritedIndentation; } } else { const startLine = sourceFile.getLineAndCharacterOfPosition(startPos).line; const startLinePosition = getLineStartPositionForPosition(startPos, sourceFile); const column = SmartIndenter.findFirstNonWhitespaceColumn(startLinePosition, startPos, sourceFile, options); if (startLine !== parentStartLine || startPos === column) { // Use the base indent size if it is greater than // the indentation of the inherited predecessor. const baseIndentSize = SmartIndenter.getBaseIndentation(options); return baseIndentSize > column ? baseIndentSize : column; } } return Constants.Unknown; }
Tries to compute the indentation for a list element. If list element is not in range then function will pick its actual indentation so it can be pushed downstream as inherited indentation. If list element is in the range - its indentation will be equal to inherited indentation from its predecessors.
typescript
src/services/formatting/formatting.ts
592
[ "startPos", "endPos", "parentStartLine", "range", "inheritedIndentation" ]
true
8
6
microsoft/TypeScript
107,154
jsdoc
false
findTopicNameInGlobalOrLocalCache
private Optional<String> findTopicNameInGlobalOrLocalCache(Uuid topicId) { String nameFromMetadataCache = metadata.topicNames().getOrDefault(topicId, null); if (nameFromMetadataCache != null) { // Add topic name to local cache, so it can be reused if included in a next target // assignment if metadata cache not available. assignedTopicNamesCache.put(topicId, nameFromMetadataCache); return Optional.of(nameFromMetadataCache); } else { // Topic ID was not found in metadata. Check if the topic name is in the local // cache of topics currently assigned. This will avoid a metadata request in the // case where the metadata cache may have been flushed right before the // revocation of a previously assigned topic. String nameFromSubscriptionCache = assignedTopicNamesCache.getOrDefault(topicId, null); return Optional.ofNullable(nameFromSubscriptionCache); } }
Look for topic in the global metadata cache. If found, add it to the local cache and return it. If not found, look for it in the local metadata cache. Return empty if not found in any of the two.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
1,102
[ "topicId" ]
true
2
6
apache/kafka
31,560
javadoc
false
registerDeprecationIfNecessary
private void registerDeprecationIfNecessary(@Nullable AnnotatedElement element) { if (element == null) { return; } register(element.getAnnotation(Deprecated.class)); if (element instanceof Class<?> type) { registerDeprecationIfNecessary(type.getEnclosingClass()); } }
Return the currently registered warnings. @return the warnings
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/CodeWarnings.java
142
[ "element" ]
void
true
3
6.4
spring-projects/spring-framework
59,386
javadoc
false
_is_all_dates
def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. """ if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values))
Whether or not the index values only consist of dates.
python
pandas/core/indexes/base.py
2,551
[ "self" ]
bool
true
4
6
pandas-dev/pandas
47,362
unknown
false
intToHex
function intToHex(int: number) { const hex = int.toString(16); return hex.length === 1 ? `0${hex}` : hex; }
Converts a color from CSS hex format to CSS rgb format. @param color - Hex color, i.e. #nnn or #nnnnnn @returns A CSS rgb color string @beta
typescript
packages/grafana-data/src/themes/colorManipulator.ts
56
[ "int" ]
false
2
7.36
grafana/grafana
71,362
jsdoc
false
setitem_datetimelike_compat
def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other): """ Parameters ---------- values : np.ndarray num_set : int For putmask, this is mask.sum() other : Any """ if values.dtype == object: dtype, _ = infer_dtype_from(other) if lib.is_np_dtype(dtype, "mM"): # https://github.com/numpy/numpy/issues/12550 # timedelta64 will incorrectly cast to int if not is_list_like(other): other = [other] * num_set else: other = list(other) return other
Parameters ---------- values : np.ndarray num_set : int For putmask, this is mask.sum() other : Any
python
pandas/core/array_algos/putmask.py
130
[ "values", "num_set", "other" ]
true
5
6.4
pandas-dev/pandas
47,362
numpy
false
checkAndRemoveCompletedAcknowledgements
private boolean checkAndRemoveCompletedAcknowledgements() { boolean areAnyAcksLeft = false; Iterator<Map.Entry<Integer, Tuple<AcknowledgeRequestState>>> iterator = acknowledgeRequestStates.entrySet().iterator(); while (iterator.hasNext()) { Map.Entry<Integer, Tuple<AcknowledgeRequestState>> acknowledgeRequestStatePair = iterator.next(); boolean areAsyncAcksLeft = true, areSyncAcksLeft = true; if (!isRequestStateInProgress(acknowledgeRequestStatePair.getValue().getAsyncRequest())) { acknowledgeRequestStatePair.getValue().setAsyncRequest(null); areAsyncAcksLeft = false; } if (!areRequestStatesInProgress(acknowledgeRequestStatePair.getValue().getSyncRequestQueue())) { acknowledgeRequestStatePair.getValue().nullifySyncRequestQueue(); areSyncAcksLeft = false; } if (!isRequestStateInProgress(acknowledgeRequestStatePair.getValue().getCloseRequest())) { acknowledgeRequestStatePair.getValue().setCloseRequest(null); } if (areAsyncAcksLeft || areSyncAcksLeft) { areAnyAcksLeft = true; } else if (acknowledgeRequestStatePair.getValue().getCloseRequest() == null) { iterator.remove(); } } if (!acknowledgeRequestStates.isEmpty()) areAnyAcksLeft = true; return areAnyAcksLeft; }
Prunes the empty acknowledgementRequestStates in {@link #acknowledgeRequestStates} @return Returns true if there are still any acknowledgements left to be processed.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
479
[]
true
9
7.28
apache/kafka
31,560
javadoc
false
read
def read(self, nrows: int | None = None) -> pd.DataFrame: """Read observations from SAS Xport file, returning as data frame. Parameters ---------- nrows : int Number of rows to read from data file; if None, read whole file. Returns ------- A DataFrame. """ if nrows is None: nrows = self.nobs read_lines = min(nrows, self.nobs - self._lines_read) read_len = read_lines * self.record_length if read_len <= 0: self.close() raise StopIteration raw = self.filepath_or_buffer.read(read_len) data = np.frombuffer(raw, dtype=self._dtype, count=read_lines) df_data = {} for j, x in enumerate(self.columns): vec = data["s" + str(j)] ntype = self.fields[j]["ntype"] if ntype == "numeric": vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"]) miss = self._missing_double(vec) v = _parse_float_vec(vec) v[miss] = np.nan elif self.fields[j]["ntype"] == "char": v = [y.rstrip() for y in vec] if self._encoding is not None: v = [y.decode(self._encoding) for y in v] df_data.update({x: v}) df = pd.DataFrame(df_data) if self._index is None: df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines)) else: df = df.set_index(self._index) self._lines_read += read_lines return df
Read observations from SAS Xport file, returning as data frame. Parameters ---------- nrows : int Number of rows to read from data file; if None, read whole file. Returns ------- A DataFrame.
python
pandas/io/sas/sas_xport.py
452
[ "self", "nrows" ]
pd.DataFrame
true
9
6.88
pandas-dev/pandas
47,362
numpy
false
transformAndSpreadElements
function transformAndSpreadElements(elements: NodeArray<Expression>, isArgumentList: boolean, multiLine: boolean, hasTrailingComma: boolean): Expression { // When there is no leading SpreadElement: // // [source] // [a, ...b, c] // // [output (downlevelIteration)] // __spreadArray(__spreadArray([a], __read(b)), [c]) // // [output] // __spreadArray(__spreadArray([a], b), [c]) // // When there *is* a leading SpreadElement: // // [source] // [...a, b] // // [output (downlevelIteration)] // __spreadArray(__spreadArray([], __read(a)), [b]) // // [output] // __spreadArray(__spreadArray([], a), [b]) // // NOTE: We use `isPackedArrayLiteral` below rather than just `isArrayLiteral` // because ES2015 spread will replace _missing_ array elements with `undefined`, // so we cannot just use an array as is. For example: // // `[1, ...[2, , 3]]` becomes `[1, 2, undefined, 3]` // // However, for packed array literals (i.e., an array literal with no OmittedExpression // elements), we can use the array as-is. // Map spans of spread expressions into their expressions and spans of other // expressions into an array literal. const numElements = elements.length; const segments = flatten<SpreadSegment>( // As we visit each element, we return one of two functions to use as the "key": // - `visitSpanOfSpreads` for one or more contiguous `...` spread expressions, i.e. `...a, ...b` in `[1, 2, ...a, ...b]` // - `visitSpanOfNonSpreads` for one or more contiguous non-spread elements, i.e. `1, 2`, in `[1, 2, ...a, ...b]` spanMap(elements, partitionSpread, (partition, visitPartition, _start, end) => visitPartition(partition, multiLine, hasTrailingComma && end === numElements)), ); if (segments.length === 1) { const firstSegment = segments[0]; // If we don't need a unique copy, then we are spreading into an argument list for // a CallExpression or NewExpression. 
When using `--downlevelIteration`, we need // to coerce this into an array for use with `apply`, so we will use the code path // that follows instead. if ( isArgumentList && !compilerOptions.downlevelIteration || isPackedArrayLiteral(firstSegment.expression) // see NOTE (above) || isCallToHelper(firstSegment.expression, "___spreadArray" as __String) ) { return firstSegment.expression; } } const helpers = emitHelpers(); const startsWithSpread = segments[0].kind !== SpreadSegmentKind.None; let expression: Expression = startsWithSpread ? factory.createArrayLiteralExpression() : segments[0].expression; for (let i = startsWithSpread ? 0 : 1; i < segments.length; i++) { const segment = segments[i]; // If this is for an argument list, it doesn't matter if the array is packed or sparse expression = helpers.createSpreadArrayHelper( expression, segment.expression, segment.kind === SpreadSegmentKind.UnpackedSpread && !isArgumentList, ); } return expression; }
Transforms an array of Expression nodes that contains a SpreadExpression. @param elements The array of Expression nodes. @param isArgumentList A value indicating whether to ensure that the result is a fresh array. This should be `false` when spreading into an `ArrayLiteral`, and `true` when spreading into an argument list. @param multiLine A value indicating whether the result should be emitted on multiple lines.
typescript
src/compiler/transformers/es2015.ts
4,633
[ "elements", "isArgumentList", "multiLine", "hasTrailingComma" ]
true
11
6.8
microsoft/TypeScript
107,154
jsdoc
false
initializeAllParameterDetails
private static List<CacheParameterDetail> initializeAllParameterDetails(Method method) { int parameterCount = method.getParameterCount(); List<CacheParameterDetail> result = new ArrayList<>(parameterCount); for (int i = 0; i < parameterCount; i++) { CacheParameterDetail detail = new CacheParameterDetail(method, i); result.add(detail); } return result; }
Construct a new {@code AbstractJCacheOperation}. @param methodDetails the {@link CacheMethodDetails} related to the cached method @param cacheResolver the cache resolver to resolve regular caches
java
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/AbstractJCacheOperation.java
68
[ "method" ]
true
2
6.08
spring-projects/spring-framework
59,386
javadoc
false
quantile_with_mask
def quantile_with_mask( values: np.ndarray, mask: npt.NDArray[np.bool_], fill_value, qs: npt.NDArray[np.float64], interpolation: str, ) -> np.ndarray: """ Compute the quantiles of the given values for each quantile in `qs`. Parameters ---------- values : np.ndarray For ExtensionArray, this is _values_for_factorize()[0] mask : np.ndarray[bool] mask = isna(values) For ExtensionArray, this is computed before calling _value_for_factorize fill_value : Scalar The value to interpret fill NA entries with For ExtensionArray, this is _values_for_factorize()[1] qs : np.ndarray[float64] interpolation : str Type of interpolation Returns ------- np.ndarray Notes ----- Assumes values is already 2D. For ExtensionArray this means np.atleast_2d has been called on _values_for_factorize()[0] Quantile is computed along axis=1. """ assert values.shape == mask.shape if values.ndim == 1: # unsqueeze, operate, re-squeeze values = np.atleast_2d(values) mask = np.atleast_2d(mask) res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation) return res_values[0] assert values.ndim == 2 is_empty = values.shape[1] == 0 if is_empty: # create the array of na_values # 2d len(values) * len(qs) flat = np.full(len(qs), fill_value) result = np.repeat(flat, len(values)).reshape(len(values), len(qs)) else: result = _nanquantile( values, qs, na_value=fill_value, mask=mask, interpolation=interpolation, ) result = np.asarray(result) result = result.T return result
Compute the quantiles of the given values for each quantile in `qs`. Parameters ---------- values : np.ndarray For ExtensionArray, this is _values_for_factorize()[0] mask : np.ndarray[bool] mask = isna(values) For ExtensionArray, this is computed before calling _value_for_factorize fill_value : Scalar The value to interpret fill NA entries with For ExtensionArray, this is _values_for_factorize()[1] qs : np.ndarray[float64] interpolation : str Type of interpolation Returns ------- np.ndarray Notes ----- Assumes values is already 2D. For ExtensionArray this means np.atleast_2d has been called on _values_for_factorize()[0] Quantile is computed along axis=1.
python
pandas/core/array_algos/quantile.py
44
[ "values", "mask", "fill_value", "qs", "interpolation" ]
np.ndarray
true
4
6.32
pandas-dev/pandas
47,362
numpy
false
stop
@Override public void stop() throws SchedulingException { if (this.scheduler != null) { try { this.scheduler.standby(); } catch (SchedulerException ex) { throw new SchedulingException("Could not stop Quartz Scheduler", ex); } } }
Start the Quartz Scheduler, respecting the "startupDelay" setting. @param scheduler the Scheduler to start @param startupDelay the number of seconds to wait before starting the Scheduler asynchronously
java
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerFactoryBean.java
798
[]
void
true
3
6.24
spring-projects/spring-framework
59,386
javadoc
false
merge
protected abstract T merge(Supplier<T> existing, T additional);
Merge any additional elements into the existing aggregate. @param existing the supplier for the existing value @param additional the additional elements to merge @return the merged result
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/AggregateBinder.java
83
[ "existing", "additional" ]
T
true
1
6.48
spring-projects/spring-boot
79,428
javadoc
false
_is_method_overridden
def _is_method_overridden(self, method_name: str) -> bool: """Checks if a method is overridden in the NamedTuple subclass. Args: method_name (str): The name of the method to check. Returns: bool: True if the method is overridden in the subclass, False otherwise. Raises: ValueError: If the NamedTuple class does not inherit from both Tuple and Object. """ if len(self.tuple_cls.__mro__) < 3: raise ValueError("NamedTuple should inherit from Tuple and Object.") if getattr(self.tuple_cls, method_name, None) == getattr( self.tuple_cls.__mro__[-3], method_name, None ): return False return True
Checks if a method is overridden in the NamedTuple subclass. Args: method_name (str): The name of the method to check. Returns: bool: True if the method is overridden in the subclass, False otherwise. Raises: ValueError: If the NamedTuple class does not inherit from both Tuple and Object.
python
torch/_dynamo/variables/lists.py
1,467
[ "self", "method_name" ]
bool
true
3
8.08
pytorch/pytorch
96,034
google
false
produce_sbom_for_application_via_cdxgen_server
def produce_sbom_for_application_via_cdxgen_server( job: SbomApplicationJob, output: Output | None, github_token: str | None, port_map: dict[str, int] | None = None, ) -> tuple[int, str]: """ Produces SBOM for application using cdxgen server. :param job: Job to run :param output: Output to use :param github_token: GitHub token to use for downloading files` :param port_map map of process name to port - making sure that one process talks to one server in case parallel processing is used :return: tuple with exit code and output """ if port_map is None: port = 9090 else: port = port_map[multiprocessing.current_process().name] get_console(output=output).print(f"[info]Using port {port}") return job.produce(output, port, github_token)
Produces SBOM for application using cdxgen server. :param job: Job to run :param output: Output to use :param github_token: GitHub token to use for downloading files` :param port_map map of process name to port - making sure that one process talks to one server in case parallel processing is used :return: tuple with exit code and output
python
dev/breeze/src/airflow_breeze/utils/cdxgen.py
520
[ "job", "output", "github_token", "port_map" ]
tuple[int, str]
true
3
7.76
apache/airflow
43,597
sphinx
false
_value_with_fmt
def _value_with_fmt( self, val ) -> tuple[ int | float | bool | str | datetime.datetime | datetime.date, str | None ]: """ Convert numpy types to Python types for the Excel writers. Parameters ---------- val : object Value to be written into cells Returns ------- Tuple with the first element being the converted value and the second being an optional format """ fmt = None if is_integer(val): val = int(val) elif is_float(val): val = float(val) elif is_bool(val): val = bool(val) elif is_decimal(val): val = Decimal(val) elif isinstance(val, datetime.datetime): fmt = self._datetime_format elif isinstance(val, datetime.date): fmt = self._date_format elif isinstance(val, datetime.timedelta): val = val.total_seconds() / 86400 fmt = "0" else: val = str(val) # GH#56954 # Excel's limitation on cell contents is 32767 characters # xref https://support.microsoft.com/en-au/office/excel-specifications-and-limits-1672b34d-7043-467e-8e27-269d656771c3 if len(val) > 32767: warnings.warn( f"Cell contents too long ({len(val)}), " "truncated to 32767 characters", UserWarning, stacklevel=find_stack_level(), ) return val, fmt
Convert numpy types to Python types for the Excel writers. Parameters ---------- val : object Value to be written into cells Returns ------- Tuple with the first element being the converted value and the second being an optional format
python
pandas/io/excel/_base.py
1,328
[ "self", "val" ]
tuple[ int | float | bool | str | datetime.datetime | datetime.date, str | None ]
true
10
6.72
pandas-dev/pandas
47,362
numpy
false
_from_dataframe
def _from_dataframe(df: DataFrameXchg, allow_copy: bool = True) -> pd.DataFrame: """ Build a ``pd.DataFrame`` from the DataFrame interchange object. Parameters ---------- df : DataFrameXchg Object supporting the interchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame """ pandas_dfs = [] for chunk in df.get_chunks(): pandas_df = protocol_df_chunk_to_pandas(chunk) pandas_dfs.append(pandas_df) if not allow_copy and len(pandas_dfs) > 1: raise RuntimeError( "To join chunks a copy is required which is forbidden by allow_copy=False" ) if not pandas_dfs: pandas_df = protocol_df_chunk_to_pandas(df) elif len(pandas_dfs) == 1: pandas_df = pandas_dfs[0] else: pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False) index_obj = df.metadata.get("pandas.index", None) if index_obj is not None: pandas_df.index = index_obj return pandas_df
Build a ``pd.DataFrame`` from the DataFrame interchange object. Parameters ---------- df : DataFrameXchg Object supporting the interchange protocol, i.e. `__dataframe__` method. allow_copy : bool, default: True Whether to allow copying the memory to perform the conversion (if false then zero-copy approach is requested). Returns ------- pd.DataFrame
python
pandas/core/interchange/from_dataframe.py
139
[ "df", "allow_copy" ]
pd.DataFrame
true
8
6.4
pandas-dev/pandas
47,362
numpy
false
perform_krb181_workaround
def perform_krb181_workaround(principal: str): """ Workaround for Kerberos 1.8.1. :param principal: principal name :return: None """ cmdv: list[str] = [ conf.get_mandatory_value("kerberos", "kinit_path"), "-c", conf.get_mandatory_value("kerberos", "ccache"), "-R", ] # Renew ticket_cache log.info("Renewing kerberos ticket to work around kerberos 1.8.1: %s", " ".join(cmdv)) ret = subprocess.call(cmdv, close_fds=True) if ret != 0: principal = f"{principal or conf.get('kerberos', 'principal')}/{get_hostname()}" ccache = conf.get("kerberos", "ccache") log.error( "Couldn't renew kerberos ticket in order to work around Kerberos 1.8.1 issue. Please check that " "the ticket for '%s' is still renewable:\n $ kinit -f -c %s\nIf the 'renew until' date is the " "same as the 'valid starting' date, the ticket cannot be renewed. Please check your KDC " "configuration, and the ticket renewal policy (maxrenewlife) for the '%s' and `krbtgt' " "principals.", principal, ccache, principal, ) return ret
Workaround for Kerberos 1.8.1. :param principal: principal name :return: None
python
airflow-core/src/airflow/security/kerberos.py
138
[ "principal" ]
true
3
7.92
apache/airflow
43,597
sphinx
false
cloneWith
function cloneWith(value, customizer) { customizer = typeof customizer == 'function' ? customizer : undefined; return baseClone(value, CLONE_SYMBOLS_FLAG, customizer); }
This method is like `_.clone` except that it accepts `customizer` which is invoked to produce the cloned value. If `customizer` returns `undefined`, cloning is handled by the method instead. The `customizer` is invoked with up to four arguments; (value [, index|key, object, stack]). @static @memberOf _ @since 4.0.0 @category Lang @param {*} value The value to clone. @param {Function} [customizer] The function to customize cloning. @returns {*} Returns the cloned value. @see _.cloneDeepWith @example function customizer(value) { if (_.isElement(value)) { return value.cloneNode(false); } } var el = _.cloneWith(document.body, customizer); console.log(el === document.body); // => false console.log(el.nodeName); // => 'BODY' console.log(el.childNodes.length); // => 0
javascript
lodash.js
11,171
[ "value", "customizer" ]
false
2
6.96
lodash/lodash
61,490
jsdoc
false
resetPositionsIfNeeded
CompletableFuture<Void> resetPositionsIfNeeded() { Map<TopicPartition, AutoOffsetResetStrategy> partitionAutoOffsetResetStrategyMap; try { partitionAutoOffsetResetStrategyMap = offsetFetcherUtils.getOffsetResetStrategyForPartitions(); } catch (Exception e) { CompletableFuture<Void> result = new CompletableFuture<>(); result.completeExceptionally(e); return result; } if (partitionAutoOffsetResetStrategyMap.isEmpty()) return CompletableFuture.completedFuture(null); return sendListOffsetsRequestsAndResetPositions(partitionAutoOffsetResetStrategyMap); }
Reset offsets for all assigned partitions that require it. Offsets will be reset with timestamps according to the reset strategy defined for each partition. This will generate ListOffsets requests for the partitions and timestamps, and enqueue them to be sent on the next call to {@link #poll(long)}. <p/> When a response is received, positions are updated in-memory, on the subscription state. If an error is received in the response, it will be saved to be thrown on the next call to this function (ex. {@link org.apache.kafka.common.errors.TopicAuthorizationException})
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
475
[]
true
3
6.72
apache/kafka
31,560
javadoc
false
bindDataObject
private @Nullable Object bindDataObject(ConfigurationPropertyName name, Bindable<?> target, BindHandler handler, Context context, boolean allowRecursiveBinding) { if (isUnbindableBean(name, target, context)) { return null; } Class<?> type = target.getType().resolve(Object.class); BindMethod bindMethod = target.getBindMethod(); if (!allowRecursiveBinding && context.isBindingDataObject(type)) { return null; } DataObjectPropertyBinder propertyBinder = (propertyName, propertyTarget) -> bind(name.append(propertyName), propertyTarget, handler, context, false, false); Supplier<@Nullable Object> supplier = () -> fromDataObjectBinders(bindMethod, (dataObjectBinder) -> dataObjectBinder.bind(name, target, context, propertyBinder)); return context.withDataObject(type, supplier); }
Bind the specified target {@link Bindable} using this binder's {@link ConfigurationPropertySource property sources} or create a new instance using the type of the {@link Bindable} if the result of the binding is {@code null}. @param name the configuration property name to bind @param target the target bindable @param handler the bind handler (may be {@code null}) @param <T> the bound or created type @return the bound or created object @since 2.2.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
495
[ "name", "target", "handler", "context", "allowRecursiveBinding" ]
Object
true
4
7.92
spring-projects/spring-boot
79,428
javadoc
false
toString
public String toString(int indentSpaces) throws JSONException { JSONStringer stringer = new JSONStringer(indentSpaces); writeTo(stringer); return stringer.toString(); }
Encodes this array as a human-readable JSON string for debugging, such as: <pre> [ 94043, 90210 ]</pre> @param indentSpaces the number of spaces to indent for each level of nesting. @return a human-readable JSON string of this array @throws JSONException if processing of json failed
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
644
[ "indentSpaces" ]
String
true
1
6.56
spring-projects/spring-boot
79,428
javadoc
false
codes
def codes(self) -> Series: """ Return Series of codes as well as the index. See Also -------- Series.cat.categories : Return the categories of this categorical. Series.cat.as_ordered : Set the Categorical to be ordered. Series.cat.as_unordered : Set the Categorical to be unordered. Examples -------- >>> raw_cate = pd.Categorical(["a", "b", None, "a"], categories=["a", "b"]) >>> ser = pd.Series(raw_cate) >>> ser.cat.codes 0 0 1 1 2 -1 3 0 dtype: int8 """ from pandas import Series return Series(self._parent.codes, index=self._index)
Return Series of codes as well as the index. See Also -------- Series.cat.categories : Return the categories of this categorical. Series.cat.as_ordered : Set the Categorical to be ordered. Series.cat.as_unordered : Set the Categorical to be unordered. Examples -------- >>> raw_cate = pd.Categorical(["a", "b", None, "a"], categories=["a", "b"]) >>> ser = pd.Series(raw_cate) >>> ser.cat.codes 0 0 1 1 2 -1 3 0 dtype: int8
python
pandas/core/arrays/categorical.py
2,996
[ "self" ]
Series
true
1
7.12
pandas-dev/pandas
47,362
unknown
false
values
@Override public Collection<@Nullable V> values() { return super.values(); }
Returns an unmodifiable collection of all values, which may contain duplicates. Changes to the table will update the returned collection. <p>The returned collection's iterator traverses the values of the first row key, the values of the second row key, and so on. @return collection of values
java
android/guava/src/com/google/common/collect/ArrayTable.java
776
[]
true
1
6.96
google/guava
51,352
javadoc
false
size
public int size() { return this.lookupIndexes.length; }
Returns the number of entries in the ZIP file. @return the number of entries
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
175
[]
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
_validate_list_of_stringables
def _validate_list_of_stringables(vals: Sequence[str | int | float]) -> bool: """ Check the values in the provided list can be converted to strings. :param vals: list to validate """ if ( vals is None or not isinstance(vals, (tuple, list)) or not all(isinstance(val, (str, int, float)) for val in vals) ): raise ValueError("List of strings expected") return True
Check the values in the provided list can be converted to strings. :param vals: list to validate
python
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/analyticdb_spark.py
300
[ "vals" ]
bool
true
4
7.04
apache/airflow
43,597
sphinx
false
getCanonicalName
public static String getCanonicalName(final Class<?> cls, final String valueIfNull) { if (cls == null) { return valueIfNull; } final String canonicalName = cls.getCanonicalName(); return canonicalName == null ? valueIfNull : canonicalName; }
Gets the canonical name for a {@link Class}. @param cls the class for which to get the canonical class name; may be null. @param valueIfNull the return value if null. @return the canonical name of the class, or {@code valueIfNull}. @since 3.7 @see Class#getCanonicalName()
java
src/main/java/org/apache/commons/lang3/ClassUtils.java
438
[ "cls", "valueIfNull" ]
String
true
3
8.08
apache/commons-lang
2,896
javadoc
false
bean
<T> T bean(Class<T> beanClass) throws BeansException;
Return the bean instance that uniquely matches the given type, if any. @param beanClass the type the bean must match; can be an interface or superclass @return an instance of the single bean matching the bean type @see BeanFactory#getBean(String)
java
spring-beans/src/main/java/org/springframework/beans/factory/BeanRegistry.java
230
[ "beanClass" ]
T
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
castArray
function castArray() { if (!arguments.length) { return []; } var value = arguments[0]; return isArray(value) ? value : [value]; }
Casts `value` as an array if it's not one. @static @memberOf _ @since 4.4.0 @category Lang @param {*} value The value to inspect. @returns {Array} Returns the cast array. @example _.castArray(1); // => [1] _.castArray({ 'a': 1 }); // => [{ 'a': 1 }] _.castArray('abc'); // => ['abc'] _.castArray(null); // => [null] _.castArray(undefined); // => [undefined] _.castArray(); // => [] var array = [1, 2, 3]; console.log(_.castArray(array) === array); // => true
javascript
lodash.js
11,102
[]
false
3
8.72
lodash/lodash
61,490
jsdoc
false
describeConsumerGroups
default DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds) { return describeConsumerGroups(groupIds, new DescribeConsumerGroupsOptions()); }
Describe some consumer groups in the cluster, with the default options. <p> This is a convenience method for {@link #describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)} with default options. See the overload for more details. @param groupIds The IDs of the groups to describe. @return The DescribeConsumerGroupsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
876
[ "groupIds" ]
DescribeConsumerGroupsResult
true
1
6.32
apache/kafka
31,560
javadoc
false
enable_history_recording
def enable_history_recording() -> Generator[None, None, None]: "Turns on history recording in the CUDA Caching Allocator" enabled = torch._C._cuda_isHistoryEnabled() try: if not enabled: torch.cuda.memory._record_memory_history() yield finally: if not enabled: torch.cuda.memory._record_memory_history(None)
"Turns on history recording in the CUDA Caching Allocator"
python
torch/_inductor/cudagraph_trees.py
168
[]
Generator[None, None, None]
true
3
6.24
pytorch/pytorch
96,034
unknown
false
isin
def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: """ Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : np.ndarray or ExtensionArray The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- np.ndarray[bool] Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(["llama", "cow", "llama", "beetle", "llama", "hippo"]) >>> s.isin(["cow", "llama"]) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('llama')`` will raise an error. Use a list of one element instead: >>> s.isin(["llama"]) array([ True, False, True, False, True, False]) """ null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer_for(values) code_values = code_values[null_mask | (code_values >= 0)] return algorithms.isin(self.codes, code_values)
Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : np.ndarray or ExtensionArray The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- np.ndarray[bool] Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(["llama", "cow", "llama", "beetle", "llama", "hippo"]) >>> s.isin(["cow", "llama"]) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('llama')`` will raise an error. Use a list of one element instead: >>> s.isin(["llama"]) array([ True, False, True, False, True, False])
python
pandas/core/arrays/categorical.py
2,695
[ "self", "values" ]
npt.NDArray[np.bool_]
true
1
7.12
pandas-dev/pandas
47,362
numpy
false
matchesPattern
public static void matchesPattern(final CharSequence input, final String pattern, final String message, final Object... values) { // TODO when breaking BC, consider returning input if (!Pattern.matches(pattern, input)) { throw new IllegalArgumentException(getMessage(message, values)); } }
Validate that the specified argument character sequence matches the specified regular expression pattern; otherwise throwing an exception with the specified message. <pre>Validate.matchesPattern("hi", "[a-z]*", "%s does not match %s", "hi" "[a-z]*");</pre> <p>The syntax of the pattern is the one used in the {@link Pattern} class.</p> @param input the character sequence to validate, not null. @param pattern the regular expression pattern, not null. @param message the {@link String#format(String, Object...)} exception message if invalid, not null. @param values the optional values for the formatted exception message, null array not recommended. @throws IllegalArgumentException if the character sequence does not match the pattern. @see #matchesPattern(CharSequence, String) @since 3.0
java
src/main/java/org/apache/commons/lang3/Validate.java
637
[ "input", "pattern", "message" ]
void
true
2
6.56
apache/commons-lang
2,896
javadoc
false
getDouble
public double getDouble(String name) throws JSONException { Object object = get(name); Double result = JSON.toDouble(object); if (result == null) { throw JSON.typeMismatch(name, object, "double"); } return result; }
Returns the value mapped by {@code name} if it exists and is a double or can be coerced to a double. @param name the name of the property @return the value @throws JSONException if the mapping doesn't exist or cannot be coerced to a double.
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
435
[ "name" ]
true
2
8.24
spring-projects/spring-boot
79,428
javadoc
false
whenTrue
public Source<T> whenTrue() { return when(Boolean.TRUE::equals); }
Return a filtered version of the source that will only map values that are {@code true}. @return a new filtered source instance
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
216
[]
true
1
6.8
spring-projects/spring-boot
79,428
javadoc
false
cumprod
def cumprod(a, axis=None, dtype=None, out=None): """ Return the cumulative product of elements along a given axis. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. By default the input is flattened. dtype : dtype, optional Type of the returned array, as well as of the accumulator in which the elements are multiplied. If *dtype* is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type of the resulting values will be cast if necessary. Returns ------- cumprod : ndarray A new array holding the result is returned unless `out` is specified, in which case a reference to out is returned. See Also -------- cumulative_prod : Array API compatible alternative for ``cumprod``. :ref:`ufuncs-output-type` Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> import numpy as np >>> a = np.array([1,2,3]) >>> np.cumprod(a) # intermediate results 1, 1*2 ... # total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: >>> np.cumprod(a, axis=0) array([[ 1, 2, 3], [ 4, 10, 18]]) The cumulative product for each row (i.e. over the columns) of `a`: >>> np.cumprod(a,axis=1) array([[ 1, 2, 6], [ 4, 20, 120]]) """ return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
Return the cumulative product of elements along a given axis. Parameters ---------- a : array_like Input array. axis : int, optional Axis along which the cumulative product is computed. By default the input is flattened. dtype : dtype, optional Type of the returned array, as well as of the accumulator in which the elements are multiplied. If *dtype* is not specified, it defaults to the dtype of `a`, unless `a` has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used instead. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type of the resulting values will be cast if necessary. Returns ------- cumprod : ndarray A new array holding the result is returned unless `out` is specified, in which case a reference to out is returned. See Also -------- cumulative_prod : Array API compatible alternative for ``cumprod``. :ref:`ufuncs-output-type` Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. Examples -------- >>> import numpy as np >>> a = np.array([1,2,3]) >>> np.cumprod(a) # intermediate results 1, 1*2 ... # total product 1*2*3 = 6 array([1, 2, 6]) >>> a = np.array([[1, 2, 3], [4, 5, 6]]) >>> np.cumprod(a, dtype=np.float64) # specify type of output array([ 1., 2., 6., 24., 120., 720.]) The cumulative product for each column (i.e., over the rows) of `a`: >>> np.cumprod(a, axis=0) array([[ 1, 2, 3], [ 4, 10, 18]]) The cumulative product for each row (i.e. over the columns) of `a`: >>> np.cumprod(a,axis=1) array([[ 1, 2, 6], [ 4, 20, 120]])
python
numpy/_core/fromnumeric.py
3,413
[ "a", "axis", "dtype", "out" ]
false
1
6.24
numpy/numpy
31,054
numpy
false
formatSplitTime
public String formatSplitTime() { return DurationFormatUtils.formatDurationHMS(getSplitDuration().toMillis()); }
Formats the split time with {@link DurationFormatUtils#formatDurationHMS}. @return the split time formatted by {@link DurationFormatUtils#formatDurationHMS}. @since 3.10
java
src/main/java/org/apache/commons/lang3/time/StopWatch.java
329
[]
String
true
1
6.16
apache/commons-lang
2,896
javadoc
false
step
def step(self) -> int: """ The value of the `step` parameter (``1`` if this was not supplied). The ``step`` parameter determines the increment (or decrement in the case of negative values) between consecutive elements in the ``RangeIndex``. See Also -------- RangeIndex : Immutable index implementing a range-based index. RangeIndex.stop : Returns the stop value of the RangeIndex. RangeIndex.start : Returns the start value of the RangeIndex. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.step 1 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.step -3 Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if not supplied. >>> idx = pd.RangeIndex(1, 0) >>> idx.step 1 """ # GH 25710 return self._range.step
The value of the `step` parameter (``1`` if this was not supplied). The ``step`` parameter determines the increment (or decrement in the case of negative values) between consecutive elements in the ``RangeIndex``. See Also -------- RangeIndex : Immutable index implementing a range-based index. RangeIndex.stop : Returns the stop value of the RangeIndex. RangeIndex.start : Returns the start value of the RangeIndex. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.step 1 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.step -3 Even if :class:`pandas.RangeIndex` is empty, ``step`` is still ``1`` if not supplied. >>> idx = pd.RangeIndex(1, 0) >>> idx.step 1
python
pandas/core/indexes/range.py
372
[ "self" ]
int
true
1
7.28
pandas-dev/pandas
47,362
unknown
false