function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
choice
def choice(self) -> Optional[ChoiceCaller]:
    """Return the lazily-computed ChoiceCaller for this template choice.

    The first access calls ``template.choice_or_none()`` with the stored
    parameters and caches the outcome on ``self._choice`` (the ChoiceCaller
    on success, ``None`` on failure); later accesses return the cached
    value without recomputing.

    Returns:
        ChoiceCaller if the template choice succeeds, None otherwise
    """
    if hasattr(self, "_choice"):
        # Cached from an earlier access (may legitimately be None).
        return self._choice
    generated = self.template.choice_or_none(
        **self.params.to_kwargs(),
        **self.extra_kwargs,
        layout=self.layout,
        input_nodes=self.inputs.nodes(),
    )
    if generated is not None:
        generated.annotations = self.annotations
    self._choice = generated
    return self._choice
Lazily evaluate and return the ChoiceCaller for this template choice. On first access, calls template.choice_or_none() with the stored parameters. If successful, caches and returns the ChoiceCaller. If it fails, caches and returns None. Subsequent accesses return the cached value. Returns: ChoiceCaller if the template choice succeeds, None otherwise
python
torch/_inductor/kernel_template_choice.py
42
[ "self" ]
Optional[ChoiceCaller]
true
3
7.76
pytorch/pytorch
96,034
unknown
false
_can_fuse_epilogue_impl
# Decides whether `node_to_fuse` may be fused into the CUDA template epilogue:
# every constituent node must be a named Pointwise ComputedBuffer whose size
# matches the template buffer's, the first epilogue node must read from the
# template buffer, the node must have no aliasing/mutation and not be a
# reduction, cutlass epilogue fusion must be enabled in config, the buffer
# must support epilogue fusion, and Cutlass EVT codegen must accept the nodes
# (NotImplementedError from codegen rejects the fusion with a reason).
def _can_fuse_epilogue_impl( self, cuda_template_buffer: CUDATemplateBuffer, existing_epilogue_nodes: list[BaseSchedulerNode], node_to_fuse: BaseSchedulerNode, ) -> bool: """ Check if the given node can be fused with the epilogue. At the moment, Kernels support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes. Args: cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and it's result buffer existing_epilogue_nodes : List[SchedulerNode]: The list of already fused epilogue nodes. node_to_fuse: The SchedulerNode node to be checked if it can be fused with the epilogue. Returns: - bool: True if the given node can be fused with the epilogue, False otherwise. """ why = WhyNoFuseNames(cuda_template_buffer.get_name(), node_to_fuse.get_name()) scheduler_nodes_to_fuse = node_to_fuse.get_nodes() assert isinstance(cuda_template_buffer, CUDATemplateBuffer) # Checks on constituent nodes for s_node in scheduler_nodes_to_fuse: node = s_node.node if not isinstance(node, ComputedBuffer): why(f"{node} is not a ComputedBuffer") return False elif not isinstance(node.data, Pointwise): why(f"{node} is not a Pointwise op") return False elif not node.get_computed_buffer_name(): # type: ignore[attr-defined] why(f"{node} does not have a computed buffer name") return False name = node.get_computed_buffer_name() # type: ignore[attr-defined] # dtype can differ, and strides can differ as long as they are broadcastable if node.get_size() != cuda_template_buffer.get_size(): why( f"{name}'s size: {node.get_size()} differs from {cuda_template_buffer.get_name()}'s \ size: {cuda_template_buffer.get_size()}" ) return False assert len( existing_epilogue_nodes ) or cuda_template_buffer.get_name() in OrderedSet( [rd.name for rd in node_to_fuse.read_writes.reads] ), "First epilogue node must read from cuda template buffer" if node_to_fuse.has_aliasing_or_mutation(): why(f"{node_to_fuse.get_name()} has aliasing or mutation") return False elif 
node_to_fuse.is_reduction(): why( f"{node_to_fuse.get_name()} is a reduction which is not yet supported by EVT" ) return False elif ( not config.cuda.cutlass_epilogue_fusion_enabled or not config.epilogue_fusion ): why("cutlass epilogue fusion is not enabled") return False elif not cuda_template_buffer.supports_epilogue_fusion: why("epilogue fusion is only supported for TMA-enabled gemm ops") return False try: from torch._inductor.codegen.cuda.cutlass_python_evt import ( CutlassEVTCodegen, ) CutlassEVTCodegen.ir_to_evt_python_code( cuda_template_buffer.get_name(), existing_epilogue_nodes + list(node_to_fuse.get_nodes()), OrderedSet(), ) except NotImplementedError as e: not_implemented_op = str(e) if not_implemented_op.startswith("_op_"): not_implemented_op = not_implemented_op[4:] why( f"Cannot fuse epilogue node {node_to_fuse} into {cuda_template_buffer.name}, \ likely due to unsupported operation: {not_implemented_op}" # noqa: G004, B950 ) return False else: # Likely due to unsupported dtype. why( f"Cannot fuse epilogue node {node_to_fuse} into {cuda_template_buffer.name}. \ Reason: {not_implemented_op}" # noqa: G004, B950 ) return False return True
Check if the given node can be fused with the epilogue. At the moment, Kernels support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes. Args: cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and it's result buffer existing_epilogue_nodes : List[SchedulerNode]: The list of already fused epilogue nodes. node_to_fuse: The SchedulerNode node to be checked if it can be fused with the epilogue. Returns: - bool: True if the given node can be fused with the epilogue, False otherwise.
python
torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py
198
[ "self", "cuda_template_buffer", "existing_epilogue_nodes", "node_to_fuse" ]
bool
true
14
8
pytorch/pytorch
96,034
google
false
containsWhitespace
/**
 * Tests whether the given CharSequence contains any whitespace characters,
 * as defined by {@link Character#isWhitespace(char)}.
 *
 * @param seq the CharSequence to check (may be {@code null})
 * @return {@code true} if the sequence is non-empty and contains at least
 *         one whitespace character, {@code false} otherwise
 */
public static boolean containsWhitespace(final CharSequence seq) {
    if (isEmpty(seq)) {
        return false;
    }
    final int length = seq.length();
    int index = 0;
    while (index < length) {
        if (Character.isWhitespace(seq.charAt(index))) {
            return true;
        }
        index++;
    }
    return false;
}
Tests whether the given CharSequence contains any whitespace characters. <p> Whitespace is defined by {@link Character#isWhitespace(char)}. </p> <pre> StringUtils.containsWhitespace(null) = false StringUtils.containsWhitespace("") = false StringUtils.containsWhitespace("ab") = false StringUtils.containsWhitespace(" ab") = true StringUtils.containsWhitespace("a b") = true StringUtils.containsWhitespace("ab ") = true </pre> @param seq the CharSequence to check (may be {@code null}). @return {@code true} if the CharSequence is not empty and contains at least 1 (breaking) whitespace character. @since 3.0
java
src/main/java/org/apache/commons/lang3/StringUtils.java
1,356
[ "seq" ]
true
4
7.6
apache/commons-lang
2,896
javadoc
false
getLogger
/**
 * Look up the {@link LoggerConfig} for the given logger name, treating an
 * empty/absent name or the configured root logger name as the Log4j root
 * logger.
 *
 * @param name the logger name (may be {@code null} or empty for the root)
 * @return the matching {@code LoggerConfig}, or {@code null} if none found
 */
private @Nullable LoggerConfig getLogger(@Nullable String name) {
    boolean isRoot = !StringUtils.hasLength(name) || ROOT_LOGGER_NAME.equals(name);
    return findLogger(isRoot ? LogManager.ROOT_LOGGER_NAME : name);
}
Return the configuration location. The result may be: <ul> <li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li> <li>A file path: if provided explicitly by the user</li> <li>A URI: if loaded from the classpath default or a custom location</li> </ul> @param configuration the source configuration @return the config location or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
470
[ "name" ]
LoggerConfig
true
3
7.28
spring-projects/spring-boot
79,428
javadoc
false
tz_convert
def tz_convert(self, tz) -> Self:
    """
    Convert a tz-aware Datetime Array/Index from one time zone to another.

    Parameters
    ----------
    tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, \
datetime.tzinfo or None
        Target time zone for the converted timestamps. A ``tz`` of ``None``
        converts to UTC and removes the timezone information.

    Returns
    -------
    Array or Index
        Datetime Array/Index with target `tz`.

    Raises
    ------
    TypeError
        If the Datetime Array/Index is tz-naive.

    See Also
    --------
    DatetimeIndex.tz : A timezone that has a variable offset from UTC.
    DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
        given time zone, or remove timezone from a tz-aware DatetimeIndex.

    Examples
    --------
    >>> dti = pd.date_range(
    ...     start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin"
    ... )
    >>> dti.tz_convert("US/Central")
    DatetimeIndex(['2014-08-01 02:00:00-05:00',
                   '2014-08-01 03:00:00-05:00',
                   '2014-08-01 04:00:00-05:00'],
                    dtype='datetime64[ns, US/Central]', freq='h')
    """
    # Delegate the conversion to the backing DatetimeArray, then wrap the
    # result in the same index type, preserving name and references.
    converted_values = self._data.tz_convert(tz)
    return type(self)._simple_new(
        converted_values, name=self.name, refs=self._references
    )
Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, zoneinfo.ZoneInfo, pytz.timezone, dateutil.tz.tzfile, datetime.tzinfo or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Datetme Array/Index with target `tz`. Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range( ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin" ... ) >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='h') >>> dti.tz_convert("US/Central") DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='h') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range( ... start="2014-08-01 09:00", freq="h", periods=3, tz="Europe/Berlin" ... ) >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='h') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[ns]', freq='h')
python
pandas/core/indexes/datetimes.py
328
[ "self", "tz" ]
Self
true
1
6.72
pandas-dev/pandas
47,362
numpy
false
create
/**
 * Transform the given input {@link TokenStream} into a new stream.
 *
 * @param tokenStream the token stream to be transformed
 * @return the transformed token stream
 */
TokenStream create(TokenStream tokenStream);
Transform the specified input TokenStream. @param tokenStream a token stream to be transformed @return transformed token stream
java
libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenFilterFactory.java
27
[ "tokenStream" ]
TokenStream
true
1
6
elastic/elasticsearch
75,680
javadoc
false
resolvePropertyPlaceholders
/**
 * Resolve placeholder expressions in every property value in place,
 * using system properties plus the properties themselves as the source.
 * Values whose resolution yields {@code null} are left unchanged.
 */
private void resolvePropertyPlaceholders() {
    for (String key : this.properties.stringPropertyNames()) {
        String original = this.properties.getProperty(key);
        String replacement = SystemPropertyUtils.resolvePlaceholders(this.properties, original);
        if (replacement != null) {
            this.properties.put(key, replacement);
        }
    }
}
Properties key for boolean flag (default false) which, if set, will cause the external configuration properties to be copied to System properties (assuming that is allowed by Java security).
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/PropertiesLauncher.java
277
[]
void
true
2
6.72
spring-projects/spring-boot
79,428
javadoc
false
listFiles
/**
 * List the entries of the given directory sorted by {@code entryComparator}.
 *
 * @param file the directory to list
 * @return the sorted entries, or an empty list if {@code file} is not a
 *         readable directory ({@code File#listFiles()} returned {@code null})
 */
private List<File> listFiles(File file) {
    File[] children = file.listFiles();
    if (children == null) {
        return Collections.emptyList();
    }
    List<File> result = Arrays.asList(children);
    result.sort(entryComparator);
    return result;
}
Create a new {@link ExplodedArchive} instance. @param rootDirectory the root directory
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/ExplodedArchive.java
108
[ "file" ]
true
2
6.08
spring-projects/spring-boot
79,428
javadoc
false
optJSONObject
/**
 * Returns the value mapped by {@code name} if it exists and is a
 * {@code JSONObject}; returns {@code null} otherwise.
 *
 * @param name the name of the property
 * @return the value or {@code null}
 */
public JSONObject optJSONObject(String name) {
    Object value = opt(name);
    if (value instanceof JSONObject) {
        return (JSONObject) value;
    }
    return null;
}
Returns the value mapped by {@code name} if it exists and is a {@code JSONObject}. Returns null otherwise. @param name the name of the property @return the value or {@code null}
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
641
[ "name" ]
JSONObject
true
2
8
spring-projects/spring-boot
79,428
javadoc
false
allVersions
/**
 * Returns every supported API version from {@code oldestVersion()} through
 * {@code latestVersion()}, inclusive and in ascending order.
 *
 * @return the list of all supported versions
 */
public List<Short> allVersions() {
    List<Short> versions = new ArrayList<>(latestVersion() - oldestVersion() + 1);
    short current = oldestVersion();
    while (current <= latestVersion()) {
        versions.add(current);
        current++;
    }
    return versions;
}
indicates whether the API is enabled for forwarding
java
clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java
232
[]
true
2
6.4
apache/kafka
31,560
javadoc
false
buildGenericTypeAwarePropertyDescriptor
/**
 * Wrap the given descriptor in a {@code GenericTypeAwarePropertyDescriptor}
 * for the specified bean class.
 *
 * @param beanClass the bean class being introspected
 * @param pd the source property descriptor
 * @return the generic-type-aware descriptor
 * @throws FatalBeanException if re-introspection of the class fails
 */
private PropertyDescriptor buildGenericTypeAwarePropertyDescriptor(Class<?> beanClass, PropertyDescriptor pd) {
    try {
        return new GenericTypeAwarePropertyDescriptor(beanClass, pd.getName(),
                pd.getReadMethod(), pd.getWriteMethod(), pd.getPropertyEditorClass());
    }
    catch (IntrospectionException ex) {
        String message = "Failed to re-introspect class [" + beanClass.getName() + "]";
        throw new FatalBeanException(message, ex);
    }
}
Create a new CachedIntrospectionResults instance for the given class. @param beanClass the bean class to analyze @throws BeansException in case of introspection failure
java
spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java
394
[ "beanClass", "pd" ]
PropertyDescriptor
true
2
6.24
spring-projects/spring-framework
59,386
javadoc
false
topicPartitionsToLogString
/**
 * Render a partition collection for logging: the full per-partition listing
 * when TRACE logging is enabled, otherwise just the partition count.
 *
 * @param partitions the partitions to render
 * @return the log-friendly string
 */
private String topicPartitionsToLogString(Collection<TopicPartition> partitions) {
    if (log.isTraceEnabled()) {
        String joined = partitions.stream()
                .map(TopicPartition::toString)
                .collect(Collectors.joining(", "));
        return "(" + joined + ")";
    }
    return String.format("%d partition(s)", partitions.size());
}
A builder that allows for presizing the PartitionData hashmap, and avoiding making a secondary copy of the sessionPartitions, in cases where this is not necessarily. This builder is primarily for use by the Replica Fetcher @param size the initial size of the PartitionData hashmap @param copySessionPartitions boolean denoting whether the builder should make a deep copy of session partitions
java
clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
392
[ "partitions" ]
String
true
2
6.4
apache/kafka
31,560
javadoc
false
Row
// React component rendering one attribute/value editor row: it keeps local
// (possibly invalid) drafts of both fields, validates the attribute against
// `validAttributes` and the value as parseable JSON, and commits changes via
// the `changeAttribute` / `changeValue` callbacks only when both drafts are
// valid and the submitted field actually differs from the current prop.
function Row({ attribute, attributePlaceholder, changeAttribute, changeValue, validAttributes, value, valuePlaceholder, }: RowProps) { // TODO (RN style editor) Use @reach/combobox to auto-complete attributes. // The list of valid attributes would need to be injected by RN backend, // which would need to require them from ReactNativeViewViewConfig "validAttributes.style" keys. // This would need to degrade gracefully for react-native-web, // although we could let it also inject a custom set of allowed attributes. const [localAttribute, setLocalAttribute] = useState(attribute); const [localValue, setLocalValue] = useState(JSON.stringify(value)); const [isAttributeValid, setIsAttributeValid] = useState(true); const [isValueValid, setIsValueValid] = useState(true); // $FlowFixMe[missing-local-annot] const validateAndSetLocalAttribute = newAttribute => { const isValid = newAttribute === '' || validAttributes === null || validAttributes.indexOf(newAttribute) >= 0; setLocalAttribute(newAttribute); setIsAttributeValid(isValid); }; // $FlowFixMe[missing-local-annot] const validateAndSetLocalValue = newValue => { let isValid = false; try { JSON.parse(sanitizeForParse(newValue)); isValid = true; } catch (error) {} setLocalValue(newValue); setIsValueValid(isValid); }; const resetAttribute = () => { setLocalAttribute(attribute); }; const resetValue = () => { setLocalValue(value); }; const submitValueChange = () => { if (isAttributeValid && isValueValid) { const parsedLocalValue = JSON.parse(sanitizeForParse(localValue)); if (value !== parsedLocalValue) { changeValue(attribute, parsedLocalValue); } } }; const submitAttributeChange = () => { if (isAttributeValid && isValueValid) { if (attribute !== localAttribute) { changeAttribute(attribute, localAttribute, value); } } }; return ( <div className={styles.Row}> <Field className={isAttributeValid ? 
styles.Attribute : styles.Invalid} onChange={validateAndSetLocalAttribute} onReset={resetAttribute} onSubmit={submitAttributeChange} placeholder={attributePlaceholder} value={localAttribute} /> :&nbsp; <Field className={isValueValid ? styles.Value : styles.Invalid} onChange={validateAndSetLocalValue} onReset={resetValue} onSubmit={submitValueChange} placeholder={valuePlaceholder} value={localValue} /> ; </div> ); }
Copyright (c) Meta Platforms, Inc. and affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. @flow
javascript
packages/react-devtools-shared/src/devtools/views/Components/NativeStyleEditor/StyleEditor.js
157
[]
false
12
6.32
facebook/react
241,750
jsdoc
false
get_mssql_table_constraints
def get_mssql_table_constraints(conn, table_name) -> dict[str, dict[str, list[str]]]:
    """
    Return the primary, unique, and foreign key constraints with their columns.

    Some tables like `task_instance` are missing the primary key constraint
    name and the name is auto-generated by the SQL server, so this function
    helps to retrieve any primary or unique constraint name.

    :param conn: sql connection object
    :param table_name: table name
    :return: a dictionary mapping constraint type -> constraint name ->
        list of column names for the table
    """
    # SECURITY: bind table_name as a parameter instead of interpolating it
    # into the SQL string (the previous f-string form was injectable).
    query = text(
        """SELECT tc.CONSTRAINT_NAME , tc.CONSTRAINT_TYPE, ccu.COLUMN_NAME
        FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
        JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS ccu ON ccu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME
        WHERE tc.TABLE_NAME = :table_name AND
        (tc.CONSTRAINT_TYPE = 'PRIMARY KEY' or UPPER(tc.CONSTRAINT_TYPE) = 'UNIQUE'
        or UPPER(tc.CONSTRAINT_TYPE) = 'FOREIGN KEY')
        """
    ).bindparams(table_name=table_name)
    result = conn.execute(query).fetchall()
    constraint_dict = defaultdict(lambda: defaultdict(list))
    for constraint, constraint_type, col_name in result:
        constraint_dict[constraint_type][constraint].append(col_name)
    return constraint_dict
Return the primary and unique constraint along with column name. Some tables like `task_instance` are missing the primary key constraint name and the name is auto-generated by the SQL server, so this function helps to retrieve any primary or unique constraint name. :param conn: sql connection object :param table_name: table name :return: a dictionary of ((constraint name, constraint type), column name) of table
python
airflow-core/src/airflow/migrations/utils.py
26
[ "conn", "table_name" ]
dict[str, dict[str, list[str]]]
true
2
8.08
apache/airflow
43,597
sphinx
false
issubdtype
def issubdtype(arg1, arg2):
    r"""
    Return True if the first argument is lower/equal in the type hierarchy.

    This is like the builtin :func:`issubclass`, but for `dtype`\ s: each
    argument may be a scalar type, a dtype, or anything coercible to a
    dtype (such as a typecode string).

    Parameters
    ----------
    arg1, arg2 : dtype_like
        `dtype` or object coercible to one

    Returns
    -------
    out : bool

    Examples
    --------
    >>> issubdtype('i4', np.signedinteger)
    True
    >>> issubdtype(np.float64, np.float32)
    False
    """
    def _as_scalar_type(arg):
        # A class that already sits under `generic` is used directly;
        # everything else (typecodes, dtype instances, builtin types,
        # non-class objects) is coerced through `dtype(...)`.
        try:
            if issubclass(arg, generic):
                return arg
        except TypeError:
            # issubclass raises TypeError when `arg` is not a class.
            pass
        return dtype(arg).type

    return issubclass(_as_scalar_type(arg1), _as_scalar_type(arg2))
r""" Returns True if first argument is a typecode lower/equal in type hierarchy. This is like the builtin :func:`issubclass`, but for `dtype`\ s. Parameters ---------- arg1, arg2 : dtype_like `dtype` or object coercible to one Returns ------- out : bool See Also -------- :ref:`arrays.scalars` : Overview of the numpy type hierarchy. Examples -------- `issubdtype` can be used to check the type of arrays: >>> ints = np.array([1, 2, 3], dtype=np.int32) >>> np.issubdtype(ints.dtype, np.integer) True >>> np.issubdtype(ints.dtype, np.floating) False >>> floats = np.array([1, 2, 3], dtype=np.float32) >>> np.issubdtype(floats.dtype, np.integer) False >>> np.issubdtype(floats.dtype, np.floating) True Similar types of different sizes are not subdtypes of each other: >>> np.issubdtype(np.float64, np.float32) False >>> np.issubdtype(np.float32, np.float64) False but both are subtypes of `floating`: >>> np.issubdtype(np.float64, np.floating) True >>> np.issubdtype(np.float32, np.floating) True For convenience, dtype-like objects are allowed too: >>> np.issubdtype('S1', np.bytes_) True >>> np.issubdtype('i4', np.signedinteger) True
python
numpy/_core/numerictypes.py
412
[ "arg1", "arg2" ]
false
3
7.04
numpy/numpy
31,054
numpy
false
is_masked
def is_masked(x):
    """
    Determine whether input has masked values.

    Accepts any object, but returns False unless `x` is a MaskedArray whose
    mask flags at least one entry.

    Parameters
    ----------
    x : array_like
        Array to check for masked values.

    Returns
    -------
    result : bool
        True if `x` is a MaskedArray with masked values, False otherwise.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> is_masked(ma.masked_equal([0, 1, 0], 0))
    True
    >>> is_masked([False, True, False])
    False
    """
    mask = getmask(x)
    # `nomask` means no mask at all (including non-MaskedArray input);
    # otherwise report whether any element is actually masked.
    if mask is nomask:
        return False
    return bool(mask.any())
Determine whether input has masked values. Accepts any object as input, but always returns False unless the input is a MaskedArray containing masked values. Parameters ---------- x : array_like Array to check for masked values. Returns ------- result : bool True if `x` is a MaskedArray with masked values, False otherwise. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x masked_array(data=[--, 1, --, 2, 3], mask=[ True, False, True, False, False], fill_value=0) >>> ma.is_masked(x) True >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) >>> x masked_array(data=[0, 1, 0, 2, 3], mask=False, fill_value=42) >>> ma.is_masked(x) False Always returns False if `x` isn't a MaskedArray. >>> x = [False, True, False] >>> ma.is_masked(x) False >>> x = 'a string' >>> ma.is_masked(x) False
python
numpy/ma/core.py
6,862
[ "x" ]
false
3
7.68
numpy/numpy
31,054
numpy
false
definitelyRunningAsRoot
/**
 * Determine whether this JVM is running as the root user.
 *
 * @return {@code true} if definitely running as root, or {@code false} if
 *         not running as root or if it cannot be determined
 */
boolean definitelyRunningAsRoot();
Determine whether this JVM is running as the root user. @return true if running as root, or false if unsure
java
libs/native/src/main/java/org/elasticsearch/nativeaccess/NativeAccess.java
33
[]
true
1
6.8
elastic/elasticsearch
75,680
javadoc
false
normalizeReferrerURL
/**
 * Normalize a module referrer name into a URL string: absolute paths become
 * `file://` URLs, strings that already look like URLs (file:// prefix or
 * parseable by URL) pass through unchanged, and any other string yields
 * undefined. Non-string, non-nullish input is treated as an internal bug.
 *
 * @param {string | null | undefined} referrerName
 * @returns {string | undefined}
 */
function normalizeReferrerURL(referrerName) {
  if (referrerName === null || referrerName === undefined) {
    return undefined;
  }
  if (typeof referrerName !== 'string') {
    assert.fail('Unreachable code reached by ' + inspect(referrerName));
  }
  if (path.isAbsolute(referrerName)) {
    return pathToFileURL(referrerName).href;
  }
  const isURLLike =
    StringPrototypeStartsWith(referrerName, 'file://') ||
    URLCanParse(referrerName);
  return isURLLike ? referrerName : undefined;
}
Normalize the referrer name as a URL. If it's a string containing an absolute path or a URL it's normalized as a URL string. Otherwise it's returned as undefined. @param {string | null | undefined} referrerName @returns {string | undefined}
javascript
lib/internal/modules/helpers.js
276
[ "referrerName" ]
false
7
6.08
nodejs/node
114,839
jsdoc
false
closeHeartbeatThread
/**
 * Close the heartbeat thread (if any) and wait for it to terminate.
 * The close-and-clear happens under the coordinator lock; the join is
 * performed outside the lock. An interrupt while joining is logged and
 * rethrown as an {@code InterruptException}.
 */
private void closeHeartbeatThread() {
    BaseHeartbeatThread threadToJoin;
    synchronized (this) {
        if (heartbeatThread == null) {
            return;
        }
        heartbeatThread.close();
        threadToJoin = heartbeatThread;
        heartbeatThread = null;
    }
    try {
        threadToJoin.join();
    } catch (InterruptedException e) {
        log.warn("Interrupted while waiting for consumer heartbeat thread to close");
        throw new InterruptException(e);
    }
}
Ensure the group is active (i.e., joined and synced) @param timer Timer bounding how long this method can block @throws KafkaException if the callback throws exception @return true iff the group is active
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
431
[]
void
true
3
7.6
apache/kafka
31,560
javadoc
false
from_object
def from_object(self, obj: object | str) -> None:
    """Update the config values from the given object.

    *obj* may be either:

    - a string: the object with that name is imported first, or
    - an actual object reference: used directly.

    Objects are usually modules or classes. Only UPPERCASE attributes of
    the object are loaded — a plain ``dict`` therefore does not work here,
    because dict keys are not attributes of the ``dict`` class.

    Example of module-based configuration::

        app.config.from_object('yourapplication.default_config')

        from yourapplication import default_config
        app.config.from_object(default_config)

    Nothing is done to the object before loading; a class with
    ``@property`` attributes must be instantiated first. Prefer this for
    configuration *defaults* and load the actual configuration with
    :meth:`from_pyfile` from a location outside the package.

    :param obj: an import name or object
    """
    source = import_string(obj) if isinstance(obj, str) else obj
    for attr_name in dir(source):
        if not attr_name.isupper():
            continue
        self[attr_name] = getattr(source, attr_name)
Updates the values from the given object. An object can be of one of the following two types: - a string: in this case the object with that name will be imported - an actual object reference: that object is used directly Objects are usually either modules or classes. :meth:`from_object` loads only the uppercase attributes of the module/class. A ``dict`` object will not work with :meth:`from_object` because the keys of a ``dict`` are not attributes of the ``dict`` class. Example of module-based configuration:: app.config.from_object('yourapplication.default_config') from yourapplication import default_config app.config.from_object(default_config) Nothing is done to the object before loading. If the object is a class and has ``@property`` attributes, it needs to be instantiated before being passed to this method. You should not use this function to load the actual configuration but rather configuration defaults. The actual config should be loaded with :meth:`from_pyfile` and ideally from a location not within the package because the package might be installed system wide. See :ref:`config-dev-prod` for an example of class-based configuration using :meth:`from_object`. :param obj: an import name or object
python
src/flask/config.py
218
[ "self", "obj" ]
None
true
4
6.56
pallets/flask
70,946
sphinx
false
_fit_and_predict
def _fit_and_predict(estimator, X, y, train, test, fit_params, method):
    """Fit *estimator* on one CV split and predict on the held-out split.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape (n_samples, n_features)
        The data to fit.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        The target variable to try to predict in the case of
        supervised learning.

    train : array-like of shape (n_train_samples,)
        Indices of training samples.

    test : array-like of shape (n_test_samples,)
        Indices of test samples.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    method : str
        Invokes the passed method name of the passed estimator.

    Returns
    -------
    predictions : sequence
        Result of calling 'estimator.method'.
    """
    # Restrict any per-sample fit params (e.g. sample weights) to the
    # training indices.
    fit_params = _check_method_params(
        X, params={} if fit_params is None else fit_params, indices=train
    )

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    predictions = getattr(estimator, method)(X_test)

    # Probability-like outputs must be re-ordered to a consistent class
    # ordering across splits.
    needs_encoding = (
        method in ["decision_function", "predict_proba", "predict_log_proba"]
        and y is not None
    )
    if needs_encoding:
        if isinstance(predictions, list):
            predictions = [
                _enforce_prediction_order(
                    estimator.classes_[i_label],
                    predictions[i_label],
                    n_classes=len(set(y[:, i_label])),
                    method=method,
                )
                for i_label in range(len(predictions))
            ]
        else:
            # A 2D y array should be a binary label indicator matrix
            xp, _ = get_namespace(X, y)
            n_classes = (
                len(set(_convert_to_numpy(y, xp=xp))) if y.ndim == 1 else y.shape[1]
            )
            predictions = _enforce_prediction_order(
                estimator.classes_, predictions, n_classes, method
            )
    return predictions
Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape (n_samples, n_features) The data to fit. .. versionchanged:: 0.20 X is only required to be an object with finite length or shape now y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None The target variable to try to predict in the case of supervised learning. train : array-like of shape (n_train_samples,) Indices of training samples. test : array-like of shape (n_test_samples,) Indices of test samples. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. method : str Invokes the passed method name of the passed estimator. Returns ------- predictions : sequence Result of calling 'estimator.method'
python
sklearn/model_selection/_validation.py
1,252
[ "estimator", "X", "y", "train", "test", "fit_params", "method" ]
false
9
6
scikit-learn/scikit-learn
64,340
numpy
false
getLocale
public static Locale getLocale(@Nullable LocaleContext localeContext) { if (localeContext != null) { Locale locale = localeContext.getLocale(); if (locale != null) { return locale; } } return (defaultLocale != null ? defaultLocale : Locale.getDefault()); }
Return the Locale associated with the given user context, if any, or the system default Locale otherwise. This is effectively a replacement for {@link java.util.Locale#getDefault()}, able to optionally respect a user-level Locale setting. @param localeContext the user-level locale context to check @return the current Locale, or the system default Locale if no specific Locale has been associated with the current thread @since 5.0 @see #getLocale() @see LocaleContext#getLocale() @see #setDefaultLocale(Locale) @see java.util.Locale#getDefault()
java
spring-context/src/main/java/org/springframework/context/i18n/LocaleContextHolder.java
220
[ "localeContext" ]
Locale
true
4
7.44
spring-projects/spring-framework
59,386
javadoc
false
add
public <V> Member<V> add(String name, @Nullable V value) { return add(name, (instance) -> value); }
Add a new member with a static value. @param <V> the value type @param name the member name @param value the member value @return the added {@link Member} which may be configured further
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
214
[ "name", "value" ]
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
has_user_subclass
def has_user_subclass(args, allowed_subclasses): """Check if any tensor arguments are user subclasses. This is used to determine if tensor subclasses should get a chance to run their own implementation first before falling back to the default implementation. Args: args: Arguments to check (will be flattened with pytree) allowed_subclasses: Tuple of allowed subclass types Returns: True if user tensor subclasses are found, False otherwise """ flat_args, _ = pytree.tree_flatten(args) val = any( isinstance(a, torch.Tensor) and type(a) is not torch.Tensor and not isinstance(a, allowed_subclasses) for a in flat_args ) return val
Check if any tensor arguments are user subclasses. This is used to determine if tensor subclasses should get a chance to run their own implementation first before falling back to the default implementation. Args: args: Arguments to check (will be flattened with pytree) allowed_subclasses: Tuple of allowed subclass types Returns: True if user tensor subclasses are found, False otherwise
python
torch/_higher_order_ops/utils.py
1,244
[ "args", "allowed_subclasses" ]
false
3
6.96
pytorch/pytorch
96,034
google
false
roundUpSafe
private static long roundUpSafe(long value, long divisor) { if (divisor <= 0) { throw new IllegalArgumentException("divisor must be positive"); } return ((value + divisor - 1) / divisor) * divisor; }
Helper method to round up to the nearest multiple of a given divisor. <p>Equivalent to C++ {@code raft::round_up_safe<uint32_t>(value, divisor)} @param value the value to round up @param divisor the divisor to round to @return the rounded up value
java
libs/gpu-codec/src/main/java/org/elasticsearch/gpu/codec/CuVSIvfPqParamsFactory.java
145
[ "value", "divisor" ]
true
2
8.08
elastic/elasticsearch
75,680
javadoc
false
betweenOrderedExclusive
private boolean betweenOrderedExclusive(final A b, final A c) { return greaterThan(b) && lessThan(c); }
Tests if {@code (b < a < c)} or {@code (b > a > c)} where the {@code a} is object passed to {@link #is}. @param b the object to compare to the base object @param c the object to compare to the base object @return true if the base object is between b and c and not equal to those
java
src/main/java/org/apache/commons/lang3/compare/ComparableUtils.java
73
[ "b", "c" ]
true
2
8.16
apache/commons-lang
2,896
javadoc
false
collect_fw_donated_buffer_idxs
def collect_fw_donated_buffer_idxs( fw_ins: list[Optional[FakeTensor]], user_fw_outs: list[Optional[FakeTensor]], bw_outs: list[Optional[FakeTensor]], saved_tensors: list[FakeTensor], ) -> list[int]: """ Checks if the saved tensors are donated buffers, which means a saved tensor is not an alias of any tensors in fw_ins, user_fw_outs, and bw_outs. """ storage_refs = set() for t in itertools.chain(fw_ins, user_fw_outs, bw_outs): # Only access storage if a tensor has storage (not sparse) if t is not None and isinstance(t, FakeTensor) and not is_sparse_any(t): storage_refs.add(StorageWeakRef(t.untyped_storage())) num_saved_tensor = len(saved_tensors) donated_buffer_idxs = [] for i in range(num_saved_tensor): t = saved_tensors[i] if ( t is not None and not is_sparse_any(t) and StorageWeakRef(t.untyped_storage()) not in storage_refs ): donated_buffer_idxs.append(i) return donated_buffer_idxs
Checks if the saved tensors are donated buffers, which means a saved tensor is not an alias of any tensors in fw_ins, user_fw_outs, and bw_outs.
python
torch/_functorch/_aot_autograd/graph_compile.py
537
[ "fw_ins", "user_fw_outs", "bw_outs", "saved_tensors" ]
list[int]
true
9
6
pytorch/pytorch
96,034
unknown
false
processEntries
private static Properties processEntries(Properties properties) { coercePropertyToEpoch(properties, "commit.time"); coercePropertyToEpoch(properties, "build.time"); Object commitId = properties.get("commit.id"); if (commitId != null) { // Can get converted into a map, so we copy the entry as a nested key properties.put("commit.id.full", commitId); } return properties; }
Return the timestamp of the commit or {@code null}. <p> If the original value could not be parsed properly, it is still available with the {@code commit.time} key. @return the commit time @see #get(String)
java
core/spring-boot/src/main/java/org/springframework/boot/info/GitProperties.java
95
[ "properties" ]
Properties
true
2
7.2
spring-projects/spring-boot
79,428
javadoc
false
read_pickle
def read_pickle( filepath_or_buffer: FilePath | ReadPickleBuffer, compression: CompressionOptions = "infer", storage_options: StorageOptions | None = None, ) -> DataFrame | Series: """ Load pickled pandas object (or any object) from file and return unpickled object. .. warning:: Loading pickled data received from untrusted sources can be unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__. Parameters ---------- filepath_or_buffer : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``readlines()`` function. Also accepts URL. URL is not limited to S3 and GCS. {decompression_options} {storage_options} Returns ------- object The unpickled pandas object (or any object) that was stored in file. See Also -------- DataFrame.to_pickle : Pickle (serialize) DataFrame object to file. Series.to_pickle : Pickle (serialize) Series object to file. read_hdf : Read HDF5 file into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. read_parquet : Load a parquet object, returning a DataFrame. Notes ----- read_pickle is only guaranteed to be backwards compatible to pandas 1.0 provided the object was serialized with to_pickle. Examples -------- >>> original_df = pd.DataFrame( ... {{"foo": range(5), "bar": range(5, 10)}} ... 
) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # TypeError for Cython complaints about object.__new__ vs Tick.__new__ excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError) with get_handle( filepath_or_buffer, "rb", compression=compression, is_text=False, storage_options=storage_options, ) as handles: # 1) try standard library Pickle # 2) try pickle_compat (older pandas version) to handle subclass changes try: with warnings.catch_warnings(record=True): # We want to silence any warnings about, e.g. moved modules. warnings.simplefilter("ignore", Warning) return pickle.load(handles.handle) except excs_to_catch: # e.g. # "No module named 'pandas.core.sparse.series'" # "Can't get attribute '_nat_unpickle' on <module 'pandas._libs.tslib" handles.handle.seek(0) return pickle_compat.Unpickler(handles.handle).load()
Load pickled pandas object (or any object) from file and return unpickled object. .. warning:: Loading pickled data received from untrusted sources can be unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__. Parameters ---------- filepath_or_buffer : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``readlines()`` function. Also accepts URL. URL is not limited to S3 and GCS. {decompression_options} {storage_options} Returns ------- object The unpickled pandas object (or any object) that was stored in file. See Also -------- DataFrame.to_pickle : Pickle (serialize) DataFrame object to file. Series.to_pickle : Pickle (serialize) Series object to file. read_hdf : Read HDF5 file into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. read_parquet : Load a parquet object, returning a DataFrame. Notes ----- read_pickle is only guaranteed to be backwards compatible to pandas 1.0 provided the object was serialized with to_pickle. Examples -------- >>> original_df = pd.DataFrame( ... {{"foo": range(5), "bar": range(5, 10)}} ... ) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9
python
pandas/io/pickle.py
124
[ "filepath_or_buffer", "compression", "storage_options" ]
DataFrame | Series
true
1
7.2
pandas-dev/pandas
47,362
numpy
false
attemptBackendHandshake
function attemptBackendHandshake() { if (!backendInitialized) { // tslint:disable-next-line:no-console console.log('Attempting handshake with backend', new Date()); const retry = () => { if (backendInitialized || backgroundDisconnected) { return; } handshakeWithBackend(); setTimeout(retry, 500); }; retry(); } }
@license Copyright Google LLC All Rights Reserved. Use of this source code is governed by an MIT-style license that can be found in the LICENSE file at https://angular.dev/license
typescript
devtools/projects/shell-browser/src/app/content-script.ts
29
[]
false
4
6.4
angular/angular
99,544
jsdoc
false
connect
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None): """ Connect receiver to sender for signal. Arguments: receiver A function or an instance method which is to receive signals. Receivers must be hashable objects. Receivers can be asynchronous. If weak is True, then receiver must be weak referenceable. Receivers must be able to accept keyword arguments. If a receiver is connected with a dispatch_uid argument, it will not be added if another receiver was already connected with that dispatch_uid. sender The sender to which the receiver should respond. Must either be a Python object, or None to receive events from any sender. weak Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable. """ from django.conf import settings # If DEBUG is on, check that we got a good receiver if settings.configured and settings.DEBUG: if not callable(receiver): raise TypeError("Signal receivers must be callable.") # Check for **kwargs if not func_accepts_kwargs(receiver): raise ValueError( "Signal receivers must accept keyword arguments (**kwargs)." ) if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) is_async = iscoroutinefunction(receiver) if weak: ref = weakref.ref receiver_object = receiver # Check for bound methods if hasattr(receiver, "__self__") and hasattr(receiver, "__func__"): ref = weakref.WeakMethod receiver_object = receiver.__self__ receiver = ref(receiver) weakref.finalize(receiver_object, self._flag_dead_receivers) # Keep a weakref to sender if possible to ensure associated receivers # are cleared if it gets garbage collected. 
This ensures there is no # id(sender) collisions for distinct senders with non-overlapping # lifetimes. sender_ref = None if sender is not None: try: sender_ref = weakref.ref(sender, self._flag_dead_receivers) except TypeError: pass with self.lock: self._clear_dead_receivers() if not any(r_key == lookup_key for r_key, _, _, _ in self.receivers): self.receivers.append((lookup_key, receiver, sender_ref, is_async)) self.sender_receivers_cache.clear()
Connect receiver to sender for signal. Arguments: receiver A function or an instance method which is to receive signals. Receivers must be hashable objects. Receivers can be asynchronous. If weak is True, then receiver must be weak referenceable. Receivers must be able to accept keyword arguments. If a receiver is connected with a dispatch_uid argument, it will not be added if another receiver was already connected with that dispatch_uid. sender The sender to which the receiver should respond. Must either be a Python object, or None to receive events from any sender. weak Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable.
python
django/dispatch/dispatcher.py
81
[ "self", "receiver", "sender", "weak", "dispatch_uid" ]
false
12
6
django/django
86,204
google
false
isAutowireCandidate
protected boolean isAutowireCandidate(String beanName, RootBeanDefinition mbd, DependencyDescriptor descriptor, AutowireCandidateResolver resolver) { String bdName = transformedBeanName(beanName); resolveBeanClass(mbd, bdName); if (mbd.isFactoryMethodUnique && mbd.factoryMethodToIntrospect == null) { new ConstructorResolver(this).resolveFactoryMethodIfPossible(mbd); } BeanDefinitionHolder holder = (beanName.equals(bdName) ? this.mergedBeanDefinitionHolders.computeIfAbsent(beanName, key -> new BeanDefinitionHolder(mbd, beanName, getAliases(bdName))) : new BeanDefinitionHolder(mbd, beanName, getAliases(bdName))); return resolver.isAutowireCandidate(holder, descriptor); }
Determine whether the specified bean definition qualifies as an autowire candidate, to be injected into other beans which declare a dependency of matching type. @param beanName the name of the bean definition to check @param mbd the merged bean definition to check @param descriptor the descriptor of the dependency to resolve @param resolver the AutowireCandidateResolver to use for the actual resolution algorithm @return whether the bean should be considered as autowire candidate
java
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
949
[ "beanName", "mbd", "descriptor", "resolver" ]
true
4
7.6
spring-projects/spring-framework
59,386
javadoc
false
getAliases
async function getAliases(options: ExecOptionsWithStringEncoding, existingCommands?: Set<string>): Promise<ICompletionResource[]> { const output = await execHelper('Get-Command -CommandType Alias | Select-Object Name, CommandType, Definition, DisplayName, ModuleName, @{Name="Version";Expression={$_.Version.ToString()}} | ConvertTo-Json', { ...options, maxBuffer: 1024 * 1024 * 100 // This is a lot of content, increase buffer size }); let json: any; try { json = JSON.parse(output); } catch (e) { console.error('Error parsing output:', e); return []; } if (!Array.isArray(json)) { return []; } return (json as unknown[]) .filter(isPwshGetCommandEntry) .map(e => { // Aliases sometimes use the same Name and DisplayName, show them as methods in this case. const isAlias = e.Name !== e.DisplayName; const detailParts: string[] = []; if (e.Definition) { detailParts.push(e.Definition); } if (e.ModuleName && e.Version) { detailParts.push(`${e.ModuleName} v${e.Version}`); } let definitionCommand = undefined; if (e.Definition) { let definitionIndex = e.Definition.indexOf(' '); if (definitionIndex === -1) { definitionIndex = e.Definition.length; definitionCommand = e.Definition.substring(0, definitionIndex); } } return { label: e.Name, detail: detailParts.join('\n\n'), kind: (isAlias ? vscode.TerminalCompletionItemKind.Alias : vscode.TerminalCompletionItemKind.Method), definitionCommand, }; }); }
The numeric values associated with CommandType from Get-Command. It appears that this is a bitfield based on the values but I think it's actually used as an enum where a CommandType can only be a single one of these. Source: ``` [enum]::GetValues([System.Management.Automation.CommandTypes]) | ForEach-Object { [pscustomobject]@{ Name = $_ Value = [int]$_ } } ```
typescript
extensions/terminal-suggest/src/shell/pwsh.ts
57
[ "options", "existingCommands?" ]
true
9
7.12
microsoft/vscode
179,840
jsdoc
true
matchesActiveProfiles
private boolean matchesActiveProfiles(Predicate<String> activeProfiles) { Assert.state(this.onProfile != null, "'this.onProfile' must not be null"); return org.springframework.core.env.Profiles.of(this.onProfile).matches(activeProfiles); }
Return {@code true} if the properties indicate that the config data property source is active for the given activation context. @param activationContext the activation context @return {@code true} if the config data property source is active
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataProperties.java
141
[ "activeProfiles" ]
true
1
6.32
spring-projects/spring-boot
79,428
javadoc
false
getVirtualData
private CloseableDataBlock getVirtualData() throws IOException { CloseableDataBlock virtualData = (this.virtualData != null) ? this.virtualData.get() : null; if (virtualData != null) { return virtualData; } virtualData = createVirtualData(); this.virtualData = new SoftReference<>(virtualData); return virtualData; }
Open a {@link DataBlock} containing the raw zip data. For container zip files, this may be smaller than the original file since additional bytes are permitted at the front of a zip file. For nested zip files, this will be only the contents of the nest zip. <p> For nested directory zip files, a virtual data block will be created containing only the relevant content. <p> To release resources, the {@link #close()} method of the data block should be called explicitly or by try-with-resources. <p> The returned data block should not be accessed once {@link #close()} has been called. @return the zip data @throws IOException on I/O error
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
146
[]
CloseableDataBlock
true
3
8.08
spring-projects/spring-boot
79,428
javadoc
false
configure
public <T extends ThreadPoolTaskExecutor> T configure(T taskExecutor) { PropertyMapper map = PropertyMapper.get(); map.from(this.queueCapacity).to(taskExecutor::setQueueCapacity); map.from(this.corePoolSize).to(taskExecutor::setCorePoolSize); map.from(this.maxPoolSize).to(taskExecutor::setMaxPoolSize); map.from(this.keepAlive).asInt(Duration::getSeconds).to(taskExecutor::setKeepAliveSeconds); map.from(this.allowCoreThreadTimeOut).to(taskExecutor::setAllowCoreThreadTimeOut); map.from(this.acceptTasksAfterContextClose).to(taskExecutor::setAcceptTasksAfterContextClose); map.from(this.awaitTermination).to(taskExecutor::setWaitForTasksToCompleteOnShutdown); map.from(this.awaitTerminationPeriod).as(Duration::toMillis).to(taskExecutor::setAwaitTerminationMillis); map.from(this.threadNamePrefix).whenHasText().to(taskExecutor::setThreadNamePrefix); map.from(this.taskDecorator).to(taskExecutor::setTaskDecorator); if (!CollectionUtils.isEmpty(this.customizers)) { this.customizers.forEach((customizer) -> customizer.customize(taskExecutor)); } return taskExecutor; }
Configure the provided {@link ThreadPoolTaskExecutor} instance using this builder. @param <T> the type of task executor @param taskExecutor the {@link ThreadPoolTaskExecutor} to configure @return the task executor instance @see #build() @see #build(Class)
java
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskExecutorBuilder.java
326
[ "taskExecutor" ]
T
true
2
7.28
spring-projects/spring-boot
79,428
javadoc
false
strings_with_wrong_placed_whitespace
def strings_with_wrong_placed_whitespace( file_obj: IO[str], ) -> Iterable[tuple[int, str]]: """ Test case for leading spaces in concated strings. For example: >>> rule = "We want the space at the end of the line, not at the beginning" Instead of: >>> rule = "We want the space at the end of the line, not at the beginning" Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of unconcatenated string. msg : str Explanation of the error. """ def has_wrong_whitespace(first_line: str, second_line: str) -> bool: """ Checking if the two lines are mattching the unwanted pattern. Parameters ---------- first_line : str First line to check. second_line : str Second line to check. Returns ------- bool True if the two received string match, an unwanted pattern. Notes ----- The unwanted pattern that we are trying to catch is if the spaces in a string that is concatenated over multiple lines are placed at the end of each string, unless this string is ending with a newline character (\n). For example, this is bad: >>> rule = "We want the space at the end of the line, not at the beginning" And what we want is: >>> rule = "We want the space at the end of the line, not at the beginning" And if the string is ending with a new line character (\n) we do not want any trailing whitespaces after it. For example, this is bad: >>> rule = ( ... "We want the space at the begging of " ... "the line if the previous line is ending with a \n " ... "not at the end, like always" ... ) And what we do want is: >>> rule = ( ... "We want the space at the begging of " ... "the line if the previous line is ending with a \n" ... " not at the end, like always" ... 
) """ if first_line.endswith(r"\n"): return False elif first_line.startswith(" ") or second_line.startswith(" "): return False elif first_line.endswith(" ") or second_line.endswith(" "): return False elif (not first_line.endswith(" ")) and second_line.startswith(" "): return True return False tokens: list = list(tokenize.generate_tokens(file_obj.readline)) for first_token, second_token, third_token in zip( tokens, tokens[1:], tokens[2:], strict=False ): # Checking if we are in a block of concated string if ( first_token.type == third_token.type == token.STRING and second_token.type == token.NL ): # Striping the quotes, with the string literal prefix first_string: str = first_token.string[ _get_literal_string_prefix_len(first_token.string) + 1 : -1 ] second_string: str = third_token.string[ _get_literal_string_prefix_len(third_token.string) + 1 : -1 ] if has_wrong_whitespace(first_string, second_string): yield ( third_token.start[0], ( "String has a space at the beginning instead " "of the end of the previous string." ), )
Test case for leading spaces in concated strings. For example: >>> rule = "We want the space at the end of the line, not at the beginning" Instead of: >>> rule = "We want the space at the end of the line, not at the beginning" Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of unconcatenated string. msg : str Explanation of the error.
python
scripts/validate_unwanted_patterns.py
180
[ "file_obj" ]
Iterable[tuple[int, str]]
true
12
8.56
pandas-dev/pandas
47,362
numpy
false
round
public static Date round(final Object date, final int field) { Objects.requireNonNull(date, "date"); if (date instanceof Date) { return round((Date) date, field); } if (date instanceof Calendar) { return round((Calendar) date, field).getTime(); } throw new ClassCastException("Could not round " + date); }
Rounds a date, leaving the field specified as the most significant field. <p>For example, if you had the date-time of 28 Mar 2002 13:45:01.231, if this was passed with HOUR, it would return 28 Mar 2002 14:00:00.000. If this was passed with MONTH, it would return 1 April 2002 0:00:00.000.</p> <p>For a date in a time zone that handles the change to daylight saving time, rounding to Calendar.HOUR_OF_DAY will behave as follows. Suppose daylight saving time begins at 02:00 on March 30. Rounding a date that crosses this time would produce the following values: </p> <ul> <li>March 30, 2003 01:10 rounds to March 30, 2003 01:00</li> <li>March 30, 2003 01:40 rounds to March 30, 2003 03:00</li> <li>March 30, 2003 02:10 rounds to March 30, 2003 03:00</li> <li>March 30, 2003 02:40 rounds to March 30, 2003 04:00</li> </ul> @param date the date to work with, either {@link Date} or {@link Calendar}, not null. @param field the field from {@link Calendar} or {@code SEMI_MONTH}. @return the different rounded date, not null. @throws NullPointerException if the date is {@code null}. @throws ClassCastException if the object type is not a {@link Date} or {@link Calendar}. @throws ArithmeticException if the year is over 280 million.
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
1,461
[ "date", "field" ]
Date
true
3
7.92
apache/commons-lang
2,896
javadoc
false
inner
def inner(a, b, /): """ inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError If both `a` and `b` are nonscalar and their last dimensions have different sizes. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. vecdot : Vector dot product of two arrays. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-2,j0,...,js-2] = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> import numpy as np >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> c = np.inner(a, b) >>> c.shape (2, 3) >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) >>> a = np.arange(2).reshape((1,1,2)) >>> b = np.arange(6).reshape((3,2)) >>> c = np.inner(a, b) >>> c.shape (1, 1, 3) >>> c array([[[1, 3, 5]]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]]) """ return (a, b)
inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError If both `a` and `b` are nonscalar and their last dimensions have different sizes. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. vecdot : Vector dot product of two arrays. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-2,j0,...,js-2] = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> import numpy as np >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> c = np.inner(a, b) >>> c.shape (2, 3) >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) >>> a = np.arange(2).reshape((1,1,2)) >>> b = np.arange(6).reshape((3,2)) >>> c = np.inner(a, b) >>> c.shape (1, 1, 3) >>> c array([[[1, 3, 5]]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]])
python
numpy/_core/multiarray.py
310
[ "a", "b" ]
false
1
6.4
numpy/numpy
31,054
numpy
false
check_tuning_config
def check_tuning_config(self, tuning_config: dict) -> None: """ Check if a tuning configuration is valid. :param tuning_config: tuning_config """ for channel in tuning_config["TrainingJobDefinition"]["InputDataConfig"]: if "S3DataSource" in channel["DataSource"]: self.check_s3_url(channel["DataSource"]["S3DataSource"]["S3Uri"])
Check if a tuning configuration is valid. :param tuning_config: tuning_config
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
242
[ "self", "tuning_config" ]
None
true
3
6.4
apache/airflow
43,597
sphinx
false
createIncludeFixerReplacements
llvm::Expected<tooling::Replacements> createIncludeFixerReplacements( StringRef Code, const IncludeFixerContext &Context, const clang::format::FormatStyle &Style, bool AddQualifiers) { if (Context.getHeaderInfos().empty()) return tooling::Replacements(); StringRef FilePath = Context.getFilePath(); std::string IncludeName = "#include " + Context.getHeaderInfos().front().Header + "\n"; // Create replacements for the new header. clang::tooling::Replacements Insertions; auto Err = Insertions.add(tooling::Replacement(FilePath, UINT_MAX, 0, IncludeName)); if (Err) return std::move(Err); auto CleanReplaces = cleanupAroundReplacements(Code, Insertions, Style); if (!CleanReplaces) return CleanReplaces; auto Replaces = std::move(*CleanReplaces); if (AddQualifiers) { for (const auto &Info : Context.getQuerySymbolInfos()) { // Ignore the empty range. if (Info.Range.getLength() > 0) { auto R = tooling::Replacement( {FilePath, Info.Range.getOffset(), Info.Range.getLength(), Context.getHeaderInfos().front().QualifiedName}); auto Err = Replaces.add(R); if (Err) { llvm::consumeError(std::move(Err)); R = tooling::Replacement( R.getFilePath(), Replaces.getShiftedCodePosition(R.getOffset()), R.getLength(), R.getReplacementText()); Replaces = Replaces.merge(tooling::Replacements(R)); } } } } return formatReplacements(Code, Replaces, Style); }
Get the include fixer context for the queried symbol.
cpp
clang-tools-extra/clang-include-fixer/IncludeFixer.cpp
405
[ "Code", "AddQualifiers" ]
true
7
6
llvm/llvm-project
36,021
doxygen
false
handleAcknowledgeShareSessionNotFound
void handleAcknowledgeShareSessionNotFound() { Map<TopicIdPartition, Acknowledgements> acknowledgementsMapToClear = incompleteAcknowledgements.isEmpty() ? acknowledgementsToSend : incompleteAcknowledgements; acknowledgementsMapToClear.forEach((tip, acks) -> { if (acks != null) { acks.complete(Errors.SHARE_SESSION_NOT_FOUND.exception()); } // We do not know whether this is a renew ack, but handling the error as if it were, will ensure // that we do not leave dangling acknowledgements resultHandler.complete(tip, acks, requestType, true, Optional.empty()); }); acknowledgementsMapToClear.clear(); processingComplete(); }
Set the error code for all remaining acknowledgements in the event of a share session not found error which prevents the remaining acknowledgements from being sent.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
1,350
[]
void
true
3
7.04
apache/kafka
31,560
javadoc
false
toString
@Override public String toString() { if (this.parentName != null) { return "Generic bean with parent '" + this.parentName + "': " + super.toString(); } return "Generic bean: " + super.toString(); }
Create a new GenericBeanDefinition as deep copy of the given bean definition. @param original the original bean definition to copy from
java
spring-beans/src/main/java/org/springframework/beans/factory/support/GenericBeanDefinition.java
94
[]
String
true
2
6.4
spring-projects/spring-framework
59,386
javadoc
false
addToMap
void addToMap() { if (ancestor != null) { ancestor.addToMap(); } else { map.put(key, delegate); } }
Add the delegate to the map. Other {@code WrappedCollection} methods should call this method after adding elements to a previously empty collection. <p>Subcollection add the ancestor's delegate instead.
java
android/guava/src/com/google/common/collect/AbstractMapBasedMultimap.java
388
[]
void
true
2
6.24
google/guava
51,352
javadoc
false
toDouble
public Double toDouble() { return Double.valueOf(doubleValue()); }
Gets this mutable as an instance of Double. @return a Double instance containing the value from this mutable, never null.
java
src/main/java/org/apache/commons/lang3/mutable/MutableDouble.java
399
[]
Double
true
1
6.8
apache/commons-lang
2,896
javadoc
false
checkOpen
private void checkOpen() throws IOException { if (seq == null) { throw new IOException("reader closed"); } }
Creates a new reader wrapping the given character sequence.
java
android/guava/src/com/google/common/io/CharSequenceReader.java
50
[]
void
true
2
6.88
google/guava
51,352
javadoc
false
extractCause
public static ConcurrentException extractCause(final ExecutionException ex) { if (ex == null || ex.getCause() == null) { return null; } ExceptionUtils.throwUnchecked(ex.getCause()); return new ConcurrentException(ex.getMessage(), ex.getCause()); }
Inspects the cause of the specified {@link ExecutionException} and creates a {@link ConcurrentException} with the checked cause if necessary. This method performs the following checks on the cause of the passed in exception: <ul> <li>If the passed in exception is <strong>null</strong> or the cause is <strong>null</strong>, this method returns <strong>null</strong>.</li> <li>If the cause is a runtime exception, it is directly thrown.</li> <li>If the cause is an error, it is directly thrown, too.</li> <li>In any other case the cause is a checked exception. The method then creates a {@link ConcurrentException}, initializes it with the cause, and returns it.</li> </ul> @param ex the exception to be processed @return a {@link ConcurrentException} with the checked cause
java
src/main/java/org/apache/commons/lang3/concurrent/ConcurrentUtils.java
206
[ "ex" ]
ConcurrentException
true
3
7.6
apache/commons-lang
2,896
javadoc
false
skew
def skew(self, numeric_only: bool = False): """ Calculate the expanding unbiased skewness. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. Returns ------- Series or DataFrame Return type is the same as the original object with ``np.float64`` dtype. See Also -------- scipy.stats.skew : Third moment of a probability density. Series.expanding : Calling expanding with Series data. DataFrame.expanding : Calling expanding with DataFrames. Series.skew : Aggregating skew for Series. DataFrame.skew : Aggregating skew for DataFrame. Notes ----- A minimum of three periods is required for the rolling calculation. Examples -------- >>> ser = pd.Series([-1, 0, 2, -1, 2], index=["a", "b", "c", "d", "e"]) >>> ser.expanding().skew() a NaN b NaN c 0.935220 d 1.414214 e 0.315356 dtype: float64 """ return super().skew(numeric_only=numeric_only)
Calculate the expanding unbiased skewness. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. Returns ------- Series or DataFrame Return type is the same as the original object with ``np.float64`` dtype. See Also -------- scipy.stats.skew : Third moment of a probability density. Series.expanding : Calling expanding with Series data. DataFrame.expanding : Calling expanding with DataFrames. Series.skew : Aggregating skew for Series. DataFrame.skew : Aggregating skew for DataFrame. Notes ----- A minimum of three periods is required for the rolling calculation. Examples -------- >>> ser = pd.Series([-1, 0, 2, -1, 2], index=["a", "b", "c", "d", "e"]) >>> ser.expanding().skew() a NaN b NaN c 0.935220 d 1.414214 e 0.315356 dtype: float64
python
pandas/core/window/expanding.py
910
[ "self", "numeric_only" ]
true
1
7.12
pandas-dev/pandas
47,362
numpy
false
stack
def stack(tensors: Any, new_dim: Any, dim: int = 0) -> _Tensor: """ Stack tensors along a new dimension. Args: tensors: Sequence of tensors to stack new_dim: The new Dim to create for stacking dim: The dimension position to insert the new dimension (default: 0) Returns: Stacked tensor with the new dimension """ if not tensors: raise ValueError("stack expects a non-empty sequence of tensors") # Check if new_dim is a Dim object if not isinstance(new_dim, Dim): # Fall back to regular torch.stack result = torch.stack(tensors, dim=dim) return result # type: ignore[return-value] # Collect all result_levels from input tensors result_levels = [] infos = [] for t in tensors: info = TensorInfo.create(t, ensure_batched=False, ensure_present=False) infos.append(info) for level in info.levels: if level not in result_levels: result_levels.append(level) # Set the new_dim size to match number of tensors new_dim.size = len(tensors) # Match all tensors to the common level structure using _match_levels inputs = [] for info in infos: assert info.tensor is not None, "Cannot stack tensors with None tensor data" matched_tensor = _match_levels(info.tensor, info.levels, result_levels) inputs.append(matched_tensor) # Calculate ndim and resolve the dim parameter ndim = ndim_of_levels(result_levels) rawdim = 0 if dim is not None and not (isinstance(dim, int) and dim == 0): from ._wrap import _wrap_dim d = _wrap_dim(dim, ndim, False) try: idx = result_levels.index(d) except ValueError: raise TypeError(f"Dimension {dim} does not exist in inputs") from None rawdim = idx # Stack tensors at the resolved dimension result = torch.stack(inputs, rawdim) # Insert new dimension entry at the correct position result_levels.insert(rawdim, DimEntry(new_dim)) # Return as a first-class tensor tensor_result = Tensor.from_positional( result, result_levels, infos[0].has_device if infos else True ) return tensor_result # type: ignore[return-value]
Stack tensors along a new dimension. Args: tensors: Sequence of tensors to stack new_dim: The new Dim to create for stacking dim: The dimension position to insert the new dimension (default: 0) Returns: Stacked tensor with the new dimension
python
functorch/dim/__init__.py
1,136
[ "tensors", "new_dim", "dim" ]
_Tensor
true
11
7.84
pytorch/pytorch
96,034
google
false
bakeUrlData
function bakeUrlData(type, e = 0, withBase = false, asUrl = false) { let result = []; if (type === 'wpt') { result = getUrlData(withBase); } else if (urls[type]) { const input = urls[type]; const item = withBase ? [input, 'about:blank'] : input; // Roughly the size of WPT URL test data result = new Array(200).fill(item); } else { throw new Error(`Unknown url data type ${type}`); } if (typeof e !== 'number') { throw new Error(`e must be a number, received ${e}`); } for (let i = 0; i < e; ++i) { result = result.concat(result); } if (asUrl) { if (withBase) { result = result.map(([input, base]) => new URL(input, base)); } else { result = result.map((input) => new URL(input)); } } return result; }
Generate an array of data for URL benchmarks to use. The size of the resulting data set is the original data size * 2 ** `e`. The 'wpt' type contains about 400 data points when `withBase` is true, and 200 data points when `withBase` is false. Other types contain 200 data points with or without base. @param {string} type Type of the data, 'wpt' or a key of `urls` @param {number} e The repetition of the data, as exponent of 2 @param {boolean} withBase Whether to include a base URL @param {boolean} asUrl Whether to return the results as URL objects @returns {string[] | string[][] | URL[]}
javascript
benchmark/common.js
401
[ "type" ]
false
11
6.24
nodejs/node
114,839
jsdoc
false
get_previous_scheduled_dagrun
def get_previous_scheduled_dagrun( dag_run_id: int, session: Session = NEW_SESSION, ) -> DagRun | None: """ Return the previous SCHEDULED DagRun, if there is one. :param dag_run_id: the DAG run ID :param session: SQLAlchemy ORM Session """ dag_run = session.get(DagRun, dag_run_id) if not dag_run or not dag_run.logical_date: return None return session.scalar( select(DagRun) .where( DagRun.dag_id == dag_run.dag_id, DagRun.logical_date < dag_run.logical_date, DagRun.run_type != DagRunType.MANUAL, ) .order_by(DagRun.logical_date.desc()) .limit(1) )
Return the previous SCHEDULED DagRun, if there is one. :param dag_run_id: the DAG run ID :param session: SQLAlchemy ORM Session
python
airflow-core/src/airflow/models/dagrun.py
967
[ "dag_run_id", "session" ]
DagRun | None
true
3
6.72
apache/airflow
43,597
sphinx
false
callWithTimeout
@CanIgnoreReturnValue @ParametricNullness <T extends @Nullable Object> T callWithTimeout( Callable<T> callable, long timeoutDuration, TimeUnit timeoutUnit) throws TimeoutException, InterruptedException, ExecutionException;
Invokes a specified Callable, timing out after the specified time limit. If the target method call finishes before the limit is reached, the return value or a wrapped exception is propagated. If, on the other hand, the time limit is reached, we attempt to abort the call to the target, and throw a {@link TimeoutException} to the caller. @param callable the Callable to execute @param timeoutDuration with timeoutUnit, the maximum length of time to wait @param timeoutUnit with timeoutDuration, the maximum length of time to wait @return the result returned by the Callable @throws TimeoutException if the time limit is reached @throws InterruptedException if the current thread was interrupted during execution @throws ExecutionException if {@code callable} throws a checked exception @throws UncheckedExecutionException if {@code callable} throws a {@code RuntimeException} @throws ExecutionError if {@code callable} throws an {@code Error} @since 22.0
java
android/guava/src/com/google/common/util/concurrent/TimeLimiter.java
100
[ "callable", "timeoutDuration", "timeoutUnit" ]
T
true
1
6.56
google/guava
51,352
javadoc
false
parseListElement
function parseListElement<T extends Node | undefined>(parsingContext: ParsingContext, parseElement: () => T): T { const node = currentNode(parsingContext); if (node) { return consumeNode(node) as T; } return parseElement(); }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
3,116
[ "parsingContext", "parseElement" ]
true
2
6.72
microsoft/TypeScript
107,154
jsdoc
false
maybeReconcile
private void maybeReconcile() { if (targetAssignmentReconciled()) { log.trace("Ignoring reconciliation attempt. Target assignment is equal to the " + "current assignment."); return; } if (reconciliationInProgress) { log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. Assignment {}" + " will be handled in the next reconciliation loop.", targetAssignment); return; } markReconciliationInProgress(); SortedSet<StreamsRebalanceData.TaskId> assignedActiveTasks = toTaskIdSet(targetAssignment.activeTasks); SortedSet<StreamsRebalanceData.TaskId> ownedActiveTasks = toTaskIdSet(currentAssignment.activeTasks); SortedSet<StreamsRebalanceData.TaskId> activeTasksToRevoke = new TreeSet<>(ownedActiveTasks); activeTasksToRevoke.removeAll(assignedActiveTasks); SortedSet<StreamsRebalanceData.TaskId> assignedStandbyTasks = toTaskIdSet(targetAssignment.standbyTasks); SortedSet<StreamsRebalanceData.TaskId> ownedStandbyTasks = toTaskIdSet(currentAssignment.standbyTasks); SortedSet<StreamsRebalanceData.TaskId> assignedWarmupTasks = toTaskIdSet(targetAssignment.warmupTasks); SortedSet<StreamsRebalanceData.TaskId> ownedWarmupTasks = toTaskIdSet(currentAssignment.warmupTasks); boolean isGroupReady = targetAssignment.isGroupReady; log.info("Assigned tasks with local epoch {} and group {}\n" + "\tMember: {}\n" + "\tAssigned active tasks: {}\n" + "\tOwned active tasks: {}\n" + "\tActive tasks to revoke: {}\n" + "\tAssigned standby tasks: {}\n" + "\tOwned standby tasks: {}\n" + "\tAssigned warm-up tasks: {}\n" + "\tOwned warm-up tasks: {}\n", targetAssignment.localEpoch, isGroupReady ? 
"is ready" : "is not ready", memberId, assignedActiveTasks, ownedActiveTasks, activeTasksToRevoke, assignedStandbyTasks, ownedStandbyTasks, assignedWarmupTasks, ownedWarmupTasks ); SortedSet<TopicPartition> ownedTopicPartitionsFromSubscriptionState = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); ownedTopicPartitionsFromSubscriptionState.addAll(subscriptionState.assignedPartitions()); SortedSet<TopicPartition> ownedTopicPartitionsFromAssignedTasks = topicPartitionsForActiveTasks(currentAssignment.activeTasks); if (!ownedTopicPartitionsFromAssignedTasks.equals(ownedTopicPartitionsFromSubscriptionState)) { throw new IllegalStateException("Owned partitions from subscription state and owned partitions from " + "assigned active tasks are not equal. " + "Owned partitions from subscription state: " + ownedTopicPartitionsFromSubscriptionState + ", " + "Owned partitions from assigned active tasks: " + ownedTopicPartitionsFromAssignedTasks); } SortedSet<TopicPartition> assignedTopicPartitions = topicPartitionsForActiveTasks(targetAssignment.activeTasks); SortedSet<TopicPartition> partitionsToRevoke = new TreeSet<>(ownedTopicPartitionsFromSubscriptionState); partitionsToRevoke.removeAll(assignedTopicPartitions); final CompletableFuture<Void> tasksRevoked = revokeActiveTasks(activeTasksToRevoke); final CompletableFuture<Void> tasksRevokedAndAssigned = tasksRevoked.thenCompose(__ -> { if (!maybeAbortReconciliation()) { return assignTasks(assignedActiveTasks, ownedActiveTasks, assignedStandbyTasks, assignedWarmupTasks, isGroupReady); } return CompletableFuture.completedFuture(null); }); // The current target assignment is captured to ensure that acknowledging the current assignment is done with // the same target assignment that was used when this reconciliation was initiated. 
LocalAssignment currentTargetAssignment = targetAssignment; tasksRevokedAndAssigned.whenComplete((__, callbackError) -> { if (callbackError != null) { log.error("Reconciliation failed: callback invocation failed for tasks {}", currentTargetAssignment, callbackError); markReconciliationCompleted(); } else { if (reconciliationInProgress && !maybeAbortReconciliation()) { currentAssignment = currentTargetAssignment; transitionTo(MemberState.ACKNOWLEDGING); markReconciliationCompleted(); } } }); }
Reconcile the assignment that has been received from the server. Reconciliation will trigger the callbacks and update the subscription state. There are two conditions under which no reconciliation will be triggered: - We have already reconciled the assignment (the target assignment is the same as the current assignment). - Another reconciliation is already in progress.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
1,040
[]
void
true
9
6.96
apache/kafka
31,560
javadoc
false
createVirtualData
private CloseableDataBlock createVirtualData() throws IOException { int size = size(); NameOffsetLookups nameOffsetLookups = this.nameOffsetLookups.emptyCopy(); ZipCentralDirectoryFileHeaderRecord[] centralRecords = new ZipCentralDirectoryFileHeaderRecord[size]; long[] centralRecordPositions = new long[size]; for (int i = 0; i < size; i++) { int lookupIndex = ZipContent.this.lookupIndexes[i]; long pos = getCentralDirectoryFileHeaderRecordPos(lookupIndex); nameOffsetLookups.enable(i, this.nameOffsetLookups.isEnabled(lookupIndex)); centralRecords[i] = ZipCentralDirectoryFileHeaderRecord.load(this.data, pos); centralRecordPositions[i] = pos; } return new VirtualZipDataBlock(this.data, nameOffsetLookups, centralRecords, centralRecordPositions); }
Open a {@link DataBlock} containing the raw zip data. For container zip files, this may be smaller than the original file since additional bytes are permitted at the front of a zip file. For nested zip files, this will be only the contents of the nest zip. <p> For nested directory zip files, a virtual data block will be created containing only the relevant content. <p> To release resources, the {@link #close()} method of the data block should be called explicitly or by try-with-resources. <p> The returned data block should not be accessed once {@link #close()} has been called. @return the zip data @throws IOException on I/O error
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
156
[]
CloseableDataBlock
true
2
8.08
spring-projects/spring-boot
79,428
javadoc
false
toPrimitive
public static Object toPrimitive(final Object array) { if (array == null) { return null; } final Class<?> ct = array.getClass().getComponentType(); final Class<?> pt = ClassUtils.wrapperToPrimitive(ct); if (Boolean.TYPE.equals(pt)) { return toPrimitive((Boolean[]) array); } if (Character.TYPE.equals(pt)) { return toPrimitive((Character[]) array); } if (Byte.TYPE.equals(pt)) { return toPrimitive((Byte[]) array); } if (Integer.TYPE.equals(pt)) { return toPrimitive((Integer[]) array); } if (Long.TYPE.equals(pt)) { return toPrimitive((Long[]) array); } if (Short.TYPE.equals(pt)) { return toPrimitive((Short[]) array); } if (Double.TYPE.equals(pt)) { return toPrimitive((Double[]) array); } if (Float.TYPE.equals(pt)) { return toPrimitive((Float[]) array); } return array; }
Create an array of primitive type from an array of wrapper types. <p> This method returns {@code null} for a {@code null} input array. </p> @param array an array of wrapper object. @return an array of the corresponding primitive type, or the original array. @since 3.5
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
9,153
[ "array" ]
Object
true
10
8.24
apache/commons-lang
2,896
javadoc
false
ofEmptyLocation
static ConfigDataEnvironmentContributor ofEmptyLocation(ConfigDataLocation location, boolean profileSpecific, ConversionService conversionService) { return new ConfigDataEnvironmentContributor(Kind.EMPTY_LOCATION, location, null, profileSpecific, null, null, null, EMPTY_LOCATION_OPTIONS, null, conversionService); }
Factory method to create an {@link Kind#EMPTY_LOCATION empty location} contributor. @param location the location of this contributor @param profileSpecific if the contributor is from a profile specific import @param conversionService the conversion service to use @return a new {@link ConfigDataEnvironmentContributor} instance
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
470
[ "location", "profileSpecific", "conversionService" ]
ConfigDataEnvironmentContributor
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
processBytes
@CanIgnoreReturnValue // some uses know that their processor never returns false boolean processBytes(byte[] buf, int off, int len) throws IOException;
This method will be called for each chunk of bytes in an input stream. The implementation should process the bytes from {@code buf[off]} through {@code buf[off + len - 1]} (inclusive). @param buf the byte array containing the data to process @param off the initial offset into the array @param len the length of data to be processed @return true to continue processing, false to stop
java
android/guava/src/com/google/common/io/ByteProcessor.java
46
[ "buf", "off", "len" ]
true
1
6.8
google/guava
51,352
javadoc
false
functionsIn
function functionsIn(object) { return object == null ? [] : baseFunctions(object, keysIn(object)); }
Creates an array of function property names from own and inherited enumerable properties of `object`. @static @memberOf _ @since 4.0.0 @category Object @param {Object} object The object to inspect. @returns {Array} Returns the function names. @see _.functions @example function Foo() { this.a = _.constant('a'); this.b = _.constant('b'); } Foo.prototype.c = _.constant('c'); _.functionsIn(new Foo); // => ['a', 'b', 'c']
javascript
lodash.js
13,204
[ "object" ]
false
2
7.12
lodash/lodash
61,490
jsdoc
false
toString
@Override public String toString() { String remainingMs; if (timer != null) { timer.update(); remainingMs = String.valueOf(timer.remainingMs()); } else { remainingMs = "<not set>"; } return "UnsentRequest{" + "requestBuilder=" + requestBuilder + ", handler=" + handler + ", node=" + node + ", remainingMs=" + remainingMs + '}'; }
Return the time when the request was enqueued to {@link NetworkClientDelegate#unsentRequests}.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegate.java
398
[]
String
true
2
6.08
apache/kafka
31,560
javadoc
false
print
public String print(Duration value) { return longValue(value) + asSuffix(); }
Print the given {@link Duration} as a {@link String}, converting it to a long value using this unit's precision via {@link #longValue(Duration)} and appending this unit's simple {@link #asSuffix() suffix}. @param value the {@code Duration} to convert to a {@code String} @return the {@code String} representation of the {@code Duration} in the {@link Style#SIMPLE SIMPLE} style
java
spring-context/src/main/java/org/springframework/format/annotation/DurationFormat.java
185
[ "value" ]
String
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
betweenOrdered
private boolean betweenOrdered(final A b, final A c) { return greaterThanOrEqualTo(b) && lessThanOrEqualTo(c); }
Tests if {@code (b < a < c)} or {@code (b > a > c)} where the {@code a} is object passed to {@link #is}. @param b the object to compare to the base object @param c the object to compare to the base object @return true if the base object is between b and c and not equal to those
java
src/main/java/org/apache/commons/lang3/compare/ComparableUtils.java
69
[ "b", "c" ]
true
2
8.16
apache/commons-lang
2,896
javadoc
false
tolist
def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to `fill_value`. If `fill_value` is None, the corresponding entries in the output list will be ``None``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] """ _mask = self._mask # No mask ? Just return .data.tolist ? if _mask is nomask: return self._data.tolist() # Explicit fill_value: fill the array and get the list if fill_value is not None: return self.filled(fill_value).tolist() # Structured array. names = self.dtype.names if names: result = self._data.astype([(_, object) for _ in names]) for n in names: result[n][_mask[n]] = None return result.tolist() # Standard arrays. if _mask is nomask: return [None] # Set temps to save time when dealing w/ marrays. inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None result.shape = inishape return result.tolist()
Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to `fill_value`. If `fill_value` is None, the corresponding entries in the output list will be ``None``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]]
python
numpy/ma/core.py
6,299
[ "self", "fill_value" ]
false
6
7.84
numpy/numpy
31,054
numpy
false
flushQuietly
@Beta public static void flushQuietly(Flushable flushable) { try { flush(flushable, true); } catch (IOException e) { logger.log(Level.SEVERE, "IOException should not have been thrown.", e); } }
Equivalent to calling {@code flush(flushable, true)}, but with no {@code IOException} in the signature. @param flushable the {@code Flushable} object to be flushed.
java
android/guava/src/com/google/common/io/Flushables.java
70
[ "flushable" ]
void
true
2
6.72
google/guava
51,352
javadoc
false
addNeighbors
public static final <E extends Collection<? super String>> E addNeighbors(String geohash, E neighbors) { return addNeighborsAtLevel(geohash, geohash.length(), neighbors); }
Add all geohashes of the cells next to a given geohash to a list. @param geohash Geohash of a specified cell @param neighbors list to add the neighbors to @return the given list
java
libs/geo/src/main/java/org/elasticsearch/geometry/utils/Geohash.java
160
[ "geohash", "neighbors" ]
E
true
1
6.64
elastic/elasticsearch
75,680
javadoc
false
invokeOnPartitionsLostCallback
private CompletableFuture<Void> invokeOnPartitionsLostCallback(Set<TopicPartition> partitionsLost) { // This should not trigger the callback if partitionsLost is empty, to keep the current // behaviour. Optional<ConsumerRebalanceListener> listener = subscriptions.rebalanceListener(); if (!partitionsLost.isEmpty() && listener.isPresent()) { return enqueueConsumerRebalanceListenerCallback(ON_PARTITIONS_LOST, partitionsLost); } else { return CompletableFuture.completedFuture(null); } }
@return Server-side assignor implementation configured for the member, that will be sent out to the server to be used. If empty, then the server will select the assignor.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java
374
[ "partitionsLost" ]
true
3
7.04
apache/kafka
31,560
javadoc
false
_forward_logs
async def _forward_logs(self, logs_client, next_token: str | None = None) -> str | None: """ Read logs from the cloudwatch stream and print them to the task logs. :return: the token to pass to the next iteration to resume where we started. """ while True: if next_token is not None: token_arg: dict[str, str] = {"nextToken": next_token} else: token_arg = {} try: response = await logs_client.get_log_events( logGroupName=self.log_group, logStreamName=self.log_stream, startFromHead=True, **token_arg, ) except ClientError as ce: if ce.response["Error"]["Code"] == "ResourceNotFoundException": self.log.info( "Tried to get logs from stream %s in group %s but it didn't exist (yet). " "Will try again.", self.log_stream, self.log_group, ) return None raise events = response["events"] for log_event in events: self.log.info(AwsTaskLogFetcher.event_to_str(log_event)) if len(events) == 0 or next_token == response["nextForwardToken"]: return response["nextForwardToken"] next_token = response["nextForwardToken"]
Read logs from the cloudwatch stream and print them to the task logs. :return: the token to pass to the next iteration to resume where we started.
python
providers/amazon/src/airflow/providers/amazon/aws/triggers/ecs.py
200
[ "self", "logs_client", "next_token" ]
str | None
true
8
6.88
apache/airflow
43,597
unknown
false
channel
public FileChannel channel() { return channel; }
Get the underlying file channel. @return The file channel
java
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
124
[]
FileChannel
true
1
6.96
apache/kafka
31,560
javadoc
false
_valid_locales
def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]: """ Return a list of normalized locales that do not throw an ``Exception`` when set. Parameters ---------- locales : str A string where each locale is separated by a newline. normalize : bool Whether to call ``locale.normalize`` on each locale. Returns ------- valid_locales : list A list of valid locales. """ return [ loc for loc in ( locale.normalize(loc.strip()) if normalize else loc.strip() for loc in locales ) if can_set_locale(loc) ]
Return a list of normalized locales that do not throw an ``Exception`` when set. Parameters ---------- locales : str A string where each locale is separated by a newline. normalize : bool Whether to call ``locale.normalize`` on each locale. Returns ------- valid_locales : list A list of valid locales.
python
pandas/_config/localization.py
88
[ "locales", "normalize" ]
list[str]
true
2
6.88
pandas-dev/pandas
47,362
numpy
false
assignRackAwareRoundRobin
private void assignRackAwareRoundRobin(List<TopicPartition> unassignedPartitions) { if (rackInfo.consumerRacks.isEmpty()) return; int nextUnfilledConsumerIndex = 0; Iterator<TopicPartition> unassignedIter = unassignedPartitions.iterator(); while (unassignedIter.hasNext()) { TopicPartition unassignedPartition = unassignedIter.next(); String consumer = null; int nextIndex = rackInfo.nextRackConsumer(unassignedPartition, unfilledMembersWithUnderMinQuotaPartitions, nextUnfilledConsumerIndex); if (nextIndex >= 0) { consumer = unfilledMembersWithUnderMinQuotaPartitions.get(nextIndex); int assignmentCount = assignment.get(consumer).size() + 1; if (assignmentCount >= minQuota) { unfilledMembersWithUnderMinQuotaPartitions.remove(consumer); // Only add this consumer if the current num members at maxQuota is less than the expected number // since a consumer at minQuota can only be considered unfilled if it's possible to add another partition, // which would bump it to maxQuota and exceed the expectedNumMembersWithOverMinQuotaPartitions if (assignmentCount < maxQuota && (currentNumMembersWithOverMinQuotaPartitions < expectedNumMembersWithOverMinQuotaPartitions)) { unfilledMembersWithExactlyMinQuotaPartitions.add(consumer); } } else { nextIndex++; } nextUnfilledConsumerIndex = unfilledMembersWithUnderMinQuotaPartitions.isEmpty() ? 
0 : nextIndex % unfilledMembersWithUnderMinQuotaPartitions.size(); } else if (!unfilledMembersWithExactlyMinQuotaPartitions.isEmpty()) { int firstIndex = rackInfo.nextRackConsumer(unassignedPartition, unfilledMembersWithExactlyMinQuotaPartitions, 0); if (firstIndex >= 0) { consumer = unfilledMembersWithExactlyMinQuotaPartitions.get(firstIndex); if (assignment.get(consumer).size() + 1 == maxQuota) { unfilledMembersWithExactlyMinQuotaPartitions.remove(firstIndex); currentNumMembersWithOverMinQuotaPartitions++; // Clear this once the current num consumers over minQuota reaches the expected number since this // means all consumers at minQuota are now considered filled if (currentNumMembersWithOverMinQuotaPartitions == expectedNumMembersWithOverMinQuotaPartitions) { unfilledMembersWithExactlyMinQuotaPartitions.clear(); } } } } if (consumer != null) { assignNewPartition(unassignedPartition, consumer); unassignedIter.remove(); } } }
Constructs a constrained assignment builder. @param partitionsPerTopic The partitions for each subscribed topic @param rackInfo Rack information for consumers and racks @param consumerToOwnedPartitions Each consumer's previously owned and still-subscribed partitions @param partitionsWithMultiplePreviousOwners The partitions being claimed in the previous assignment of multiple consumers
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java
734
[ "unassignedPartitions" ]
void
true
13
6.08
apache/kafka
31,560
javadoc
false
ensureCapacity
public static double[] ensureCapacity(double[] array, int minLength, int padding) { checkArgument(minLength >= 0, "Invalid minLength: %s", minLength); checkArgument(padding >= 0, "Invalid padding: %s", padding); return (array.length < minLength) ? Arrays.copyOf(array, minLength + padding) : array; }
Returns an array containing the same values as {@code array}, but guaranteed to be of a specified minimum length. If {@code array} already has a length of at least {@code minLength}, it is returned directly. Otherwise, a new array of size {@code minLength + padding} is returned, containing the values of {@code array}, and zeroes in the remaining places. @param array the source array @param minLength the minimum length the returned array must guarantee @param padding an extra amount to "grow" the array by if growth is necessary @throws IllegalArgumentException if {@code minLength} or {@code padding} is negative @return an array containing the values of {@code array}, with guaranteed minimum length {@code minLength}
java
android/guava/src/com/google/common/primitives/Doubles.java
346
[ "array", "minLength", "padding" ]
true
2
7.92
google/guava
51,352
javadoc
false
getDetailedErrorMessage
protected String getDetailedErrorMessage(Object bean, @Nullable String message) { StringBuilder sb = (StringUtils.hasLength(message) ? new StringBuilder(message).append('\n') : new StringBuilder()); sb.append("HandlerMethod details: \n"); sb.append("Bean [").append(bean.getClass().getName()).append("]\n"); sb.append("Method [").append(this.method.toGenericString()).append("]\n"); return sb.toString(); }
Add additional details such as the bean type and method signature to the given error message. @param message error message to append the HandlerMethod details to
java
spring-context/src/main/java/org/springframework/context/event/ApplicationListenerMethodAdapter.java
428
[ "bean", "message" ]
String
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
reorder_post_acc_grad_hook_nodes
def reorder_post_acc_grad_hook_nodes(self) -> None: """ Usage of AOTAutograd causes all the post_acc_grad_hook nodes to get pushed to the end of the graph. This differs from eager mode, which schedules them as soon as possible. This pass attempts to reorder the graph to mimic eager behavior. """ post_acc_grad_hooks = [] for node in self.fx_tracer.graph.find_nodes( op="call_function", target=call_hook ): if node.kwargs.get("hook_type", None) != "post_acc_grad_hook": continue post_acc_grad_hooks.append(node) # nodes in post_acc_grad_hooks are in topo order. For hooks registered # to same node, we should keep their relative order for node in reversed(post_acc_grad_hooks): getitem_node = node.args[0] param_node = node.args[1] # post_acc_grad_hook handle one param # find the corresponding acc_grad node acc_grad_node = None for n in list(param_node.users.keys()): if n.op == "call_function" and n.target is call_accumulate_grad: acc_grad_node = n break assert acc_grad_node is not None, ( "post_acc_grad_hook must have corresponding acc grad node" ) # append post_acc_grad_hook after acc_grad node acc_grad_node.append(getitem_node) getitem_node.append(node)
Usage of AOTAutograd causes all the post_acc_grad_hook nodes to get pushed to the end of the graph. This differs from eager mode, which schedules them as soon as possible. This pass attempts to reorder the graph to mimic eager behavior.
python
torch/_dynamo/compiled_autograd.py
1,293
[ "self" ]
None
true
7
6
pytorch/pytorch
96,034
unknown
false
resetCaches
@Override public void resetCaches() { this.cacheMap.values().forEach(Cache::clear); if (this.dynamic) { this.cacheMap.keySet().retainAll(this.customCacheNames); } }
Reset this cache manager's caches, removing them completely for on-demand re-creation in 'dynamic' mode, or simply clearing their entries otherwise. @since 6.2.14
java
spring-context-support/src/main/java/org/springframework/cache/caffeine/CaffeineCacheManager.java
271
[]
void
true
2
6.56
spring-projects/spring-framework
59,386
javadoc
false
defaults
public static ErrorAttributeOptions defaults() { return of(Include.PATH, Include.STATUS, Include.ERROR); }
Create an {@code ErrorAttributeOptions} with defaults. @return an {@code ErrorAttributeOptions}
java
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java
106
[]
ErrorAttributeOptions
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
is_nested_list_like
def is_nested_list_like(obj: object) -> bool: """ Check if the object is list-like, and that all of its elements are also list-like. Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like """ return ( is_list_like(obj) and hasattr(obj, "__len__") # need PEP 724 to handle these typing errors and len(obj) > 0 # pyright: ignore[reportArgumentType] and all(is_list_like(item) for item in obj) # type: ignore[attr-defined] )
Check if the object is list-like, and that all of its elements are also list-like. Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like
python
pandas/core/dtypes/inference.py
259
[ "obj" ]
bool
true
4
8.16
pandas-dev/pandas
47,362
numpy
false
stream
public Stream<T> stream() { return this.services.stream(); }
Return a {@link Stream} of the AOT services. @return a stream of the services
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotServices.java
153
[]
true
1
6.96
spring-projects/spring-framework
59,386
javadoc
false
_stripBasePath
function _stripBasePath(basePath: string, url: string): string { if (!basePath || !url.startsWith(basePath)) { return url; } const strippedUrl = url.substring(basePath.length); if (strippedUrl === '' || ['/', ';', '?', '#'].includes(strippedUrl[0])) { return strippedUrl; } return url; }
@description A service that applications can use to interact with a browser's URL. Depending on the `LocationStrategy` used, `Location` persists to the URL's path or the URL's hash segment. @usageNotes It's better to use the `Router.navigate()` service to trigger route changes. Use `Location` only if you need to interact with or create normalized URLs outside of routing. `Location` is responsible for normalizing the URL against the application's base href. A normalized URL is absolute from the URL host, includes the application's base href, and has no trailing slash: - `/my/app/user/123` is normalized - `my/app/user/123` **is not** normalized - `/my/app/user/123/` **is not** normalized ### Example {@example common/location/ts/path_location_component.ts region='LocationComponent'} @publicApi
typescript
packages/common/src/location/location.ts
310
[ "basePath", "url" ]
true
5
6.64
angular/angular
99,544
jsdoc
false
ofContextClass
static ApplicationContextFactory ofContextClass(Class<? extends ConfigurableApplicationContext> contextClass) { return of(() -> BeanUtils.instantiateClass(contextClass)); }
Creates an {@code ApplicationContextFactory} that will create contexts by instantiating the given {@code contextClass} through its primary constructor. @param contextClass the context class @return the factory that will instantiate the context class @see BeanUtils#instantiateClass(Class)
java
core/spring-boot/src/main/java/org/springframework/boot/ApplicationContextFactory.java
88
[ "contextClass" ]
ApplicationContextFactory
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
customizers
public ThreadPoolTaskExecutorBuilder customizers(Iterable<? extends ThreadPoolTaskExecutorCustomizer> customizers) { Assert.notNull(customizers, "'customizers' must not be null"); return new ThreadPoolTaskExecutorBuilder(this.queueCapacity, this.corePoolSize, this.maxPoolSize, this.allowCoreThreadTimeOut, this.keepAlive, this.acceptTasksAfterContextClose, this.awaitTermination, this.awaitTerminationPeriod, this.threadNamePrefix, this.taskDecorator, append(null, customizers)); }
Set the {@link ThreadPoolTaskExecutorCustomizer ThreadPoolTaskExecutorCustomizers} that should be applied to the {@link ThreadPoolTaskExecutor}. Customizers are applied in the order that they were added after builder configuration has been applied. Setting this value will replace any previously configured customizers. @param customizers the customizers to set @return a new builder instance @see #additionalCustomizers(ThreadPoolTaskExecutorCustomizer...)
java
core/spring-boot/src/main/java/org/springframework/boot/task/ThreadPoolTaskExecutorBuilder.java
257
[ "customizers" ]
ThreadPoolTaskExecutorBuilder
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
_needs_i8_conversion
def _needs_i8_conversion(self, key) -> bool: """ Check if a given key needs i8 conversion. Conversion is necessary for Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An Interval-like requires conversion if its endpoints are one of the aforementioned types. Assumes that any list-like data has already been cast to an Index. Parameters ---------- key : scalar or Index-like The key that should be checked for i8 conversion Returns ------- bool """ key_dtype = getattr(key, "dtype", None) if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): return self._needs_i8_conversion(key.left) i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex) return isinstance(key, i8_types)
Check if a given key needs i8 conversion. Conversion is necessary for Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An Interval-like requires conversion if its endpoints are one of the aforementioned types. Assumes that any list-like data has already been cast to an Index. Parameters ---------- key : scalar or Index-like The key that should be checked for i8 conversion Returns ------- bool
python
pandas/core/indexes/interval.py
625
[ "self", "key" ]
bool
true
3
6.72
pandas-dev/pandas
47,362
numpy
false
visitConstructor
function visitConstructor(node: ConstructorDeclaration) { if (!shouldEmitFunctionLikeDeclaration(node)) { return undefined; } return factory.updateConstructorDeclaration( node, /*modifiers*/ undefined, visitParameterList(node.parameters, visitor, context), transformConstructorBody(node.body, node), ); }
Determines whether to emit a function-like declaration. We should not emit the declaration if it does not have a body. @param node The declaration node.
typescript
src/compiler/transformers/ts.ts
1,332
[ "node" ]
false
2
6.08
microsoft/TypeScript
107,154
jsdoc
false
update_job
def update_job(self, **job_kwargs) -> bool: """ Update job configurations. .. seealso:: - :external+boto3:py:meth:`Glue.Client.update_job` :param job_kwargs: Keyword args that define the configurations used for the job :return: True if job was updated and false otherwise """ job_name = job_kwargs.pop("Name") current_job = self.conn.get_job(JobName=job_name)["Job"] update_config = { key: value for key, value in job_kwargs.items() if current_job.get(key) != job_kwargs[key] } if update_config != {}: self.log.info("Updating job: %s", job_name) self.conn.update_job(JobName=job_name, JobUpdate=job_kwargs) self.log.info("Updated configurations: %s", update_config) return True return False
Update job configurations. .. seealso:: - :external+boto3:py:meth:`Glue.Client.update_job` :param job_kwargs: Keyword args that define the configurations used for the job :return: True if job was updated and false otherwise
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue.py
463
[ "self" ]
bool
true
2
7.44
apache/airflow
43,597
sphinx
false
processBackgroundEvents
<T> T processBackgroundEvents(final Future<T> future, final Timer timer, final Predicate<Exception> ignoreErrorEventException) { log.trace("Will wait up to {} ms for future {} to complete", timer.remainingMs(), future); do { boolean hadEvents = false; try { hadEvents = processBackgroundEvents(); } catch (Exception e) { if (!ignoreErrorEventException.test(e)) throw e; } try { if (future.isDone()) { // If the event is done (either successfully or otherwise), go ahead and attempt to return // without waiting. We use the ConsumerUtils.getResult() method here to handle the conversion // of the exception types. T result = ConsumerUtils.getResult(future); log.trace("Future {} completed successfully", future); return result; } else if (!hadEvents) { // If the above processing yielded no events, then let's sit tight for a bit to allow the // background thread to either finish the task, or populate the background event // queue with things to process in our next loop. Timer pollInterval = time.timer(100L); log.trace("Waiting {} ms for future {} to complete", pollInterval.remainingMs(), future); T result = ConsumerUtils.getResult(future, pollInterval); log.trace("Future {} completed successfully", future); return result; } } catch (TimeoutException e) { // Ignore this as we will retry the event until the timeout expires. } finally { timer.update(); } } while (timer.notExpired()); log.trace("Future {} did not complete within timeout", future); throw new TimeoutException("Operation timed out before completion"); }
This method can be used by cases where the caller has an event that needs to both block for completion but also process background events. For some events, in order to fully process the associated logic, the {@link ConsumerNetworkThread background thread} needs assistance from the application thread to complete. If the application thread simply blocked on the event after submitting it, the processing would deadlock. The logic herein is basically a loop that performs two tasks in each iteration: <ol> <li>Process background events, if any</li> <li><em>Briefly</em> wait for {@link CompletableApplicationEvent an event} to complete</li> </ol> <p/> Each iteration gives the application thread an opportunity to process background events, which may be necessary to complete the overall processing. @param future Event that contains a {@link CompletableFuture}; it is on this future that the application thread will wait for completion @param timer Overall timer that bounds how long to wait for the event to complete @param ignoreErrorEventException Predicate to ignore background errors. Any exceptions found while processing background events that match the predicate won't be propagated. @return {@code true} if the event completed within the timeout, {@code false} otherwise
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
1,285
[ "future", "timer", "ignoreErrorEventException" ]
T
true
6
7.92
apache/kafka
31,560
javadoc
false
_tensor_description_to_json
def _tensor_description_to_json( cls, tensor_desc: Optional["TensorDescription"], # type: ignore[name-defined] # noqa: F821 ) -> Optional[str]: """Convert TensorDescription to JSON string. Args: tensor_desc: TensorDescription object Returns: Optional[str]: JSON string representation or None """ if tensor_desc is None: return None result = { "element": cls._enum_to_json(tensor_desc.element), "layout": cls._enum_to_json(tensor_desc.layout), "alignment": tensor_desc.alignment, "complex_transform": cls._enum_to_json(tensor_desc.complex_transform), } return json.dumps(result)
Convert TensorDescription to JSON string. Args: tensor_desc: TensorDescription object Returns: Optional[str]: JSON string representation or None
python
torch/_inductor/codegen/cuda/serialization.py
399
[ "cls", "tensor_desc" ]
Optional[str]
true
2
7.28
pytorch/pytorch
96,034
google
false
getEnvAsRecord
function getEnvAsRecord(shellIntegrationEnv: ITerminalEnvironment): Record<string, string> { const env: Record<string, string> = {}; for (const [key, value] of Object.entries(shellIntegrationEnv ?? process.env)) { if (typeof value === 'string') { env[key] = value; } } if (!shellIntegrationEnv) { sanitizeProcessEnvironment(env); } return env; }
Adjusts the current working directory based on a given current command string if it is a folder. @param currentCommandString - The current command string, which might contain a folder path prefix. @param currentCwd - The current working directory. @returns The new working directory.
typescript
extensions/terminal-suggest/src/terminalSuggestMain.ts
569
[ "shellIntegrationEnv" ]
true
3
7.92
microsoft/vscode
179,840
jsdoc
false
sort_complex
def sort_complex(a): """ Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> import numpy as np >>> np.sort_complex([5, 3, 6, 2, 1]) array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) b.sort() if not issubclass(b.dtype.type, _nx.complexfloating): if b.dtype.char in 'bhBH': return b.astype('F') elif b.dtype.char == 'g': return b.astype('G') else: return b.astype('D') else: return b
Sort a complex array using the real part first, then the imaginary part. Parameters ---------- a : array_like Input array Returns ------- out : complex ndarray Always returns a sorted complex array. Examples -------- >>> import numpy as np >>> np.sort_complex([5, 3, 6, 2, 1]) array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
python
numpy/lib/_function_base_impl.py
1,860
[ "a" ]
false
6
7.68
numpy/numpy
31,054
numpy
false
_split
def _split(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the string, using `sep` as the delimiter string. Calls :meth:`str.split` element-wise. Parameters ---------- a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sep : str or unicode, optional If `sep` is not specified or None, any whitespace string is a separator. maxsplit : int, optional If `maxsplit` is given, at most `maxsplit` splits are done. Returns ------- out : ndarray Array of list objects Examples -------- >>> import numpy as np >>> x = np.array("Numpy is nice!") >>> np.strings.split(x, " ") # doctest: +SKIP array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP >>> np.strings.split(x, " ", 1) # doctest: +SKIP array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- str.split, rsplit """ # This will return an array of lists of different sizes, so we # leave it as an object array return _vec_string( a, np.object_, 'split', [sep] + _clean_args(maxsplit))
For each element in `a`, return a list of the words in the string, using `sep` as the delimiter string. Calls :meth:`str.split` element-wise. Parameters ---------- a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sep : str or unicode, optional If `sep` is not specified or None, any whitespace string is a separator. maxsplit : int, optional If `maxsplit` is given, at most `maxsplit` splits are done. Returns ------- out : ndarray Array of list objects Examples -------- >>> import numpy as np >>> x = np.array("Numpy is nice!") >>> np.strings.split(x, " ") # doctest: +SKIP array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP >>> np.strings.split(x, " ", 1) # doctest: +SKIP array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- str.split, rsplit
python
numpy/_core/strings.py
1,400
[ "a", "sep", "maxsplit" ]
false
1
6.64
numpy/numpy
31,054
numpy
false
startObject
public XContentBuilder startObject() throws IOException { generator.writeStartObject(); return this; }
@return the value of the "human readable" flag. When the value is equal to true, some types of values are written in a format easier to read for a human.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
337
[]
XContentBuilder
true
1
6.96
elastic/elasticsearch
75,680
javadoc
false
requestOffsetResetIfPartitionAssigned
public synchronized void requestOffsetResetIfPartitionAssigned(TopicPartition partition) { final TopicPartitionState state = assignedStateOrNull(partition); if (state != null) { state.reset(defaultResetStrategy); } }
Unset the preferred read replica. This causes the fetcher to go back to the leader for fetches. @param tp The topic partition @return the removed preferred read replica if set, Empty otherwise.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
800
[ "partition" ]
void
true
2
8.24
apache/kafka
31,560
javadoc
false
middleware
def middleware( self, middleware_type: Annotated[ str, Doc( """ The type of middleware. Currently only supports `http`. """ ), ], ) -> Callable[[DecoratedCallable], DecoratedCallable]: """ Add a middleware to the application. Read more about it in the [FastAPI docs for Middleware](https://fastapi.tiangolo.com/tutorial/middleware/). ## Example ```python import time from typing import Awaitable, Callable from fastapi import FastAPI, Request, Response app = FastAPI() @app.middleware("http") async def add_process_time_header( request: Request, call_next: Callable[[Request], Awaitable[Response]] ) -> Response: start_time = time.time() response = await call_next(request) process_time = time.time() - start_time response.headers["X-Process-Time"] = str(process_time) return response ``` """ def decorator(func: DecoratedCallable) -> DecoratedCallable: self.add_middleware(BaseHTTPMiddleware, dispatch=func) return func return decorator
Add a middleware to the application. Read more about it in the [FastAPI docs for Middleware](https://fastapi.tiangolo.com/tutorial/middleware/). ## Example ```python import time from typing import Awaitable, Callable from fastapi import FastAPI, Request, Response app = FastAPI() @app.middleware("http") async def add_process_time_header( request: Request, call_next: Callable[[Request], Awaitable[Response]] ) -> Response: start_time = time.time() response = await call_next(request) process_time = time.time() - start_time response.headers["X-Process-Time"] = str(process_time) return response ```
python
fastapi/applications.py
4,578
[ "self", "middleware_type" ]
Callable[[DecoratedCallable], DecoratedCallable]
true
1
6.8
tiangolo/fastapi
93,264
unknown
false
hasExpired
private boolean hasExpired() { if (this.neverExpire) { return false; } Duration timeToLive = this.timeToLive; Instant lastAccessed = this.lastAccessed; if (timeToLive == null || lastAccessed == null) { return true; } return !UNLIMITED.equals(timeToLive) && now().isAfter(lastAccessed.plus(timeToLive)); }
Get a value from the cache, creating it if necessary. @param factory a factory used to create the item if there is no reference to it. @param refreshAction action called to refresh the value if it has expired @return the value from the cache
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/SoftReferenceConfigurationPropertyCache.java
115
[]
true
5
7.92
spring-projects/spring-boot
79,428
javadoc
false
writePair
private <N, V> void writePair(N name, @Nullable V value) { this.path = this.path.child(name.toString()); if (!isFilteredPath()) { String processedName = processName(name.toString()); ActiveSeries activeSeries = this.activeSeries.peek(); Assert.state(activeSeries != null, "No series has been started"); activeSeries.incrementIndexAndAddCommaIfRequired(); Assert.state(activeSeries.addName(processedName), () -> "The name '" + processedName + "' has already been written"); writeString(processedName); append(":"); write(value); } this.path = (this.path.parent() != null) ? this.path.parent() : MemberPath.ROOT; }
Write the specified pairs to an already started {@link Series#OBJECT object series}. @param <N> the name type in the pair @param <V> the value type in the pair @param pairs a callback that will be used to provide each pair. Typically a {@code forEach} method reference. @see #writePairs(Consumer)
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonValueWriter.java
247
[ "name", "value" ]
void
true
3
6.88
spring-projects/spring-boot
79,428
javadoc
false
processEntry
async function processEntry( entryPath: string, entryType: FsEntryType | undefined, filesResolver: FilesResolver, ): Promise<LoadedFile[]> { if (!entryType) { return [] } if (entryType.kind === 'symlink') { const realPath = entryType.realPath const realType = await filesResolver.getEntryType(realPath) return processEntry(realPath, realType, filesResolver) } if (entryType.kind === 'file') { if (path.extname(entryPath) !== '.prisma') { return [] } const content = await filesResolver.getFileContents(entryPath) if (typeof content === 'undefined') { return [] } return [[entryPath, content]] } if (entryType.kind === 'directory') { const dirEntries = await filesResolver.listDirContents(entryPath) const nested = await Promise.all( dirEntries.map(async (dirEntry) => { const fullPath = path.join(entryPath, dirEntry) const nestedEntryType = await filesResolver.getEntryType(fullPath) return processEntry(fullPath, nestedEntryType, filesResolver) }), ) return nested.flat() } return [] }
Given folder name, returns list of all files composing a single prisma schema @param folderPath
typescript
packages/schema-files-loader/src/loadSchemaFiles.ts
19
[ "entryPath", "entryType", "filesResolver" ]
true
7
6.08
prisma/prisma
44,834
jsdoc
true
parseTypeOrTypePredicate
function parseTypeOrTypePredicate(): TypeNode { const pos = getNodePos(); const typePredicateVariable = isIdentifier() && tryParse(parseTypePredicatePrefix); const type = parseType(); if (typePredicateVariable) { return finishNode(factory.createTypePredicateNode(/*assertsModifier*/ undefined, typePredicateVariable, type), pos); } else { return type; } }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
4,912
[]
true
4
6.72
microsoft/TypeScript
107,154
jsdoc
false
build_submit_sql_data
def build_submit_sql_data( sql: str | None = None, conf: dict[Any, Any] | None = None, driver_resource_spec: str | None = None, executor_resource_spec: str | None = None, num_executors: int | str | None = None, name: str | None = None, ) -> str: """ Build the submit spark sql request data. :param sql: The SQL query to execute. (templated) :param conf: Spark configuration properties. :param driver_resource_spec: The resource specifications of the Spark driver. :param executor_resource_spec: The resource specifications of each Spark executor. :param num_executors: number of executors to launch for this application. :param name: name of this application. """ if sql is None: raise ValueError("Parameter sql is need when submit spark sql.") extra_conf: dict[str, str] = {} formatted_conf = "" if driver_resource_spec: extra_conf["spark.driver.resourceSpec"] = driver_resource_spec if executor_resource_spec: extra_conf["spark.executor.resourceSpec"] = executor_resource_spec if num_executors: extra_conf["spark.executor.instances"] = str(num_executors) if name: extra_conf["spark.app.name"] = name if conf and AnalyticDBSparkHook._validate_extra_conf(conf): extra_conf.update(conf) for key, value in extra_conf.items(): formatted_conf += f"set {key} = {value};" return (formatted_conf + sql).strip()
Build the submit spark sql request data. :param sql: The SQL query to execute. (templated) :param conf: Spark configuration properties. :param driver_resource_spec: The resource specifications of the Spark driver. :param executor_resource_spec: The resource specifications of each Spark executor. :param num_executors: number of executors to launch for this application. :param name: name of this application.
python
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/analyticdb_spark.py
260
[ "sql", "conf", "driver_resource_spec", "executor_resource_spec", "num_executors", "name" ]
str
true
9
6.72
apache/airflow
43,597
sphinx
false
extract_array
def extract_array( obj: T, extract_numpy: bool = False, extract_range: bool = False ) -> T | ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a NumpyExtensionArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(["a", "b", "c"], dtype="category")) ['a', 'b', 'c'] Categories (3, str): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ typ = getattr(obj, "_typ", None) if typ in _typs: # i.e. isinstance(obj, (ABCIndex, ABCSeries)) if typ == "rangeindex": if extract_range: # error: "T" has no attribute "_values" return obj._values # type: ignore[attr-defined] return obj # error: "T" has no attribute "_values" return obj._values # type: ignore[attr-defined] elif extract_numpy and typ == "npy_extension": # i.e. isinstance(obj, ABCNumpyExtensionArray) # error: "T" has no attribute "to_numpy" return obj.to_numpy() # type: ignore[attr-defined] return obj
Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a NumpyExtensionArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(["a", "b", "c"], dtype="category")) ['a', 'b', 'c'] Categories (3, str): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3])
python
pandas/core/construction.py
430
[ "obj", "extract_numpy", "extract_range" ]
T | ArrayLike
true
6
8.48
pandas-dev/pandas
47,362
numpy
false
appendNewBatch
private RecordAppendResult appendNewBatch(String topic, int partition, Deque<ProducerBatch> dq, long timestamp, byte[] key, byte[] value, Header[] headers, AppendCallbacks callbacks, ByteBuffer buffer, long nowMs) { assert partition != RecordMetadata.UNKNOWN_PARTITION; RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callbacks, dq, nowMs); if (appendResult != null) { // Somebody else found us a batch, return the one we waited for! Hopefully this doesn't happen often... return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer); ProducerBatch batch = new ProducerBatch(new TopicPartition(topic, partition), recordsBuilder, nowMs); FutureRecordMetadata future = Objects.requireNonNull(batch.tryAppend(timestamp, key, value, headers, callbacks, nowMs)); dq.addLast(batch); incomplete.add(batch); return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true, batch.estimatedSizeInBytes()); }
Append a new batch to the queue @param topic The topic @param partition The partition (cannot be RecordMetadata.UNKNOWN_PARTITION) @param dq The queue @param timestamp The timestamp of the record @param key The key for the record @param value The value for the record @param headers the Headers for the record @param callbacks The callbacks to execute @param buffer The buffer for the new batch @param nowMs The current time, in milliseconds
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
375
[ "topic", "partition", "dq", "timestamp", "key", "value", "headers", "callbacks", "buffer", "nowMs" ]
RecordAppendResult
true
3
6.4
apache/kafka
31,560
javadoc
false
kill_worker
def kill_worker( self, worker: CeleryTestWorker, method: WorkerKill.Method, ) -> None: """Kill a Celery worker. Args: worker (CeleryTestWorker): Worker to kill. method (WorkerKill.Method): The method to kill the worker. """ if method == WorkerKill.Method.DOCKER_KILL: worker.kill() assert worker.container.status == "exited", ( f"Worker container should be in 'exited' state after kill, " f"but is in '{worker.container.status}' state instead." ) if method == WorkerKill.Method.CONTROL_SHUTDOWN: control: Control = worker.app.control control.shutdown(destination=[worker.hostname()]) worker.container.reload() if method == WorkerKill.Method.SIGTERM: worker.kill(signal="SIGTERM") if method == WorkerKill.Method.SIGQUIT: worker.kill(signal="SIGQUIT")
Kill a Celery worker. Args: worker (CeleryTestWorker): Worker to kill. method (WorkerKill.Method): The method to kill the worker.
python
t/smoke/operations/worker_kill.py
19
[ "self", "worker", "method" ]
None
true
5
6.4
celery/celery
27,741
google
false
remove_unused_levels
def remove_unused_levels(self) -> MultiIndex: """ Create new MultiIndex from current that removes unused levels. Unused level(s) means levels that are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. The `remove_unused_levels` method is useful in cases where you have a MultiIndex with hierarchical levels, but some of these levels are no longer needed due to filtering or subsetting operations. By removing the unused levels, the resulting MultiIndex becomes more compact and efficient, which can improve performance in subsequent operations. Returns ------- MultiIndex A new MultiIndex with unused levels removed. See Also -------- MultiIndex.droplevel : Remove specified levels from a MultiIndex. MultiIndex.reorder_levels : Rearrange levels of a MultiIndex. MultiIndex.set_levels : Set new levels on a MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_product([range(2), list("ab")]) >>> mi MultiIndex([(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')], ) >>> mi[2:] MultiIndex([(1, 'a'), (1, 'b')], ) The 0 from the first level is not represented and can be removed >>> mi2 = mi[2:].remove_unused_levels() >>> mi2.levels FrozenList([[1], ['a', 'b']]) """ new_levels = [] new_codes = [] changed = False for lev, level_codes in zip(self.levels, self.codes, strict=True): # Since few levels are typically unused, bincount() is more # efficient than unique() - however it only accepts positive values # (and drops order): uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 has_na = int(len(uniques) and (uniques[0] == -1)) if len(uniques) != len(lev) + has_na: if lev.isna().any() and len(uniques) == len(lev): break # We have unused levels changed = True # Recalculate uniques, now preserving order. 
# Can easily be cythonized by exploiting the already existing # "uniques" and stop parsing "level_codes" when all items # are found: uniques = algos.unique(level_codes) if has_na: na_idx = np.where(uniques == -1)[0] # Just ensure that -1 is in first position: uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] # codes get mapped from uniques to 0:len(uniques) # -1 (if present) is mapped to last position code_mapping = np.zeros(len(lev) + has_na) # ... and reassigned value -1: code_mapping[uniques] = np.arange(len(uniques)) - has_na level_codes = code_mapping[level_codes] # new levels are simple lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_codes.append(level_codes) result = self.view() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_codes(new_codes, validate=False) return result
Create new MultiIndex from current that removes unused levels. Unused level(s) means levels that are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. The `remove_unused_levels` method is useful in cases where you have a MultiIndex with hierarchical levels, but some of these levels are no longer needed due to filtering or subsetting operations. By removing the unused levels, the resulting MultiIndex becomes more compact and efficient, which can improve performance in subsequent operations. Returns ------- MultiIndex A new MultiIndex with unused levels removed. See Also -------- MultiIndex.droplevel : Remove specified levels from a MultiIndex. MultiIndex.reorder_levels : Rearrange levels of a MultiIndex. MultiIndex.set_levels : Set new levels on a MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_product([range(2), list("ab")]) >>> mi MultiIndex([(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')], ) >>> mi[2:] MultiIndex([(1, 'a'), (1, 'b')], ) The 0 from the first level is not represented and can be removed >>> mi2 = mi[2:].remove_unused_levels() >>> mi2.levels FrozenList([[1], ['a', 'b']])
python
pandas/core/indexes/multi.py
2,123
[ "self" ]
MultiIndex
true
8
7.2
pandas-dev/pandas
47,362
unknown
false