function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
hashCodeMulti
|
/**
 * Gets the hash code for multiple objects, equivalent to the hash code an
 * {@link java.util.ArrayList} of the same elements would produce.
 *
 * @param objects the objects to obtain the hash code of, may be {@code null}.
 * @return the combined hash code; 1 when the array is {@code null} or empty.
 * @deprecated use {@code java.util.Objects.hash(Object...)} instead.
 */
@Deprecated
public static int hashCodeMulti(final Object... objects) {
    int result = 1;
    if (objects == null) {
        return result;
    }
    for (final Object element : objects) {
        // List.hashCode-style accumulation: null elements contribute 0.
        result = 31 * result + Objects.hashCode(element);
    }
    return result;
}
|
Gets the hash code for multiple objects.
<p>
This allows a hash code to be rapidly calculated for a number of objects. The hash code for a single object is <em>not</em> the same as
{@link #hashCode(Object)}. The hash code for multiple objects is the same as that calculated by an {@link ArrayList} containing the specified objects.
</p>
<pre>
ObjectUtils.hashCodeMulti() = 1
ObjectUtils.hashCodeMulti((Object[]) null) = 1
ObjectUtils.hashCodeMulti(a) = 31 + a.hashCode()
ObjectUtils.hashCodeMulti(a,b) = (31 + a.hashCode()) * 31 + b.hashCode()
ObjectUtils.hashCodeMulti(a,b,c) = ((31 + a.hashCode()) * 31 + b.hashCode()) * 31 + c.hashCode()
</pre>
@param objects the objects to obtain the hash code of, may be {@code null}.
@return the hash code of the objects, or 1 if the array is {@code null} (consistent with the examples above).
@since 3.0
@deprecated this method has been replaced by {@code java.util.Objects.hash(Object...)} in Java 7 and will be removed in future releases.
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 752
|
[] | true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
trace
|
def trace(x, /, *, offset=0, dtype=None):
    """
    Returns the sum along the specified diagonals of a matrix
    (or a stack of matrices) ``x``.

    This function is Array API compatible, contrary to
    :py:func:`numpy.trace`.

    Parameters
    ----------
    x : (...,M,N) array_like
        Input array having shape (..., M, N) and whose innermost two
        dimensions form MxN matrices.
    offset : int, optional
        Offset specifying the off-diagonal relative to the main diagonal,
        where::

            * offset = 0: the main diagonal.
            * offset > 0: off-diagonal above the main diagonal.
            * offset < 0: off-diagonal below the main diagonal.

    dtype : dtype, optional
        Data type of the returned array.

    Returns
    -------
    out : ndarray
        An array containing the traces and whose shape is determined by
        removing the last two dimensions and storing the traces in the last
        array dimension. For example, if x has rank k and shape:
        (I, J, K, ..., L, M, N), then an output array has rank k-2 and shape:
        (I, J, K, ..., L) where::

            out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :])

        The returned array must have a data type as described by the dtype
        parameter above.

    See Also
    --------
    numpy.trace

    Examples
    --------
    >>> np.linalg.trace(np.eye(3))
    3.0
    >>> a = np.arange(8).reshape((2, 2, 2))
    >>> np.linalg.trace(a)
    array([3, 11])

    Trace is computed with the last two axes as the 2-d sub-arrays.
    This behavior differs from :py:func:`numpy.trace` which uses the first two
    axes by default.

    >>> a = np.arange(24).reshape((3, 2, 2, 2))
    >>> np.linalg.trace(a).shape
    (3, 2)

    Traces adjacent to the main diagonal can be obtained by using the
    `offset` argument:

    >>> a = np.arange(9).reshape((3, 3)); a
    array([[0, 1, 2],
           [3, 4, 5],
           [6, 7, 8]])
    >>> np.linalg.trace(a, offset=1)  # First superdiagonal
    6
    >>> np.linalg.trace(a, offset=2)  # Second superdiagonal
    2
    >>> np.linalg.trace(a, offset=-1)  # First subdiagonal
    10
    >>> np.linalg.trace(a, offset=-2)  # Second subdiagonal
    6
    """
    # Array API semantics: take the trace over the last two axes, unlike
    # numpy.trace, whose defaults are axis1=0, axis2=1.
    return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype)
|
Returns the sum along the specified diagonals of a matrix
(or a stack of matrices) ``x``.
This function is Array API compatible, contrary to
:py:func:`numpy.trace`.
Parameters
----------
x : (...,M,N) array_like
Input array having shape (..., M, N) and whose innermost two
dimensions form MxN matrices.
offset : int, optional
Offset specifying the off-diagonal relative to the main diagonal,
where::
* offset = 0: the main diagonal.
* offset > 0: off-diagonal above the main diagonal.
* offset < 0: off-diagonal below the main diagonal.
dtype : dtype, optional
Data type of the returned array.
Returns
-------
out : ndarray
An array containing the traces and whose shape is determined by
removing the last two dimensions and storing the traces in the last
array dimension. For example, if x has rank k and shape:
(I, J, K, ..., L, M, N), then an output array has rank k-2 and shape:
(I, J, K, ..., L) where::
out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :])
The returned array must have a data type as described by the dtype
parameter above.
See Also
--------
numpy.trace
Examples
--------
>>> np.linalg.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2, 2, 2))
>>> np.linalg.trace(a)
array([3, 11])
Trace is computed with the last two axes as the 2-d sub-arrays.
This behavior differs from :py:func:`numpy.trace` which uses the first two
axes by default.
>>> a = np.arange(24).reshape((3, 2, 2, 2))
>>> np.linalg.trace(a).shape
(3, 2)
Traces adjacent to the main diagonal can be obtained by using the
`offset` argument:
>>> a = np.arange(9).reshape((3, 3)); a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.linalg.trace(a, offset=1) # First superdiagonal
6
>>> np.linalg.trace(a, offset=2) # Second superdiagonal
2
>>> np.linalg.trace(a, offset=-1) # First subdiagonal
10
>>> np.linalg.trace(a, offset=-2) # Second subdiagonal
6
|
python
|
numpy/linalg/_linalg.py
| 3,163
|
[
"x",
"offset",
"dtype"
] | false
| 1
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
remove
|
/**
 * Removes the node returned by the most recent call to {@code next()} or
 * {@code previous()}, then clears {@code current} so a second {@code remove()}
 * without an intervening traversal call fails the state check.
 */
@Override
public void remove() {
    checkState(current != null, "no calls to next() since the last call to remove()");
    if (current != next) { // after call to next()
        // Removing a node we already passed: step the previous pointer back
        // and shrink the index of the cursor position.
        previous = current.previousSibling;
        nextIndex--;
    } else { // after call to previous()
        // Removing the node the cursor sits on: advance next past it.
        next = current.nextSibling;
    }
    removeNode(current);
    current = null;
}
|
Constructs a new iterator over all values for the specified key starting at the specified
index. This constructor is optimized so that it starts at either the head or the tail,
depending on which is closer to the specified index. This allows adds to the tail to be done
in constant time.
@throws IndexOutOfBoundsException if index is invalid
|
java
|
android/guava/src/com/google/common/collect/LinkedListMultimap.java
| 543
|
[] |
void
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
read_query
|
def read_query(
    self,
    sql: str,
    index_col: str | list[str] | None = None,
    coerce_float: bool = True,
    parse_dates=None,
    params=None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
    """
    Read SQL query into a DataFrame.

    Parameters
    ----------
    sql : str
        SQL query to be executed.
    index_col : string, optional, default: None
        Column name to use as index for the returned DataFrame object.
    coerce_float : bool, default True
        Raises NotImplementedError
    params : list, tuple or dict, optional, default: None
        Raises NotImplementedError
    parse_dates : list or dict, default: None
        - List of column names to parse as dates.
        - Dict of ``{column_name: format string}`` where format string is
          strftime compatible in case of parsing string times, or is one of
          (D, s, ns, ms, us) in case of parsing integer timestamps.
        - Dict of ``{column_name: arg dict}``, where the arg dict
          corresponds to the keyword arguments of
          :func:`pandas.to_datetime` Especially useful with databases
          without native Datetime support, such as SQLite.
    chunksize : int, default None
        Raises NotImplementedError
    dtype : Type name or dict of columns
        Data type for data or columns. E.g. np.float64 or
        {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
    dtype_backend : DtypeBackend or "numpy", default "numpy"
        Passed through to the Arrow-table-to-pandas conversion.

    Returns
    -------
    DataFrame

    See Also
    --------
    read_sql_table : Read SQL database table into a DataFrame.
    read_sql
    """
    # ADBC fetches the whole result as an Arrow table, so the row-wise
    # conversion knobs below are rejected up front rather than ignored.
    if coerce_float is not True:
        raise NotImplementedError(
            "'coerce_float' is not implemented for ADBC drivers"
        )
    if params:
        raise NotImplementedError("'params' is not implemented for ADBC drivers")
    if chunksize:
        raise NotImplementedError("'chunksize' is not implemented for ADBC drivers")

    with self.execute(sql) as cur:
        pa_table = cur.fetch_arrow_table()
        df = arrow_table_to_pandas(pa_table, dtype_backend=dtype_backend)
    # Index assignment, date parsing and dtype coercion happen post-fetch.
    return _wrap_result_adbc(
        df,
        index_col=index_col,
        parse_dates=parse_dates,
        dtype=dtype,
    )
|
Read SQL query into a DataFrame.
Parameters
----------
sql : str
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : bool, default True
Raises NotImplementedError
params : list, tuple or dict, optional, default: None
Raises NotImplementedError
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
Raises NotImplementedError
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
|
python
|
pandas/io/sql.py
| 2,253
|
[
"self",
"sql",
"index_col",
"coerce_float",
"parse_dates",
"params",
"chunksize",
"dtype",
"dtype_backend"
] |
DataFrame | Iterator[DataFrame]
| true
| 4
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
declaresInterruptedEx
|
/**
 * Returns whether the given method declares {@link InterruptedException} in its
 * {@code throws} clause (exact type match; subclasses do not count).
 *
 * @param method the method whose declared exceptions are inspected
 * @return {@code true} if InterruptedException is declared, {@code false} otherwise
 */
private static boolean declaresInterruptedEx(Method method) {
    // debate: == or isAssignableFrom? We deliberately require the exact type.
    for (final Class<?> declared : method.getExceptionTypes()) {
        if (InterruptedException.class == declared) {
            return true;
        }
    }
    return false;
}
|
Creates a TimeLimiter instance using the given executor service to execute method calls.
<p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills
up, any time callers spend waiting for a thread may count toward their time limit, and in this
case the call may even time out before the target method is ever invoked.
@param executor the ExecutorService that will execute the method calls on the target objects;
for example, a {@link Executors#newCachedThreadPool()}.
@since 22.0
|
java
|
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
| 251
|
[
"method"
] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
failure_message_from_response
|
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
cluster_status = response["NotebookExecution"]
return cluster_status.get("LastStateChangeReason", None)
|
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
| 404
|
[
"response"
] |
str | None
| true
| 1
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
getInt7SQVectorScorerSupplier
|
/**
 * Returns an optional containing an int7 scalar quantized vector score supplier
 * for the given parameters, or an empty optional if a scorer is not supported.
 *
 * @param similarityType the similarity type
 * @param input the index input containing the vector data;
 *        offset of the first vector is 0,
 *        the length must be (maxOrd + Float#BYTES) * dims
 * @param values the random access vector values
 * @param scoreCorrectionConstant the score correction constant
 * @return an optional containing the vector scorer supplier, or empty
 */
Optional<RandomVectorScorerSupplier> getInt7SQVectorScorerSupplier(
    VectorSimilarityType similarityType,
    IndexInput input,
    QuantizedByteVectorValues values,
    float scoreCorrectionConstant
);
|
Returns an optional containing an int7 scalar quantized vector score supplier
for the given parameters, or an empty optional if a scorer is not supported.
@param similarityType the similarity type
@param input the index input containing the vector data;
offset of the first vector is 0,
the length must be (maxOrd + Float#BYTES) * dims
@param values the random access vector values
@param scoreCorrectionConstant the score correction constant
@return an optional containing the vector scorer supplier, or empty
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/VectorScorerFactory.java
| 39
|
[
"similarityType",
"input",
"values",
"scoreCorrectionConstant"
] | true
| 1
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
corr
|
def corr(
    self,
    other: Series,
    method: CorrelationMethod = "pearson",
    min_periods: int | None = None,
) -> Series:
    """
    Compute correlation between each group and another Series.

    Parameters
    ----------
    other : Series
        Series to compute correlation with.
    method : {'pearson', 'kendall', 'spearman'}, default 'pearson'
        Method of correlation to use.
    min_periods : int, optional
        Minimum number of observations required per pair of columns to
        have a valid result.

    Returns
    -------
    Series
        Correlation value for each group.

    See Also
    --------
    Series.corr : Equivalent method on ``Series``.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4], index=[0, 0, 1, 1])
    >>> g = s.groupby([0, 0, 1, 1])
    >>> g.corr()  # doctest: +SKIP
    """
    # Thin wrapper: dispatch to Series.corr per group via the generic
    # apply-based machinery.
    result = self._op_via_apply(
        "corr", other=other, method=method, min_periods=min_periods
    )
    return result
|
Compute correlation between each group and another Series.
Parameters
----------
other : Series
Series to compute correlation with.
method : {'pearson', 'kendall', 'spearman'}, default 'pearson'
Method of correlation to use.
min_periods : int, optional
Minimum number of observations required per pair of columns to
have a valid result.
Returns
-------
Series
Correlation value for each group.
See Also
--------
Series.corr : Equivalent method on ``Series``.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], index=[0, 0, 1, 1])
>>> g = s.groupby([0, 0, 1, 1])
>>> g.corr() # doctest: +SKIP
|
python
|
pandas/core/groupby/generic.py
| 1,666
|
[
"self",
"other",
"method",
"min_periods"
] |
Series
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
unmodifiableEntries
|
/**
 * Returns an unmodifiable view of the specified collection of entries; the
 * {@link Entry#setValue} operation of the returned entries throws
 * {@link UnsupportedOperationException}. When the input is a {@code Set},
 * the result is also a {@code Set}.
 *
 * @param entries the entries for which to return an unmodifiable view
 * @return an unmodifiable view of the entries
 */
private static <K extends @Nullable Object, V extends @Nullable Object>
    Collection<Entry<K, V>> unmodifiableEntries(Collection<Entry<K, V>> entries) {
    return (entries instanceof Set)
        ? Maps.unmodifiableEntrySet((Set<Entry<K, V>>) entries)
        : new Maps.UnmodifiableEntries<>(Collections.unmodifiableCollection(entries));
}
|
Returns an unmodifiable view of the specified collection of entries. The {@link Entry#setValue}
operation throws an {@link UnsupportedOperationException}. If the specified collection is a
{@code Set}, the returned collection is also a {@code Set}.
@param entries the entries for which to return an unmodifiable view
@return an unmodifiable view of the entries
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 1,051
|
[
"entries"
] | true
| 2
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
initialize
|
def initialize(self) -> None:
    """
    Initialize the bundle.

    The DAG processor and worker call this before the bundle is used, so
    expensive setup can be deferred until Airflow actually needs the bundle
    files on disk; callers that only use ``view_url`` never trigger it.

    This method must ultimately be safe to call concurrently from different
    threads or processes. If it is not naturally safe, use some form of
    locking — the ``lock`` context manager on this class exists for that.

    Overrides should call ``super().initialize()`` at the *end* of the
    override, after the bundle has been initialized, not at the beginning.
    """
    self.is_initialized = True
    # Warn early when the on-disk location is missing; otherwise this tends
    # to surface later as confusing DAG-loading failures.
    location = self.path
    if not location.exists():
        log.warning(
            "Bundle '%s' path does not exist: %s. This may cause DAG loading issues.",
            self.name,
            location,
        )
|
Initialize the bundle.
This method is called by the DAG processor and worker before the bundle is used,
and allows for deferring expensive operations until that point in time. This will
only be called when Airflow needs the bundle files on disk - some uses only need
to call the `view_url` method, which can run without initializing the bundle.
This method must ultimately be safe to call concurrently from different threads or processes.
If it isn't naturally safe, you'll need to make it so with some form of locking.
There is a `lock` context manager on this class available for this purpose.
If you override this method, ensure you call `super().initialize()`
at the end of your method, after the bundle is initialized, not the beginning.
|
python
|
airflow-core/src/airflow/dag_processing/bundles/base.py
| 278
|
[
"self"
] |
None
| true
| 2
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
appendln
|
/**
 * Appends a string buffer followed by a new line to this string builder.
 * Appending null will call {@link #appendNull()}.
 *
 * @param str the string buffer to append
 * @return {@code this} instance.
 */
public StrBuilder appendln(final StringBuffer str) {
    return append(str).appendNewLine();
}
|
Appends a string buffer followed by a new line to this string builder.
Appending null will call {@link #appendNull()}.
@param str the string buffer to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,104
|
[
"str"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toString
|
/**
 * Returns a string representation of this tokenizer: a placeholder before
 * tokenization has happened, otherwise the current token list.
 *
 * @return the string representation.
 */
@Override
public String toString() {
    // The token list only exists once the input has been tokenized.
    return tokens == null ? "StrTokenizer[not tokenized yet]" : "StrTokenizer" + getTokenList();
}
|
Gets the String content that the tokenizer is parsing.
@return the string content being parsed.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 1,105
|
[] |
String
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
resolve
|
/**
 * Resolves the field reference from the provided ingest document, or returns the static value
 * if this value source doesn't represent a field reference.
 *
 * @param ingestDocument the document to resolve the field reference against
 * @return the resolved (sanitized) field value, or the static value
 */
@Nullable
public String resolve(IngestDocument ingestDocument) {
    if (fieldReference != null) {
        // NOTE(review): the trailing boolean presumably makes the lookup lenient
        // (missing field -> null) — confirm against IngestDocument.getFieldValue.
        String value = ingestDocument.getFieldValue(fieldReference, String.class, true);
        if (value == null) {
            // Fall back to reading the reference in dotted notation.
            value = getStringFieldValueInDottedNotation(ingestDocument);
        }
        return sanitizer.apply(value);
    } else {
        // Static value source: the local 'value' above shadows this field;
        // the sanitizer is intentionally not applied here.
        return value;
    }
}
|
Resolves the field reference from the provided ingest document or returns the static value if this value source doesn't represent
a field reference.
@param ingestDocument
@return the resolved field reference or static value
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RerouteProcessor.java
| 276
|
[
"ingestDocument"
] |
String
| true
| 3
| 7.6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
create_task
|
def create_task(
    self, source_location_arn: str, destination_location_arn: str, **create_task_kwargs
) -> str:
    """
    Create a Task between the specified source and destination LocationArns.

    .. seealso::
        - :external+boto3:py:meth:`DataSync.Client.create_task`

    :param source_location_arn: Source LocationArn. Must exist already.
    :param destination_location_arn: Destination LocationArn. Must exist already.
    :param create_task_kwargs: Passed to ``boto.create_task()``. See AWS boto3 datasync documentation.
    :return: TaskArn of the created Task
    """
    task = self.get_conn().create_task(
        SourceLocationArn=source_location_arn,
        DestinationLocationArn=destination_location_arn,
        **create_task_kwargs,
    )
    # Keep the hook's cached task list in sync with the newly created task.
    self._refresh_tasks()
    return task["TaskArn"]
|
Create a Task between the specified source and destination LocationArns.
.. seealso::
- :external+boto3:py:meth:`DataSync.Client.create_task`
:param source_location_arn: Source LocationArn. Must exist already.
:param destination_location_arn: Destination LocationArn. Must exist already.
:param create_task_kwargs: Passed to ``boto.create_task()``. See AWS boto3 datasync documentation.
:return: TaskArn of the created Task
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
| 137
|
[
"self",
"source_location_arn",
"destination_location_arn"
] |
str
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
toPrimitive
|
/**
 * Converts an array of object Longs to primitives.
 *
 * <p>This method returns {@code null} for a {@code null} input array.</p>
 *
 * @param array a {@link Long} array, may be {@code null}.
 * @return a {@code long} array, {@code null} if null array input.
 * @throws NullPointerException if an array element is {@code null}.
 */
public static long[] toPrimitive(final Long[] array) {
    if (array == null) {
        return null;
    }
    final int length = array.length;
    if (length == 0) {
        // Share the canonical empty array rather than allocating.
        return EMPTY_LONG_ARRAY;
    }
    final long[] unboxed = new long[length];
    for (int index = 0; index < length; index++) {
        unboxed[index] = array[index].longValue();
    }
    return unboxed;
}
|
Converts an array of object Longs to primitives.
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array a {@link Long} array, may be {@code null}.
@return a {@code long} array, {@code null} if null array input.
@throws NullPointerException if an array element is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 9,104
|
[
"array"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
write_file
|
def write_file(self) -> None:
    """
    Export DataFrame object to Stata dta format.

    This method writes the contents of a pandas DataFrame to a `.dta` file
    compatible with Stata. It includes features for handling value labels,
    variable types, and metadata like timestamps and data labels. The output
    file can then be read and used in Stata or other compatible statistical
    tools.

    See Also
    --------
    read_stata : Read Stata file into DataFrame.
    DataFrame.to_stata : Export DataFrame object to Stata dta format.
    io.stata.StataWriter : A class for writing Stata binary dta files.

    Examples
    --------
    >>> df = pd.DataFrame(
    ...     {
    ...         "fully_labelled": [1, 2, 3, 3, 1],
    ...         "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
    ...         "Y": [7, 7, 9, 8, 10],
    ...         "Z": pd.Categorical(["j", "k", "l", "k", "j"]),
    ...     }
    ... )
    >>> path = "/My_path/filename.dta"
    >>> labels = {
    ...     "fully_labelled": {1: "one", 2: "two", 3: "three"},
    ...     "partially_labelled": {1.0: "one", 2.0: "two"},
    ... }
    >>> writer = pd.io.stata.StataWriter(
    ...     path, df, value_labels=labels
    ... )  # doctest: +SKIP
    >>> writer.write_file()  # doctest: +SKIP
    >>> df = pd.read_stata(path)  # doctest: +SKIP
    >>> df  # doctest: +SKIP
       index fully_labelled partially_labeled  Y  Z
    0      0            one               one  7  j
    1      1            two               two  7  k
    2      2          three               NaN  9  l
    3      3          three               9.0  8  k
    4      4            one               NaN 10  j
    """
    with get_handle(
        self._fname,
        "wb",
        compression=self._compression,
        is_text=False,
        storage_options=self.storage_options,
    ) as self.handles:
        if self.handles.compression["method"] is not None:
            # ZipFile creates a file (with the same name) for each write call.
            # Write it first into a buffer and then write the buffer to the ZipFile.
            self._output_file, self.handles.handle = self.handles.handle, BytesIO()
            self.handles.created_handles.append(self.handles.handle)
        try:
            # Emit the dta sections in order; note _write_map is called twice
            # (presumably the second pass fills in final offsets — confirm
            # against its implementation before reordering anything here).
            self._write_header(
                data_label=self._data_label, time_stamp=self._time_stamp
            )
            self._write_map()
            self._write_variable_types()
            self._write_varnames()
            self._write_sortlist()
            self._write_formats()
            self._write_value_label_names()
            self._write_variable_labels()
            self._write_expansion_fields()
            self._write_characteristics()
            records = self._prepare_data()
            self._write_data(records)
            self._write_strls()
            self._write_value_labels()
            self._write_file_close_tag()
            self._write_map()
            self._close()
        except Exception as exc:
            # On failure, delete the partially-written file so no corrupt
            # .dta is left behind; warn if the delete itself fails.
            self.handles.close()
            if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile(
                self._fname
            ):
                try:
                    os.unlink(self._fname)
                except OSError:
                    warnings.warn(
                        f"This save was not successful but {self._fname} could not "
                        "be deleted. This file is not valid.",
                        ResourceWarning,
                        stacklevel=find_stack_level(),
                    )
            raise exc
|
Export DataFrame object to Stata dta format.
This method writes the contents of a pandas DataFrame to a `.dta` file
compatible with Stata. It includes features for handling value labels,
variable types, and metadata like timestamps and data labels. The output
file can then be read and used in Stata or other compatible statistical
tools.
See Also
--------
read_stata : Read Stata file into DataFrame.
DataFrame.to_stata : Export DataFrame object to Stata dta format.
io.stata.StataWriter : A class for writing Stata binary dta files.
Examples
--------
>>> df = pd.DataFrame(
... {
... "fully_labelled": [1, 2, 3, 3, 1],
... "partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
... "Y": [7, 7, 9, 8, 10],
... "Z": pd.Categorical(["j", "k", "l", "k", "j"]),
... }
... )
>>> path = "/My_path/filename.dta"
>>> labels = {
... "fully_labelled": {1: "one", 2: "two", 3: "three"},
... "partially_labelled": {1.0: "one", 2.0: "two"},
... }
>>> writer = pd.io.stata.StataWriter(
... path, df, value_labels=labels
... ) # doctest: +SKIP
>>> writer.write_file() # doctest: +SKIP
>>> df = pd.read_stata(path) # doctest: +SKIP
>>> df # doctest: +SKIP
index fully_labelled partially_labeled Y Z
0 0 one one 7 j
1 1 two two 7 k
2 2 three NaN 9 l
3 3 three 9.0 8 k
4 4 one NaN 10 j
|
python
|
pandas/io/stata.py
| 2,835
|
[
"self"
] |
None
| true
| 4
| 8.56
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
neverEntitled
|
/**
 * Rejects the described operation via {@code notEntitled} unless the requesting class
 * is trivially allowed by the policy manager.
 *
 * @param callerClass the class on whose behalf the check is performed
 * @param operationDescription is only called when the operation is not trivially allowed,
 *        meaning the check is about to fail; therefore, its performance is not a major concern.
 */
private void neverEntitled(Class<?> callerClass, Supplier<String> operationDescription) {
    var requestingClass = requestingClass(callerClass);
    // Fast path: trivially-allowed classes skip entitlement resolution entirely.
    if (policyManager.isTriviallyAllowed(requestingClass)) {
        return;
    }
    ModuleEntitlements entitlements = policyManager.getEntitlements(requestingClass);
    notEntitled(
        Strings.format(
            "component [%s], module [%s], class [%s], operation [%s]",
            entitlements.componentName(),
            entitlements.moduleName(),
            requestingClass,
            operationDescription.get()
        ),
        requestingClass,
        entitlements
    );
}
|
@param operationDescription is only called when the operation is not trivially allowed, meaning the check is about to fail;
therefore, its performance is not a major concern.
|
java
|
libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyCheckerImpl.java
| 125
|
[
"callerClass",
"operationDescription"
] |
void
| true
| 2
| 6.4
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
findPrimaryConstructor
|
/**
 * Return the primary constructor of the provided class. For Kotlin classes, this
 * returns the Java constructor corresponding to the Kotlin primary constructor
 * (as defined in the Kotlin specification). For Java records, this returns the
 * canonical constructor. Otherwise, this simply returns {@code null}.
 *
 * @param clazz the class to check
 * @return the primary/canonical constructor, or {@code null} if none applies
 */
public static <T> @Nullable Constructor<T> findPrimaryConstructor(Class<T> clazz) {
    Assert.notNull(clazz, "Class must not be null");
    if (KOTLIN_REFLECT_PRESENT && KotlinDetector.isKotlinType(clazz)) {
        return KotlinDelegate.findPrimaryConstructor(clazz);
    }
    if (clazz.isRecord()) {
        try {
            // Use the canonical constructor which is always present
            RecordComponent[] components = clazz.getRecordComponents();
            Class<?>[] paramTypes = new Class<?>[components.length];
            for (int i = 0; i < components.length; i++) {
                paramTypes[i] = components[i].getType();
            }
            return clazz.getDeclaredConstructor(paramTypes);
        }
        catch (NoSuchMethodException ignored) {
            // Deliberately swallowed: fall through to the null return below.
        }
    }
    return null;
}
|
Return the primary constructor of the provided class. For Kotlin classes, this
returns the Java constructor corresponding to the Kotlin primary constructor
(as defined in the Kotlin specification). For Java records, this returns the
canonical constructor. Otherwise, this simply returns {@code null}.
@param clazz the class to check
@since 5.0
@see <a href="https://kotlinlang.org/docs/reference/classes.html#constructors">Kotlin constructors</a>
@see <a href="https://docs.oracle.com/javase/specs/jls/se17/html/jls-8.html#jls-8.10.4">Record constructor declarations</a>
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 278
|
[
"clazz"
] | true
| 6
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
format
|
/**
 * Formats a {@link Calendar} into the supplied {@link Appendable} by delegating to the
 * underlying printer.
 *
 * @param calendar the calendar to format.
 * @param buf the buffer to format into.
 * @return the specified buffer.
 */
@Override
public <B extends Appendable> B format(final Calendar calendar, final B buf) {
    return printer.format(calendar, buf);
}
|
Formats a {@link Calendar} object into the supplied {@link StringBuffer}.
@param calendar the calendar to format.
@param buf the buffer to format into.
@return the specified string buffer.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDateFormat.java
| 419
|
[
"calendar",
"buf"
] |
B
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
printRootCauseStackTrace
|
/**
 * Prints a compact stack trace for the root cause of a throwable to the given stream,
 * starting with the root cause and working outward through each wrapping exception.
 *
 * <p>A {@code null} throwable is a no-op.</p>
 *
 * @param throwable the throwable to output, may be null.
 * @param printStream the stream to output to, may not be null.
 * @throws NullPointerException if the printStream is {@code null}.
 */
@SuppressWarnings("resource")
public static void printRootCauseStackTrace(final Throwable throwable, final PrintStream printStream) {
    if (throwable == null) {
        return;
    }
    Objects.requireNonNull(printStream, "printStream");
    for (final String frame : getRootCauseStackTraceList(throwable)) {
        printStream.println(frame);
    }
    printStream.flush();
}
|
Prints a compact stack trace for the root cause of a throwable.
<p>The compact stack trace starts with the root cause and prints
stack frames up to the place where it was caught and wrapped.
Then it prints the wrapped exception and continues with stack frames
until the wrapper exception is caught and wrapped again, etc.</p>
<p>The output of this method is consistent across JDK versions.
Note that this is the opposite order to the JDK1.4 display.</p>
<p>The method is equivalent to {@code printStackTrace} for throwables
that don't have nested causes.</p>
@param throwable the throwable to output, may be null.
@param printStream the stream to output to, may not be null.
@throws NullPointerException if the printStream is {@code null}.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
| 743
|
[
"throwable",
"printStream"
] |
void
| true
| 2
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
stableQuickSort
|
/**
 * Stabilized quick sort on an index array: a three-way-partition quick sort that uses
 * the original index as a secondary key, so equal values keep their relative order.
 * Tail-recurses on the larger partition (via the while loop) to bound stack depth.
 *
 * @param order the pre-allocated index array being permuted
 * @param values the values to sort (accessed through {@code order})
 * @param start the beginning of the range to sort
 * @param end one past the last element to sort
 * @param limit the minimum partition size to recurse down to
 */
private static void stableQuickSort(TDigestIntArray order, TDigestDoubleArray values, int start, int end, int limit) {
    // the while loop implements tail-recursion to avoid excessive stack calls on nasty cases
    while (end - start > limit) {
        // pivot by a random element
        int pivotIndex = start + prng.nextInt(end - start);
        double pivotValue = values.get(order.get(pivotIndex));
        int pv = order.get(pivotIndex);
        // move pivot to beginning of array
        swap(order, start, pivotIndex);
        // we use a three way partition because many duplicate values is an important case
        int low = start + 1; // low points to first value not known to be equal to pivotValue
        int high = end; // high points to first value > pivotValue
        int i = low; // i scans the array
        while (i < high) {
            // invariant: (values[order[k]],order[k]) == (pivotValue, pv) for k in [0..low)
            // invariant: (values[order[k]],order[k]) < (pivotValue, pv) for k in [low..i)
            // invariant: (values[order[k]],order[k]) > (pivotValue, pv) for k in [high..end)
            // in-loop: i < high
            // in-loop: low < high
            // in-loop: i >= low
            double vi = values.get(order.get(i));
            int pi = order.get(i);
            if (vi == pivotValue && pi == pv) {
                if (low != i) {
                    swap(order, low, i);
                } else {
                    i++;
                }
                low++;
            } else if (vi > pivotValue || (vi == pivotValue && pi > pv)) {
                high--;
                swap(order, i, high);
            } else {
                // vi < pivotValue || (vi == pivotValue && pi < pv)
                i++;
            }
        }
        // invariant: (values[order[k]],order[k]) == (pivotValue, pv) for k in [0..low)
        // invariant: (values[order[k]],order[k]) < (pivotValue, pv) for k in [low..i)
        // invariant: (values[order[k]],order[k]) > (pivotValue, pv) for k in [high..end)
        // assert i == high || low == high therefore, we are done with partition
        // at this point, i==high, from [start,low) are == pivot, [low,high) are < and [high,end) are >
        // we have to move the values equal to the pivot into the middle. To do this, we swap pivot
        // values into the top end of the [low,high) range stopping when we run out of destinations
        // or when we run out of values to copy
        int from = start;
        int to = high - 1;
        for (i = 0; from < low && to >= low; i++) {
            swap(order, from++, to--);
        }
        if (from == low) {
            // ran out of things to copy. This means that the last destination is the boundary
            low = to + 1;
        } else {
            // ran out of places to copy to. This means that there are uncopied pivots and the
            // boundary is at the beginning of those
            low = from;
        }
        // checkPartition(order, values, pivotValue, start, low, high, end);
        // now recurse, but arrange it so we handle the longer limit by tail recursion
        // we have to sort the pivot values because they may have different weights
        // we can't do that, however until we know how much weight is in the left and right
        if (low - start < end - high) {
            // left side is smaller
            stableQuickSort(order, values, start, low, limit);
            // this is really a way to do
            // quickSort(order, values, high, end, limit);
            start = high;
        } else {
            stableQuickSort(order, values, high, end, limit);
            // this is really a way to do
            // quickSort(order, values, start, low, limit);
            end = low;
        }
    }
}
|
Stabilized quick sort on an index array. This is a normal quick sort that uses the
original index as a secondary key. Since we are really just sorting an index array
we can do this nearly for free.
@param order The pre-allocated index array
@param values The values to sort
@param start The beginning of the values to sort
@param end The value after the last value to sort
@param limit The minimum size to recurse down to.
|
java
|
libs/tdigest/src/main/java/org/elasticsearch/tdigest/Sort.java
| 61
|
[
"order",
"values",
"start",
"end",
"limit"
] |
void
| true
| 13
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
select_column
|
def select_column(
self,
key: str,
column: str,
start: int | None = None,
stop: int | None = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
|
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
|
python
|
pandas/io/pytables.py
| 970
|
[
"self",
"key",
"column",
"start",
"stop"
] | true
| 2
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
equals
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TopicIdPartition that = (TopicIdPartition) o;
return topicId.equals(that.topicId) &&
topicPartition.equals(that.topicPartition);
}
|
@return Topic partition representing this instance.
|
java
|
clients/src/main/java/org/apache/kafka/common/TopicIdPartition.java
| 81
|
[
"o"
] | true
| 5
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
isEmpty
|
@Override
public boolean isEmpty() {
/*
* Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and
* removed in one segment while checking another, in which case the table was never actually
* empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment
* modifications before recheck.) Method containsValue() uses similar constructions for
* stability checks.
*/
long sum = 0L;
Segment<K, V, E, S>[] segments = this.segments;
for (int i = 0; i < segments.length; ++i) {
if (segments[i].count != 0) {
return false;
}
sum += segments[i].modCount;
}
if (sum != 0L) { // recheck unless no modifications
for (int i = 0; i < segments.length; ++i) {
if (segments[i].count != 0) {
return false;
}
sum -= segments[i].modCount;
}
return sum == 0L;
}
return true;
}
|
Concrete implementation of {@link Segment} for weak keys and {@link Dummy} values.
|
java
|
android/guava/src/com/google/common/collect/MapMakerInternalMap.java
| 2,315
|
[] | true
| 6
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
stack
|
def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
Parameters
----------
arrays : sequence of ndarrays
Each array must have the same shape. In the case of a single ndarray
array_like input, it will be treated as a sequence of arrays; i.e.,
each element along the zeroth axis is treated as a separate array.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
dtype : str or dtype
If provided, the destination array will have this dtype. Cannot be
provided together with `out`.
.. versionadded:: 1.24
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'same_kind'.
.. versionadded:: 1.24
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
block : Assemble an nd-array from nested lists of blocks.
split : Split array into a list of multiple sub-arrays of equal size.
unstack : Split an array into a tuple of sub-arrays along an axis.
Examples
--------
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> arrays = [rng.normal(size=(3,4)) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.stack((a, b))
array([[1, 2, 3],
[4, 5, 6]])
>>> np.stack((a, b), axis=-1)
array([[1, 4],
[2, 5],
[3, 6]])
"""
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out,
dtype=dtype, casting=casting)
|
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
Parameters
----------
arrays : sequence of ndarrays
Each array must have the same shape. In the case of a single ndarray
array_like input, it will be treated as a sequence of arrays; i.e.,
each element along the zeroth axis is treated as a separate array.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
dtype : str or dtype
If provided, the destination array will have this dtype. Cannot be
provided together with `out`.
.. versionadded:: 1.24
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'same_kind'.
.. versionadded:: 1.24
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
block : Assemble an nd-array from nested lists of blocks.
split : Split array into a list of multiple sub-arrays of equal size.
unstack : Split an array into a tuple of sub-arrays along an axis.
Examples
--------
>>> import numpy as np
>>> rng = np.random.default_rng()
>>> arrays = [rng.normal(size=(3,4)) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.stack((a, b))
array([[1, 2, 3],
[4, 5, 6]])
>>> np.stack((a, b), axis=-1)
array([[1, 4],
[2, 5],
[3, 6]])
|
python
|
numpy/_core/shape_base.py
| 379
|
[
"arrays",
"axis",
"out",
"dtype",
"casting"
] | false
| 3
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
createHoistedVariableForClass
|
function createHoistedVariableForClass(name: string | PrivateIdentifier | undefined, node: PrivateIdentifier | ClassStaticBlockDeclaration, suffix?: string): Identifier {
const { className } = getPrivateIdentifierEnvironment().data;
const prefix: GeneratedNamePart | string = className ? { prefix: "_", node: className, suffix: "_" } : "_";
const identifier = typeof name === "object" ? factory.getGeneratedNameForNode(name, GeneratedIdentifierFlags.Optimistic | GeneratedIdentifierFlags.ReservedInNestedScopes, prefix, suffix) :
typeof name === "string" ? factory.createUniqueName(name, GeneratedIdentifierFlags.Optimistic, prefix, suffix) :
factory.createTempVariable(/*recordTempVariable*/ undefined, /*reservedInNestedScopes*/ true, prefix, suffix);
if (resolver.hasNodeCheckFlag(node, NodeCheckFlags.BlockScopedBindingInLoop)) {
addBlockScopedVariable(identifier);
}
else {
hoistVariableDeclaration(identifier);
}
return identifier;
}
|
If the name is a computed property, this function transforms it, then either returns an expression which caches the
value of the result or the expression itself if the value is either unused or safe to inline into multiple locations
@param shouldHoist Does the expression need to be reused? (ie, for an initializer or a decorator)
|
typescript
|
src/compiler/transformers/classFields.ts
| 2,953
|
[
"name",
"node",
"suffix?"
] | true
| 6
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
initWithCommittedOffsetsIfNeeded
|
private CompletableFuture<Void> initWithCommittedOffsetsIfNeeded(Set<TopicPartition> initializingPartitions,
long deadlineMs) {
if (initializingPartitions.isEmpty()) {
return CompletableFuture.completedFuture(null);
}
log.debug("Refreshing committed offsets for partitions {}", initializingPartitions);
CompletableFuture<Void> result = new CompletableFuture<>();
// The shorter the timeout provided to poll(), the more likely the offsets fetch will time out. To handle
// this case, on the first attempt to fetch the committed offsets, a FetchCommittedOffsetsEvent is created
// (with potentially a longer timeout) and stored. The event is used for the first attempt, but in the
// case it times out, subsequent attempts will also use the event in order to wait for the results.
if (!canReusePendingOffsetFetchEvent(initializingPartitions)) {
// Generate a new OffsetFetch request and update positions when a response is received
final long fetchCommittedDeadlineMs = Math.max(deadlineMs, time.milliseconds() + defaultApiTimeoutMs);
CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> fetchOffsets =
commitRequestManager.fetchOffsets(initializingPartitions, fetchCommittedDeadlineMs);
CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> fetchOffsetsAndRefresh =
fetchOffsets.whenComplete((offsets, error) -> {
pendingOffsetFetchEvent = null;
// Update positions with the retrieved offsets
refreshOffsets(offsets, error, result);
});
pendingOffsetFetchEvent = new PendingFetchCommittedRequest(initializingPartitions, fetchOffsetsAndRefresh);
} else {
// Reuse pending OffsetFetch request that will complete when positions are refreshed with the committed offsets retrieved
pendingOffsetFetchEvent.result.whenComplete((__, error) -> {
if (error == null) {
result.complete(null);
} else {
result.completeExceptionally(error);
}
});
}
return result;
}
|
Fetch the committed offsets for partitions that require initialization. This will trigger an OffsetFetch
request and update positions in the subscription state once a response is received.
@param initializingPartitions Set of partitions to update with a position. This same set will be kept
throughout the whole process (considered when fetching committed offsets, and
when resetting positions for partitions that may not have committed offsets).
@param deadlineMs Deadline of the application event that triggered this operation. Used to
determine how much time to allow for the reused offset fetch to complete.
@throws TimeoutException If offsets could not be retrieved within the timeout
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 364
|
[
"initializingPartitions",
"deadlineMs"
] | true
| 4
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
get_chunk
|
def get_chunk(self, size: int | None = None) -> pd.DataFrame:
"""
Reads lines from Xport file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
|
Reads lines from Xport file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
|
python
|
pandas/io/sas/sas_xport.py
| 424
|
[
"self",
"size"
] |
pd.DataFrame
| true
| 2
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
acknowledgeOnClose
|
public CompletableFuture<Void> acknowledgeOnClose(final Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap,
final long deadlineMs) {
final Cluster cluster = metadata.fetch();
final AtomicInteger resultCount = new AtomicInteger();
final ResultHandler resultHandler = new ResultHandler(resultCount, Optional.empty());
closing = true;
Map<Integer, Map<TopicIdPartition, Acknowledgements>> acknowledgementsMapAllNodes = new HashMap<>();
acknowledgementsMap.forEach((tip, nodeAcks) -> {
if (!isLeaderKnownToHaveChanged(nodeAcks.nodeId(), tip)) {
Map<TopicIdPartition, Acknowledgements> acksMap = acknowledgementsMapAllNodes.computeIfAbsent(nodeAcks.nodeId(), k -> new HashMap<>());
Acknowledgements prevAcks = acksMap.putIfAbsent(tip, nodeAcks.acknowledgements());
if (prevAcks != null) {
acksMap.get(tip).merge(nodeAcks.acknowledgements());
}
} else {
nodeAcks.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
maybeSendShareAcknowledgementEvent(Map.of(tip, nodeAcks.acknowledgements()), true, Optional.empty());
}
});
sessionHandlers.forEach((nodeId, sessionHandler) -> {
Node node = cluster.nodeById(nodeId);
if (node != null) {
//Add any waiting piggyback acknowledgements for the node.
Map<TopicIdPartition, Acknowledgements> fetchAcks = fetchAcknowledgementsToSend.remove(nodeId);
if (fetchAcks != null) {
fetchAcks.forEach((tip, acks) -> {
if (!isLeaderKnownToHaveChanged(nodeId, tip)) {
Map<TopicIdPartition, Acknowledgements> acksMap = acknowledgementsMapAllNodes.computeIfAbsent(nodeId, k -> new HashMap<>());
Acknowledgements prevAcks = acksMap.putIfAbsent(tip, acks);
if (prevAcks != null) {
acksMap.get(tip).merge(acks);
}
} else {
acks.complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
maybeSendShareAcknowledgementEvent(Map.of(tip, acks), true, Optional.empty());
}
});
}
Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = acknowledgementsMapAllNodes.get(nodeId);
if (acknowledgementsMapForNode != null) {
acknowledgementsMapForNode.forEach((tip, acknowledgements) -> {
metricsManager.recordAcknowledgementSent(acknowledgements.size());
log.debug("Added closing acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
resultCount.incrementAndGet();
});
} else {
acknowledgementsMapForNode = new HashMap<>();
}
acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null));
// Ensure there is no close() request already present as they are blocking calls
// and only one request can be active at a time.
if (acknowledgeRequestStates.get(nodeId).getCloseRequest() != null && isRequestStateInProgress(acknowledgeRequestStates.get(nodeId).getCloseRequest())) {
log.error("Attempt to call close() when there is an existing close request for node {}-{}", node.id(), acknowledgeRequestStates.get(nodeId).getSyncRequestQueue());
closeFuture.completeExceptionally(
new IllegalStateException("Attempt to call close() when there is an existing close request for node : " + node.id()));
} else {
// There can only be one close() happening at a time. So per node, there will be one acknowledge request state.
acknowledgeRequestStates.get(nodeId).setCloseRequest(
new AcknowledgeRequestState(logContext,
ShareConsumeRequestManager.class.getSimpleName() + ":3",
deadlineMs,
retryBackoffMs,
retryBackoffMaxMs,
sessionHandler,
nodeId,
acknowledgementsMapForNode,
resultHandler,
AcknowledgeRequestType.CLOSE
));
}
}
});
resultHandler.completeIfEmpty();
return closeFuture;
}
|
Enqueue the final AcknowledgeRequestState used to commit the final acknowledgements and
close the share sessions.
@param acknowledgementsMap The acknowledgements to commit
@param deadlineMs Time until which the request will be retried if it fails with
an expected retriable error.
@return The future which completes when the acknowledgements finished
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 662
|
[
"acknowledgementsMap",
"deadlineMs"
] | true
| 10
| 7.52
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
move_cutlass_compiled_cache
|
def move_cutlass_compiled_cache() -> None:
"""Move CUTLASS compiled cache file to the cache directory if it exists."""
if not try_import_cutlass.cache_info().currsize > 0:
return
import cutlass_cppgen # type: ignore[import-not-found]
# Check if the CACHE_FILE attribute exists in cutlass_cppgen and if the file exists
if not hasattr(cutlass_cppgen, "CACHE_FILE") or not os.path.exists(
cutlass_cppgen.CACHE_FILE
):
return
try:
filename = os.path.basename(cutlass_cppgen.CACHE_FILE)
shutil.move(cutlass_cppgen.CACHE_FILE, os.path.join(cache_dir(), filename))
log.debug("Moved CUTLASS compiled cache file to %s", cache_dir())
except OSError:
log.warning("Failed to move CUTLASS compiled cache file", exc_info=True)
|
Move CUTLASS compiled cache file to the cache directory if it exists.
|
python
|
torch/_inductor/codegen/cuda/cutlass_utils.py
| 39
|
[] |
None
| true
| 4
| 7.2
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
validate
|
public List<ConfigValue> validate(Map<String, String> props) {
return new ArrayList<>(validateAll(props).values());
}
|
Validate the current configuration values with the configuration definition.
@param props the current configuration values
@return List of Config, each Config contains the updated configuration information given
the current configuration values.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 562
|
[
"props"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
close
|
@Override
public void close() throws IOException {
lock.lock();
try {
client.close();
} finally {
lock.unlock();
}
}
|
Check whether there is pending request. This includes both requests that
have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
@return A boolean indicating whether there is pending request
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 545
|
[] |
void
| true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
max
|
public static int max(int a, final int b, final int c) {
if (b > a) {
a = b;
}
if (c > a) {
a = c;
}
return a;
}
|
Gets the maximum of three {@code int} values.
@param a value 1.
@param b value 2.
@param c value 3.
@return the largest of the values.
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,003
|
[
"a",
"b",
"c"
] | true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
array_equal
|
def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> import numpy as np
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if a1.shape != a2.shape:
return False
if not equal_nan:
return builtins.bool((asanyarray(a1 == a2)).all())
if a1 is a2:
# nan will compare equal so an array will compare equal to itself.
return True
cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype)
and _dtype_cannot_hold_nan(a2.dtype))
if cannot_have_nan:
return builtins.bool(asarray(a1 == a2).all())
# Handling NaN values if equal_nan is True
a1nan, a2nan = isnan(a1), isnan(a2)
# NaN's occur at different locations
if not (a1nan == a2nan).all():
return False
# Shapes of a1, a2 and masks are guaranteed to be consistent by this point
return builtins.bool((a1[~a1nan] == a2[~a1nan]).all())
|
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> import numpy as np
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
|
python
|
numpy/_core/numeric.py
| 2,463
|
[
"a1",
"a2",
"equal_nan"
] | false
| 7
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
findProvider
|
private @Nullable TemplateAvailabilityProvider findProvider(String view, Environment environment,
ClassLoader classLoader, ResourceLoader resourceLoader) {
for (TemplateAvailabilityProvider candidate : this.providers) {
if (candidate.isTemplateAvailable(view, environment, classLoader, resourceLoader)) {
return candidate;
}
}
return null;
}
|
Get the provider that can be used to render the given view.
@param view the view to render
@param environment the environment
@param classLoader the class loader
@param resourceLoader the resource loader
@return a {@link TemplateAvailabilityProvider} or null
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/template/TemplateAvailabilityProviders.java
| 156
|
[
"view",
"environment",
"classLoader",
"resourceLoader"
] |
TemplateAvailabilityProvider
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createEnvironment
|
private ConfigurableEnvironment createEnvironment(Class<? extends ConfigurableEnvironment> type) {
try {
Constructor<? extends ConfigurableEnvironment> constructor = type.getDeclaredConstructor();
ReflectionUtils.makeAccessible(constructor);
return constructor.newInstance();
}
catch (Exception ex) {
return new ApplicationEnvironment();
}
}
|
Converts the given {@code environment} to the given {@link StandardEnvironment}
type. If the environment is already of the same type, no conversion is performed
and it is returned unchanged.
@param environment the Environment to convert
@param type the type to convert the Environment to
@return the converted Environment
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/EnvironmentConverter.java
| 90
|
[
"type"
] |
ConfigurableEnvironment
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
wrapper
|
def wrapper(fn: Callable[_P, _R]) -> Callable[_P, _R]:
"""Wrap the function to enable memoization with replay and record.
Args:
fn: The function to wrap.
Returns:
A wrapped version of the function.
"""
# If caching is disabled, return the original function unchanged
if not config.IS_CACHING_MODULE_ENABLED():
return fn
# Create decorated versions using record and replay
replay_fn = self.replay(
custom_params_encoder,
custom_result_decoder,
)(fn)
record_fn = self.record(
custom_params_encoder,
custom_result_encoder,
)(fn)
@functools.wraps(fn)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
"""Attempt to replay from cache, or record on cache miss.
Args:
*args: Positional arguments to pass to the function.
**kwargs: Keyword arguments to pass to the function.
Returns:
The result from cache (if hit) or from executing the function (if miss).
"""
# Try to replay first
try:
return replay_fn(*args, **kwargs)
except KeyError:
# Cache miss - record the result
return record_fn(*args, **kwargs)
return inner
|
Wrap the function to enable memoization with replay and record.
Args:
fn: The function to wrap.
Returns:
A wrapped version of the function.
|
python
|
torch/_inductor/runtime/caching/interfaces.py
| 188
|
[
"fn"
] |
Callable[_P, _R]
| true
| 2
| 8.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
visitTopLevelImportEqualsDeclaration
|
function visitTopLevelImportEqualsDeclaration(node: ImportEqualsDeclaration): VisitResult<Statement | undefined> {
Debug.assert(isExternalModuleImportEqualsDeclaration(node), "import= for internal module references should be handled in an earlier transformer.");
let statements: Statement[] | undefined;
if (moduleKind !== ModuleKind.AMD) {
if (hasSyntacticModifier(node, ModifierFlags.Export)) {
statements = append(
statements,
setOriginalNode(
setTextRange(
factory.createExpressionStatement(
createExportExpression(
node.name,
createRequireCall(node),
),
),
node,
),
node,
),
);
}
else {
statements = append(
statements,
setOriginalNode(
setTextRange(
factory.createVariableStatement(
/*modifiers*/ undefined,
factory.createVariableDeclarationList(
[
factory.createVariableDeclaration(
factory.cloneNode(node.name),
/*exclamationToken*/ undefined,
/*type*/ undefined,
createRequireCall(node),
),
],
/*flags*/ languageVersion >= ScriptTarget.ES2015 ? NodeFlags.Const : NodeFlags.None,
),
),
node,
),
node,
),
);
}
}
else {
if (hasSyntacticModifier(node, ModifierFlags.Export)) {
statements = append(
statements,
setOriginalNode(
setTextRange(
factory.createExpressionStatement(
createExportExpression(factory.getExportName(node), factory.getLocalName(node)),
),
node,
),
node,
),
);
}
}
statements = appendExportsOfImportEqualsDeclaration(statements, node);
return singleOrMany(statements);
}
|
Visits an ImportEqualsDeclaration node.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 1,553
|
[
"node"
] | true
| 7
| 6.32
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
canSendRequest
|
private boolean canSendRequest(String node, long now) {
return connectionStates.isReady(node, now) && selector.isChannelReady(node) &&
inFlightRequests.canSendMore(node);
}
|
Are we connected and ready and able to send more requests to the given connection?
@param node The node
@param now the current timestamp
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 530
|
[
"node",
"now"
] | true
| 3
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
addConstructorArg
|
private Map<RestApiVersion, Integer> addConstructorArg(BiConsumer<?, ?> consumer, ParseField parseField) {
boolean required = consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER;
if (RestApiVersion.minimumSupported().matches(parseField.getForRestApiVersion())) {
constructorArgInfos.computeIfAbsent(RestApiVersion.minimumSupported(), (v) -> new ArrayList<>())
.add(new ConstructorArgInfo(parseField, required));
}
if (RestApiVersion.current().matches(parseField.getForRestApiVersion())) {
constructorArgInfos.computeIfAbsent(RestApiVersion.current(), (v) -> new ArrayList<>())
.add(new ConstructorArgInfo(parseField, required));
}
// calculate the positions for the arguments
return constructorArgInfos.entrySet().stream().collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> e.getValue().size()));
}
|
Add a constructor argument
@param consumer Either {@link #REQUIRED_CONSTRUCTOR_ARG_MARKER} or {@link #REQUIRED_CONSTRUCTOR_ARG_MARKER}
@param parseField Parse field
@return The argument position
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java
| 376
|
[
"consumer",
"parseField"
] | true
| 3
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
cacheExceptionIfEventExpired
|
private void cacheExceptionIfEventExpired(CompletableFuture<Void> result, long deadlineMs) {
result.whenComplete((__, error) -> {
boolean updatePositionsExpired = time.milliseconds() >= deadlineMs;
if (error != null && updatePositionsExpired) {
cachedUpdatePositionsException.set(error);
}
});
}
|
Save exception that may occur while updating fetch positions. Note that since the update fetch positions
is triggered asynchronously, errors may be found when the triggering UpdateFetchPositionsEvent has already
expired. In that case, the exception is saved in memory, to be thrown when processing the following
UpdateFetchPositionsEvent.
@param result Update fetch positions future to get the exception from (if any)
@param deadlineMs Deadline of the triggering application event, used to identify if the event has already
expired when the error in the result future occurs.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManager.java
| 318
|
[
"result",
"deadlineMs"
] |
void
| true
| 3
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
modifierVisitor
|
function modifierVisitor(node: Node): VisitResult<Node | undefined> {
if (isDecorator(node)) return undefined;
if (modifierToFlag(node.kind) & ModifierFlags.TypeScriptModifier) {
return undefined;
}
else if (currentNamespace && node.kind === SyntaxKind.ExportKeyword) {
return undefined;
}
return node;
}
|
Specialized visitor that visits the immediate children of a class with TypeScript syntax.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/ts.ts
| 625
|
[
"node"
] | true
| 6
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
generate_blob
|
def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes:
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : dict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes("GSO", "ascii")
gso_type = struct.pack(self._byteorder + "B", 130)
null = struct.pack(self._byteorder + "B", 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + "I"
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
if isinstance(strl, str):
strl_convert = bytes(strl, "utf-8")
else:
strl_convert = strl
bio.write(struct.pack(len_type, len(strl_convert) + 1))
# xxx...xxx
bio.write(strl_convert)
bio.write(null)
return bio.getvalue()
|
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : dict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
|
python
|
pandas/io/stata.py
| 3,300
|
[
"self",
"gso_table"
] |
bytes
| true
| 5
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
reorder_pre_hook_nodes_to_schedule_asap
|
def reorder_pre_hook_nodes_to_schedule_asap(self) -> None:
"""
In this function, we schedule the pre hooks as soon as possible. This
does not match eager behavior (schedule pre hook right before its
registered node), but it can make acc grad be scheduled properly when
the pre hooks are registered to them. After reordering acc grad node, we
will reorder the pre hooks again to mimic eager behavior.
"""
for node in self.fx_tracer.graph.find_nodes(
op="call_function", target=call_hook
):
if node.kwargs.get("hook_type", None) != "pre_hook":
continue
getitem_node = node.args[0]
# pre_hook handle a tuple of grad tensors
input_nodes = self.get_all_nodes(node.args[1])
to_remove = []
to_append = []
hook_block = [node] # contain the hook and hook args getitem
for n in input_nodes:
if n.op == "call_function" and n.target is operator.getitem:
to_append.append(n.args[0])
to_remove.append(n)
hook_block.append(n)
for a, b in zip(to_remove, to_append):
input_nodes.remove(a)
input_nodes.append(b) # type: ignore[arg-type]
arg = max(input_nodes) # last input
if arg is not node.prev and not self.is_placeholder(arg):
arg.append(getitem_node)
for n in hook_block:
getitem_node.append(n)
|
In this function, we schedule the pre hooks as soon as possible. This
does not match eager behavior (schedule pre hook right before its
registered node), but it can make acc grad be scheduled properly when
the pre hooks are registered to them. After reordering acc grad node, we
will reorder the pre hooks again to mimic eager behavior.
|
python
|
torch/_dynamo/compiled_autograd.py
| 1,222
|
[
"self"
] |
None
| true
| 10
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
convertIfNecessary
|
public <T> @Nullable T convertIfNecessary(@Nullable String propertyName, @Nullable Object oldValue,
Object newValue, @Nullable Class<T> requiredType) throws IllegalArgumentException {
return convertIfNecessary(propertyName, oldValue, newValue, requiredType, TypeDescriptor.valueOf(requiredType));
}
|
Convert the value to the required type for the specified property.
@param propertyName name of the property
@param oldValue the previous value, if available (may be {@code null})
@param newValue the proposed new value
@param requiredType the type we must convert to
(or {@code null} if not known, for example in case of a collection element)
@return the new value, possibly the result of type conversion
@throws IllegalArgumentException if type conversion failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/TypeConverterDelegate.java
| 95
|
[
"propertyName",
"oldValue",
"newValue",
"requiredType"
] |
T
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
calculate_dagrun_date_fields
|
def calculate_dagrun_date_fields(
self,
dag: SerializedDAG,
last_automated_dag_run: None | DataInterval,
) -> None:
"""
Calculate ``next_dagrun`` and `next_dagrun_create_after``.
:param dag: The DAG object
:param last_automated_dag_run: DataInterval (or datetime) of most recent run of this dag, or none
if not yet scheduled.
"""
last_automated_data_interval: DataInterval | None
if isinstance(last_automated_dag_run, datetime):
raise ValueError(
"Passing a datetime to `DagModel.calculate_dagrun_date_fields` is not supported. "
"Provide a data interval instead."
)
last_automated_data_interval = last_automated_dag_run
next_dagrun_info = dag.next_dagrun_info(last_automated_data_interval)
if next_dagrun_info is None:
self.next_dagrun_data_interval = self.next_dagrun = self.next_dagrun_create_after = None
else:
self.next_dagrun_data_interval = next_dagrun_info.data_interval
self.next_dagrun = next_dagrun_info.logical_date
self.next_dagrun_create_after = next_dagrun_info.run_after
log.info(
"Setting next_dagrun for %s to %s, run_after=%s",
dag.dag_id,
self.next_dagrun,
self.next_dagrun_create_after,
)
|
Calculate ``next_dagrun`` and `next_dagrun_create_after``.
:param dag: The DAG object
:param last_automated_dag_run: DataInterval (or datetime) of most recent run of this dag, or none
if not yet scheduled.
|
python
|
airflow-core/src/airflow/models/dag.py
| 702
|
[
"self",
"dag",
"last_automated_dag_run"
] |
None
| true
| 4
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
leaving
|
private void leaving() {
clearTaskAndPartitionAssignment();
subscriptionState.unsubscribe();
transitionToSendingLeaveGroup(false);
}
|
Leaves the group.
<p>
This method does the following:
<ol>
<li>Transitions member state to {@link MemberState#PREPARE_LEAVING}.</li>
<li>Requests the invocation of the revocation callback or lost callback.</li>
<li>Once the callback completes, it clears the current and target assignment, unsubscribes from
all topics and transitions the member state to {@link MemberState#LEAVING}.</li>
</ol>
States {@link MemberState#PREPARE_LEAVING} and {@link MemberState#LEAVING} cause the heartbeat request manager
to send a leave group heartbeat.
</p>
@return future that will complete when the revocation callback execution completes and the heartbeat
to leave the group has been sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 971
|
[] |
void
| true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
processCommonDefinitionAnnotations
|
public static void processCommonDefinitionAnnotations(AnnotatedBeanDefinition abd) {
processCommonDefinitionAnnotations(abd, abd.getMetadata());
}
|
Register all relevant annotation post processors in the given registry.
@param registry the registry to operate on
@param source the configuration source element (already extracted)
that this registration was triggered from. May be {@code null}.
@return a Set of BeanDefinitionHolders, containing all bean definitions
that have actually been registered by this call
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationConfigUtils.java
| 227
|
[
"abd"
] |
void
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parenthesizeExpressionOfComputedPropertyName
|
function parenthesizeExpressionOfComputedPropertyName(expression: Expression): Expression {
return isCommaSequence(expression) ? factory.createParenthesizedExpression(expression) : expression;
}
|
Wraps the operand to a BinaryExpression in parentheses if they are needed to preserve the intended
order of operations.
@param binaryOperator The operator for the BinaryExpression.
@param operand The operand for the BinaryExpression.
@param isLeftSideOfBinary A value indicating whether the operand is the left side of the
BinaryExpression.
|
typescript
|
src/compiler/factory/parenthesizerRules.ts
| 320
|
[
"expression"
] | true
| 2
| 6.16
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
setSpanAttributes
|
private void setSpanAttributes(TraceContext traceContext, @Nullable Map<String, Object> spanAttributes, SpanBuilder spanBuilder) {
setSpanAttributes(spanAttributes, spanBuilder);
final String xOpaqueId = traceContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER);
if (xOpaqueId != null) {
spanBuilder.setAttribute("es.x-opaque-id", xOpaqueId);
}
}
|
Most of the examples of how to use the OTel API look something like this, where the span context
is automatically propagated:
<pre>{@code
Span span = tracer.spanBuilder("parent").startSpan();
try (Scope scope = parentSpan.makeCurrent()) {
// ...do some stuff, possibly creating further spans
} finally {
span.end();
}
}</pre>
This typically isn't useful in Elasticsearch, because a {@link Scope} can't be used across threads.
However, if a scope is active, then the APM agent can capture additional information, so this method
exists to make it possible to use scopes in the few situation where it makes sense.
@param traceable provides the ID of a currently-open span for which to open a scope.
@return a method to close the scope when you are finished with it.
|
java
|
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java
| 339
|
[
"traceContext",
"spanAttributes",
"spanBuilder"
] |
void
| true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
CopySourceButton
|
function CopySourceButton({source, symbolicatedSourcePromise}: Props) {
const symbolicatedSource = React.use(symbolicatedSourcePromise);
if (symbolicatedSource == null) {
const [, sourceURL, line, column] = source;
const handleCopy = withPermissionsCheck(
{permissions: ['clipboardWrite']},
() => copy(`${sourceURL}:${line}:${column}`),
);
return (
<Button onClick={handleCopy} title="Copy to clipboard">
<ButtonIcon type="copy" />
</Button>
);
}
const [, sourceURL, line, column] = symbolicatedSource.location;
const handleCopy = withPermissionsCheck(
{permissions: ['clipboardWrite']},
() => copy(`${sourceURL}:${line}:${column}`),
);
return (
<Button onClick={handleCopy} title="Copy to clipboard">
<ButtonIcon type="copy" />
</Button>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/Components/InspectedElementSourcePanel.js
| 68
|
[] | false
| 2
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
formatUTC
|
public static String formatUTC(final long millis, final String pattern, final Locale locale) {
return format(new Date(millis), pattern, UTC_TIME_ZONE, locale);
}
|
Formats a date/time into a specific pattern using the UTC time zone.
@param millis the date to format expressed in milliseconds.
@param pattern the pattern to use to format the date, not null.
@param locale the locale to use, may be {@code null}.
@return the formatted date.
|
java
|
src/main/java/org/apache/commons/lang3/time/DateFormatUtils.java
| 398
|
[
"millis",
"pattern",
"locale"
] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
initialize
|
public static <T> T initialize(final ConcurrentInitializer<T> initializer)
throws ConcurrentException {
return initializer != null ? initializer.get() : null;
}
|
Invokes the specified {@link ConcurrentInitializer} and returns the
object produced by the initializer. This method just invokes the {@code
get()} method of the given {@link ConcurrentInitializer}. It is
<strong>null</strong>-safe: if the argument is <strong>null</strong>, result is also
<strong>null</strong>.
@param <T> the type of the object produced by the initializer
@param initializer the {@link ConcurrentInitializer} to be invoked
@return the object managed by the {@link ConcurrentInitializer}
@throws ConcurrentException if the {@link ConcurrentInitializer} throws
an exception
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/ConcurrentUtils.java
| 287
|
[
"initializer"
] |
T
| true
| 2
| 7.36
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
arrayPush
|
function arrayPush(array, values) {
var index = -1,
length = values.length,
offset = array.length;
while (++index < length) {
array[offset + index] = values[index];
}
return array;
}
|
Appends the elements of `values` to `array`.
@private
@param {Array} array The array to modify.
@param {Array} values The values to append.
@returns {Array} Returns `array`.
|
javascript
|
lodash.js
| 666
|
[
"array",
"values"
] | false
| 2
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
unmodifiableSetMultimap
|
public static <K extends @Nullable Object, V extends @Nullable Object>
SetMultimap<K, V> unmodifiableSetMultimap(SetMultimap<K, V> delegate) {
if (delegate instanceof UnmodifiableSetMultimap || delegate instanceof ImmutableSetMultimap) {
return delegate;
}
return new UnmodifiableSetMultimap<>(delegate);
}
|
Returns an unmodifiable view of the specified {@code SetMultimap}. Query operations on the
returned multimap "read through" to the specified multimap, and attempts to modify the returned
multimap, either directly or through the multimap's views, result in an {@code
UnsupportedOperationException}.
<p>The returned multimap will be serializable if the specified multimap is serializable.
@param delegate the multimap for which an unmodifiable view is to be returned
@return an unmodifiable view of the specified multimap
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 916
|
[
"delegate"
] | true
| 3
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
|
from_positional
|
def from_positional(
cls, tensor: torch.Tensor, levels: list[DimEntry], has_device: bool
) -> Union[_Tensor, torch.Tensor]:
"""
Create a functorch Tensor from a regular PyTorch tensor with specified dimension levels.
This is the primary way to create Tensor objects with first-class dimensions.
Args:
tensor: The underlying PyTorch tensor
levels: List of DimEntry objects specifying the dimension structure
has_device: Whether the tensor is on a device (not CPU)
Returns:
A new Tensor instance with the specified dimensions, or a regular torch.Tensor
if there are no named dimensions
"""
seen_dims = 0
last = 0
for i, l in enumerate(levels):
if l.is_positional():
# Validate consecutive positional dimensions
assert last == 0 or last + 1 == l.position(), (
f"Positional dimensions must be consecutive, got {last} then {l.position()}"
)
last = l.position()
else:
# This is a named dimension
seen_dims += 1
# Validate final positional dimension
assert last == 0 or last == -1, (
f"Final positional dimension must be 0 or -1, got {last}"
)
if not seen_dims:
return tensor
# Create Tensor object with proper level management
result = cls()
result._tensor = tensor
result._levels = levels
result._has_device = has_device
result._batchtensor = None # Will be created lazily if needed
result._delayed = None
result._delayed_orig = None
result._delayed_args = None
# Validate tensor dimensionality matches levels
assert tensor.dim() == len(levels), (
f"Tensor has {tensor.dim()} dimensions but {len(levels)} levels provided"
)
return result
|
Create a functorch Tensor from a regular PyTorch tensor with specified dimension levels.
This is the primary way to create Tensor objects with first-class dimensions.
Args:
tensor: The underlying PyTorch tensor
levels: List of DimEntry objects specifying the dimension structure
has_device: Whether the tensor is on a device (not CPU)
Returns:
A new Tensor instance with the specified dimensions, or a regular torch.Tensor
if there are no named dimensions
|
python
|
functorch/dim/__init__.py
| 983
|
[
"cls",
"tensor",
"levels",
"has_device"
] |
Union[_Tensor, torch.Tensor]
| true
| 7
| 7.92
|
pytorch/pytorch
| 96,034
|
google
| false
|
arraySample
|
function arraySample(array) {
var length = array.length;
return length ? array[baseRandom(0, length - 1)] : undefined;
}
|
A specialized version of `_.sample` for arrays.
@private
@param {Array} array The array to sample.
@returns {*} Returns the random element.
|
javascript
|
lodash.js
| 2,459
|
[
"array"
] | false
| 2
| 6.16
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
baseAt
|
function baseAt(object, paths) {
var index = -1,
length = paths.length,
result = Array(length),
skip = object == null;
while (++index < length) {
result[index] = skip ? undefined : get(object, paths[index]);
}
return result;
}
|
The base implementation of `_.at` without support for individual paths.
@private
@param {Object} object The object to iterate over.
@param {string[]} paths The property paths to pick.
@returns {Array} Returns the picked elements.
|
javascript
|
lodash.js
| 2,613
|
[
"object",
"paths"
] | false
| 3
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
newline
|
private void newline() {
if (this.indent == null) {
return;
}
this.out.append("\n");
this.out.append(this.indent.repeat(this.stack.size()));
}
|
Encodes {@code value} to this stringer.
@param value the value to encode
@return this stringer.
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONStringer.java
| 344
|
[] |
void
| true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
listTransactions
|
default ListTransactionsResult listTransactions() {
return listTransactions(new ListTransactionsOptions());
}
|
List active transactions in the cluster. See
{@link #listTransactions(ListTransactionsOptions)} for more details.
@return The result
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,739
|
[] |
ListTransactionsResult
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
unique
|
def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
Examples
--------
>>> import numpy as np
>>> a = [1, 2, 1000, 2, 3]
>>> mask = [0, 0, 1, 0, 0]
>>> masked_a = np.ma.masked_array(a, mask)
>>> masked_a
masked_array(data=[1, 2, --, 2, 3],
mask=[False, False, True, False, False],
fill_value=999999)
>>> np.ma.unique(masked_a)
masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999)
>>> np.ma.unique(masked_a, return_index=True)
(masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999), array([0, 1, 4, 2]))
>>> np.ma.unique(masked_a, return_inverse=True)
(masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999), array([0, 1, 3, 1, 2]))
>>> np.ma.unique(masked_a, return_index=True, return_inverse=True)
(masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2]))
"""
output = np.unique(ar1,
return_index=return_index,
return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
output = tuple(output)
else:
output = output.view(MaskedArray)
return output
|
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
Examples
--------
>>> import numpy as np
>>> a = [1, 2, 1000, 2, 3]
>>> mask = [0, 0, 1, 0, 0]
>>> masked_a = np.ma.masked_array(a, mask)
>>> masked_a
masked_array(data=[1, 2, --, 2, 3],
mask=[False, False, True, False, False],
fill_value=999999)
>>> np.ma.unique(masked_a)
masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999)
>>> np.ma.unique(masked_a, return_index=True)
(masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999), array([0, 1, 4, 2]))
>>> np.ma.unique(masked_a, return_inverse=True)
(masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999), array([0, 1, 3, 1, 2]))
>>> np.ma.unique(masked_a, return_index=True, return_inverse=True)
(masked_array(data=[1, 2, 3, --],
mask=[False, False, False, True],
fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2]))
|
python
|
numpy/ma/extras.py
| 1,267
|
[
"ar1",
"return_index",
"return_inverse"
] | false
| 3
| 6
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
json_serialize
|
def json_serialize(value: Any) -> str | None:
"""
JSON serializer replicating current watchtower behavior.
This provides customers with an accessible import,
`airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize`
:param value: the object to serialize
:return: string representation of `value`
"""
return watchtower._json_serialize_default(value)
|
JSON serializer replicating current watchtower behavior.
This provides customers with an accessible import,
`airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize`
:param value: the object to serialize
:return: string representation of `value`
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/log/cloudwatch_task_handler.py
| 70
|
[
"value"
] |
str | None
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
executeInitializrMetadataRetrieval
|
private ClassicHttpResponse executeInitializrMetadataRetrieval(String url) {
HttpGet request = new HttpGet(url);
request.setHeader(new BasicHeader(HttpHeaders.ACCEPT, ACCEPT_META_DATA));
return execute(request, URI.create(url), "retrieve metadata");
}
|
Retrieves the meta-data of the service at the specified URL.
@param url the URL
@return the response
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrService.java
| 178
|
[
"url"
] |
ClassicHttpResponse
| true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
read
|
public long read() throws IOException {
if (receive == null) {
receive = new NetworkReceive(maxReceiveSize, id, memoryPool);
}
long bytesReceived = receive(this.receive);
if (this.receive.requiredMemoryAmountKnown() && !this.receive.memoryAllocated() && isInMutableState()) {
//pool must be out of memory, mute ourselves.
mute();
}
return bytesReceived;
}
|
Returns the port to which this channel's socket is connected or 0 if the socket has never been connected.
If the socket was connected prior to being closed, then this method will continue to return the
connected port number after the socket is closed.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 407
|
[] | true
| 5
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
unescapeHtml4
|
public static final String unescapeHtml4(final String input) {
return UNESCAPE_HTML4.translate(input);
}
|
Unescapes a string containing entity escapes to a string
containing the actual Unicode characters corresponding to the
escapes. Supports HTML 4.0 entities.
<p>For example, the string {@code "<Français>"}
will become {@code "<Français>"}</p>
<p>If an entity is unrecognized, it is left alone, and inserted
verbatim into the result string. e.g. {@code ">&zzzz;x"} will
become {@code ">&zzzz;x"}.</p>
@param input the {@link String} to unescape, may be null
@return a new unescaped {@link String}, {@code null} if null string input
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/StringEscapeUtils.java
| 729
|
[
"input"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
configureBean
|
public void configureBean(Object beanInstance) {
if (this.beanFactory == null) {
if (logger.isDebugEnabled()) {
logger.debug("BeanFactory has not been set on " + ClassUtils.getShortName(getClass()) + ": " +
"Make sure this configurer runs in a Spring container. Unable to configure bean of type [" +
ClassUtils.getDescriptiveType(beanInstance) + "]. Proceeding without injection.");
}
return;
}
BeanWiringInfoResolver bwiResolver = this.beanWiringInfoResolver;
Assert.state(bwiResolver != null, "No BeanWiringInfoResolver available");
BeanWiringInfo bwi = bwiResolver.resolveWiringInfo(beanInstance);
if (bwi == null) {
// Skip the bean if no wiring info given.
return;
}
ConfigurableListableBeanFactory beanFactory = this.beanFactory;
Assert.state(beanFactory != null, "No BeanFactory available");
try {
String beanName = bwi.getBeanName();
if (bwi.indicatesAutowiring() || (bwi.isDefaultBeanName() && beanName != null &&
!beanFactory.containsBean(beanName))) {
// Perform autowiring (also applying standard factory / post-processor callbacks).
beanFactory.autowireBeanProperties(beanInstance, bwi.getAutowireMode(), bwi.getDependencyCheck());
beanFactory.initializeBean(beanInstance, (beanName != null ? beanName : ""));
}
else {
// Perform explicit wiring based on the specified bean definition.
beanFactory.configureBean(beanInstance, (beanName != null ? beanName : ""));
}
}
catch (BeanCreationException ex) {
Throwable rootCause = ex.getMostSpecificCause();
if (rootCause instanceof BeanCurrentlyInCreationException bce) {
String bceBeanName = bce.getBeanName();
if (bceBeanName != null && beanFactory.isCurrentlyInCreation(bceBeanName)) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to create target bean '" + bce.getBeanName() +
"' while configuring object of type [" + beanInstance.getClass().getName() +
"] - probably due to a circular reference. This is a common startup situation " +
"and usually not fatal. Proceeding without injection. Original exception: " + ex);
}
return;
}
}
throw ex;
}
}
|
Configure the bean instance.
<p>Subclasses can override this to provide custom configuration logic.
Typically called by an aspect, for all bean instances matched by a pointcut.
@param beanInstance the bean instance to configure (must <b>not</b> be {@code null})
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/wiring/BeanConfigurerSupport.java
| 122
|
[
"beanInstance"
] |
void
| true
| 15
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
create
|
public static URL create(File file, JarEntry nestedEntry) {
return create(file, (nestedEntry != null) ? nestedEntry.getName() : null);
}
|
Create a new jar URL.
@param file the jar file
@param nestedEntry the nested entry or {@code null}
@return a jar file URL
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/JarUrl.java
| 50
|
[
"file",
"nestedEntry"
] |
URL
| true
| 2
| 7.68
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return "Call(callName=" + callName + ", deadlineMs=" + deadlineMs +
", tries=" + tries + ", nextAllowedTryMs=" + nextAllowedTryMs + ")";
}
|
Handle an UnsupportedVersionException.
@param exception The exception.
@return True if the exception can be handled; false otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 999
|
[] |
String
| true
| 1
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
validState
|
public static void validState(final boolean expression, final String message, final Object... values) {
if (!expression) {
throw new IllegalStateException(getMessage(message, values));
}
}
|
Validate that the stateful condition is {@code true}; otherwise
throwing an exception with the specified message. This method is useful when
validating according to an arbitrary boolean expression, such as validating a
primitive number or using your own custom validation expression.
<pre>Validate.validState(this.isOk(), "The state is not OK: %s", myObject);</pre>
@param expression the boolean expression to check.
@param message the {@link String#format(String, Object...)} exception message if invalid, not null.
@param values the optional values for the formatted exception message, null array not recommended.
@throws IllegalStateException if expression is {@code false}.
@see #validState(boolean)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 1,268
|
[
"expression",
"message"
] |
void
| true
| 2
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_get_relevant_map_indexes
|
def _get_relevant_map_indexes(
*,
task: Operator,
run_id: str,
map_index: int,
relative: Operator,
ti_count: int | None,
session: Session,
) -> int | range | None:
"""
Infer the map indexes of a relative that's "relevant" to this ti.
The bulk of the logic mainly exists to solve the problem described by
the following example, where 'val' must resolve to different values,
depending on where the reference is being used::
@task
def this_task(v): # This is self.task.
return v * 2
@task_group
def tg1(inp):
val = upstream(inp) # This is the upstream task.
this_task(val) # When inp is 1, val here should resolve to 2.
return val
# This val is the same object returned by tg1.
val = tg1.expand(inp=[1, 2, 3])
@task_group
def tg2(inp):
another_task(inp, val) # val here should resolve to [2, 4, 6].
tg2.expand(inp=["a", "b"])
The surrounding mapped task groups of ``upstream`` and ``task`` are
inspected to find a common "ancestor". If such an ancestor is found,
we need to return specific map indexes to pull a partial value from
upstream XCom.
The same logic apply for finding downstream tasks.
:param task: Current task being inspected.
:param run_id: Current run ID.
:param map_index: Map index of the current task instance.
:param relative: The relative task to find relevant map indexes for.
:param ti_count: The total count of task instance this task was expanded
by the scheduler, i.e. ``expanded_ti_count`` in the template context.
:return: Specific map index or map indexes to pull, or ``None`` if we
want to "whole" return value (i.e. no mapped task groups involved).
"""
from airflow.models.mappedoperator import get_mapped_ti_count
# This value should never be None since we already know the current task
# is in a mapped task group, and should have been expanded, despite that,
# we need to check that it is not None to satisfy Mypy.
# But this value can be 0 when we expand an empty list, for that it is
# necessary to check that ti_count is not 0 to avoid dividing by 0.
if not ti_count:
return None
# Find the innermost common mapped task group between the current task
# If the current task and the referenced task does not have a common
# mapped task group, the two are in different task mapping contexts
# (like another_task above), and we should use the "whole" value.
if (common_ancestor := _find_common_ancestor_mapped_group(task, relative)) is None:
return None
# At this point we know the two tasks share a mapped task group, and we
# should use a "partial" value. Let's break down the mapped ti count
# between the ancestor and further expansion happened inside it.
ancestor_ti_count = get_mapped_ti_count(common_ancestor, run_id, session=session)
ancestor_map_index = map_index * ancestor_ti_count // ti_count
# If the task is NOT further expanded inside the common ancestor, we
# only want to reference one single ti. We must walk the actual DAG,
# and "ti_count == ancestor_ti_count" does not work, since the further
# expansion may be of length 1.
if not _is_further_mapped_inside(relative, common_ancestor):
return ancestor_map_index
# Otherwise we need a partial aggregation for values from selected task
# instances in the ancestor's expansion context.
further_count = ti_count // ancestor_ti_count
map_index_start = ancestor_map_index * further_count
return range(map_index_start, map_index_start + further_count)
|
Infer the map indexes of a relative that's "relevant" to this ti.
The bulk of the logic mainly exists to solve the problem described by
the following example, where 'val' must resolve to different values,
depending on where the reference is being used::
@task
def this_task(v): # This is self.task.
return v * 2
@task_group
def tg1(inp):
val = upstream(inp) # This is the upstream task.
this_task(val) # When inp is 1, val here should resolve to 2.
return val
# This val is the same object returned by tg1.
val = tg1.expand(inp=[1, 2, 3])
@task_group
def tg2(inp):
another_task(inp, val) # val here should resolve to [2, 4, 6].
tg2.expand(inp=["a", "b"])
The surrounding mapped task groups of ``upstream`` and ``task`` are
inspected to find a common "ancestor". If such an ancestor is found,
we need to return specific map indexes to pull a partial value from
upstream XCom.
The same logic apply for finding downstream tasks.
:param task: Current task being inspected.
:param run_id: Current run ID.
:param map_index: Map index of the current task instance.
:param relative: The relative task to find relevant map indexes for.
:param ti_count: The total count of task instance this task was expanded
by the scheduler, i.e. ``expanded_ti_count`` in the template context.
:return: Specific map index or map indexes to pull, or ``None`` if we
want to "whole" return value (i.e. no mapped task groups involved).
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 2,206
|
[
"task",
"run_id",
"map_index",
"relative",
"ti_count",
"session"
] |
int | range | None
| true
| 4
| 8.16
|
apache/airflow
| 43,597
|
sphinx
| false
|
_check_non_neg_array
|
def _check_non_neg_array(self, X, reset_n_features, whom):
    """Validate ``X`` and ensure it contains no negative values.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input data to validate.
    reset_n_features : bool
        Whether to reset the fitted feature count during validation; when
        False, the dtype of the already-fitted ``components_`` is enforced
        instead of the default float dtypes.
    whom : str
        Caller name used in the error message for negative values.

    Returns
    -------
    The validated array (CSR format if sparse).
    """
    if reset_n_features:
        allowed_dtype = [np.float64, np.float32]
    else:
        allowed_dtype = self.components_.dtype
    checked = validate_data(
        self,
        X,
        reset=reset_n_features,
        accept_sparse="csr",
        dtype=allowed_dtype,
    )
    check_non_negative(checked, whom)
    return checked
|
check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
|
python
|
sklearn/decomposition/_lda.py
| 553
|
[
"self",
"X",
"reset_n_features",
"whom"
] | false
| 2
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
device_name
|
def device_name(self) -> Optional[str]:
    """Return the (lazily cached) device name string.

    The name is resolved once and memoized in ``self._device_name``.
    Resolution is only performed for the "cuda" device type, where the
    device properties' ``gcnArchName`` is used; otherwise ``None`` is
    returned.
    """
    if self._device_name is None:
        device = self.device()
        if self.device_type == "cuda":
            # NOTE(review): gcnArchName suggests ROCm-specific naming —
            # confirm behavior on non-ROCm CUDA builds.
            self._device_name = torch.cuda.get_device_properties(device).gcnArchName
    return self._device_name
|
Get the device name information.
Returns:
A tuple of (gpu_name, vendor, model)
|
python
|
torch/_inductor/kernel_inputs.py
| 94
|
[
"self"
] |
Optional[str]
| true
| 3
| 7.76
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
read_query
|
def read_query(
    self,
    sql: str,
    index_col: str | list[str] | None = None,
    coerce_float: bool = True,
    parse_dates=None,
    params=None,
    chunksize: int | None = None,
    dtype: DtypeArg | None = None,
    dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
    """Read the result of a SQL query into a DataFrame.

    Parameters
    ----------
    sql : str
        SQL query to be executed.
    index_col : str or list of str, optional
        Column name(s) to use as the index of the returned DataFrame.
    coerce_float : bool, default True
        Attempt to convert values of non-string, non-numeric objects
        (like decimal.Decimal) to floating point.
    parse_dates : list or dict, optional
        Columns to parse as dates; see :func:`pandas.to_datetime` for the
        supported forms.
    params : list, tuple or dict, optional
        Parameters passed to execute; the placeholder syntax is database
        driver dependent (PEP 249 paramstyle).
    chunksize : int, optional
        If given, return an iterator yielding DataFrames of up to
        ``chunksize`` rows instead of a single DataFrame.
    dtype : type name or dict of columns, optional
        Data type for data or columns.
    dtype_backend : default "numpy"
        Backend used for the resulting dtypes.

    Returns
    -------
    DataFrame or Iterator[DataFrame]

    See Also
    --------
    read_sql_table : Read SQL database table into a DataFrame.
    read_sql
    """
    result = self.execute(sql, params)
    columns = result.keys()
    if chunksize is None:
        # Materialize the entire result set eagerly.
        rows = result.fetchall()
        return _wrap_result(
            rows,
            columns,
            index_col=index_col,
            coerce_float=coerce_float,
            parse_dates=parse_dates,
            dtype=dtype,
            dtype_backend=dtype_backend,
        )
    # Stream the result set lazily, `chunksize` rows at a time; mark the
    # connection as producing a generator so cleanup is deferred.
    self.returns_generator = True
    return self._query_iterator(
        result,
        self.exit_stack,
        chunksize,
        columns,
        index_col=index_col,
        coerce_float=coerce_float,
        parse_dates=parse_dates,
        dtype=dtype,
        dtype_backend=dtype_backend,
    )
|
Read SQL query into a DataFrame.
Parameters
----------
sql : str
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : bool, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
{'a': np.float64, 'b': np.int32, 'c': 'Int64'}
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
|
python
|
pandas/io/sql.py
| 1,801
|
[
"self",
"sql",
"index_col",
"coerce_float",
"parse_dates",
"params",
"chunksize",
"dtype",
"dtype_backend"
] |
DataFrame | Iterator[DataFrame]
| true
| 3
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toIntegerObject
|
/**
 * Converts a boolean to an Integer, mapping {@code true} to {@code 1}
 * and {@code false} to {@code 0}, reusing the shared {@code NumberUtils}
 * constants so no new objects are allocated.
 *
 * @param bool the boolean to convert
 * @return one if {@code true}, zero if {@code false}
 */
public static Integer toIntegerObject(final boolean bool) {
    if (bool) {
        return NumberUtils.INTEGER_ONE;
    }
    return NumberUtils.INTEGER_ZERO;
}
|
Converts a boolean to an Integer using the convention that
{@code true} is {@code 1} and {@code false} is {@code 0}.
<pre>
BooleanUtils.toIntegerObject(true) = Integer.valueOf(1)
BooleanUtils.toIntegerObject(false) = Integer.valueOf(0)
</pre>
@param bool the boolean to convert
@return one if {@code true}, zero if {@code false}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 941
|
[
"bool"
] |
Integer
| true
| 2
| 7.36
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
restore_to_event
|
def restore_to_event(
    self,
    node: fx.Node,
    prev_event: Optional[PGEvent],
    next_event: Optional[PGEvent],
) -> None:
    """Re-insert ``node``'s event into the timeline after a failed merge.

    The event is relinked between ``prev_event`` and ``next_event``, the
    scheduling dependencies to/from its neighbors are restored, and the
    temporary bypass edge between the two neighbors is removed.
    """
    event = self.node_to_event[node]
    # Relink the event into the doubly-linked timeline.
    event.insert_between(prev_event, next_event)
    if prev_event and next_event:
        self.aug_graph.add_extra_dep(n=node, dep=prev_event.node)
        # Drop the bypass edge that directly connected the two neighbors.
        self.aug_graph.remove_extra_dep(n=next_event.node, dep=prev_event.node)
    elif prev_event:
        self.aug_graph.add_extra_dep(n=node, dep=prev_event.node)
    elif next_event:
        self.aug_graph.add_extra_dep(n=next_event.node, dep=node)
|
Restore node to timeline after failed merge attempt.
|
python
|
torch/_inductor/fx_passes/overlap_preserving_bucketer.py
| 655
|
[
"self",
"node",
"prev_event",
"next_event"
] |
None
| true
| 6
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
createStrategyMap
|
/**
 * Creates the lookup table that maps each circuit breaker {@code State}
 * to the strategy object implementing its behavior.
 *
 * @return the strategy map
 */
private static Map<State, AbstractStateStrategy> createStrategyMap() {
    // EnumMap gives a compact, constant-time lookup keyed on the enum.
    final Map<State, AbstractStateStrategy> strategies = new EnumMap<>(State.class);
    strategies.put(State.OPEN, new StateStrategyOpen());
    strategies.put(State.CLOSED, new StateStrategyClosed());
    return strategies;
}
|
Creates the map with strategy objects. It allows access for a strategy for a given
state.
@return the strategy map
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/EventCountCircuitBreaker.java
| 292
|
[] | true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
loadConfiguration
|
/**
 * Loads the Log4j2 logging configuration from the given location,
 * delegating to {@code load} with the resolved log file.
 *
 * @param initializationContext the logging initialization context
 * @param location the location of the configuration to load
 * @param logFile the log file to write to, or {@code null}
 */
@Override
protected void loadConfiguration(LoggingInitializationContext initializationContext, String location,
        @Nullable LogFile logFile) {
    load(initializationContext, location, logFile);
}
|
Load the logging configuration from the given location, delegating to the
internal {@code load} method with the resolved log file.
@param initializationContext the logging initialization context
@param location the location of the configuration to load
@param logFile the log file to write to, or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 264
|
[
"initializationContext",
"location",
"logFile"
] |
void
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
start
|
def start(self, c: Consumer) -> None:
    """Set up native delayed delivery for every configured broker URL.

    Each URL is attempted independently (with retries); individual
    failures are logged and collected, and only a complete failure of
    every URL is reported as critical.

    Args:
        c: The Celery consumer instance

    Raises:
        ValueError: If configuration validation fails
    """
    app: Celery = c.app
    try:
        self._validate_configuration(app)
    except ValueError as exc:
        logger.critical("Configuration validation failed: %s", str(exc))
        raise

    broker_urls = self._validate_broker_urls(app.conf.broker_url)
    failures = []
    for url in broker_urls:
        try:
            # Retry transient broker errors before giving up on this URL.
            retry_over_time(
                self._setup_delayed_delivery,
                args=(c, url),
                catch=RETRIED_EXCEPTIONS,
                errback=self._on_retry,
                interval_start=RETRY_INTERVAL,
                max_retries=MAX_RETRIES,
            )
        except Exception as exc:
            logger.warning(
                "Failed to setup delayed delivery for %r: %s",
                maybe_sanitize_url(url), str(exc)
            )
            failures.append((url, exc))

    # Only a total failure (every URL) disables the feature.
    if len(failures) == len(broker_urls):
        logger.critical(
            "Failed to setup delayed delivery for all broker URLs. "
            "Native delayed delivery will not be available."
        )
|
Initialize delayed delivery for all broker URLs.
Attempts to set up delayed delivery for each broker URL in the configuration.
Failures are logged but don't prevent attempting remaining URLs.
Args:
c: The Celery consumer instance
Raises:
ValueError: If configuration validation fails
|
python
|
celery/worker/consumer/delayed_delivery.py
| 63
|
[
"self",
"c"
] |
None
| true
| 3
| 6.56
|
celery/celery
| 27,741
|
google
| false
|
common_type
|
def common_type(*arrays):
    """
    Return a scalar type which is common to the input arrays.

    The result is always an inexact (i.e. floating point) scalar type,
    even when every input is an integer array; an integer input forces a
    minimum precision of 64-bit floating point. All inputs except int64
    and uint64 can be safely cast to the returned dtype without loss of
    information.

    Parameters
    ----------
    array1, array2, ... : ndarrays
        Input arrays.

    Returns
    -------
    out : data type code
        Data type code.

    See Also
    --------
    dtype, mintypecode

    Examples
    --------
    >>> np.common_type(np.arange(2, dtype=np.float32))
    <class 'numpy.float32'>
    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
    <class 'numpy.complex128'>
    """
    complex_seen = False
    precision = 0
    for array in arrays:
        scalar_type = array.dtype.type
        if iscomplexobj(array):
            complex_seen = True
        if issubclass(scalar_type, _nx.integer):
            # Integers are promoted to double precision (index 2).
            this_precision = 2
        else:
            this_precision = array_precision.get(scalar_type)
        if this_precision is None:
            raise TypeError("can't get common type for non-numeric array")
        precision = max(precision, this_precision)
    kind = 1 if complex_seen else 0
    return array_type[kind][precision]
|
Return a scalar type which is common to the input arrays.
The return type will always be an inexact (i.e. floating point) scalar
type, even if all the arrays are integer arrays. If one of the inputs is
an integer array, the minimum precision type that is returned is a
64-bit floating point dtype.
All input arrays except int64 and uint64 can be safely cast to the
returned dtype without loss of information.
Parameters
----------
array1, array2, ... : ndarrays
Input arrays.
Returns
-------
out : data type code
Data type code.
See Also
--------
dtype, mintypecode
Examples
--------
>>> np.common_type(np.arange(2, dtype=np.float32))
<class 'numpy.float32'>
>>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
<class 'numpy.float64'>
>>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
<class 'numpy.complex128'>
|
python
|
numpy/lib/_type_check_impl.py
| 658
|
[] | false
| 8
| 7.2
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
difference
|
/**
 * Computes the difference between two sorted maps, ordered by the left
 * map's comparator (or natural ordering if it has none). The result is an
 * immutable snapshot taken at call time; later changes to either map are
 * not reflected.
 *
 * @param left the map to treat as the "left" map for purposes of comparison
 * @param right the map to treat as the "right" map for purposes of comparison
 * @return the difference between the two maps
 */
public static <K extends @Nullable Object, V extends @Nullable Object>
    SortedMapDifference<K, V> difference(
        SortedMap<K, ? extends V> left, Map<? extends K, ? extends V> right) {
  checkNotNull(left);
  checkNotNull(right);
  Comparator<? super K> order = orNaturalOrder(left.comparator());
  SortedMap<K, V> leftOnly = newTreeMap(order);
  SortedMap<K, V> rightOnly = newTreeMap(order);
  SortedMap<K, V> inBoth = newTreeMap(order);
  SortedMap<K, ValueDifference<V>> changed = newTreeMap(order);
  // Seed with every right-side entry; doDifference whittles this down to
  // the entries present only on the right.
  rightOnly.putAll(right);
  doDifference(left, right, Equivalence.equals(), leftOnly, rightOnly, inBoth, changed);
  return new SortedMapDifferenceImpl<>(leftOnly, rightOnly, inBoth, changed);
}
|
Computes the difference between two sorted maps, using the comparator of the left map, or
{@code Ordering.natural()} if the left map uses the natural ordering of its elements. This
difference is an immutable snapshot of the state of the maps at the time this method is called.
It will never change, even if the maps change at a later time.
<p>Since this method uses {@code TreeMap} instances internally, the keys of the right map must
all compare as distinct according to the comparator of the left map.
<p><b>Note:</b>If you only need to know whether two sorted maps have the same mappings, call
{@code left.equals(right)} instead of this method.
@param left the map to treat as the "left" map for purposes of comparison
@param right the map to treat as the "right" map for purposes of comparison
@return the difference between the two maps
@since 11.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 531
|
[
"left",
"right"
] | true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
isAfter
|
/**
 * Checks whether this range is entirely after the specified element.
 *
 * @param element the element to check for, null returns false
 * @return true if this range is entirely after the specified element
 */
public boolean isAfter(final T element) {
    // A null element can never be compared, so the answer is always false.
    return element != null && comparator.compare(element, minimum) < 0;
}
|
Checks whether this range is after the specified element.
@param element the element to check for, null returns false.
@return true if this range is entirely after the specified element.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 419
|
[
"element"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
equals
|
/**
 * Compares this prepared transaction state with another object. Two states
 * are equal when they have the same producer id and epoch; a strict class
 * match is used, so instances of subclasses never compare equal.
 */
@Override
public boolean equals(Object o) {
    // Cheap identity fast path.
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final PreparedTxnState other = (PreparedTxnState) o;
    return producerId == other.producerId && epoch == other.epoch;
}
|
Compares this prepared transaction state with another object for equality.
Two states are equal when they have the same producer id and epoch; a strict
class match is used, so subclasses never compare equal.
@return {@code true} if the given object is an equal {@code PreparedTxnState}
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/PreparedTxnState.java
| 113
|
[
"o"
] | true
| 5
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
asSupplier
|
/**
 * Converts the given {@link FailableSupplier} into a standard
 * {@link Supplier}, delegating each call to {@link #get}.
 *
 * @param <O> the type supplied by the suppliers
 * @param supplier a {@link FailableSupplier}
 * @return a standard {@link Supplier}
 */
public static <O> Supplier<O> asSupplier(final FailableSupplier<O, ?> supplier) {
    return () -> {
        return get(supplier);
    };
}
|
Converts the given {@link FailableSupplier} into a standard {@link Supplier}.
@param <O> the type supplied by the suppliers
@param supplier a {@link FailableSupplier}
@return a standard {@link Supplier}
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 451
|
[
"supplier"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
formatOutput
|
/**
 * Formats the generator's configured output location as a dimmed,
 * cwd-relative path suffix (e.g. " to ./generated/client"), or an empty
 * string when the generator has no output configured.
 */
function formatOutput(generator: Generator): string {
  const output = generator.options?.generator.output
  if (!output) {
    return ''
  }
  const relativePath = path.relative(process.cwd(), parseEnvValue(output))
  return dim(` to .${path.sep}${relativePath}`)
}
|
Creates and formats the success message for the given generator to print to
the console after generation finishes.
@param time time in milliseconds it took for the generator to run.
|
typescript
|
packages/internals/src/cli/getGeneratorSuccessMessage.ts
| 31
|
[
"generator"
] | true
| 2
| 6.48
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
deduceBindMethod
|
/**
 * Deduces the {@code BindMethod} that should be used for the given
 * {@link Bindable}, based on the bind constructor resolved by the default
 * {@link BindConstructorProvider}.
 *
 * @param bindable the source bindable
 * @return the bind method to use
 */
static org.springframework.boot.context.properties.bind.BindMethod deduceBindMethod(Bindable<Object> bindable) {
    return deduceBindMethod(BindConstructorProvider.DEFAULT.getBindConstructor(bindable, false));
}
|
Deduce the {@code BindMethod} that should be used for the given {@link Bindable}.
@param bindable the source bindable
@return the bind method to use
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
| 309
|
[
"bindable"
] | true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getTrustDiagnosticFailure
|
/**
 * Builds a human-readable diagnostic message explaining why trust could not
 * be established with a TLS peer, based on the certificate chain the peer
 * presented: subject, fingerprint, key usage, expiry, subject alternative
 * names (for servers), issuer information and chain/trust status.
 *
 * @param chain the certificate chain presented by the peer (may be null/empty)
 * @param peerType whether the peer is a client or a server
 * @param session the SSL session, used for the peer host and session details
 * @param contextName The descriptive name of this SSL context (e.g. "xpack.security.transport.ssl")
 * @param trustedIssuers A Map of DN to Certificate, for the issuers that were trusted in the context in which this
 *        failure occurred (see {@link javax.net.ssl.X509TrustManager#getAcceptedIssuers()})
 * @return the diagnostic message
 */
public String getTrustDiagnosticFailure(
    X509Certificate[] chain,
    PeerType peerType,
    SSLSession session,
    String contextName,
    @Nullable Map<String, List<X509Certificate>> trustedIssuers
) {
    final String peerAddress = Optional.ofNullable(session).map(SSLSession::getPeerHost).orElse("<unknown host>");
    final StringBuilder message = new StringBuilder("failed to establish trust with ").append(peerType.name().toLowerCase(Locale.ROOT))
        .append(" at [")
        .append(peerAddress)
        .append("]; ");
    // No chain at all: nothing further to describe.
    if (chain == null || chain.length == 0) {
        message.append("the ").append(peerType.name().toLowerCase(Locale.ROOT)).append(" did not provide a certificate");
        return message.toString();
    }
    // chain[0] is always the peer's own (end-entity) certificate.
    final X509Certificate peerCert = chain[0];
    message.append("the ")
        .append(peerType.name().toLowerCase(Locale.ROOT))
        .append(" provided a certificate with subject name [")
        .append(peerCert.getSubjectX500Principal().getName())
        .append("], ")
        .append(fingerprintDescription(peerCert))
        .append(", ")
        .append(keyUsageDescription(peerCert))
        .append(" and ")
        .append(extendedKeyUsageDescription(peerCert));
    addCertificateExpiryDescription(peerCert, message);
    addSessionDescription(session, message);
    // SANs only matter for hostname verification against servers.
    if (peerType == PeerType.SERVER) {
        try {
            final Collection<List<?>> alternativeNames = peerCert.getSubjectAlternativeNames();
            if (alternativeNames == null || alternativeNames.isEmpty()) {
                message.append("; the certificate does not have any subject alternative names");
            } else {
                final List<String> hostnames = describeValidHostnames(peerCert);
                if (hostnames.isEmpty()) {
                    message.append("; the certificate does not have any DNS/IP subject alternative names");
                } else {
                    message.append("; the certificate has subject alternative names [").append(String.join(",", hostnames)).append("]");
                }
            }
        } catch (CertificateParsingException e) {
            message.append("; the certificate's subject alternative names cannot be parsed");
        }
    }
    // Describe the issuer of the peer certificate itself.
    if (isSelfIssued(peerCert)) {
        message.append("; the certificate is ").append(describeSelfIssuedCertificate(peerCert, contextName, trustedIssuers));
    } else {
        final String issuerName = peerCert.getIssuerX500Principal().getName();
        message.append("; the certificate is issued by [").append(issuerName).append("]");
        if (chain.length == 1) {
            message.append(" but the ")
                .append(peerType.name().toLowerCase(Locale.ROOT))
                .append(" did not provide a copy of the issuing certificate in the certificate chain")
                .append(describeIssuerTrust(contextName, trustedIssuers, peerCert, issuerName));
        }
    }
    // Walk the rest of the chain, noting which signers are trusted issuers.
    if (chain.length > 1) {
        message.append("; the certificate is ");
        // skip index-0, that's the peer cert.
        for (int i = 1; i < chain.length; i++) {
            message.append("signed by (subject [")
                .append(chain[i].getSubjectX500Principal().getName())
                .append("] ")
                .append(fingerprintDescription(chain[i]));
            if (trustedIssuers != null) {
                if (resolveCertificateTrust(trustedIssuers, chain[i]).isTrusted()) {
                    message.append(" {trusted issuer}");
                }
            }
            message.append(") ");
        }
        final X509Certificate root = chain[chain.length - 1];
        if (isSelfIssued(root)) {
            message.append("which is ").append(describeSelfIssuedCertificate(root, contextName, trustedIssuers));
        } else {
            final String rootIssuer = root.getIssuerX500Principal().getName();
            message.append("which is issued by [")
                .append(rootIssuer)
                .append("] (but that issuer certificate was not provided in the chain)")
                .append(describeIssuerTrust(contextName, trustedIssuers, root, rootIssuer));
        }
    }
    return message.toString();
}
|
@param contextName The descriptive name of this SSL context (e.g. "xpack.security.transport.ssl")
@param trustedIssuers A Map of DN to Certificate, for the issuers that were trusted in the context in which this failure occurred
(see {@link javax.net.ssl.X509TrustManager#getAcceptedIssuers()})
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslDiagnostics.java
| 194
|
[
"chain",
"peerType",
"session",
"contextName",
"trustedIssuers"
] |
String
| true
| 15
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_is_dtype_compat
|
def _is_dtype_compat(self, other: Index) -> Categorical:
    """
    *this is an internal non-public method*

    provide a comparison between the dtype of self and other (coercing if
    needed)

    Parameters
    ----------
    other : Index

    Returns
    -------
    Categorical

    Raises
    ------
    TypeError if the dtypes are not compatible
    """
    if isinstance(other.dtype, CategoricalDtype):
        # Already categorical: just require matching categories (up to order).
        cat = extract_array(other)
        cat = cast(Categorical, cat)
        if not cat._categories_match_up_to_permutation(self._values):
            raise TypeError(
                "categories must match existing categories when appending"
            )
    elif other._is_multi:
        # preempt raising NotImplementedError in isna call
        raise TypeError("MultiIndex is not dtype-compatible with CategoricalIndex")
    else:
        # Non-categorical input: coerce to our dtype, but reject any
        # non-NA values that are not in our categories (get_indexer == -1).
        values = other
        codes = self.categories.get_indexer(values)
        if ((codes == -1) & ~values.isna()).any():
            # GH#37667 see test_equals_non_category
            raise TypeError(
                "categories must match existing categories when appending"
            )
        cat = Categorical(other, dtype=self.dtype)
        other = CategoricalIndex(cat)
        # Double-check the round trip preserved every original value.
        if not other.isin(values).all():
            raise TypeError(
                "cannot append a non-category item to a CategoricalIndex"
            )
        cat = other._values
    return cat
|
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Parameters
----------
other : Index
Returns
-------
Categorical
Raises
------
TypeError if the dtypes are not compatible
|
python
|
pandas/core/indexes/category.py
| 226
|
[
"self",
"other"
] |
Categorical
| true
| 7
| 6.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
doProcess
|
/**
 * Runs the AOT processing step.
 *
 * @return the result of the processing
 */
protected abstract T doProcess();
|
Run AOT processing.
@return the result of the processing.
|
java
|
spring-context/src/main/java/org/springframework/context/aot/AbstractAotProcessor.java
| 91
|
[] |
T
| true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getServicesObjectAllocator
|
/**
 * Creates the {@link ObjectAllocator} used by the language service layer,
 * supplying the constructors for the services-specific rich object
 * implementations (nodes, tokens, identifiers, source files, symbols,
 * types, signatures and source-map sources).
 */
function getServicesObjectAllocator(): ObjectAllocator {
    const allocator: ObjectAllocator = {
        getNodeConstructor: () => NodeObject,
        getTokenConstructor: () => TokenObject,
        getIdentifierConstructor: () => IdentifierObject,
        getPrivateIdentifierConstructor: () => PrivateIdentifierObject,
        getSourceFileConstructor: () => SourceFileObject,
        getSymbolConstructor: () => SymbolObject,
        getTypeConstructor: () => TypeObject,
        getSignatureConstructor: () => SignatureObject,
        getSourceMapSourceConstructor: () => SourceMapSourceObject,
    };
    return allocator;
}
|
Creates the {@link ObjectAllocator} used by the language service, supplying
the constructors for the services-layer rich object implementations (nodes,
tokens, identifiers, source files, symbols, types, signatures and source-map
sources).
|
typescript
|
src/services/services.ts
| 1,326
|
[] | true
| 1
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
between
|
/**
 * Creates a predicate to test if {@code [b <= a <= c]} or
 * {@code [b >= a >= c]} where {@code a} is the tested object.
 *
 * @param b the object to compare to the tested object
 * @param c the object to compare to the tested object
 * @param <A> type of the test object
 * @return a predicate for true if the tested object is between b and c
 */
public static <A extends Comparable<A>> Predicate<A> between(final A b, final A c) {
    return a -> is(a).between(b, c);
}
|
Creates a predicate to test if {@code [b <= a <= c]} or {@code [b >= a >= c]} where the {@code a} is the tested object.
@param b the object to compare to the tested object
@param c the object to compare to the tested object
@param <A> type of the test object
@return a predicate for true if the tested object is between b and c
|
java
|
src/main/java/org/apache/commons/lang3/compare/ComparableUtils.java
| 136
|
[
"b",
"c"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
is_integer_dtype
|
def is_integer_dtype(arr_or_dtype) -> bool:
    """
    Check whether the provided array or dtype is of an integer dtype.

    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
    The nullable Integer extension dtypes (e.g. pandas.Int64Dtype) are also
    considered integer by this function.

    Parameters
    ----------
    arr_or_dtype : array-like or dtype
        The array or dtype to check.

    Returns
    -------
    boolean
        Whether or not the array or dtype is of an integer dtype and
        not an instance of timedelta64.

    See Also
    --------
    api.types.is_integer : Return True if given object is integer.
    api.types.is_numeric_dtype : Check whether the provided array or dtype is of a
        numeric dtype.
    api.types.is_float_dtype : Check whether the provided array or dtype is of a
        float dtype.
    Int64Dtype : An ExtensionDtype for Int64Dtype integer data.

    Examples
    --------
    >>> from pandas.api.types import is_integer_dtype
    >>> is_integer_dtype(int)
    True
    >>> is_integer_dtype(float)
    False
    >>> is_integer_dtype("Int8")
    True
    >>> is_integer_dtype(np.timedelta64)
    False
    >>> is_integer_dtype(pd.Series([1, 2]))
    True
    """
    # NumPy-backed integer dtypes (excluding datetime-like kinds).
    by_numpy = _is_dtype_type(
        arr_or_dtype, _classes_and_not_datetimelike(np.integer)
    )
    # Nullable extension integer dtypes report kind "i" (signed) or "u"
    # (unsigned).
    by_extension = _is_dtype(
        arr_or_dtype,
        lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu",
    )
    return by_numpy or by_extension
|
Check whether the provided array or dtype is of an integer dtype.
Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered
as integer by this function.
Parameters
----------
arr_or_dtype : array-like or dtype
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of an integer dtype and
not an instance of timedelta64.
See Also
--------
api.types.is_integer : Return True if given object is integer.
api.types.is_numeric_dtype : Check whether the provided array or dtype is of a
numeric dtype.
api.types.is_float_dtype : Check whether the provided array or dtype is of a
float dtype.
Int64Dtype : An ExtensionDtype for Int64Dtype integer data.
Examples
--------
>>> from pandas.api.types import is_integer_dtype
>>> is_integer_dtype(str)
False
>>> is_integer_dtype(int)
True
>>> is_integer_dtype(float)
False
>>> is_integer_dtype(np.uint64)
True
>>> is_integer_dtype("int8")
True
>>> is_integer_dtype("Int8")
True
>>> is_integer_dtype(pd.Int8Dtype)
True
>>> is_integer_dtype(np.datetime64)
False
>>> is_integer_dtype(np.timedelta64)
False
>>> is_integer_dtype(np.array(["a", "b"]))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_integer_dtype(pd.Index([1, 2.0])) # float
False
|
python
|
pandas/core/dtypes/common.py
| 729
|
[
"arr_or_dtype"
] |
bool
| true
| 3
| 7.76
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
density
|
def density(w):
    """Compute density of a sparse vector.

    Parameters
    ----------
    w : {ndarray, sparse matrix} or None
        The input data; ``None`` yields a density of 0.

    Returns
    -------
    float
        The density of w, between 0 and 1.

    Examples
    --------
    >>> from scipy import sparse
    >>> from sklearn.utils.extmath import density
    >>> X = sparse.random(10, 10, density=0.25, random_state=0)
    >>> density(X)
    0.25
    """
    if w is None:
        return 0
    if hasattr(w, "toarray"):
        # Sparse input: nnz over the total number of entries.
        rows, cols = w.shape
        return float(w.nnz) / (rows * cols)
    # Dense input: fraction of non-zero entries.
    return float((w != 0).sum()) / w.size
|
Compute density of a sparse vector.
Parameters
----------
w : {ndarray, sparse matrix}
The input data can be numpy ndarray or a sparse matrix.
Returns
-------
float
The density of w, between 0 and 1.
Examples
--------
>>> from scipy import sparse
>>> from sklearn.utils.extmath import density
>>> X = sparse.random(10, 10, density=0.25, random_state=0)
>>> density(X)
0.25
|
python
|
sklearn/utils/extmath.py
| 138
|
[
"w"
] | false
| 4
| 6.32
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
find
|
/**
 * Static method that can be used to find a single command from a collection.
 *
 * @param commands the commands to search
 * @param name the name of the command to find
 * @return the matching {@link Command}, or {@code null} if none matches
 */
static @Nullable Command find(Collection<? extends Command> commands, String name) {
    return commands.stream()
        .filter(candidate -> candidate.getName().equals(name))
        .findFirst()
        .orElse(null);
}
|
Static method that can be used to find a single command from a collection.
@param commands the commands to search
@param name the name of the command to find
@return a {@link Command} instance or {@code null}.
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 147
|
[
"commands",
"name"
] |
Command
| true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
unregister
|
/**
 * Unregisters the given MBean from the platform MBean server, if it is
 * currently registered.
 *
 * @param mbean the MBean to unregister
 * @throws KafkaException if the MBean server fails to unregister the bean
 */
private void unregister(KafkaMbean mbean) {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    try {
        // Unregistering a name that is not registered would throw, so check first.
        if (server.isRegistered(mbean.name()))
            server.unregisterMBean(mbean.name());
    } catch (JMException e) {
        throw new KafkaException("Error unregistering mbean", e);
    }
}
|
Unregisters the given MBean from the platform MBean server if it is registered.
@param mbean the MBean to unregister
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/JmxReporter.java
| 199
|
[
"mbean"
] |
void
| true
| 3
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
readFile
|
/**
 * Asynchronously reads the entire contents of a file.
 *
 * @param {string | Buffer | URL | number} path - a path, or an already-open
 *   file descriptor (in which case open() is skipped and the fd is not
 *   closed by this function)
 * @param {{
 *   encoding?: string | null;
 *   flag?: string;
 *   signal?: AbortSignal;
 *   } | string} [options]
 * @param {(
 *   err?: Error,
 *   data?: string | Buffer
 *   ) => any} callback
 * @returns {void}
 */
function readFile(path, options, callback) {
  // Support the two-argument form readFile(path, callback).
  callback ||= options;
  validateFunction(callback, 'cb');
  options = getOptions(options, { flag: 'r' });
  // Lazy-load the ReadFileContext class on first use.
  ReadFileContext ??= require('internal/fs/read/context');
  const context = new ReadFileContext(callback, options.encoding);
  context.isUserFd = isFd(path); // File descriptor ownership
  if (options.signal) {
    context.signal = options.signal;
  }
  if (context.isUserFd) {
    // Caller passed a fd: skip open() and start reading on the next tick.
    process.nextTick(function tick(context) {
      FunctionPrototypeCall(readFileAfterOpen, { context }, null, path);
    }, context);
    return;
  }
  if (checkAborted(options.signal, callback))
    return;
  const flagsNumber = stringToFlags(options.flag, 'options.flag');
  const req = new FSReqCallback();
  req.context = context;
  req.oncomplete = readFileAfterOpen;
  binding.open(getValidatedPath(path), flagsNumber, 0o666, req);
}
|
Asynchronously reads the entire contents of a file.
@param {string | Buffer | URL | number} path
@param {{
encoding?: string | null;
flag?: string;
signal?: AbortSignal;
} | string} [options]
@param {(
err?: Error,
data?: string | Buffer
) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 357
|
[
"path",
"options",
"callback"
] | false
| 4
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
equals
|
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
return ObjectUtils.nullSafeEquals(this.value, ((OriginTrackedValue) obj).value);
}
|
Return the tracked value.
@return the tracked value
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/OriginTrackedValue.java
| 57
|
[
"obj"
] | true
| 3
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
isVariableDeclaratorListTerminator
|
function isVariableDeclaratorListTerminator(): boolean {
// If we can consume a semicolon (either explicitly, or with ASI), then consider us done
// with parsing the list of variable declarators.
if (canParseSemicolon()) {
return true;
}
// in the case where we're parsing the variable declarator of a 'for-in' statement, we
// are done if we see an 'in' keyword in front of us. Same with for-of
if (isInOrOfKeyword(token())) {
return true;
}
// ERROR RECOVERY TWEAK:
// For better error recovery, if we see an '=>' then we just stop immediately. We've got an
// arrow function here and it's going to be very unlikely that we'll resynchronize and get
// another variable declaration.
if (token() === SyntaxKind.EqualsGreaterThanToken) {
return true;
}
// Keep trying to parse out variable declarators.
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 3,052
|
[] | true
| 4
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
splitByWholeSeparatorPreserveAllTokens
|
public static String[] splitByWholeSeparatorPreserveAllTokens(final String str, final String separator) {
return splitByWholeSeparatorWorker(str, separator, -1, true);
}
|
Splits the provided text into an array, separator string specified.
<p>
The separator is not included in the returned String array. Adjacent separators are treated as separators for empty tokens. For more control over the
split use the StrTokenizer class.
</p>
<p>
A {@code null} input String returns {@code null}. A {@code null} separator splits on whitespace.
</p>
<pre>
StringUtils.splitByWholeSeparatorPreserveAllTokens(null, *) = null
StringUtils.splitByWholeSeparatorPreserveAllTokens("", *) = []
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab de fg", null) = ["ab", "de", "fg"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab de fg", null) = ["ab", "", "", "de", "fg"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab:cd:ef", ":") = ["ab", "cd", "ef"]
StringUtils.splitByWholeSeparatorPreserveAllTokens("ab-!-cd-!-ef", "-!-") = ["ab", "cd", "ef"]
</pre>
@param str the String to parse, may be null.
@param separator String containing the String to be used as a delimiter, {@code null} splits on whitespace.
@return an array of parsed Strings, {@code null} if null String was input.
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,307
|
[
"str",
"separator"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
availableLocaleList
|
public static List<Locale> availableLocaleList() {
return SyncAvoid.AVAILABLE_LOCALE_ULIST;
}
|
Obtains an unmodifiable and sorted list of installed locales.
<p>
This method is a wrapper around {@link Locale#getAvailableLocales()}. It is more efficient, as the JDK method must create a new array each time it is
called.
</p>
@return the unmodifiable and sorted list of available locales.
|
java
|
src/main/java/org/apache/commons/lang3/LocaleUtils.java
| 103
|
[] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toZonedDateTime
|
public static ZonedDateTime toZonedDateTime(final Date date) {
return toZonedDateTime(date, TimeZone.getDefault());
}
|
Converts a {@link Date} to a {@link ZonedDateTime}.
@param date the Date to convert, not null.
@return a new ZonedDateTime.
@since 3.19.0
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,685
|
[
"date"
] |
ZonedDateTime
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.