Dataset schema (one row per extracted function):

| column | dtype | range / classes |
|---|---|---|
| function_name | string | length 1-57 |
| function_code | string | length 20-4.99k |
| documentation | string | length 50-2k |
| language | string | 5 classes |
| file_path | string | length 8-166 |
| line_number | int32 | 4-16.7k |
| parameters | list | length 0-20 |
| return_type | string | length 0-131 |
| has_type_hints | bool | 2 classes |
| complexity | int32 | 1-51 |
| quality_score | float32 | 6-9.68 |
| repo_name | string | 34 classes |
| repo_stars | int32 | 2.9k-242k |
| docstring_style | string | 7 classes |
| is_async | bool | 2 classes |

Rows follow, one record per function: function_name | function_code | documentation | metadata.
from_frame
|
def from_frame(
cls,
df: DataFrame,
sortorder: int | None = None,
names: Sequence[Hashable] | Hashable | None = None,
) -> MultiIndex:
"""
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame(
... [["HI", "Temp"], ["HI", "Precip"], ["NJ", "Temp"], ["NJ", "Precip"]],
... columns=["a", "b"],
... )
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=["state", "observation"])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = zip(*df.items(), strict=True)
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
|
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame(
... [["HI", "Temp"], ["HI", "Precip"], ["NJ", "Temp"], ["NJ", "Precip"]],
... columns=["a", "b"],
... )
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=["state", "observation"])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
language: python | file: pandas/core/indexes/multi.py | line: 656 | parameters: [cls, df, sortorder, names] | return_type: MultiIndex | has_type_hints: true | complexity: 3 | quality_score: 7.92 | repo: pandas-dev/pandas (47,362 stars) | docstring_style: numpy | is_async: false

register_writer
|
def register_writer(klass: ExcelWriter_t) -> None:
"""
Add engine to the excel writer registry.
You must use this method to integrate with ``to_excel``.
Parameters
----------
klass : ExcelWriter
"""
if not callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass._engine
_writers[engine_name] = klass
|
Add engine to the excel writer registry.
You must use this method to integrate with ``to_excel``.
Parameters
----------
klass : ExcelWriter
language: python | file: pandas/io/excel/_util.py | line: 34 | parameters: [klass] | return_type: None | has_type_hints: true | complexity: 2 | quality_score: 6.4 | repo: pandas-dev/pandas (47,362 stars) | docstring_style: numpy | is_async: false

_get_additional_distribution_info
|
def _get_additional_distribution_info(provider_distribution_path: Path) -> str:
"""Returns additional info for the package.
:param provider_distribution_path: path for the package
:return: additional information for the path (empty string if missing)
"""
additional_info_file_path = provider_distribution_path / "ADDITIONAL_INFO.md"
if additional_info_file_path.is_file():
additional_info = additional_info_file_path.read_text()
additional_info_lines = additional_info.splitlines(keepends=True)
result = ""
skip_comment = True
for line in additional_info_lines:
if line.startswith(" -->"):
skip_comment = False
elif not skip_comment:
result += line
return result
return ""
|
Returns additional info for the package.
:param provider_distribution_path: path for the package
:return: additional information for the path (empty string if missing)
language: python | file: dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py | line: 654 | parameters: [provider_distribution_path] | return_type: str | has_type_hints: true | complexity: 5 | quality_score: 7.6 | repo: apache/airflow (43,597 stars) | docstring_style: sphinx | is_async: false

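A minimal standalone sketch of the comment-skipping logic above: lines are ignored until one starting with " -->" closes the leading licence comment, and everything after that point is returned. The file content here is invented for illustration.

from pathlib import Path
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    info_file = Path(tmp) / "ADDITIONAL_INFO.md"
    info_file.write_text("<!-- licence header\n -->\nReal additional info.\n")
    result, skip_comment = "", True
    for line in info_file.read_text().splitlines(keepends=True):
        if line.startswith(" -->"):
            skip_comment = False   # the comment terminator itself is dropped
        elif not skip_comment:
            result += line
    print(result)  # -> "Real additional info.\n"
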
cos
|
public static double cos(double a) {
if (a < 0.0) {
a = -a;
}
if (a > SIN_COS_MAX_VALUE_FOR_INT_MODULO) {
return Math.cos(a);
}
// index: possibly outside tables range.
int index = (int) (a * SIN_COS_INDEXER + 0.5);
double delta = (a - index * SIN_COS_DELTA_HI) - index * SIN_COS_DELTA_LO;
// Making sure index is within tables range.
// Last value of each table is the same than first, so we ignore it (tabs size minus one) for
// modulo.
index &= (SIN_COS_TABS_SIZE - 2); // index % (SIN_COS_TABS_SIZE-1)
double indexCos = cosTab[index];
double indexSin = sinTab[index];
return indexCos + delta * (-indexSin + delta * (-indexCos * ONE_DIV_F2 + delta * (indexSin * ONE_DIV_F3 + delta * indexCos
* ONE_DIV_F4)));
}
|
Returns the trigonometric cosine of an angle.
<p>Error is around 1E-15.
<p>Special cases:
<ul>
<li>If the argument is {@code NaN} or an infinity, then the result is {@code NaN}.
</ul>
@param a an angle, in radians.
@return the cosine of the argument.
@see Math#cos(double)
language: java | file: libs/geo/src/main/java/org/elasticsearch/geometry/simplify/SloppyMath.java | line: 77 | parameters: [a] | return_type: (empty) | has_type_hints: true | complexity: 3 | quality_score: 8.24 | repo: elastic/elasticsearch (75,680 stars) | docstring_style: javadoc | is_async: false

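A small Python sketch of the same technique: precompute sine and cosine at evenly spaced points, then correct with a fourth-order Taylor expansion around the nearest table entry. The table size and constants are illustrative, not the Elasticsearch values.

import math

N = 1 << 11                       # table entries covering [0, 2*pi)
DELTA = 2.0 * math.pi / N
cos_tab = [math.cos(i * DELTA) for i in range(N + 1)]
sin_tab = [math.sin(i * DELTA) for i in range(N + 1)]

def fast_cos(a: float) -> float:
    a = abs(a) % (2.0 * math.pi)  # cos is even and 2*pi-periodic
    i = int(a / DELTA + 0.5)      # nearest table index
    d = a - i * DELTA             # small residual angle
    c, s = cos_tab[i], sin_tab[i]
    # cos(x + d) ~ c - s*d - c*d^2/2 + s*d^3/6 + c*d^4/24, in Horner form:
    return c + d * (-s + d * (-c / 2 + d * (s / 6 + d * c / 24)))

assert abs(fast_cos(1.234) - math.cos(1.234)) < 1e-9
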
_import_helper
|
def _import_helper(file_path: str, overwrite: bool) -> None:
"""
Load connections from a file and save them to the DB.
:param overwrite: Whether to skip or overwrite on collision.
"""
connections_dict = load_connections_dict(file_path)
with create_session() as session:
for conn_id, conn in connections_dict.items():
try:
helpers.validate_key(conn_id, max_length=200)
except Exception as e:
print(f"Could not import connection. {e}")
continue
existing_conn_id = session.scalar(select(Connection.id).where(Connection.conn_id == conn_id))
if existing_conn_id is not None:
if not overwrite:
print(f"Could not import connection {conn_id}: connection already exists.")
continue
# The conn_ids match, but the PK of the new entry must also be the same as the old
conn.id = existing_conn_id
session.merge(conn)
session.commit()
print(f"Imported connection {conn_id}")
|
Load connections from a file and save them to the DB.
:param overwrite: Whether to skip or overwrite on collision.
language: python | file: airflow-core/src/airflow/cli/commands/connection_command.py | line: 330 | parameters: [file_path, overwrite] | return_type: None | has_type_hints: true | complexity: 4 | quality_score: 7.2 | repo: apache/airflow (43,597 stars) | docstring_style: sphinx | is_async: false

loadClassInLaunchedClassLoader
|
private Class<?> loadClassInLaunchedClassLoader(String name) throws ClassNotFoundException {
try {
String internalName = name.replace('.', '/') + ".class";
try (InputStream inputStream = getParent().getResourceAsStream(internalName);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
if (inputStream == null) {
throw new ClassNotFoundException(name);
}
inputStream.transferTo(outputStream);
byte[] bytes = outputStream.toByteArray();
Class<?> definedClass = defineClass(name, bytes, 0, bytes.length);
definePackageIfNecessary(name);
return definedClass;
}
}
catch (IOException ex) {
throw new ClassNotFoundException("Cannot load resource for class [" + name + "]", ex);
}
}
|
Create a new {@link LaunchedClassLoader} instance.
@param exploded if the underlying archive is exploded
@param rootArchive the root archive or {@code null}
@param urls the URLs from which to load classes and resources
@param parent the parent class loader for delegation
language: java | file: loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/LaunchedClassLoader.java | line: 94 | parameters: [name] | return_type: (empty) | has_type_hints: true | complexity: 3 | quality_score: 6.56 | repo: spring-projects/spring-boot (79,428 stars) | docstring_style: javadoc | is_async: false

hashCode
|
@Override
public int hashCode() {
if (hash == 0) {
hash = bytes().hashCode();
}
return hash;
}
|
Whether a {@link String} view of the data is already materialized.
language: java | file: libs/x-content/src/main/java/org/elasticsearch/xcontent/Text.java | line: 136 | parameters: [] | return_type: (empty) | has_type_hints: true | complexity: 2 | quality_score: 6.88 | repo: elastic/elasticsearch (75,680 stars) | docstring_style: javadoc | is_async: false

compressionType
|
CompressionType compressionType();
|
Get the compression type of this record batch.
@return The compression type
language: java | file: clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java | line: 182 | parameters: [] | return_type: CompressionType | has_type_hints: true | complexity: 1 | quality_score: 6.8 | repo: apache/kafka (31,560 stars) | docstring_style: javadoc | is_async: false

increment
|
public static InetAddress increment(InetAddress address) {
byte[] addr = address.getAddress();
int i = addr.length - 1;
while (i >= 0 && addr[i] == (byte) 0xff) {
addr[i] = 0;
i--;
}
checkArgument(i >= 0, "Incrementing %s would wrap.", address);
addr[i]++;
return bytesToInetAddress(addr, null);
}
|
Returns a new InetAddress that is one more than the passed in address. This method works for
both IPv4 and IPv6 addresses.
@param address the InetAddress to increment
@return a new InetAddress that is one more than the passed in address
@throws IllegalArgumentException if InetAddress is at the end of its range
@since 10.0
language: java | file: android/guava/src/com/google/common/net/InetAddresses.java | line: 1,202 | parameters: [address] | return_type: InetAddress | has_type_hints: true | complexity: 3 | quality_score: 8.08 | repo: google/guava (51,352 stars) | docstring_style: javadoc | is_async: false

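For comparison, the Python standard library offers the same operation via the ipaddress module: adding 1 to an address object increments it, for IPv4 and IPv6 alike, and raises at the end of the range just as the Guava method does.

import ipaddress

print(ipaddress.ip_address("192.168.0.255") + 1)  # 192.168.1.0
print(ipaddress.ip_address("::ffff") + 1)         # ::1:0
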
zipObject
|
function zipObject(props, values) {
return baseZipObject(props || [], values || [], assignValue);
}
|
This method is like `_.fromPairs` except that it accepts two arrays,
one of property identifiers and one of corresponding values.
@static
@memberOf _
@since 0.4.0
@category Array
@param {Array} [props=[]] The property identifiers.
@param {Array} [values=[]] The property values.
@returns {Object} Returns the new object.
@example
_.zipObject(['a', 'b'], [1, 2]);
// => { 'a': 1, 'b': 2 }
language: javascript | file: lodash.js | line: 8,758 | parameters: [props, values] | return_type: (empty) | has_type_hints: false | complexity: 3 | quality_score: 7.44 | repo: lodash/lodash (61,490 stars) | docstring_style: jsdoc | is_async: false

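For comparison, the same pairing in Python is a one-liner; this is just an illustrative equivalent, not part of lodash.

print(dict(zip(["a", "b"], [1, 2])))  # {'a': 1, 'b': 2}
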
findThreadById
|
public static Thread findThreadById(final long threadId, final ThreadGroup threadGroup) {
Objects.requireNonNull(threadGroup, "threadGroup");
final Thread thread = findThreadById(threadId);
if (thread != null && threadGroup.equals(thread.getThreadGroup())) {
return thread;
}
return null;
}
|
Finds the active thread with the specified id if it belongs to the specified thread group.
@param threadId The thread id.
@param threadGroup The thread group.
@return The thread which belongs to a specified thread group and the thread's id match the specified id. {@code null} is returned if no such thread
exists.
@throws NullPointerException if {@code threadGroup == null}.
@throws IllegalArgumentException if the specified id is zero or negative.
@throws SecurityException if the current thread cannot access the system thread group.
@throws SecurityException if the current thread cannot modify thread groups from this thread's thread group up to the system thread group.
language: java | file: src/main/java/org/apache/commons/lang3/ThreadUtils.java | line: 224 | parameters: [threadId, threadGroup] | return_type: Thread | has_type_hints: true | complexity: 3 | quality_score: 8.08 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

notmasked_contiguous
|
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array, and this
is the same as `flatnotmasked_contiguous`.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
If the input is 2d and axis is specified, the result is a list of lists.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> import numpy as np
>>> a = np.arange(12).reshape((3, 4))
>>> mask = np.zeros_like(a)
>>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
>>> ma = np.ma.array(a, mask=mask)
>>> ma
masked_array(
data=[[0, --, 2, 3],
[--, --, --, 7],
[8, --, --, 11]],
mask=[[False, True, False, False],
[ True, True, True, False],
[False, True, True, False]],
fill_value=999999)
>>> np.array(ma[~ma.mask])
array([ 0, 2, 3, 7, 8, 11])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
[[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
[[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
""" # noqa: E501
a = asarray(a)
nd = a.ndim
if nd > 2:
raise NotImplementedError("Currently limited to at most 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[tuple(idx)]))
return result
|
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array, and this
is the same as `flatnotmasked_contiguous`.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
If the input is 2d and axis is specified, the result is a list of lists.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> import numpy as np
>>> a = np.arange(12).reshape((3, 4))
>>> mask = np.zeros_like(a)
>>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
>>> ma = np.ma.array(a, mask=mask)
>>> ma
masked_array(
data=[[0, --, 2, 3],
[--, --, --, 7],
[8, --, --, 11]],
mask=[[False, True, False, False],
[ True, True, True, False],
[False, True, True, False]],
fill_value=999999)
>>> np.array(ma[~ma.mask])
array([ 0, 2, 3, 7, 8, 11])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
[[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
[[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
language: python | file: numpy/ma/extras.py | line: 2,032 | parameters: [a, axis] | return_type: (empty) | has_type_hints: false | complexity: 5 | quality_score: 7.76 | repo: numpy/numpy (31,054 stars) | docstring_style: numpy | is_async: false

getErrorResponse
|
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
JoinGroupResponseData data = new JoinGroupResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(Errors.forException(e).code())
.setGenerationId(UNKNOWN_GENERATION_ID)
.setProtocolName(UNKNOWN_PROTOCOL_NAME)
.setLeader(UNKNOWN_MEMBER_ID)
.setMemberId(UNKNOWN_MEMBER_ID)
.setMembers(Collections.emptyList());
if (version() >= 7)
data.setProtocolName(null);
else
data.setProtocolName(UNKNOWN_PROTOCOL_NAME);
return new JoinGroupResponse(data, version());
}
|
Get the client's join reason.
@param request The JoinGroupRequest.
@return The join reason.
language: java | file: clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java | line: 191 | parameters: [throttleTimeMs, e] | return_type: AbstractResponse | has_type_hints: true | complexity: 2 | quality_score: 7.76 | repo: apache/kafka (31,560 stars) | docstring_style: javadoc | is_async: false

forms_of_context
|
def forms_of_context() -> Sequence[str]:
"""Return the compile context forms provided by this class.
Returns:
A sequence containing the available compile context forms:
- "torch_version_hash": PyTorch version hash
- "triton_version_hash": Triton version hash (if available)
- "runtime": Runtime type (CUDA/HIP/None)
- "runtime_version": Runtime version string
- "accelerator_properties": GPU/accelerator properties
"""
return (
"torch_version_hash",
"triton_version_hash",
"runtime",
"runtime_version",
"accelerator_properties",
)
|
Return the compile context forms provided by this class.
Returns:
A sequence containing the available compile context forms:
- "torch_version_hash": PyTorch version hash
- "triton_version_hash": Triton version hash (if available)
- "runtime": Runtime type (CUDA/HIP/None)
- "runtime_version": Runtime version string
- "accelerator_properties": GPU/accelerator properties
language: python | file: torch/_inductor/runtime/caching/context.py | line: 123 | parameters: [] | return_type: Sequence[str] | has_type_hints: true | complexity: 1 | quality_score: 6.08 | repo: pytorch/pytorch (96,034 stars) | docstring_style: unknown | is_async: false

init_RISCV_32Bit
|
private static void init_RISCV_32Bit() {
addProcessors(new Processor(Processor.Arch.BIT_32, Processor.Type.RISC_V), "riscv32");
}
|
Gets a {@link Processor} object the given value {@link String}. The {@link String} must be like a value returned by the {@code "os.arch"} system
property.
@param value A {@link String} like a value returned by the {@code os.arch} System Property.
@return A {@link Processor} when it exists, else {@code null}.
language: java | file: src/main/java/org/apache/commons/lang3/ArchUtils.java | line: 123 | parameters: [] | return_type: void | has_type_hints: true | complexity: 1 | quality_score: 6.96 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

_post_process_frame
|
def _post_process_frame(frame, feature_names, target_names):
"""Post process a dataframe to select the desired columns in `X` and `y`.
Parameters
----------
frame : dataframe
The dataframe to split into `X` and `y`.
feature_names : list of str
The list of feature names to populate `X`.
target_names : list of str
The list of target names to populate `y`.
Returns
-------
X : dataframe
The dataframe containing the features.
y : {series, dataframe} or None
The series or dataframe containing the target.
"""
X = frame[feature_names]
if len(target_names) >= 2:
y = frame[target_names]
elif len(target_names) == 1:
y = frame[target_names[0]]
else:
y = None
return X, y
|
Post process a dataframe to select the desired columns in `X` and `y`.
Parameters
----------
frame : dataframe
The dataframe to split into `X` and `y`.
feature_names : list of str
The list of feature names to populate `X`.
target_names : list of str
The list of target names to populate `y`.
Returns
-------
X : dataframe
The dataframe containing the features.
y : {series, dataframe} or None
The series or dataframe containing the target.
language: python | file: sklearn/datasets/_arff_parser.py | line: 75 | parameters: [frame, feature_names, target_names] | return_type: (empty) | has_type_hints: false | complexity: 4 | quality_score: 6.08 | repo: scikit-learn/scikit-learn (64,340 stars) | docstring_style: numpy | is_async: false

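A hedged usage sketch for the helper above, assuming _post_process_frame is in scope (it is private to sklearn.datasets._arff_parser); the column names are invented. Two targets yield a DataFrame, one yields a Series, and none gives None.

import pandas as pd

frame = pd.DataFrame({"f1": [1, 2], "f2": [3, 4], "t1": [0, 1], "t2": [1, 0]})
X, y = _post_process_frame(frame, ["f1", "f2"], ["t1", "t2"])  # y is a DataFrame
X, y = _post_process_frame(frame, ["f1", "f2"], ["t1"])        # y is a Series
X, y = _post_process_frame(frame, ["f1", "f2"], [])            # y is None
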
defaultValues
|
public Map<String, Object> defaultValues() {
Map<String, Object> defaultValues = new HashMap<>();
for (ConfigKey key : configKeys.values()) {
if (key.defaultValue != NO_DEFAULT_VALUE)
defaultValues.put(key.name, key.defaultValue);
}
return defaultValues;
}
|
Returns unmodifiable set of properties names defined in this {@linkplain ConfigDef}
@return new unmodifiable {@link Set} instance containing the keys
language: java | file: clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java | line: 120 | parameters: [] | return_type: (empty) | has_type_hints: true | complexity: 2 | quality_score: 6.8 | repo: apache/kafka (31,560 stars) | docstring_style: javadoc | is_async: false

compareZeroThreshold
|
public int compareZeroThreshold(ZeroBucket other) {
return compareExponentiallyScaledValues(index(), scale(), other.index(), other.scale());
}
|
Compares the zero threshold of this bucket with another one.
@param other The other zero bucket to compare against.
@return A negative integer, zero, or a positive integer if this bucket's threshold is less than,
equal to, or greater than the other's.
language: java | file: libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ZeroBucket.java | line: 234 | parameters: [other] | return_type: (empty) | has_type_hints: true | complexity: 1 | quality_score: 6.64 | repo: elastic/elasticsearch (75,680 stars) | docstring_style: javadoc | is_async: false

get_previous_dagrun
|
def get_previous_dagrun(
dag_run: DagRun, state: DagRunState | None = None, session: Session = NEW_SESSION
) -> DagRun | None:
"""
Return the previous DagRun, if there is one.
:param dag_run: the dag run
:param session: SQLAlchemy ORM Session
:param state: the dag run state
"""
if not dag_run or dag_run.logical_date is None:
return None
filters = [
DagRun.dag_id == dag_run.dag_id,
DagRun.logical_date < dag_run.logical_date,
]
if state is not None:
filters.append(DagRun.state == state)
return session.scalar(select(DagRun).where(*filters).order_by(DagRun.logical_date.desc()).limit(1))
|
Return the previous DagRun, if there is one.
:param dag_run: the dag run
:param session: SQLAlchemy ORM Session
:param state: the dag run state
language: python | file: airflow-core/src/airflow/models/dagrun.py | line: 945 | parameters: [dag_run, state, session] | return_type: "DagRun | None" | has_type_hints: true | complexity: 4 | quality_score: 6.56 | repo: apache/airflow (43,597 stars) | docstring_style: sphinx | is_async: false

calculateValueDelta
|
function calculateValueDelta(srcValue: number, targetValue: number, changeRate: number): number {
const valueSpan = targetValue - srcValue;
return valueSpan * changeRate;
}
|
Calculate the next `CssPropertyValue` based on the source and a target one.
@param srcValue The source value
@param targetValue The target values (it's either the final or the initial value)
@param changeRate The change rate relative to the target (i.e. 1 = target value; 0 = source value)
@returns The newly generated value
language: typescript | file: adev/src/app/features/home/animation/calculations/calc-css-value.ts | line: 119 | parameters: [srcValue, targetValue, changeRate] | return_type: (empty) | has_type_hints: true | complexity: 1 | quality_score: 6.64 | repo: angular/angular (99,544 stars) | docstring_style: jsdoc | is_async: false

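The same calculation ported to Python for a quick check: a change rate of 0.25 moves a quarter of the span from the source value toward the target.

def calculate_value_delta(src: float, target: float, rate: float) -> float:
    return (target - src) * rate

assert calculate_value_delta(10.0, 20.0, 0.25) == 2.5
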
describe_tuning_job
|
def describe_tuning_job(self, name: str) -> dict:
"""
Get the tuning job info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_hyper_parameter_tuning_job`
:param name: the name of the tuning job
:return: A dict containing all the tuning job info
"""
return self.get_conn().describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
|
Get the tuning job info associated with the name.
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.describe_hyper_parameter_tuning_job`
:param name: the name of the tuning job
:return: A dict containing all the tuning job info
language: python | file: providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py | line: 643 | parameters: [self, name] | return_type: dict | has_type_hints: true | complexity: 1 | quality_score: 6.24 | repo: apache/airflow (43,597 stars) | docstring_style: sphinx | is_async: false

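A hedged usage sketch (the connection id and job name are invented): the returned dict mirrors the boto3 describe_hyper_parameter_tuning_job response, which includes fields such as HyperParameterTuningJobStatus.

from airflow.providers.amazon.aws.hooks.sagemaker import SageMakerHook

hook = SageMakerHook(aws_conn_id="aws_default")
info = hook.describe_tuning_job("my-tuning-job")  # hypothetical job name
print(info["HyperParameterTuningJobStatus"])
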
set_context
|
def set_context(self, ti) -> None:
"""
Accept the run-time context (i.e. the current task) and configure the formatter accordingly.
:param ti:
:return:
"""
# Returns if there is no formatter or if the prefix has already been set
if ti.raw or self.formatter is None or self.prefix_jinja_template is not None:
return
prefix = conf.get("logging", "task_log_prefix_template")
if prefix:
_, self.prefix_jinja_template = parse_template_string(prefix)
rendered_prefix = self._render_prefix(ti)
else:
rendered_prefix = ""
formatter = logging.Formatter(f"{rendered_prefix}:{self.formatter._fmt}")
self.setFormatter(formatter)
self.setLevel(self.level)
|
Accept the run-time context (i.e. the current task) and configure the formatter accordingly.
:param ti:
:return:
language: python | file: airflow-core/src/airflow/utils/log/task_handler_with_custom_formatter.py | line: 41 | parameters: [self, ti] | return_type: None | has_type_hints: true | complexity: 6 | quality_score: 8.08 | repo: apache/airflow (43,597 stars) | docstring_style: sphinx | is_async: false

or
|
default FailablePredicate<T, E> or(final FailablePredicate<? super T, E> other) {
Objects.requireNonNull(other);
return t -> test(t) || other.test(t);
}
|
Returns a composed {@link FailablePredicate} like {@link Predicate#and(Predicate)}.
@param other a predicate that will be logically-ORed with this predicate.
@return a composed {@link FailablePredicate} like {@link Predicate#and(Predicate)}.
@throws NullPointerException if other is null
language: java | file: src/main/java/org/apache/commons/lang3/function/FailablePredicate.java | line: 93 | parameters: [other] | return_type: (empty) | has_type_hints: true | complexity: 2 | quality_score: 7.36 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

is_trivial_mask_graph
|
def is_trivial_mask_graph(graph_module: GraphModule) -> bool:
"""Mask graph is trivial when it only gates via the default full op."""
graph = graph_module.graph
nodes = list(graph.nodes)
placeholders = [n for n in nodes if n.op == "placeholder"]
output = [n for n in nodes if n.op == "output"]
assert len(output) == 1, "Got graph w/ multiple outputs"
output_val = output[0].args[0]
# mask mod graph is empty if we have 4 inputs and full_default output
return len(placeholders) == 4 and output_val.target is torch.ops.aten.full.default
|
Mask graph is trivial when it only gates via the default full op.
language: python | file: torch/_inductor/kernel/flex/flex_flash_attention.py | line: 158 | parameters: [graph_module] | return_type: bool | has_type_hints: true | complexity: 2 | quality_score: 6 | repo: pytorch/pytorch (96,034 stars) | docstring_style: unknown | is_async: false

aot_compile
|
def aot_compile(
f: Callable,
args: tuple[Any, ...],
kwargs: Optional[dict[str, Any]] = None,
*,
dynamic_shapes: Optional[dict[str, Any]] = None,
options: Optional[dict[str, Any]] = None,
remove_runtime_assertions: bool = False,
disable_constraint_solver: bool = False,
same_signature: bool = True,
) -> Union[list[Any], str]:
"""
Note: this function is not stable yet
Traces either an nn.Module's forward function or just a callable with PyTorch
operations inside, generates executable cpp code from the program, and returns
the path to the generated shared library
Args:
f: the `nn.Module` or callable to trace.
args: example positional inputs.
kwargs: optional example keyword inputs.
dynamic_shapes: Should either be:
1) a dict from argument names of ``f`` to their dynamic shape specifications,
2) a tuple that specifies dynamic shape specifications for each input in original order.
If you are specifying dynamism on keyword args, you will need to pass them in the order that
is defined in the original function signature.
The dynamic shape of a tensor argument can be specified as either
(1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
not required to include static dimension indices in this dict, but when they are,
they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
are denoted by None. Arguments that are dicts or tuples / lists of tensors are
recursively specified by using mappings or sequences of contained specifications.
options: A dictionary of options to control inductor
disable_constraint_solver: Whether the dim constraint solver must be disabled.
Returns:
Path to the generated shared library
"""
from torch.export._trace import _export_to_torch_ir
from torch._inductor.decomposition import select_decomp_table
from torch._inductor import config as inductor_config
aot_compile_warning()
if inductor_config.is_predispatch:
gm = torch.export._trace._export(f, args, kwargs, dynamic_shapes, pre_dispatch=True).module()
else:
# We want to export to Torch IR here to utilize the pre_grad passes in
# inductor, which run on Torch IR.
with torch._export.config.patch(use_new_tracer_experimental=True):
gm = _export_to_torch_ir(
f,
args,
kwargs,
dynamic_shapes,
disable_constraint_solver=disable_constraint_solver,
same_signature=same_signature,
# Disabling this flag, because instead we can rely on the mapping
# dynamo_flat_name_to_original_fqn which is coming from Dynamo.
restore_fqn=False,
)
with torch.no_grad():
so_path = torch._inductor.aot_compile(gm, args, kwargs, options=options) # type: ignore[arg-type]
assert isinstance(so_path, (str, list))
return so_path
|
Note: this function is not stable yet
Traces either an nn.Module's forward function or just a callable with PyTorch
operations inside, generates executable cpp code from the program, and returns
the path to the generated shared library
Args:
f: the `nn.Module` or callable to trace.
args: example positional inputs.
kwargs: optional example keyword inputs.
dynamic_shapes: Should either be:
1) a dict from argument names of ``f`` to their dynamic shape specifications,
2) a tuple that specifies dynamic shape specifications for each input in original order.
If you are specifying dynamism on keyword args, you will need to pass them in the order that
is defined in the original function signature.
The dynamic shape of a tensor argument can be specified as either
(1) a dict from dynamic dimension indices to :func:`Dim` types, where it is
not required to include static dimension indices in this dict, but when they are,
they should be mapped to None; or (2) a tuple / list of :func:`Dim` types or None,
where the :func:`Dim` types correspond to dynamic dimensions, and static dimensions
are denoted by None. Arguments that are dicts or tuples / lists of tensors are
recursively specified by using mappings or sequences of contained specifications.
options: A dictionary of options to control inductor
disable_constraint_solver: Whether the dim constraint solver must be disabled.
Returns:
Path to the generated shared library
language: python | file: torch/_export/__init__.py | line: 80 | parameters: [f, args, kwargs, dynamic_shapes, options, remove_runtime_assertions, disable_constraint_solver, same_signature] | return_type: Union[list[Any], str] | has_type_hints: true | complexity: 3 | quality_score: 8 | repo: pytorch/pytorch (96,034 stars) | docstring_style: google | is_async: false

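A minimal usage sketch of the unstable API above, per its own docstring: trace a tiny module with static shapes and get back the path to the generated shared library. Assumes a PyTorch build with inductor support; the module and shapes are invented.

import torch
from torch._export import aot_compile

class M(torch.nn.Module):
    def forward(self, x):
        return torch.sin(x) + 1

so_path = aot_compile(M(), (torch.randn(4, 8),))
print(so_path)  # path to the generated shared library
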
response
|
def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
"""Serialize the given arguments as JSON, and return a
:class:`~flask.Response` object with it. The response mimetype
will be "application/json" and can be changed with
:attr:`mimetype`.
If :attr:`compact` is ``False`` or debug mode is enabled, the
output will be formatted to be easier to read.
Either positional or keyword arguments can be given, not both.
If no arguments are given, ``None`` is serialized.
:param args: A single value to serialize, or multiple values to
treat as a list to serialize.
:param kwargs: Treat as a dict to serialize.
"""
obj = self._prepare_response_obj(args, kwargs)
dump_args: dict[str, t.Any] = {}
if (self.compact is None and self._app.debug) or self.compact is False:
dump_args.setdefault("indent", 2)
else:
dump_args.setdefault("separators", (",", ":"))
return self._app.response_class(
f"{self.dumps(obj, **dump_args)}\n", mimetype=self.mimetype
)
|
Serialize the given arguments as JSON, and return a
:class:`~flask.Response` object with it. The response mimetype
will be "application/json" and can be changed with
:attr:`mimetype`.
If :attr:`compact` is ``False`` or debug mode is enabled, the
output will be formatted to be easier to read.
Either positional or keyword arguments can be given, not both.
If no arguments are given, ``None`` is serialized.
:param args: A single value to serialize, or multiple values to
treat as a list to serialize.
:param kwargs: Treat as a dict to serialize.
language: python | file: src/flask/json/provider.py | line: 189 | parameters: [self] | return_type: Response | has_type_hints: true | complexity: 5 | quality_score: 6.72 | repo: pallets/flask (70,946 stars) | docstring_style: sphinx | is_async: false

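A hedged usage sketch: in Flask 2.2+, flask.jsonify delegates to the active app's JSON provider, so inside an app the call below goes through the response() method shown above. The route and payload are invented.

from flask import Flask, jsonify

app = Flask(__name__)

@app.route("/user")
def user():
    return jsonify(name="ada", id=1)  # delegates to app.json.response(...)
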
function
|
static <T, R, E extends Throwable> FailableFunction<T, R, E> function(final FailableFunction<T, R, E> function) {
return function;
}
|
Starts a fluent chain like {@code function(foo::bar).andThen(...).andThen(...).apply(...);}
@param <T> Input type.
@param <R> Return type.
@param <E> The type of thrown exception or error.
@param function the argument to return.
@return the argument
@since 3.14.0
language: java | file: src/main/java/org/apache/commons/lang3/function/FailableFunction.java | line: 48 | parameters: [function] | return_type: (empty) | has_type_hints: true | complexity: 1 | quality_score: 6.48 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

equals
|
@Override
public boolean equals(@Nullable Object other) {
return (this == other || (other instanceof NameMatchMethodPointcut that &&
this.mappedNamePatterns.equals(that.mappedNamePatterns)));
}
|
Determine if the given method name matches the mapped name pattern.
<p>The default implementation checks for {@code xxx*}, {@code *xxx},
{@code *xxx*}, and {@code xxx*yyy} matches, as well as direct equality.
<p>Can be overridden in subclasses.
@param methodName the method name to check
@param mappedNamePattern the method name pattern
@return {@code true} if the method name matches the pattern
@see PatternMatchUtils#simpleMatch(String, String)
language: java | file: spring-aop/src/main/java/org/springframework/aop/support/NameMatchMethodPointcut.java | line: 113 | parameters: [other] | return_type: (empty) | has_type_hints: true | complexity: 3 | quality_score: 7.6 | repo: spring-projects/spring-framework (59,386 stars) | docstring_style: javadoc | is_async: false

checkQualifiers
|
protected @Nullable Boolean checkQualifiers(BeanDefinitionHolder bdHolder, Annotation[] annotationsToSearch) {
boolean qualifierFound = false;
if (!ObjectUtils.isEmpty(annotationsToSearch)) {
SimpleTypeConverter typeConverter = new SimpleTypeConverter();
for (Annotation annotation : annotationsToSearch) {
Class<? extends Annotation> type = annotation.annotationType();
if (isPlainJavaAnnotation(type)) {
continue;
}
boolean checkMeta = true;
boolean fallbackToMeta = false;
if (isQualifier(type)) {
qualifierFound = true;
if (!checkQualifier(bdHolder, annotation, typeConverter)) {
fallbackToMeta = true;
}
else {
checkMeta = false;
}
}
if (checkMeta) {
boolean foundMeta = false;
for (Annotation metaAnn : type.getAnnotations()) {
Class<? extends Annotation> metaType = metaAnn.annotationType();
if (isPlainJavaAnnotation(metaType)) {
continue;
}
if (isQualifier(metaType)) {
qualifierFound = true;
foundMeta = true;
// Only accept fallback match if @Qualifier annotation has a value...
// Otherwise, it is just a marker for a custom qualifier annotation.
if ((fallbackToMeta && ObjectUtils.isEmpty(AnnotationUtils.getValue(metaAnn))) ||
!checkQualifier(bdHolder, metaAnn, typeConverter)) {
return false;
}
}
}
if (fallbackToMeta && !foundMeta) {
return false;
}
}
}
}
return (qualifierFound ? true : null);
}
|
Match the given qualifier annotations against the candidate bean definition.
@return {@code false} if a qualifier has been found but not matched,
{@code true} if a qualifier has been found and matched,
{@code null} if no qualifier has been found at all
language: java | file: spring-beans/src/main/java/org/springframework/beans/factory/annotation/QualifierAnnotationAutowireCandidateResolver.java | line: 175 | parameters: [bdHolder, annotationsToSearch] | return_type: Boolean | has_type_hints: true | complexity: 14 | quality_score: 7.04 | repo: spring-projects/spring-framework (59,386 stars) | docstring_style: javadoc | is_async: false

_getitem_tuple_same_dim
|
def _getitem_tuple_same_dim(self, tup: tuple):
"""
Index with indexers that should return an object of the same dimension
as self.obj.
This is only called after a failed call to _getitem_lowerdim.
"""
retval = self.obj
# Selecting columns before rows is significantly faster
start_val = (self.ndim - len(tup)) + 1
for i, key in enumerate(reversed(tup)):
i = self.ndim - i - start_val
if com.is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
# We should never have retval.ndim < self.ndim, as that should
# be handled by the _getitem_lowerdim call above.
assert retval.ndim == self.ndim
if retval is self.obj:
# if all axes were a null slice (`df.loc[:, :]`), ensure we still
# return a new object (https://github.com/pandas-dev/pandas/pull/49469)
retval = retval.copy(deep=False)
return retval
|
Index with indexers that should return an object of the same dimension
as self.obj.
This is only called after a failed call to _getitem_lowerdim.
language: python | file: pandas/core/indexing.py | line: 1,031 | parameters: [self, tup] | return_type: (empty) | has_type_hints: true | complexity: 4 | quality_score: 6 | repo: pandas-dev/pandas (47,362 stars) | docstring_style: unknown | is_async: false

appendSeparator
|
public StrBuilder appendSeparator(final char standard, final char defaultIfEmpty) {
if (isNotEmpty()) {
append(standard);
} else {
append(defaultIfEmpty);
}
return this;
}
|
Appends one of two separators to the builder.
If the builder is currently empty, it appends the defaultIfEmpty separator;
otherwise it appends the standard separator.
The separator is appended using {@link #append(char)}.
@param standard the separator if builder is not empty
@param defaultIfEmpty the separator if builder is empty
@return {@code this} instance.
@since 2.5
language: java | file: src/main/java/org/apache/commons/lang3/text/StrBuilder.java | line: 1,237 | parameters: [standard, defaultIfEmpty] | return_type: StrBuilder | has_type_hints: true | complexity: 2 | quality_score: 7.76 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

availablePartitionsForTopic
|
public List<PartitionInfo> availablePartitionsForTopic(String topic) {
return availablePartitionsByTopic.getOrDefault(topic, Collections.emptyList());
}
|
Get the list of available partitions for this topic
@param topic The topic name
@return A list of partitions
language: java | file: clients/src/main/java/org/apache/kafka/common/Cluster.java | line: 313 | parameters: [topic] | return_type: (empty) | has_type_hints: true | complexity: 1 | quality_score: 6.32 | repo: apache/kafka (31,560 stars) | docstring_style: javadoc | is_async: false

apply_patch
|
def apply_patch(patch_file: str, target_dir: str | None, strip_count: int) -> None:
"""
Applies the downloaded patch to the specified directory using the given strip count.
Args:
patch_file (str): The path to the patch file.
target_dir (Optional[str]): The directory to apply the patch to. If None, uses PyTorch installation path.
strip_count (int): The number of leading directories to strip from file paths in the patch.
Exits:
If the patch command fails or the 'patch' utility is not available, the script will exit.
"""
if target_dir:
print(f"Applying patch in directory: {target_dir}")
else:
print("No target directory specified. Using PyTorch installation path.")
print(f"Applying patch with strip count: {strip_count}")
try:
# Construct the patch command with -d and -p options
patch_command = ["patch", f"-p{strip_count}", "-i", patch_file]
        if target_dir:
            # Insert -d option right after 'patch'
            patch_command.insert(1, f"-d{target_dir}")
        print(f"Running command: {' '.join(patch_command)}")
        result = subprocess.run(patch_command, capture_output=True, text=True)
# Check if the patch was applied successfully
if result.returncode != 0:
print("Failed to apply patch.")
print("Patch output:")
print(result.stdout)
print(result.stderr)
sys.exit(1)
else:
print("Patch applied successfully.")
except FileNotFoundError:
print("Error: The 'patch' utility is not installed or not found in PATH.")
sys.exit(1)
except Exception as e:
print(f"An error occurred while applying the patch: {e}")
sys.exit(1)
|
Applies the downloaded patch to the specified directory using the given strip count.
Args:
patch_file (str): The path to the patch file.
target_dir (Optional[str]): The directory to apply the patch to. If None, uses PyTorch installation path.
strip_count (int): The number of leading directories to strip from file paths in the patch.
Exits:
If the patch command fails or the 'patch' utility is not available, the script will exit.
language: python | file: tools/nightly_hotpatch.py | line: 136 | parameters: [patch_file, target_dir, strip_count] | return_type: None | has_type_hints: true | complexity: 7 | quality_score: 7.04 | repo: pytorch/pytorch (96,034 stars) | docstring_style: google | is_async: false

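A hypothetical invocation of the helper above (both paths are invented): apply /tmp/fix.patch inside a local PyTorch checkout, stripping one leading path component, which is equivalent to running patch -d <dir> -p1 -i /tmp/fix.patch.

apply_patch("/tmp/fix.patch", "/home/me/pytorch", strip_count=1)
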
commaMatcher
|
public static StrMatcher commaMatcher() {
return COMMA_MATCHER;
}
|
Gets the matcher for the comma character.
@return the matcher for a comma.
language: java | file: src/main/java/org/apache/commons/lang3/text/StrMatcher.java | line: 286 | parameters: [] | return_type: StrMatcher | has_type_hints: true | complexity: 1 | quality_score: 6.96 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

matchFieldNamesWithDots
|
private boolean matchFieldNamesWithDots(String name, int dotIndex, List<FilterPath> nextFilters) {
String prefixName = name.substring(0, dotIndex);
String suffixName = name.substring(dotIndex + 1);
List<FilterPath> prefixFilterPath = new ArrayList<>();
boolean prefixMatch = matches(prefixName, prefixFilterPath, true);
// if prefixMatch return true(because prefix is a final FilterPath node)
if (prefixMatch) {
return true;
}
// if has prefixNextFilter, use them to match suffix
for (FilterPath filter : prefixFilterPath) {
boolean matches = filter.matches(suffixName, nextFilters, true);
if (matches) {
return true;
}
}
return false;
}
|
Check if the name matches the filter nodes.
If the name equals the filter node name, the node is added to nextFilters.
If the filter node is a final node, the name matches the pattern and true is returned.
If the name doesn't equal a final node, false is returned and the inner filter nodes are checked.
If the current node is a double wildcard node, it is also added to nextFilters.
@param name the xcontent property name
@param nextFilters a List used to check the inner property of name
@param matchFieldNamesWithDots support dot in field name or not
@return true if the name equals a final node, otherwise false
language: java | file: libs/x-content/src/main/java/org/elasticsearch/xcontent/support/filtering/FilterPath.java | line: 118 | parameters: [name, dotIndex, nextFilters] | return_type: (empty) | has_type_hints: true | complexity: 3 | quality_score: 7.92 | repo: elastic/elasticsearch (75,680 stars) | docstring_style: javadoc | is_async: false

getHandlerMappings
|
private Map<String, Object> getHandlerMappings() {
Map<String, Object> handlerMappings = this.handlerMappings;
if (handlerMappings == null) {
synchronized (this) {
handlerMappings = this.handlerMappings;
if (handlerMappings == null) {
if (logger.isTraceEnabled()) {
logger.trace("Loading NamespaceHandler mappings from [" + this.handlerMappingsLocation + "]");
}
try {
Properties mappings =
PropertiesLoaderUtils.loadAllProperties(this.handlerMappingsLocation, this.classLoader);
if (logger.isTraceEnabled()) {
logger.trace("Loaded NamespaceHandler mappings: " + mappings);
}
handlerMappings = new ConcurrentHashMap<>(mappings.size());
CollectionUtils.mergePropertiesIntoMap(mappings, handlerMappings);
this.handlerMappings = handlerMappings;
}
catch (IOException ex) {
throw new IllegalStateException(
"Unable to load NamespaceHandler mappings from location [" + this.handlerMappingsLocation + "]", ex);
}
}
}
}
return handlerMappings;
}
|
Load the specified NamespaceHandler mappings lazily.
language: java | file: spring-beans/src/main/java/org/springframework/beans/factory/xml/DefaultNamespaceHandlerResolver.java | line: 150 | parameters: [] | return_type: (empty) | has_type_hints: true | complexity: 6 | quality_score: 6.08 | repo: spring-projects/spring-framework (59,386 stars) | docstring_style: javadoc | is_async: false

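A language-agnostic sketch of the same double-checked locking pattern in Python: check without the lock, lock, re-check, then publish the loaded map. The mapping contents are a stand-in for the properties-file load.

import threading

_lock = threading.Lock()
_mappings = None

def get_handler_mappings() -> dict:
    global _mappings
    mappings = _mappings
    if mappings is None:
        with _lock:
            mappings = _mappings
            if mappings is None:  # re-check while holding the lock
                mappings = {"http://example.org/schema": "ExampleHandler"}
                _mappings = mappings
    return mappings
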
notifyStarted
|
protected final void notifyStarted() {
monitor.enter();
try {
// We have to examine the internal state of the snapshot here to properly handle the stop
// while starting case.
if (snapshot.state != STARTING) {
IllegalStateException failure =
new IllegalStateException(
"Cannot notifyStarted() when the service is " + snapshot.state);
notifyFailed(failure);
throw failure;
}
if (snapshot.shutdownWhenStartupFinishes) {
snapshot = new StateSnapshot(STOPPING);
// We don't call listeners here because we already did that when we set the
// shutdownWhenStartupFinishes flag.
doStop();
} else {
snapshot = new StateSnapshot(RUNNING);
enqueueRunningEvent();
}
} finally {
monitor.leave();
dispatchListenerEvents();
}
}
|
Implementing classes should invoke this method once their service has started. It will cause
the service to transition from {@link State#STARTING} to {@link State#RUNNING}.
@throws IllegalStateException if the service is not {@link State#STARTING}.
language: java | file: android/guava/src/com/google/common/util/concurrent/AbstractService.java | line: 383 | parameters: [] | return_type: void | has_type_hints: true | complexity: 3 | quality_score: 6.56 | repo: google/guava (51,352 stars) | docstring_style: javadoc | is_async: false

invokeInitMethods
|
protected void invokeInitMethods(String beanName, Object bean, @Nullable RootBeanDefinition mbd)
throws Throwable {
boolean isInitializingBean = (bean instanceof InitializingBean);
if (isInitializingBean && (mbd == null || !mbd.hasAnyExternallyManagedInitMethod("afterPropertiesSet"))) {
if (logger.isTraceEnabled()) {
logger.trace("Invoking afterPropertiesSet() on bean with name '" + beanName + "'");
}
((InitializingBean) bean).afterPropertiesSet();
}
if (mbd != null && bean.getClass() != NullBean.class) {
String[] initMethodNames = mbd.getInitMethodNames();
if (initMethodNames != null) {
for (String initMethodName : initMethodNames) {
if (StringUtils.hasLength(initMethodName) &&
!(isInitializingBean && "afterPropertiesSet".equals(initMethodName)) &&
!mbd.hasAnyExternallyManagedInitMethod(initMethodName)) {
invokeCustomInitMethod(beanName, bean, mbd, initMethodName);
}
}
}
}
}
|
Give a bean a chance to initialize itself after all its properties are set,
and a chance to know about its owning bean factory (this object).
<p>This means checking whether the bean implements {@link InitializingBean}
or defines any custom init methods, and invoking the necessary callback(s)
if it does.
@param beanName the bean name in the factory (for debugging purposes)
@param bean the new bean instance we may need to initialize
@param mbd the merged bean definition that the bean was created with
(can also be {@code null}, if given an existing bean instance)
@throws Throwable if thrown by init methods or by the invocation process
@see #invokeCustomInitMethod
language: java | file: spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java | line: 1,856 | parameters: [beanName, bean, mbd] | return_type: void | has_type_hints: true | complexity: 12 | quality_score: 6.4 | repo: spring-projects/spring-framework (59,386 stars) | docstring_style: javadoc | is_async: false

max
|
public static byte max(final byte... array) {
// Validates input
validateArray(array);
// Finds and returns max
byte max = array[0];
for (int i = 1; i < array.length; i++) {
if (array[i] > max) {
max = array[i];
}
}
return max;
}
|
Returns the maximum value in an array.
@param array an array, must not be null or empty.
@return the maximum value in the array.
@throws NullPointerException if {@code array} is {@code null}.
@throws IllegalArgumentException if {@code array} is empty.
@since 3.4 Changed signature from max(byte[]) to max(byte...).
language: java | file: src/main/java/org/apache/commons/lang3/math/NumberUtils.java | line: 822 | parameters: [] | return_type: (empty) | has_type_hints: true | complexity: 3 | quality_score: 8.24 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

lazyTransform
|
@J2ktIncompatible
@GwtIncompatible // TODO
public static <I extends @Nullable Object, O extends @Nullable Object> Future<O> lazyTransform(
Future<I> input, Function<? super I, ? extends O> function) {
checkNotNull(input);
checkNotNull(function);
return new Future<O>() {
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return input.cancel(mayInterruptIfRunning);
}
@Override
public boolean isCancelled() {
return input.isCancelled();
}
@Override
public boolean isDone() {
return input.isDone();
}
@Override
public O get() throws InterruptedException, ExecutionException {
return applyTransformation(input.get());
}
@Override
public O get(long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return applyTransformation(input.get(timeout, unit));
}
private O applyTransformation(I input) throws ExecutionException {
try {
return function.apply(input);
} catch (Throwable t) {
// Any Exception is either a RuntimeException or sneaky checked exception.
throw new ExecutionException(t);
}
}
};
}
|
Like {@link #transform(ListenableFuture, Function, Executor)} except that the transformation
{@code function} is invoked on each call to {@link Future#get() get()} on the returned future.
<p>The returned {@code Future} reflects the input's cancellation state directly, and any
attempt to cancel the returned Future is likewise passed through to the input Future.
<p>Note that calls to {@linkplain Future#get(long, TimeUnit) timed get} only apply the timeout
to the execution of the underlying {@code Future}, <em>not</em> to the execution of the
transformation function.
<p>The primary audience of this method is callers of {@code transform} who don't have a {@code
ListenableFuture} available and do not mind repeated, lazy function evaluation.
@param input The future to transform
@param function A Function to transform the results of the provided future to the results of
the returned future.
@return A future that returns the result of the transformation.
@since 10.0
language: java | file: android/guava/src/com/google/common/util/concurrent/Futures.java | line: 514 | parameters: [input, function] | return_type: (empty) | has_type_hints: true | complexity: 2 | quality_score: 7.92 | repo: google/guava (51,352 stars) | docstring_style: javadoc | is_async: false

buildSafePointcut
|
public final Pointcut buildSafePointcut() {
Pointcut pc = getPointcut();
MethodMatcher safeMethodMatcher = MethodMatchers.intersection(
new AdviceExcludingMethodMatcher(this.aspectJAdviceMethod), pc.getMethodMatcher());
return new ComposablePointcut(pc.getClassFilter(), safeMethodMatcher);
}
|
Build a 'safe' pointcut that excludes the AspectJ advice method itself.
@return a composable pointcut that builds on the original AspectJ expression pointcut
@see #getPointcut()
language: java | file: spring-aop/src/main/java/org/springframework/aop/aspectj/AbstractAspectJAdvice.java | line: 194 | parameters: [] | return_type: Pointcut | has_type_hints: true | complexity: 1 | quality_score: 6.08 | repo: spring-projects/spring-framework (59,386 stars) | docstring_style: javadoc | is_async: false

transitionToSendingLeaveGroup
|
private void transitionToSendingLeaveGroup(boolean dueToExpiredPollTimer) {
if (state == MemberState.FATAL) {
log.warn("Member {} with epoch {} won't send leave group request because it is in " +
"FATAL state", memberId, memberEpoch);
return;
}
if (state == MemberState.UNSUBSCRIBED) {
log.warn("Member {} won't send leave group request because it is already out of the group.",
memberId);
return;
}
if (dueToExpiredPollTimer) {
isPollTimerExpired = true;
// Briefly transition through prepare leaving. The member does not have to release
// any assignment before sending the leave group given that is stale. It will invoke
// onAllTasksLost after sending the leave group on the STALE state.
transitionTo(MemberState.PREPARE_LEAVING);
}
finalizeLeaving();
transitionTo(MemberState.LEAVING);
}
|
Reset member epoch to the value required for the leave group heartbeat request, and
transition to the {@link MemberState#LEAVING} state so that a heartbeat request is sent
out with it.
@param dueToExpiredPollTimer True if the leave group is due to an expired poll timer. This
will indicate that the member must remain STALE after leaving,
until it releases its assignment and the timer is reset.
language: java | file: clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java | line: 428 | parameters: [dueToExpiredPollTimer] | return_type: void | has_type_hints: true | complexity: 4 | quality_score: 6.88 | repo: apache/kafka (31,560 stars) | docstring_style: javadoc | is_async: false

replaceFirst
|
public StrBuilder replaceFirst(final char search, final char replace) {
if (search != replace) {
for (int i = 0; i < size; i++) {
if (buffer[i] == search) {
buffer[i] = replace;
break;
}
}
}
return this;
}
|
Replaces the first instance of the search character with the
replace character in the builder.
@param search the search character
@param replace the replace character
@return {@code this} instance.
language: java | file: src/main/java/org/apache/commons/lang3/text/StrBuilder.java | line: 2,627 | parameters: [search, replace] | return_type: StrBuilder | has_type_hints: true | complexity: 4 | quality_score: 8.08 | repo: apache/commons-lang (2,896 stars) | docstring_style: javadoc | is_async: false

are_dependents_done
|
def are_dependents_done(self, session: Session = NEW_SESSION) -> bool:
"""
Check whether the immediate dependents of this task instance have succeeded or have been skipped.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
:param session: SQLAlchemy ORM Session
"""
task = self.task
if TYPE_CHECKING:
assert task
if not task.downstream_task_ids:
return True
ti = select(func.count(TaskInstance.task_id)).where(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.run_id == self.run_id,
TaskInstance.state.in_((TaskInstanceState.SKIPPED, TaskInstanceState.SUCCESS)),
)
count = session.scalar(ti)
return count == len(task.downstream_task_ids)
|
Check whether the immediate dependents of this task instance have succeeded or have been skipped.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
:param session: SQLAlchemy ORM Session
language: python | file: airflow-core/src/airflow/models/taskinstance.py | line: 797 | parameters: [self, session] | return_type: bool | has_type_hints: true | complexity: 3 | quality_score: 7.04 | repo: apache/airflow (43,597 stars) | docstring_style: sphinx | is_async: false

sourceOutput
|
public Builder sourceOutput(Path sourceOutput) {
this.sourceOutput = sourceOutput;
return this;
}
|
Set the output directory for generated sources.
@param sourceOutput the location of generated sources
@return this builder for method chaining
language: java | file: spring-context/src/main/java/org/springframework/context/aot/AbstractAotProcessor.java | line: 221 | parameters: [sourceOutput] | return_type: Builder | has_type_hints: true | complexity: 1 | quality_score: 6 | repo: spring-projects/spring-framework (59,386 stars) | docstring_style: javadoc | is_async: false

now
|
function now(): number {
if (supportsPerformanceNow) {
return performance.now();
}
return Date.now();
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow strict-local
language: javascript | file: packages/react-devtools-shared/src/PerformanceLoggingUtils.js | line: 38 | parameters: [] | return_type: (empty) | has_type_hints: false | complexity: 2 | quality_score: 6.24 | repo: facebook/react (241,750 stars) | docstring_style: jsdoc | is_async: false

transitionTo
|
private void transitionTo(State target, RuntimeException error) {
if (!currentState.isTransitionValid(currentState, target)) {
String idString = transactionalId == null ? "" : "TransactionalId " + transactionalId + ": ";
String message = idString + "Invalid transition attempted from state "
+ currentState.name() + " to state " + target.name();
if (shouldPoisonStateOnInvalidTransition()) {
currentState = State.FATAL_ERROR;
lastError = new IllegalStateException(message);
throw lastError;
} else {
throw new IllegalStateException(message);
}
} else if (target == State.FATAL_ERROR || target == State.ABORTABLE_ERROR) {
if (error == null)
throw new IllegalArgumentException("Cannot transition to " + target + " with a null exception");
lastError = error;
} else {
lastError = null;
}
if (lastError != null)
log.debug("Transition from state {} to error state {}", currentState, target, lastError);
else
log.debug("Transition from state {} to {}", currentState, target);
currentState = target;
}
|
Check if the transaction is in the prepared state.
@return true if the current state is PREPARED_TRANSACTION
language: java | file: clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java | line: 1,120 | parameters: [target, error] | return_type: void | has_type_hints: true | complexity: 8 | quality_score: 7.04 | repo: apache/kafka (31,560 stars) | docstring_style: javadoc | is_async: false

cancel
|
@CanIgnoreReturnValue
@SuppressWarnings("Interruption") // We are propagating an interrupt from a caller.
public boolean cancel(boolean mayInterruptIfRunning) {
logger.get().log(FINER, "cancelling {0}", this);
boolean cancelled = future.cancel(mayInterruptIfRunning);
if (cancelled) {
close();
}
return cancelled;
}
|
Attempts to cancel execution of this step. This attempt will fail if the step has already
completed, has already been cancelled, or could not be cancelled for some other reason. If
successful, and this step has not started when {@code cancel} is called, this step should never
run.
<p>If successful, causes the objects captured by this step (if already started) and its input
step(s) for later closing to be closed on their respective {@link Executor}s. If any such calls
specified {@link MoreExecutors#directExecutor()}, those objects will be closed synchronously.
@param mayInterruptIfRunning {@code true} if the thread executing this task should be
interrupted; otherwise, in-progress tasks are allowed to complete, but the step will be
cancelled regardless
@return {@code false} if the step could not be cancelled, typically because it has already
completed normally; {@code true} otherwise
language: java | file: android/guava/src/com/google/common/util/concurrent/ClosingFuture.java | line: 1,086 | parameters: [mayInterruptIfRunning] | return_type: (empty) | has_type_hints: true | complexity: 2 | quality_score: 7.92 | repo: google/guava (51,352 stars) | docstring_style: javadoc | is_async: false

dtypes
|
def dtypes(self) -> DtypeObj:
"""
Return the dtype object of the underlying data.
See Also
--------
DataFrame.dtypes : Return the dtypes in the DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.dtypes
dtype('int64')
"""
# DataFrame compatibility
return self.dtype
|
Return the dtype object of the underlying data.
See Also
--------
DataFrame.dtypes : Return the dtypes in the DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.dtypes
dtype('int64')
language: python | file: pandas/core/series.py | line: 677 | parameters: [self] | return_type: DtypeObj | has_type_hints: true | complexity: 1 | quality_score: 6.24 | repo: pandas-dev/pandas (47,362 stars) | docstring_style: unknown | is_async: false

next
|
public String next(final int count, final boolean letters, final boolean numbers) {
return next(count, 0, 0, letters, numbers);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
</p>
@param count the length of random string to create.
@param letters if {@code true}, generated string may include alphabetic characters.
@param numbers if {@code true}, generated string may include numeric characters.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 709
|
[
"count",
"letters",
"numbers"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
is_due
|
def is_due(self, last_run_at: datetime) -> tuple[bool, datetime]:
"""Return tuple of ``(is_due, next_time_to_check)``.
Notes:
- next time to check is in seconds.
- ``(True, 20)``, means the task should be run now, and the next
time to check is in 20 seconds.
- ``(False, 12.3)``, means the task is not due, but that the
scheduler should check again in 12.3 seconds.
The next time to check is used to save energy/CPU cycles,
it does not need to be accurate but will influence the precision
of your schedule. You must also keep in mind
the value of :setting:`beat_max_loop_interval`,
that decides the maximum number of seconds the scheduler can
sleep between re-checking the periodic task intervals. So if you
have a task that changes schedule at run-time then your next_run_at
check will decide how long it will take before a change to the
schedule takes effect. The max loop interval takes precedence
over the next check at value returned.
.. admonition:: Scheduler max interval variance
The default max loop interval may vary for different schedulers.
For the default scheduler the value is 5 minutes, but for example
the :pypi:`django-celery-beat` database scheduler the value
is 5 seconds.
"""
last_run_at = self.maybe_make_aware(last_run_at)
rem_delta = self.remaining_estimate(last_run_at)
remaining_s = max(rem_delta.total_seconds(), 0)
if remaining_s == 0:
return schedstate(is_due=True, next=self.seconds)
return schedstate(is_due=False, next=remaining_s)
|
Return tuple of ``(is_due, next_time_to_check)``.
Notes:
- next time to check is in seconds.
- ``(True, 20)``, means the task should be run now, and the next
time to check is in 20 seconds.
- ``(False, 12.3)``, means the task is not due, but that the
scheduler should check again in 12.3 seconds.
The next time to check is used to save energy/CPU cycles,
it does not need to be accurate but will influence the precision
of your schedule. You must also keep in mind
the value of :setting:`beat_max_loop_interval`,
that decides the maximum number of seconds the scheduler can
sleep between re-checking the periodic task intervals. So if you
have a task that changes schedule at run-time then your next_run_at
check will decide how long it will take before a change to the
schedule takes effect. The max loop interval takes precedence
over the next check at value returned.
.. admonition:: Scheduler max interval variance
The default max loop interval may vary for different schedulers.
For the default scheduler the value is 5 minutes, but for example
the :pypi:`django-celery-beat` database scheduler the value
is 5 seconds.
|
python
|
celery/schedules.py
| 138
|
[
"self",
"last_run_at"
] |
tuple[bool, datetime]
| true
| 2
| 6.72
|
celery/celery
| 27,741
|
unknown
| false
|
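The ``(is_due, next_time_to_check)`` contract above can be illustrated standalone. A minimal sketch, assuming a fixed-interval schedule and a hypothetical helper name (this is not celery's API):

from datetime import datetime, timezone

def interval_is_due(last_run_at: datetime, every_s: float) -> tuple[bool, float]:
    # Hypothetical re-implementation of the contract described above:
    # (True, every_s) when the interval has elapsed, otherwise
    # (False, seconds_remaining) so the scheduler knows when to re-check.
    # last_run_at must be timezone-aware here.
    elapsed = (datetime.now(timezone.utc) - last_run_at).total_seconds()
    remaining = max(every_s - elapsed, 0.0)
    if remaining == 0.0:
        return True, every_s
    return False, remaining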
english_capitalize
|
def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy._core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s
|
Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy._core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
|
python
|
numpy/_core/_string_helpers.py
| 72
|
[
"s"
] | false
| 3
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
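The locale-independence argument above comes down to using an explicit ASCII mapping instead of str.capitalize(). A minimal sketch of the same technique (the table-based helper below is an assumption for illustration, not numpy's actual english_upper):

import string

# Explicit ASCII translation table: immune to locale-specific case rules.
_TO_UPPER = str.maketrans(string.ascii_lowercase, string.ascii_uppercase)

def ascii_capitalize(s: str) -> str:
    # Upper-case only the first character, leaving the rest untouched.
    return s[0].translate(_TO_UPPER) + s[1:] if s else s

assert ascii_capitalize("int8") == "Int8"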
nested
|
@SuppressWarnings("unchecked")
void nested(T item, BiConsumer<String, Object> pairs) {
LinkedHashMap<String, Object> result = new LinkedHashMap<>();
this.addedPairs.forEach((addedPair) -> {
addedPair.accept(item, joining((name, value) -> {
List<String> nameParts = List.of(name.split("\\."));
Map<String, Object> destination = result;
for (int i = 0; i < nameParts.size() - 1; i++) {
Object existing = destination.computeIfAbsent(nameParts.get(i), (key) -> new LinkedHashMap<>());
if (!(existing instanceof Map)) {
String common = String.join(".", nameParts.subList(0, i + 1));
throw new IllegalStateException(
"Duplicate nested pairs added under '%s'".formatted(common));
}
destination = (Map<String, Object>) existing;
}
Object previous = destination.put(nameParts.get(nameParts.size() - 1), value);
Assert.state(previous == null, () -> "Duplicate nested pairs added under '%s'".formatted(name));
}));
});
result.forEach(pairs);
}
|
Add nested pairs for the given item, splitting dotted names into nested maps and rejecting duplicate names.
@param item the item to extract pairs from
@param pairs callback invoked with each top-level name and its (possibly nested) value
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/ContextPairs.java
| 191
|
[
"item",
"pairs"
] |
void
| true
| 3
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
maybeTriggerWakeup
|
public void maybeTriggerWakeup() {
if (!wakeupDisabled.get() && wakeup.get()) {
log.debug("Raising WakeupException in response to user wakeup");
wakeup.set(false);
throw new WakeupException();
}
}
|
Throw a WakeupException if a wakeup was requested and wakeups are not currently disabled, clearing the wakeup flag before throwing.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 527
|
[] |
void
| true
| 3
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
decide_loop_order_to_match
|
def decide_loop_order_to_match(self, other: "MemoryDep") -> Optional[list[int]]:
"""
Can return None if not able to decide loop orders.
"""
assert self.num_vars == other.num_vars
# ignore broadcast for now since broadcast causes extra 0 strides
# which makes it hard to decide the correct loop orders.
if self.num_vars != len(self.index.free_symbols):
return None
if other.num_vars != len(other.index.free_symbols):
return None
# bail out if any size is 0 or 1
# For size == 0, it's an empty tensor, any strides for that dimension
# are equivalent. Skip for simplicity and it may not matter that much.
#
# For size == 1, it can cause ties between strides of different dimensions.
# Also, when we first create LoopBody in ComputedBuffer.simplify_and_reorder
# we call dependencies.index_vars_squeeze, which should already squeeze
# the size == 1 dimensions.
if any(s == 0 or s == 1 for s in itertools.chain(self.size, other.size)):
return None
# Extract strides for both expressions
self_strides = V.graph.sizevars.stride_hints(self.index, self.var_names)
other_strides = V.graph.sizevars.stride_hints(other.index, other.var_names)
# Even if the shape contains no 0/1, some complex index expression may
# still have duplicate stride values. Here is an example:
# https://gist.github.com/shunting314/511a7e1ec88aa2e1a8ec85d8445ab129
# We don't reorder the loop for these cases for now, but in theory
# we could improve the algorithm to detect the correct loop orders.
if len(OrderedSet(self_strides)) != len(self_strides) or len(
OrderedSet(other_strides)
) != len(other_strides):
log.debug(
"unable to decide loop order. self_dep=%s v.s. other_dep=%s, self_strides=%s v.s. other_strides=%s",
self,
other,
self_strides,
other_strides,
)
return None
# May happen if self and other are as follows
# MemoryDep('addmm_6', 393216*d0 + 768*d1 + d2, {d0: 16, d1: 512, d2: 768}, None)
# MemoryDep('addmm_6', 98304*d0 + d1 + 768*d2, {d0: 64, d1: 768, d2: 128}, None)
if OrderedSet(self_strides) != OrderedSet(other_strides):
return None
stride_to_index = {s: i for i, s in enumerate(self_strides)}
order = [stride_to_index[s] for s in other_strides]
assert OrderedSet(order) == OrderedSet(range(self.num_vars))
return order
|
Can return None if not able to decide loop orders.
|
python
|
torch/_inductor/dependencies.py
| 104
|
[
"self",
"other"
] |
Optional[list[int]]
| true
| 8
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
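The final step above, turning two stride tuples into a loop-order permutation, is easy to check in isolation. A small sketch with hypothetical stride values:

# Both tuples must contain the same distinct values for an order to exist,
# which the preceding checks in decide_loop_order_to_match guarantee.
self_strides = (1, 768, 393216)
other_strides = (393216, 1, 768)

stride_to_index = {s: i for i, s in enumerate(self_strides)}
order = [stride_to_index[s] for s in other_strides]
print(order)  # [2, 0, 1]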
poll_query_status
|
def poll_query_status(
self, query_execution_id: str, max_polling_attempts: int | None = None, sleep_time: int | None = None
) -> str | None:
"""
Poll the state of a submitted query until it reaches final state.
:param query_execution_id: ID of submitted athena query
:param max_polling_attempts: Number of times to poll for query state before function exits
:param sleep_time: Time (in seconds) to wait between two consecutive query status checks.
:return: One of the final states
"""
try:
wait(
waiter=self.get_waiter("query_complete"),
waiter_delay=30 if sleep_time is None else sleep_time,
waiter_max_attempts=max_polling_attempts or 120,
args={"QueryExecutionId": query_execution_id},
failure_message=f"Error while waiting for query {query_execution_id} to complete",
status_message=f"Query execution id: {query_execution_id}, "
f"Query is still in non-terminal state",
status_args=["QueryExecution.Status.State"],
)
except AirflowException as error:
# this function does not raise errors to keep previous behavior.
self.log.warning(
"AirflowException while polling query status. Query execution id: %s, Exception: %s",
query_execution_id,
error,
)
except Exception as e:
self.log.warning(
"Unexpected exception while polling query status. Query execution id: %s, Exception: %s",
query_execution_id,
e,
)
return self.check_query_status(query_execution_id)
|
Poll the state of a submitted query until it reaches final state.
:param query_execution_id: ID of submitted athena query
:param max_polling_attempts: Number of times to poll for query state before function exits
:param sleep_time: Time (in seconds) to wait between two consecutive query status checks.
:return: One of the final states
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/athena.py
| 272
|
[
"self",
"query_execution_id",
"max_polling_attempts",
"sleep_time"
] |
str | None
| true
| 3
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
assignedTopicIds
|
public synchronized Set<Uuid> assignedTopicIds() {
return assignedTopicIds;
}
|
@return Topic IDs received in an assignment that have not been reconciled yet, so we need metadata for them.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 957
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
create
|
public static ConfigurationMetadataRepositoryJsonBuilder create(InputStream... inputStreams) throws IOException {
ConfigurationMetadataRepositoryJsonBuilder builder = create();
for (InputStream inputStream : inputStreams) {
builder = builder.withJsonResource(inputStream);
}
return builder;
}
|
Create a new builder instance using {@link StandardCharsets#UTF_8} as the default
charset and the specified JSON resource.
@param inputStreams the source input streams
@return a new {@link ConfigurationMetadataRepositoryJsonBuilder} instance.
@throws IOException on error
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/ConfigurationMetadataRepositoryJsonBuilder.java
| 153
|
[
"inputStreams"
] |
ConfigurationMetadataRepositoryJsonBuilder
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
equals
|
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
OffsetAndMetadata that = (OffsetAndMetadata) o;
return offset == that.offset &&
Objects.equals(metadata, that.metadata) &&
Objects.equals(leaderEpoch(), that.leaderEpoch());
}
|
Compares this OffsetAndMetadata with another object for equality, based on
the offset, metadata, and leader epoch.
@return true if the given object is an OffsetAndMetadata with equal fields
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java
| 104
|
[
"o"
] | true
| 6
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
extendWith
|
public PatternBank extendWith(Map<String, String> extraPatterns) {
if (extraPatterns == null || extraPatterns.isEmpty()) {
return this;
}
var extendedBank = new LinkedHashMap<>(bank);
extendedBank.putAll(extraPatterns);
return new PatternBank(extendedBank);
}
|
Extends a pattern bank with extra patterns, returning a new pattern bank.
<p>
The returned bank will be the same reference as the original pattern bank if the extra patterns map is null or empty.
@param extraPatterns the patterns to extend this bank with (may be empty or null)
@return the extended pattern bank
|
java
|
libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java
| 55
|
[
"extraPatterns"
] |
PatternBank
| true
| 3
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
createProxyClass
|
private Class<?> createProxyClass(Class<?> beanClass, @Nullable String beanName,
Object @Nullable [] specificInterceptors, TargetSource targetSource) {
return (Class<?>) buildProxy(beanClass, beanName, specificInterceptors, targetSource, true);
}
|
Create an AOP proxy class for the given bean.
@param beanClass the class of the bean
@param beanName the name of the bean
@param specificInterceptors the set of interceptors that is
specific to this bean (may be empty, but not null)
@param targetSource the TargetSource for the proxy,
already pre-configured to access the bean
@return the AOP proxy class for the bean
@see #buildAdvisors
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AbstractAutoProxyCreator.java
| 434
|
[
"beanClass",
"beanName",
"specificInterceptors",
"targetSource"
] | true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
compute_list_like
|
def compute_list_like(
self,
op_name: Literal["agg", "apply"],
selected_obj: Series | DataFrame,
kwargs: dict[str, Any],
) -> tuple[list[Hashable] | Index, list[Any]]:
"""
Compute agg/apply results for list-like input.
Parameters
----------
op_name : {"agg", "apply"}
Operation being performed.
selected_obj : Series or DataFrame
Data to perform operation on.
kwargs : dict
Keyword arguments to pass to the functions.
Returns
-------
keys : list[Hashable] or Index
Index labels for result.
results : list
Data for result. When aggregating with a Series, this can contain any
Python objects.
"""
func = cast(list[AggFuncTypeBase], self.func)
obj = self.obj
results = []
keys = []
# degenerate case
if selected_obj.ndim == 1:
for a in func:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
args = (
[self.axis, *self.args]
if include_axis(op_name, colg)
else self.args
)
new_res = getattr(colg, op_name)(a, *args, **kwargs)
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
else:
indices = []
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
args = (
[self.axis, *self.args]
if include_axis(op_name, colg)
else self.args
)
new_res = getattr(colg, op_name)(func, *args, **kwargs)
results.append(new_res)
indices.append(index)
# error: Incompatible types in assignment (expression has type "Any |
# Index", variable has type "list[Any | Callable[..., Any] | str]")
keys = selected_obj.columns.take(indices) # type: ignore[assignment]
return keys, results
|
Compute agg/apply results for list-like input.
Parameters
----------
op_name : {"agg", "apply"}
Operation being performed.
selected_obj : Series or DataFrame
Data to perform operation on.
kwargs : dict
Keyword arguments to pass to the functions.
Returns
-------
keys : list[Hashable] or Index
Index labels for result.
results : list
Data for result. When aggregating with a Series, this can contain any
Python objects.
|
python
|
pandas/core/apply.py
| 416
|
[
"self",
"op_name",
"selected_obj",
"kwargs"
] |
tuple[list[Hashable] | Index, list[Any]]
| true
| 8
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
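This method is the machinery behind passing a list of functions to agg/apply: one result per function, keyed by the callable's name. A short usage example with pandas:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
# A list-like func routes through compute_list_like: one row per function.
print(df.agg(["sum", "mean"]))
#          a     b
# sum   6.0  15.0
# mean  2.0   5.0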
findThreads
|
@Deprecated
public static Collection<Thread> findThreads(final ThreadGroup threadGroup, final boolean recurse, final ThreadPredicate predicate) {
return findThreads(threadGroup, recurse, (Predicate<Thread>) predicate::test);
}
|
Finds all active threads which match the given predicate and which belong to the given thread group (or one of its subgroups).
@param threadGroup the thread group.
@param recurse if {@code true} then evaluate the predicate recursively on all threads in all subgroups of the given group.
@param predicate the predicate.
@return An unmodifiable {@link Collection} of active threads which match the given predicate and which belong to the given thread group.
@throws NullPointerException if the given group or predicate is null.
@throws SecurityException if the current thread cannot modify thread groups from this thread's thread group up to the system thread group.
@deprecated Use {@link #findThreads(ThreadGroup, boolean, Predicate)}.
|
java
|
src/main/java/org/apache/commons/lang3/ThreadUtils.java
| 365
|
[
"threadGroup",
"recurse",
"predicate"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
opj_int_add_no_overflow
|
static INLINE OPJ_INT32 opj_int_add_no_overflow(OPJ_INT32 a, OPJ_INT32 b)
{
void* pa = &a;
void* pb = &b;
OPJ_UINT32* upa = (OPJ_UINT32*)pa;
OPJ_UINT32* upb = (OPJ_UINT32*)pb;
OPJ_UINT32 ures = *upa + *upb;
void* pures = &ures;
OPJ_INT32* ipres = (OPJ_INT32*)pures;
return *ipres;
}
|
Adds two signed integers with wrap-around behaviour.
Assumes two's-complement signed integers.
@param a
@param b
@return Returns a + b
|
cpp
|
3rdparty/openjpeg/openjp2/opj_intmath.h
| 297
|
[
"a",
"b"
] | true
| 1
| 6.88
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
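The unsigned-reinterpretation trick above can be mirrored in Python with explicit 32-bit masking. A minimal sketch of the same wrap-around semantics:

def int32_add_wrap(a: int, b: int) -> int:
    # Add with 32-bit wrap-around, then reinterpret the result as signed,
    # mirroring the unsigned-pointer aliasing in opj_int_add_no_overflow.
    r = (a + b) & 0xFFFFFFFF
    return r - 0x100000000 if r >= 0x80000000 else r

assert int32_add_wrap(2**31 - 1, 1) == -(2**31)  # INT_MAX + 1 wraps to INT_MIN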
_validate_teams_exist_in_database
|
def _validate_teams_exist_in_database(cls, team_names: set[str]) -> None:
"""
Validate that all specified team names exist in the database.
:param team_names: Set of team names to validate
:raises AirflowConfigException: If any team names don't exist in the database
"""
if not team_names:
return
existing_teams = Team.get_all_team_names()
missing_teams = team_names - existing_teams
if missing_teams:
missing_teams_list = sorted(missing_teams)
missing_teams_str = ", ".join(missing_teams_list)
raise AirflowConfigException(
f"One or more teams specified in executor configuration do not exist in database: {missing_teams_str}. "
"Please create these teams first or remove them from executor configuration."
)
|
Validate that all specified team names exist in the database.
:param team_names: Set of team names to validate
:raises AirflowConfigException: If any team names don't exist in the database
|
python
|
airflow-core/src/airflow/executors/executor_loader.py
| 164
|
[
"cls",
"team_names"
] |
None
| true
| 3
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
readHugePageSizes
|
HugePageSizeVec readHugePageSizes() {
HugePageSizeVec sizeVec = readRawHugePageSizes();
if (sizeVec.empty()) {
return sizeVec; // nothing to do
}
std::sort(sizeVec.begin(), sizeVec.end());
size_t defaultHugePageSize = getDefaultHugePageSize();
struct PageSizeLess {
bool operator()(const HugePageSize& a, size_t b) const {
return a.size < b;
}
};
// Read and parse /proc/mounts
std::vector<StringPiece> parts;
std::vector<StringPiece> options;
gen::byLine("/proc/mounts") | gen::eachAs<StringPiece>() |
[&](StringPiece line) {
parts.clear();
split(' ', line, parts);
// device path fstype options uid gid
if (parts.size() != 6) {
throw std::runtime_error("Invalid /proc/mounts line");
}
if (parts[2] != "hugetlbfs") {
return; // we only care about hugetlbfs
}
options.clear();
split(',', parts[3], options);
size_t pageSize = defaultHugePageSize;
// Search for the "pagesize" option, which must have a value
for (auto& option : options) {
// key=value
auto p = static_cast<const char*>(
memchr(option.data(), '=', option.size()));
if (!p) {
continue;
}
if (StringPiece(option.data(), p) != "pagesize") {
continue;
}
pageSize = parsePageSizeValue(StringPiece(p + 1, option.end()));
break;
}
auto pos = std::lower_bound(
sizeVec.begin(), sizeVec.end(), pageSize, PageSizeLess());
if (pos == sizeVec.end() || pos->size != pageSize) {
throw std::runtime_error("Mount page size not found");
}
if (!pos->mountPoint.empty()) {
// Only one mount point per page size is allowed
return;
}
// Store mount point
fs::path path(parts[1].begin(), parts[1].end());
struct stat st;
const int ret = stat(path.string().c_str(), &st);
if (ret == -1 && errno == ENOENT) {
return;
}
checkUnixError(ret, "stat hugepage mountpoint failed");
pos->mountPoint = fs::canonical(path);
pos->device = st.st_dev;
};
return sizeVec;
}
|
Get list of supported huge page sizes and their mount points, if
hugetlbfs file systems are mounted for those sizes.
|
cpp
|
folly/io/HugePages.cpp
| 125
|
[] | true
| 11
| 7.12
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
between
|
public static JavaUnicodeEscaper between(final int codePointLow, final int codePointHigh) {
return new JavaUnicodeEscaper(codePointLow, codePointHigh, true);
}
|
Constructs a {@link JavaUnicodeEscaper} between the specified values (inclusive).
@param codePointLow
the lowest code point to escape (inclusive).
@param codePointHigh
the highest code point to escape (inclusive).
@return the newly created {@link UnicodeEscaper} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/JavaUnicodeEscaper.java
| 61
|
[
"codePointLow",
"codePointHigh"
] |
JavaUnicodeEscaper
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
compressed
|
def compressed(self):
"""
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> import numpy as np
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<class 'numpy.ndarray'>
N-D arrays are compressed to 1-D.
>>> arr = [[1, 2], [3, 4]]
>>> mask = [[1, 0], [0, 1]]
>>> x = np.ma.array(arr, mask=mask)
>>> x.compressed()
array([2, 3])
"""
data = ndarray.ravel(self._data)
if self._mask is not nomask:
data = data.compress(np.logical_not(ndarray.ravel(self._mask)))
return data
|
Return all the non-masked data as a 1-D array.
Returns
-------
data : ndarray
A new `ndarray` holding the non-masked data is returned.
Notes
-----
The result is **not** a MaskedArray!
Examples
--------
>>> import numpy as np
>>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3)
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
<class 'numpy.ndarray'>
N-D arrays are compressed to 1-D.
>>> arr = [[1, 2], [3, 4]]
>>> mask = [[1, 0], [0, 1]]
>>> x = np.ma.array(arr, mask=mask)
>>> x.compressed()
array([2, 3])
|
python
|
numpy/ma/core.py
| 3,937
|
[
"self"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
parseType
|
function parseType(): TypeNode {
if (contextFlags & NodeFlags.TypeExcludesFlags) {
return doOutsideOfContext(NodeFlags.TypeExcludesFlags, parseType);
}
if (isStartOfFunctionTypeOrConstructorType()) {
return parseFunctionOrConstructorType();
}
const pos = getNodePos();
const type = parseUnionTypeOrHigher();
if (!inDisallowConditionalTypesContext() && !scanner.hasPrecedingLineBreak() && parseOptional(SyntaxKind.ExtendsKeyword)) {
// The type following 'extends' is not permitted to be another conditional type
const extendsType = disallowConditionalTypesAnd(parseType);
parseExpected(SyntaxKind.QuestionToken);
const trueType = allowConditionalTypesAnd(parseType);
parseExpected(SyntaxKind.ColonToken);
const falseType = allowConditionalTypesAnd(parseType);
return finishNode(factory.createConditionalTypeNode(type, extendsType, trueType, falseType), pos);
}
return type;
}
|
Parses a type, dispatching to function/constructor type parsing when applicable
and handling conditional types (`T extends U ? X : Y`) where the current
context permits them.
|
typescript
|
src/compiler/parser.ts
| 4,940
|
[] | true
| 6
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isValidAnnotationMemberType
|
public static boolean isValidAnnotationMemberType(Class<?> type) {
if (type == null) {
return false;
}
if (type.isArray()) {
type = type.getComponentType();
}
return type.isPrimitive() || type.isEnum() || type.isAnnotation()
|| String.class.equals(type) || Class.class.equals(type);
}
|
Checks if the specified type is permitted as an annotation member.
<p>The Java language specification only permits certain types to be used
in annotations. These include {@link String}, {@link Class}, primitive
types, {@link Annotation}, {@link Enum}, and single-dimensional arrays of
these types.</p>
@param type the type to check, may be {@code null} (in which case {@code false} is returned)
@return {@code true} if the type is a valid type to use in an annotation
|
java
|
src/main/java/org/apache/commons/lang3/AnnotationUtils.java
| 286
|
[
"type"
] | true
| 7
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
of
|
public static <T> Bindable<T> of(ResolvableType type) {
Assert.notNull(type, "'type' must not be null");
ResolvableType boxedType = box(type);
return new Bindable<>(type, boxedType, null, NO_ANNOTATIONS, NO_BIND_RESTRICTIONS, null);
}
|
Create a new {@link Bindable} of the specified type.
@param <T> the source type
@param type the type (must not be {@code null})
@return a {@link Bindable} instance
@see #of(Class)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 314
|
[
"type"
] | true
| 1
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
oas
|
def oas(X, *, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data will be centered before computation.
Returns
-------
shrunk_cov : array-like of shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
(see [1]_).
The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
the original article, formula (23) states that 2/p (p being the number of
features) is multiplied by Trace(cov*cov) in both the numerator and
denominator, but this operation is omitted because for a large p, the value
of 2/p is so small that it doesn't affect the value of the estimator.
References
----------
.. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
<0907.4698>`
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import oas
>>> rng = np.random.RandomState(0)
>>> real_cov = [[.8, .3], [.3, .4]]
>>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
>>> shrunk_cov, shrinkage = oas(X)
>>> shrunk_cov
array([[0.7533, 0.2763],
[0.2763, 0.3964]])
>>> shrinkage
np.float64(0.0195)
"""
estimator = OAS(
assume_centered=assume_centered,
).fit(X)
return estimator.covariance_, estimator.shrinkage_
|
Estimate covariance with the Oracle Approximating Shrinkage.
Read more in the :ref:`User Guide <shrunk_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data from which to compute the covariance estimate.
assume_centered : bool, default=False
If True, data will not be centered before computation.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, data will be centered before computation.
Returns
-------
shrunk_cov : array-like of shape (n_features, n_features)
Shrunk covariance.
shrinkage : float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is:
(1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),
where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
(see [1]_).
The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
the original article, formula (23) states that 2/p (p being the number of
features) is multiplied by Trace(cov*cov) in both the numerator and
denominator, but this operation is omitted because for a large p, the value
of 2/p is so small that it doesn't affect the value of the estimator.
References
----------
.. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
<0907.4698>`
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import oas
>>> rng = np.random.RandomState(0)
>>> real_cov = [[.8, .3], [.3, .4]]
>>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
>>> shrunk_cov, shrinkage = oas(X)
>>> shrunk_cov
array([[0.7533, 0.2763],
[0.2763, 0.3964]])
>>> shrinkage
np.float64(0.0195)
|
python
|
sklearn/covariance/_shrunk_covariance.py
| 621
|
[
"X",
"assume_centered"
] | false
| 1
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
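The Notes formula can be verified directly against the helper's output. A sketch that recomputes the shrunk estimate from the empirical covariance (this should agree up to floating-point tolerance):

import numpy as np
from sklearn.covariance import empirical_covariance, oas

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0], cov=[[.8, .3], [.3, .4]], size=500)

shrunk_cov, shrinkage = oas(X)
# Recompute (1 - shrinkage) * cov + shrinkage * mu * I from the Notes.
cov = empirical_covariance(X)
mu = np.trace(cov) / cov.shape[0]
manual = (1 - shrinkage) * cov + shrinkage * mu * np.identity(cov.shape[0])
assert np.allclose(manual, shrunk_cov)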
_promote_fields
|
def _promote_fields(dt1, dt2):
""" Perform type promotion for two structured dtypes.
Parameters
----------
dt1 : structured dtype
First dtype.
dt2 : structured dtype
Second dtype.
Returns
-------
out : dtype
The promoted dtype
Notes
-----
If one of the inputs is aligned, the result will be. The titles of
both descriptors must match (point to the same field).
"""
# Both must be structured and have the same names in the same order
if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
raise DTypePromotionError(
f"field names `{dt1.names}` and `{dt2.names}` mismatch.")
# if both are identical, we can (maybe!) just return the same dtype.
identical = dt1 is dt2
new_fields = []
for name in dt1.names:
field1 = dt1.fields[name]
field2 = dt2.fields[name]
new_descr = promote_types(field1[0], field2[0])
identical = identical and new_descr is field1[0]
# Check that the titles match (if given):
if field1[2:] != field2[2:]:
raise DTypePromotionError(
f"field titles of field '{name}' mismatch")
if len(field1) == 2:
new_fields.append((name, new_descr))
else:
new_fields.append(((field1[2], name), new_descr))
res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)
# Might as well preserve identity (and metadata) if the dtype is identical
# and the itemsize, offsets are also unmodified. This could probably be
# sped up, but also probably just be removed entirely.
if identical and res.itemsize == dt1.itemsize:
for name in dt1.names:
if dt1.fields[name][1] != res.fields[name][1]:
return res # the dtype changed.
return dt1
return res
|
Perform type promotion for two structured dtypes.
Parameters
----------
dt1 : structured dtype
First dtype.
dt2 : structured dtype
Second dtype.
Returns
-------
out : dtype
The promoted dtype
Notes
-----
If one of the inputs is aligned, the result will be. The titles of
both descriptors must match (point to the same field).
|
python
|
numpy/_core/_internal.py
| 410
|
[
"dt1",
"dt2"
] | false
| 14
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
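At the user level this logic is reached through ordinary dtype promotion. A short example (assuming a NumPy version where structured-dtype promotion is supported):

import numpy as np

dt1 = np.dtype([("a", "i4"), ("b", "f4")])
dt2 = np.dtype([("a", "i8"), ("b", "f4")])
# Field names must match; each field's dtype is promoted independently.
print(np.promote_types(dt1, dt2))  # [('a', '<i8'), ('b', '<f4')]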
drop_airflow_models
|
def drop_airflow_models(connection):
"""
Drop all airflow models.
:param connection: SQLAlchemy Connection
:return: None
"""
from airflow.models.base import Base
Base.metadata.drop_all(connection)
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
migration_ctx = MigrationContext.configure(connection)
version = migration_ctx._version
if inspect(connection).has_table(version.name):
version.drop(connection)
|
Drop all airflow models.
:param connection: SQLAlchemy Connection
:return: None
|
python
|
airflow-core/src/airflow/utils/db.py
| 1,285
|
[
"connection"
] | false
| 2
| 7.12
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
runWithTimeout
|
@Override
public void runWithTimeout(Runnable runnable, long timeoutDuration, TimeUnit timeoutUnit)
throws TimeoutException, InterruptedException {
checkNotNull(runnable);
checkNotNull(timeoutUnit);
checkPositiveTimeout(timeoutDuration);
Future<?> future = executor.submit(runnable);
try {
future.get(timeoutDuration, timeoutUnit);
} catch (InterruptedException | TimeoutException e) {
future.cancel(true /* mayInterruptIfRunning */);
throw e;
} catch (ExecutionException e) {
wrapAndThrowRuntimeExecutionExceptionOrError(e.getCause());
throw new AssertionError();
}
}
|
Runs the given runnable on the configured executor, waiting at most the given
timeout for it to complete. If the wait times out or the calling thread is
interrupted, the task is cancelled (with interruption) and the exception is
rethrown; execution failures are unwrapped and rethrown as runtime exceptions
or errors.
@param runnable the task to run
@param timeoutDuration the maximum time to wait
@param timeoutUnit the unit of {@code timeoutDuration}
|
java
|
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
| 181
|
[
"runnable",
"timeoutDuration",
"timeoutUnit"
] |
void
| true
| 3
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
reduce
|
public Fraction reduce() {
if (numerator == 0) {
return equals(ZERO) ? this : ZERO;
}
final int gcd = greatestCommonDivisor(Math.abs(numerator), denominator);
if (gcd == 1) {
return this;
}
return getFraction(numerator / gcd, denominator / gcd);
}
|
Reduce the fraction to the smallest values for the numerator and denominator, returning the result.
<p>
For example, if this fraction represents 2/4, then the result will be 1/2.
</p>
@return a new reduced fraction instance, or this if no simplification possible
|
java
|
src/main/java/org/apache/commons/lang3/math/Fraction.java
| 848
|
[] |
Fraction
| true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
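The same gcd-based reduction, sketched in Python (assumes a positive denominator, which the Java class maintains):

from math import gcd

def reduce_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    # A zero numerator reduces to 0/1; otherwise divide both parts by
    # their greatest common divisor, as Fraction.reduce does.
    if numerator == 0:
        return 0, 1
    g = gcd(abs(numerator), denominator)
    return numerator // g, denominator // g

assert reduce_fraction(2, 4) == (1, 2)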
regroupPartitionMapByNode
|
<T> Map<Node, Map<TopicPartition, T>> regroupPartitionMapByNode(Map<TopicPartition, T> partitionMap) {
return partitionMap.entrySet()
.stream()
.collect(Collectors.groupingBy(entry -> metadata.fetch().leaderFor(entry.getKey()),
Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
}
|
Regroup a partition-to-value map by the node that currently leads each partition.
@param partitionMap The map of partitions to regroup.
@return A map from each leader node to the entries whose partitions it leads.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java
| 168
|
[
"partitionMap"
] | true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
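The grouping pipeline above is the classic "regroup a map by a derived key" pattern. A Python sketch, with a hypothetical leader_for lookup standing in for metadata.fetch().leaderFor():

from collections import defaultdict

def regroup_by_node(partition_map, leader_for):
    # Group partition -> value entries by each partition's leader node.
    grouped = defaultdict(dict)
    for partition, value in partition_map.items():
        grouped[leader_for(partition)][partition] = value
    return dict(grouped)

print(regroup_by_node({"t-0": 1, "t-1": 2}, lambda p: "node-" + p[-1]))
# {'node-0': {'t-0': 1}, 'node-1': {'t-1': 2}}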
clean_docker_context_files
|
def clean_docker_context_files():
"""
Cleans up docker context files folder - leaving only .README.md there.
"""
if get_verbose() or get_dry_run():
get_console().print("[info]Cleaning docker-context-files[/]")
if get_dry_run():
return
context_files_to_delete = DOCKER_CONTEXT_PATH.rglob("*")
for file_to_delete in context_files_to_delete:
if file_to_delete.name != ".README.md":
file_to_delete.unlink(missing_ok=True)
|
Cleans up docker context files folder - leaving only .README.md there.
|
python
|
dev/breeze/src/airflow_breeze/commands/production_image_commands.py
| 808
|
[] | false
| 6
| 6.24
|
apache/airflow
| 43,597
|
unknown
| false
|
|
_is_dtype
|
def _is_dtype(arr_or_dtype, condition) -> bool:
"""
Return true if the condition is satisfied for the arr_or_dtype.
Parameters
----------
arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
The array-like or dtype object whose dtype we want to extract.
condition : callable[Union[np.dtype, ExtensionDtype]]
Returns
-------
bool
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except (TypeError, ValueError):
return False
return condition(dtype)
|
Return true if the condition is satisfied for the arr_or_dtype.
Parameters
----------
arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType
The array-like or dtype object whose dtype we want to extract.
condition : callable[Union[np.dtype, ExtensionDtype]]
Returns
-------
bool
|
python
|
pandas/core/dtypes/common.py
| 1,600
|
[
"arr_or_dtype",
"condition"
] |
bool
| true
| 2
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
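This try-extract-then-test pattern is what lets the public pandas.api.types predicates accept arrays, dtypes, or strings and return False instead of raising. Usage:

import numpy as np
import pandas as pd

print(pd.api.types.is_integer_dtype(np.array([1, 2])))  # True
print(pd.api.types.is_integer_dtype("float64"))         # False
print(pd.api.types.is_integer_dtype(None))              # False, not an error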
visitorWorker
|
function visitorWorker(node: Node, valueIsDiscarded: boolean): VisitResult<Node> {
// This visitor does not need to descend into the tree if there is no dynamic import, destructuring assignment, or update expression
// as export/import statements are only transformed at the top level of a file.
if (!(node.transformFlags & (TransformFlags.ContainsDynamicImport | TransformFlags.ContainsDestructuringAssignment | TransformFlags.ContainsUpdateExpressionForIdentifier)) && !importsAndRequiresToRewriteOrShim?.length) {
return node;
}
switch (node.kind) {
case SyntaxKind.ForStatement:
return visitForStatement(node as ForStatement, /*isTopLevel*/ false);
case SyntaxKind.ExpressionStatement:
return visitExpressionStatement(node as ExpressionStatement);
case SyntaxKind.ParenthesizedExpression:
return visitParenthesizedExpression(node as ParenthesizedExpression, valueIsDiscarded);
case SyntaxKind.PartiallyEmittedExpression:
return visitPartiallyEmittedExpression(node as PartiallyEmittedExpression, valueIsDiscarded);
case SyntaxKind.CallExpression:
const needsRewrite = node === firstOrUndefined(importsAndRequiresToRewriteOrShim);
if (needsRewrite) {
importsAndRequiresToRewriteOrShim!.shift();
}
if (isImportCall(node) && host.shouldTransformImportCall(currentSourceFile)) {
return visitImportCallExpression(node, needsRewrite);
}
else if (needsRewrite) {
return shimOrRewriteImportOrRequireCall(node as CallExpression);
}
break;
case SyntaxKind.BinaryExpression:
if (isDestructuringAssignment(node)) {
return visitDestructuringAssignment(node, valueIsDiscarded);
}
break;
case SyntaxKind.PrefixUnaryExpression:
case SyntaxKind.PostfixUnaryExpression:
return visitPreOrPostfixUnaryExpression(node as PrefixUnaryExpression | PostfixUnaryExpression, valueIsDiscarded);
}
return visitEachChild(node, visitor, context);
}
|
Visit nested elements at the top-level of a module.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 795
|
[
"node",
"valueIsDiscarded"
] | true
| 9
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isNonPowerOf2NorNullLiteral
|
static bool isNonPowerOf2NorNullLiteral(const EnumConstantDecl *EnumConst) {
const llvm::APSInt &Val = EnumConst->getInitVal();
if (Val.isPowerOf2() || !Val.getBoolValue())
return false;
const Expr *InitExpr = EnumConst->getInitExpr();
if (!InitExpr)
return true;
return isa<IntegerLiteral>(InitExpr->IgnoreImpCasts());
}
|
Returns true if the enum constant's value is neither a power of two nor zero and comes from an integer literal (or the constant has no initializer).
|
cpp
|
clang-tools-extra/clang-tidy/bugprone/SuspiciousEnumUsageCheck.cpp
| 66
|
[] | true
| 4
| 6.56
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
yield_namespace_device_dtype_combinations
|
def yield_namespace_device_dtype_combinations(include_numpy_namespaces=True):
"""Yield supported namespace, device, dtype tuples for testing.
Use this to test that an estimator works with all combinations.
Use in conjunction with `ids=_get_namespace_device_dtype_ids` to give
clearer pytest parametrization ID names.
Parameters
----------
include_numpy_namespaces : bool, default=True
If True, also yield numpy namespaces.
Returns
-------
array_namespace : str
The name of the Array API namespace.
device : str
The name of the device on which to allocate the arrays. Can be None to
indicate that the default value should be used.
dtype_name : str
The name of the data type to use for arrays. Can be None to indicate
that the default value should be used.
"""
for array_namespace in yield_namespaces(
include_numpy_namespaces=include_numpy_namespaces
):
if array_namespace == "torch":
for device, dtype in itertools.product(
("cpu", "cuda", "xpu"), ("float64", "float32")
):
yield array_namespace, device, dtype
yield array_namespace, "mps", "float32"
elif array_namespace == "array_api_strict":
try:
import array_api_strict
yield array_namespace, array_api_strict.Device("CPU_DEVICE"), "float64"
yield array_namespace, array_api_strict.Device("device1"), "float32"
except ImportError:
# Those combinations will typically be skipped by pytest if
# array_api_strict is not installed but we still need to see them in
# the test output.
yield array_namespace, "CPU_DEVICE", "float64"
yield array_namespace, "device1", "float32"
else:
yield array_namespace, None, None
|
Yield supported namespace, device, dtype tuples for testing.
Use this to test that an estimator works with all combinations.
Use in conjunction with `ids=_get_namespace_device_dtype_ids` to give
clearer pytest parametrization ID names.
Parameters
----------
include_numpy_namespaces : bool, default=True
If True, also yield numpy namespaces.
Returns
-------
array_namespace : str
The name of the Array API namespace.
device : str
The name of the device on which to allocate the arrays. Can be None to
indicate that the default value should be used.
dtype_name : str
The name of the data type to use for arrays. Can be None to indicate
that the default value should be used.
|
python
|
sklearn/utils/_array_api.py
| 59
|
[
"include_numpy_namespaces"
] | false
| 6
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
bindOptionalChain
|
function bindOptionalChain(node: OptionalChain, trueTarget: FlowLabel, falseTarget: FlowLabel) {
// For an optional chain, we emulate the behavior of a logical expression:
//
// a?.b -> a && a.b
// a?.b.c -> a && a.b.c
// a?.b?.c -> a && a.b && a.b.c
// a?.[x = 1] -> a && a[x = 1]
//
// To do this we descend through the chain until we reach the root of a chain (the expression with a `?.`)
// and build it's CFA graph as if it were the first condition (`a && ...`). Then we bind the rest
// of the node as part of the "true" branch, and continue to do so as we ascend back up to the outermost
// chain node. We then treat the entire node as the right side of the expression.
const preChainLabel = isOptionalChainRoot(node) ? createBranchLabel() : undefined;
bindOptionalExpression(node.expression, preChainLabel || trueTarget, falseTarget);
if (preChainLabel) {
currentFlow = finishFlowLabel(preChainLabel);
}
doWithConditionalBranches(bindOptionalChainRest, node, trueTarget, falseTarget);
if (isOutermostOptionalChain(node)) {
addAntecedent(trueTarget, createFlowCondition(FlowFlags.TrueCondition, currentFlow, node));
addAntecedent(falseTarget, createFlowCondition(FlowFlags.FalseCondition, currentFlow, node));
}
}
|
Binds the control-flow graph for an optional chain (for example `a?.b`), emulating the short-circuiting behavior of a logical AND across each link of the chain.
@param node - The optional chain to bind.
@param trueTarget - The flow label to branch to when the chain evaluates truthy.
@param falseTarget - The flow label to branch to when the chain short-circuits or evaluates falsy.
|
typescript
|
src/compiler/binder.ts
| 2,170
|
[
"node",
"trueTarget",
"falseTarget"
] | false
| 5
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
std
|
def std(self, ddof: int = 1, numeric_only: bool = False, **kwargs):
"""
Calculate the rolling weighted window standard deviation.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Keyword arguments to configure the ``SciPy`` weighted window type.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.std : Aggregating std for Series.
DataFrame.std : Aggregating std for DataFrame.
Examples
--------
>>> ser = pd.Series([0, 1, 5, 2, 8])
To get an instance of :class:`~pandas.core.window.rolling.Window` we need
to pass the parameter `win_type`.
>>> type(ser.rolling(2, win_type="gaussian"))
<class 'pandas.api.typing.Window'>
In order to use the `SciPy` Gaussian window we need to provide the parameters
`M` and `std`. The parameter `M` corresponds to 2 in our example.
We pass the second parameter `std` as a parameter of the following method:
>>> ser.rolling(2, win_type="gaussian").std(std=3)
0 NaN
1 0.707107
2 2.828427
3 2.121320
4 4.242641
dtype: float64
"""
return zsqrt(
self.var(ddof=ddof, name="std", numeric_only=numeric_only, **kwargs)
)
|
Calculate the rolling weighted window standard deviation.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Keyword arguments to configure the ``SciPy`` weighted window type.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.std : Aggregating std for Series.
DataFrame.std : Aggregating std for DataFrame.
Examples
--------
>>> ser = pd.Series([0, 1, 5, 2, 8])
To get an instance of :class:`~pandas.core.window.rolling.Window` we need
to pass the parameter `win_type`.
>>> type(ser.rolling(2, win_type="gaussian"))
<class 'pandas.api.typing.Window'>
In order to use the `SciPy` Gaussian window we need to provide the parameters
`M` and `std`. The parameter `M` corresponds to 2 in our example.
We pass the second parameter `std` as a parameter of the following method:
>>> ser.rolling(2, win_type="gaussian").std(std=3)
0 NaN
1 0.707107
2 2.828427
3 2.121320
4 4.242641
dtype: float64
|
python
|
pandas/core/window/rolling.py
| 1,467
|
[
"self",
"ddof",
"numeric_only"
] | true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
difference
|
public static <K extends @Nullable Object, V extends @Nullable Object>
MapDifference<K, V> difference(
Map<? extends K, ? extends V> left,
Map<? extends K, ? extends V> right,
Equivalence<? super @NonNull V> valueEquivalence) {
Preconditions.checkNotNull(valueEquivalence);
Map<K, V> onlyOnLeft = new LinkedHashMap<>();
Map<K, V> onlyOnRight = new LinkedHashMap<>(right); // will whittle it down
Map<K, V> onBoth = new LinkedHashMap<>();
Map<K, ValueDifference<V>> differences = new LinkedHashMap<>();
doDifference(left, right, valueEquivalence, onlyOnLeft, onlyOnRight, onBoth, differences);
return new MapDifferenceImpl<>(onlyOnLeft, onlyOnRight, onBoth, differences);
}
|
Computes the difference between two maps. This difference is an immutable snapshot of the state
of the maps at the time this method is called. It will never change, even if the maps change at
a later time.
<p>Since this method uses {@code HashMap} instances internally, the keys of the supplied maps
must be well-behaved with respect to {@link Object#equals} and {@link Object#hashCode}.
@param left the map to treat as the "left" map for purposes of comparison
@param right the map to treat as the "right" map for purposes of comparison
@param valueEquivalence the equivalence relationship to use to compare values
@return the difference between the two maps
@since 10.0
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 499
|
[
"left",
"right",
"valueEquivalence"
] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
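The four-way split computed by doDifference can be sketched compactly in Python, using == as the value equivalence:

def map_difference(left, right):
    # Entries only on the left, only on the right, equal on both,
    # and (left_value, right_value) pairs for keys whose values differ.
    only_left = {k: v for k, v in left.items() if k not in right}
    only_right = {k: v for k, v in right.items() if k not in left}
    on_both = {k: v for k, v in left.items() if k in right and right[k] == v}
    differing = {k: (v, right[k]) for k, v in left.items()
                 if k in right and right[k] != v}
    return only_left, only_right, on_both, differing

print(map_difference({"a": 1, "b": 2}, {"b": 3, "c": 4}))
# ({'a': 1}, {'c': 4}, {}, {'b': (2, 3)})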
toStringMatches
|
private boolean toStringMatches(String s1, String s2) {
return s1.hashCode() == s2.hashCode() && s1.equals(s2);
}
|
Returns {@code true} if the two strings are equal, comparing hash codes first
as a cheap pre-check.
@param s1 the first string
@param s2 the second string
@return {@code true} if the strings match
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 381
|
[
"s1",
"s2"
] | true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
kneighbors_graph
|
def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
"""Compute the (weighted) graph of k-Neighbors for points in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
For ``metric='precomputed'`` the shape should be
(n_queries, n_indexed). Otherwise the shape should be
(n_queries, n_features).
n_neighbors : int, default=None
Number of neighbors for each sample. The default is the value
passed to the constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X)
NearestNeighbors(n_neighbors=2)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
# check the input only in self.kneighbors
# construct CSR matrix representation of the k-NN graph
if mode == "connectivity":
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
n_queries = A_ind.shape[0]
A_data = np.ones(n_queries * n_neighbors)
elif mode == "distance":
A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
f'or "distance" but got "{mode}" instead'
)
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_nonzero = n_queries * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
kneighbors_graph = csr_matrix(
(A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)
)
return kneighbors_graph
|
Compute the (weighted) graph of k-Neighbors for points in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
For ``metric='precomputed'`` the shape should be
(n_queries, n_indexed). Otherwise the shape should be
(n_queries, n_features).
n_neighbors : int, default=None
Number of neighbors for each sample. The default is the value
passed to the constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X)
NearestNeighbors(n_neighbors=2)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
|
python
|
sklearn/neighbors/_base.py
| 960
|
[
"self",
"X",
"n_neighbors",
"mode"
] | false
| 5
| 7.28
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
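The CSR assembly above exploits the fact that every query row has exactly n_neighbors entries, so indptr is a plain arange. A standalone sketch with hypothetical neighbor indices:

import numpy as np
from scipy.sparse import csr_matrix

n_queries, n_neighbors, n_samples_fit = 3, 2, 4
A_ind = np.array([[0, 2], [1, 3], [2, 0]])  # hypothetical k-NN indices
A_data = np.ones(n_queries * n_neighbors)
A_indptr = np.arange(0, n_queries * n_neighbors + 1, n_neighbors)
graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
                   shape=(n_queries, n_samples_fit))
print(graph.toarray())
# [[1. 0. 1. 0.]
#  [0. 1. 0. 1.]
#  [1. 0. 1. 0.]]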
baseFill
|
function baseFill(array, value, start, end) {
var length = array.length;
start = toInteger(start);
if (start < 0) {
start = -start > length ? 0 : (length + start);
}
end = (end === undefined || end > length) ? length : toInteger(end);
if (end < 0) {
end += length;
}
end = start > end ? 0 : toLength(end);
while (start < end) {
array[start++] = value;
}
return array;
}
|
The base implementation of `_.fill` without an iteratee call guard.
@private
@param {Array} array The array to fill.
@param {*} value The value to fill `array` with.
@param {number} [start=0] The start position.
@param {number} [end=array.length] The end position.
@returns {Array} Returns `array`.
|
javascript
|
lodash.js
| 2,928
|
[
"array",
"value",
"start",
"end"
] | false
| 8
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
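The bounds normalization in baseFill (negative indices count from the end, overshoots are clamped) translates directly. A Python sketch of the same logic:

def base_fill(array, value, start=0, end=None):
    # Normalize negative/overshooting bounds like baseFill, then fill.
    length = len(array)
    if start < 0:
        start = 0 if -start > length else length + start
    end = length if end is None or end > length else int(end)
    if end < 0:
        end += length
    end = 0 if start > end else end
    for i in range(start, end):
        array[i] = value
    return array

assert base_fill([1, 2, 3, 4], 0, -2) == [1, 2, 0, 0]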
getEmbeddedIPv4ClientAddress
|
public static Inet4Address getEmbeddedIPv4ClientAddress(Inet6Address ip) {
if (isCompatIPv4Address(ip)) {
return getCompatIPv4Address(ip);
}
if (is6to4Address(ip)) {
return get6to4IPv4Address(ip);
}
if (isTeredoAddress(ip)) {
return getTeredoInfo(ip).getClient();
}
throw formatIllegalArgumentException("'%s' has no embedded IPv4 address.", toAddrString(ip));
}
|
Examines the Inet6Address to extract the embedded IPv4 client address if the InetAddress is an
IPv6 address of one of the specified address types that contain an embedded IPv4 address.
<p>NOTE: ISATAP addresses are explicitly excluded from this method due to their trivial
spoofability. With other transition addresses spoofing involves (at least) infection of one's
BGP routing table.
@param ip {@link Inet6Address} to be examined for embedded IPv4 client address
@return {@link Inet4Address} of embedded IPv4 client address
@throws IllegalArgumentException if the argument does not have a valid embedded IPv4 address
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 918
|
[
"ip"
] |
Inet4Address
| true
| 4
| 7.52
|
google/guava
| 51,352
|
javadoc
| false
|
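Python's standard library exposes the same embedded-IPv4 extractions for two of the transition formats handled above (6to4 and Teredo):

import ipaddress

print(ipaddress.IPv6Address("2002:c000:0204::").sixtofour)  # 192.0.2.4
t = ipaddress.IPv6Address("2001:0:4136:e378:8000:63bf:3fff:fdd2").teredo
print(t)  # (server, client) IPv4 pair; the client is the embedded address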
getMessage
|
@Override
public @Nullable String getMessage() {
if (ObjectUtils.isEmpty(this.messageExceptions)) {
return super.getMessage();
}
else {
StringBuilder sb = new StringBuilder();
String baseMessage = super.getMessage();
if (baseMessage != null) {
sb.append(baseMessage).append(". ");
}
sb.append("Failed messages: ");
for (int i = 0; i < this.messageExceptions.length; i++) {
Exception subEx = this.messageExceptions[i];
sb.append(subEx.toString());
if (i < this.messageExceptions.length - 1) {
sb.append("; ");
}
}
return sb.toString();
}
}
|
Return the exception message, appending a summary of all failed messages
(if any) to the base message.
<p>Note that a general mail server connection failure will not contribute
failed messages here: a message is only included if actually sending it
was attempted but failed.
@return the composed message listing each thrown message exception,
or the plain base message if there are no failed messages
|
java
|
spring-context-support/src/main/java/org/springframework/mail/MailSendException.java
| 126
|
[] |
String
| true
| 5
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
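To illustrate the getMessage record above, a hedged sketch: it assumes MailSendException's public Map-based constructor (present in current Spring versions), and the message keys and causes are invented for the demo:

import java.util.LinkedHashMap;
import java.util.Map;

import org.springframework.mail.MailSendException;

public class MailSendExceptionDemo {
    public static void main(String[] args) {
        // Map of failed message -> cause; keys would normally be MimeMessage
        // or SimpleMailMessage instances rather than Strings.
        Map<Object, Exception> failed = new LinkedHashMap<>();
        failed.put("message-1", new IllegalStateException("relay rejected recipient"));
        failed.put("message-2", new IllegalStateException("mailbox full"));
        MailSendException ex = new MailSendException(failed);
        // getMessage() appends every failed message's exception, separated by "; ".
        System.out.println(ex.getMessage());
    }
}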
transformedBeanName
|
public static String transformedBeanName(String name) {
Assert.notNull(name, "'name' must not be null");
if (name.isEmpty() || name.charAt(0) != BeanFactory.FACTORY_BEAN_PREFIX_CHAR) {
return name;
}
return transformedBeanNameCache.computeIfAbsent(name, beanName -> {
do {
beanName = beanName.substring(1); // length of '&'
}
while (beanName.charAt(0) == BeanFactory.FACTORY_BEAN_PREFIX_CHAR);
return beanName;
});
}
|
Return the actual bean name, stripping out the factory dereference
prefix (if any, also stripping repeated factory prefixes if found).
@param name the name of the bean
@return the transformed name
@see BeanFactory#FACTORY_BEAN_PREFIX
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryUtils.java
| 86
|
[
"name"
] |
String
| true
| 3
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
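A short sketch of the transformedBeanName record above; BeanFactoryUtils is public API in spring-beans, so this should run as-is:

import org.springframework.beans.factory.BeanFactoryUtils;

public class TransformedBeanNameDemo {
    public static void main(String[] args) {
        // Repeated '&' factory-dereference prefixes are stripped to the plain bean name.
        System.out.println(BeanFactoryUtils.transformedBeanName("&&myFactoryBean")); // myFactoryBean
        // Names without the prefix pass through unchanged.
        System.out.println(BeanFactoryUtils.transformedBeanName("plainBean")); // plainBean
    }
}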
sendOffsetFetchRequest
|
private RequestFuture<Map<TopicPartition, OffsetAndMetadata>> sendOffsetFetchRequest(Set<TopicPartition> partitions) {
Node coordinator = checkAndGetCoordinator();
if (coordinator == null)
return RequestFuture.coordinatorNotAvailable();
log.debug("Fetching committed offsets for partitions: {}", partitions);
// construct the request
List<OffsetFetchRequestData.OffsetFetchRequestTopics> topics = partitions.stream()
.collect(Collectors.groupingBy(TopicPartition::topic))
.entrySet()
.stream()
.map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics()
.setName(entry.getKey())
.setPartitionIndexes(entry.getValue().stream()
.map(TopicPartition::partition)
.collect(Collectors.toList())))
.collect(Collectors.toList());
OffsetFetchRequest.Builder requestBuilder = OffsetFetchRequest.Builder.forTopicNames(
new OffsetFetchRequestData()
.setRequireStable(true)
.setGroups(List.of(
new OffsetFetchRequestData.OffsetFetchRequestGroup()
.setGroupId(this.rebalanceConfig.groupId)
.setTopics(topics))),
throwOnFetchStableOffsetsUnsupported);
// send the request with a callback
return client.send(coordinator, requestBuilder)
.compose(new OffsetFetchResponseHandler());
}
|
Fetch the committed offsets for a set of partitions. This is a non-blocking call. The
returned future can be polled to get the actual offsets returned from the broker.
@param partitions The set of partitions to get offsets for.
@return A request future containing the committed offsets.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
| 1,477
|
[
"partitions"
] | true
| 2
| 8.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
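sendOffsetFetchRequest above is private to ConsumerCoordinator; the closest public path to the same offset fetch is KafkaConsumer.committed. A sketch, assuming a broker at localhost:9092 and a topic and group invented for the demo:

import java.time.Duration;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommittedOffsetsDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("group.id", "demo-group");              // hypothetical group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            Set<TopicPartition> partitions = Set.of(new TopicPartition("demo-topic", 0));
            // Blocks up to the timeout; internally this drives the coordinator's offset fetch.
            Map<TopicPartition, OffsetAndMetadata> committed =
                    consumer.committed(partitions, Duration.ofSeconds(5));
            committed.forEach((tp, om) -> System.out.println(tp + " -> " + om));
        }
    }
}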
GetEnclosingFunction
|
static func::FuncOp GetEnclosingFunction(mlir::Operation* op) {
while (op) {
// Check if the current operation is a function.
auto func_op = mlir::dyn_cast<mlir::func::FuncOp>(op);
if (func_op) {
return func_op;
}
op = op->getParentOp();
}
return nullptr; // Operation is not within a function.
}
|
Returns the nearest enclosing function (func::FuncOp) of the given operation by
walking up its chain of parent operations; returns nullptr if the operation is
not nested inside a function.
|
cpp
|
tensorflow/compiler/mlir/lite/transforms/lower_quant_annotations_pass.cc
| 116
|
[
"op"
] | true
| 3
| 7.04
|
tensorflow/tensorflow
| 192,880
|
doxygen
| false
|
|
flat
|
public <T> BiConsumer<T, BiConsumer<String, Object>> flat(String delimiter, Consumer<Pairs<T>> pairs) {
return flat(joinWith(delimiter), pairs);
}
|
Add pairs using flat naming.
@param <T> the item type
@param delimiter the delimiter used if there is a prefix
@param pairs callback to add all the pairs
@return a {@link BiConsumer} for use with the {@link JsonWriter}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/ContextPairs.java
| 59
|
[
"delimiter",
"pairs"
] | true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
producerId
|
@Override
public long producerId() {
return buffer.getLong(PRODUCER_ID_OFFSET);
}
|
Gets the producer ID of the batch, as read from the batch header in the underlying buffer.
@return The producer id
|
java
|
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
| 189
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
invokeAnd
|
public <R> Stream<R> invokeAnd(Function<C, @Nullable R> invoker) {
Function<C, InvocationResult<R>> mapper = (callbackInstance) -> {
Supplier<@Nullable R> supplier = () -> invoker.apply(callbackInstance);
return invoke(callbackInstance, supplier);
};
return this.callbackInstances.stream()
.map(mapper)
.filter(InvocationResult::hasResult)
.map(InvocationResult::get);
}
|
Invoke the callback instances where the callback method returns a result.
@param invoker the invoker used to invoke the callback
@param <R> the result type
@return the results of the invocation (may be an empty stream if no callbacks
could be called)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/LambdaSafe.java
| 314
|
[
"invoker"
] | true
| 1
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
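A sketch of LambdaSafe.invokeAnd from the record above. LambdaSafe is a Spring Boot utility; the NameCallback interface here is invented for the demo, and the exact callbacks(...) factory signature is assumed from current Spring Boot sources:

import java.util.List;

import org.springframework.boot.util.LambdaSafe;

public class InvokeAndDemo {
    interface NameCallback {
        String decorate(String input);
    }

    public static void main(String[] args) {
        List<NameCallback> callbacks = List.of(
                input -> input.toUpperCase(),
                input -> input + "!");
        // invokeAnd returns a Stream of the callbacks' results.
        List<String> results = LambdaSafe
                .callbacks(NameCallback.class, callbacks, "spring")
                .invokeAnd(callback -> callback.decorate("spring"))
                .toList();
        System.out.println(results); // [SPRING, spring!]
    }
}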
factories
|
public static Loader factories(SpringFactoriesLoader springFactoriesLoader) {
Assert.notNull(springFactoriesLoader, "'springFactoriesLoader' must not be null");
return new Loader(springFactoriesLoader, null);
}
|
Create a new {@link Loader} that will obtain AOT services from the given
{@link SpringFactoriesLoader}.
@param springFactoriesLoader the spring factories loader
@return a new {@link Loader} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotServices.java
| 107
|
[
"springFactoriesLoader"
] |
Loader
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
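A sketch for the AotServices.factories record above, assuming Spring Framework 6+ (for SpringFactoriesLoader.forDefaultResourceLocation) and using BeanRegistrationAotProcessor as an example service type:

import org.springframework.beans.factory.aot.AotServices;
import org.springframework.beans.factory.aot.BeanRegistrationAotProcessor;
import org.springframework.core.io.support.SpringFactoriesLoader;

public class AotServicesDemo {
    public static void main(String[] args) {
        SpringFactoriesLoader factoriesLoader = SpringFactoriesLoader
                .forDefaultResourceLocation(AotServicesDemo.class.getClassLoader());
        // The Loader resolves AOT services registered in spring.factories files.
        AotServices<BeanRegistrationAotProcessor> processors = AotServices
                .factories(factoriesLoader)
                .load(BeanRegistrationAotProcessor.class);
        processors.forEach(p -> System.out.println(p.getClass().getName()));
    }
}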
_get_secret
|
def _get_secret(self, path_prefix, secret_id: str, lookup_pattern: str | None) -> str | None:
"""
Get secret value from Secrets Manager.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
:param lookup_pattern: If provided, `secret_id` must match this pattern to look up the secret in
Secrets Manager
"""
if lookup_pattern and not re.match(lookup_pattern, secret_id, re.IGNORECASE):
return None
error_msg = "An error occurred when calling the get_secret_value operation"
if path_prefix:
secrets_path = self.build_path(path_prefix, secret_id, self.sep)
else:
secrets_path = secret_id
try:
response = self.client.get_secret_value(
SecretId=secrets_path,
)
return response.get("SecretString")
except self.client.exceptions.ResourceNotFoundException:
self.log.debug(
"ResourceNotFoundException: %s. Secret %s not found.",
error_msg,
secret_id,
)
return None
    except (
        self.client.exceptions.InvalidParameterException,
        self.client.exceptions.InvalidRequestException,
        self.client.exceptions.DecryptionFailure,
        self.client.exceptions.InternalServiceError,
    ) as e:
        # These four client errors share identical handling: log the exception
        # class name with context and report that no secret was found.
        self.log.debug("%s: %s", type(e).__name__, error_msg, exc_info=True)
        return None
|
Get secret value from Secrets Manager.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
:param lookup_pattern: If provided, `secret_id` must match this pattern to look up the secret in
Secrets Manager
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/secrets/secrets_manager.py
| 253
|
[
"self",
"path_prefix",
"secret_id",
"lookup_pattern"
] |
str | None
| true
| 5
| 6.48
|
apache/airflow
| 43,597
|
sphinx
| false
|
originalsStrings
|
public Map<String, String> originalsStrings() {
Map<String, String> copy = new RecordingMap<>();
for (Map.Entry<String, ?> entry : originals.entrySet()) {
if (!(entry.getValue() instanceof String))
throw new ClassCastException("Non-string value found in original settings for key " + entry.getKey() +
": " + (entry.getValue() == null ? null : entry.getValue().getClass().getName()));
copy.put(entry.getKey(), (String) entry.getValue());
}
return copy;
}
|
Get all the original settings, ensuring that all values are of type String.
@return the original settings
@throws ClassCastException if any of the values are not strings
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 262
|
[] | true
| 3
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
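A sketch for the originalsStrings record above; AbstractConfig is a concrete public class in kafka-clients, so for the demo it can be instantiated directly with an empty ConfigDef:

import java.util.Map;

import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;

public class OriginalsStringsDemo {
    public static void main(String[] args) {
        AbstractConfig config = new AbstractConfig(
                new ConfigDef(), Map.of("bootstrap.servers", "localhost:9092"));
        // All original values are Strings here, so this succeeds.
        Map<String, String> originals = config.originalsStrings();
        System.out.println(originals);

        AbstractConfig bad = new AbstractConfig(new ConfigDef(), Map.of("retries", 3));
        try {
            bad.originalsStrings(); // non-String value -> ClassCastException
        } catch (ClassCastException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}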
instantiateBean
|
protected BeanWrapper instantiateBean(String beanName, RootBeanDefinition mbd) {
try {
Object beanInstance = getInstantiationStrategy().instantiate(mbd, beanName, this);
BeanWrapper bw = new BeanWrapperImpl(beanInstance);
initBeanWrapper(bw);
return bw;
}
catch (Throwable ex) {
throw new BeanCreationException(mbd.getResourceDescription(), beanName, ex.getMessage(), ex);
}
}
|
Instantiate the given bean using its default constructor.
@param beanName the name of the bean
@param mbd the bean definition for the bean
@return a BeanWrapper for the new instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 1,336
|
[
"beanName",
"mbd"
] |
BeanWrapper
| true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|