function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
start_python_pipeline
|
def start_python_pipeline(
    self,
    variables: dict,
    py_file: str,
    py_options: list[str],
    py_interpreter: str = "python3",
    py_requirements: list[str] | None = None,
    py_system_site_packages: bool = False,
    process_line_callback: Callable[[str], None] | None = None,
    is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None,
):
    """
    Start Apache Beam python pipeline.

    :param variables: Variables passed to the pipeline.
    :param py_file: Path to the python file to execute.
    :param py_options: Additional options.
    :param py_interpreter: Python version of the Apache Beam pipeline.
        If None, this defaults to the python3.
        To track python versions supported by beam and related
        issues check: https://issues.apache.org/jira/browse/BEAM-1251
    :param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment has been created with
        additional packages installed.
        You could also install the apache-beam package if it is not installed on your system, or you want
        to use a different version.
    :param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
        See virtualenv documentation for more information.
        This option is only relevant if the ``py_requirements`` parameter is not None.
    :param process_line_callback: (optional) Callback that can be used to process each line of
        the stdout and stderr file descriptors.
    :param is_dataflow_job_id_exist_callback: (optional) Callback forwarded unchanged to
        ``_start_pipeline``; presumably reports whether a Dataflow job id has been
        detected — TODO confirm against ``_start_pipeline``.
    """
    if "labels" in variables:
        # Labels arrive as a dict; the python pipeline expects them flattened
        # into "key=value" strings.
        variables["labels"] = [f"{key}={value}" for key, value in variables["labels"].items()]

    # ExitStack so the temporary virtualenv directory (if any) is cleaned up
    # only after the pipeline subprocess has finished.
    with contextlib.ExitStack() as exit_stack:
        if py_requirements is not None:
            # Empty requirements + no system packages cannot yield a usable env:
            # apache-beam would not be importable inside the virtualenv.
            if not py_requirements and not py_system_site_packages:
                warning_invalid_environment = textwrap.dedent(
                    """\
                    Invalid method invocation. You have disabled inclusion of system packages and empty
                    list required for installation, so it is not possible to create a valid virtual
                    environment. In the virtual environment, apache-beam package must be installed for
                    your job to be executed.

                    To fix this problem:
                    * install apache-beam on the system, then set parameter py_system_site_packages
                      to True,
                    * add apache-beam to the list of required packages in parameter py_requirements.
                    """
                )
                raise AirflowException(warning_invalid_environment)
            tmp_dir = exit_stack.enter_context(tempfile.TemporaryDirectory(prefix="apache-beam-venv"))
            # The interpreter is swapped for the one inside the freshly built venv.
            py_interpreter = prepare_virtualenv(
                venv_directory=tmp_dir,
                python_bin=py_interpreter,
                system_site_packages=py_system_site_packages,
                requirements=py_requirements,
            )

        command_prefix = [py_interpreter, *py_options, py_file]

        # Query the beam version from the interpreter that will actually run the job.
        beam_version = (
            subprocess.check_output([py_interpreter, "-c", _APACHE_BEAM_VERSION_SCRIPT]).decode().strip()
        )
        self.log.info("Beam version: %s", beam_version)
        impersonate_service_account = variables.get("impersonate_service_account")
        if impersonate_service_account:
            # impersonateServiceAccount is only understood by Beam >= 2.39.0.
            if Version(beam_version) < Version("2.39.0"):
                raise AirflowException(
                    "The impersonateServiceAccount option requires Apache Beam 2.39.0 or newer."
                )
        self._start_pipeline(
            variables=variables,
            command_prefix=command_prefix,
            process_line_callback=process_line_callback,
            is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
        )
|
Start Apache Beam python pipeline.
:param variables: Variables passed to the pipeline.
:param py_file: Path to the python file to execute.
:param py_options: Additional options.
:param py_interpreter: Python version of the Apache Beam pipeline.
If None, this defaults to the python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:param py_requirements: Additional python package(s) to install.
If a value is passed to this parameter, a new virtual environment has been created with
additional packages installed.
You could also install the apache-beam package if it is not installed on your system, or you want
to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 243
|
[
"self",
"variables",
"py_file",
"py_options",
"py_interpreter",
"py_requirements",
"py_system_site_packages",
"process_line_callback",
"is_dataflow_job_id_exist_callback"
] | true
| 7
| 6.32
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
poll
|
/**
 * Do actual reads and writes to sockets.
 *
 * @param timeout The maximum amount of time to wait (in ms) for responses if there are none immediately,
 *                must be non-negative. The actual timeout will be the minimum of timeout, request timeout
 *                and metadata timeout
 * @param now The current time in milliseconds
 * @return The list of responses received
 */
@Override
public List<ClientResponse> poll(long timeout, long now) {
    ensureActive();

    if (!abortedSends.isEmpty()) {
        // If there are aborted sends because of unsupported version exceptions or disconnects,
        // handle them immediately without waiting for Selector#poll.
        List<ClientResponse> responses = new ArrayList<>();
        handleAbortedSends(responses);
        completeResponses(responses);
        return responses;
    }

    long metadataTimeout = metadataUpdater.maybeUpdate(now);
    // telemetrySender is optional; without it telemetry never bounds the poll timeout.
    long telemetryTimeout = telemetrySender != null ? telemetrySender.maybeUpdate(now) : Integer.MAX_VALUE;
    try {
        this.selector.poll(Utils.min(timeout, metadataTimeout, telemetryTimeout, defaultRequestTimeoutMs));
    } catch (IOException e) {
        // Selector#poll is not expected to throw; log and fall through so completed
        // work below is still processed.
        log.error("Unexpected error during I/O", e);
    }

    // process completed actions
    long updatedNow = this.time.milliseconds();
    List<ClientResponse> responses = new ArrayList<>();
    handleCompletedSends(responses, updatedNow);
    handleCompletedReceives(responses, updatedNow);
    handleDisconnections(responses, updatedNow);
    handleConnections();
    handleInitiateApiVersionRequests(updatedNow);
    handleTimedOutConnections(responses, updatedNow);
    handleTimedOutRequests(responses, updatedNow);
    handleRebootstrap(responses, updatedNow);
    completeResponses(responses);

    return responses;
}
|
Do actual reads and writes to sockets.
@param timeout The maximum amount of time to wait (in ms) for responses if there are none immediately,
must be non-negative. The actual timeout will be the minimum of timeout, request timeout and
metadata timeout
@param now The current time in milliseconds
@return The list of responses received
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 629
|
[
"timeout",
"now"
] | true
| 4
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
common_dtype_categorical_compat
|
def common_dtype_categorical_compat(
objs: Sequence[Index | ArrayLike], dtype: DtypeObj
) -> DtypeObj:
"""
Update the result of find_common_type to account for NAs in a Categorical.
Parameters
----------
objs : list[np.ndarray | ExtensionArray | Index]
dtype : np.dtype or ExtensionDtype
Returns
-------
np.dtype or ExtensionDtype
"""
# GH#38240
# TODO: more generally, could do `not can_hold_na(dtype)`
if lib.is_np_dtype(dtype, "iu"):
for obj in objs:
# We don't want to accidentally allow e.g. "categorical" str here
obj_dtype = getattr(obj, "dtype", None)
if isinstance(obj_dtype, CategoricalDtype):
if isinstance(obj, ABCIndex):
# This check may already be cached
hasnas = obj.hasnans
else:
# Categorical
hasnas = cast("Categorical", obj)._hasna
if hasnas:
# see test_union_int_categorical_with_nan
dtype = np.dtype(np.float64)
break
return dtype
|
Update the result of find_common_type to account for NAs in a Categorical.
Parameters
----------
objs : list[np.ndarray | ExtensionArray | Index]
dtype : np.dtype or ExtensionDtype
Returns
-------
np.dtype or ExtensionDtype
|
python
|
pandas/core/dtypes/cast.py
| 1,232
|
[
"objs",
"dtype"
] |
DtypeObj
| true
| 7
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
subscription
|
/**
 * Get the set of currently subscribed topics. Presumably meaningful only under
 * auto-assignment — TODO confirm semantics of {@code hasAutoAssignedPartitions()}.
 *
 * @return the subscribed topic names, or an empty set when partitions are not auto-assigned
 */
public synchronized Set<String> subscription() {
    return hasAutoAssignedPartitions() ? this.subscription : Collections.emptySet();
}
|
Get the set of currently subscribed topics when partitions are auto-assigned.
@return the subscribed topic names, or an empty set when partitions are not auto-assigned
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 369
|
[] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
preProcessParsedConfig
|
/**
 * Called directly after user configs got parsed (and thus default values got set).
 * This allows to check user's config; the default implementation returns the parsed
 * values unchanged, so subclasses may override it to apply updates.
 *
 * @param parsedValues unmodifiable map of current configuration
 * @return a map of updates that should be applied to the configuration
 */
protected Map<String, Object> preProcessParsedConfig(Map<String, Object> parsedValues) {
    return parsedValues;
}
|
Called directly after user configs got parsed (and thus default values is not set).
This allows to check user's config.
@param parsedValues unmodifiable map of current configuration
@return a map of updates that should be applied to the configuration (will be validated to prevent bad updates)
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 159
|
[
"parsedValues"
] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
start_java_pipeline
|
def start_java_pipeline(
    self,
    variables: dict,
    jar: str,
    job_class: str | None = None,
    process_line_callback: Callable[[str], None] | None = None,
    is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None,
) -> None:
    """
    Start Apache Beam Java pipeline.

    :param variables: Variables passed to the job.
    :param jar: Name of the jar for the pipeline
    :param job_class: Name of the java class for the pipeline.
    :param process_line_callback: (optional) Callback that can be used to process each line of
        the stdout and stderr file descriptors.
    :param is_dataflow_job_id_exist_callback: (optional) Callback forwarded unchanged to
        ``_start_pipeline``; presumably reports whether a Dataflow job id has been
        detected — TODO confirm against ``_start_pipeline``.
    """
    if "labels" in variables:
        # Java pipelines take labels as a compact JSON object string, unlike the
        # "key=value" list used for python pipelines.
        variables["labels"] = json.dumps(variables["labels"], separators=(",", ":"))

    # With an explicit main class the jar goes on the classpath; otherwise the
    # jar's own manifest main class is used via `java -jar`.
    command_prefix = ["java", "-cp", jar, job_class] if job_class else ["java", "-jar", jar]
    self._start_pipeline(
        variables=variables,
        command_prefix=command_prefix,
        process_line_callback=process_line_callback,
        is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
    )
|
Start Apache Beam Java pipeline.
:param variables: Variables passed to the job.
:param jar: Name of the jar for the pipeline
:param job_class: Name of the java class for the pipeline.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 324
|
[
"self",
"variables",
"jar",
"job_class",
"process_line_callback",
"is_dataflow_job_id_exist_callback"
] |
None
| true
| 3
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
_shallow_copy
|
def _shallow_copy(self, values, name: Hashable = no_default) -> Self:
    """
    Create a new Index with the same class as the caller, don't copy the
    data, use the same object attributes with passed in attributes taking
    precedence.

    *this is an internal non-public method*

    Parameters
    ----------
    values : the values to create the new Index, optional
    name : Label, defaults to self.name
    """
    # Fall back to the caller's name unless an explicit one was passed.
    if name is no_default:
        name = self._name
    return self._simple_new(values, name=name, refs=self._references)
|
Create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
|
python
|
pandas/core/indexes/base.py
| 763
|
[
"self",
"values",
"name"
] |
Self
| true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getPemSslStore
|
/**
 * Get the {@link PemSslStore} for the given store properties, optionally verifying
 * that the private key matches one of the certificates in the chain.
 *
 * @param propertyName the name of the source property (used in failure messages)
 * @param properties the store properties
 * @param resourceLoader the resource loader used to load content
 * @return the loaded store, or {@code null} if none could be loaded
 */
private static @Nullable PemSslStore getPemSslStore(String propertyName, PemSslBundleProperties.Store properties,
        ResourceLoader resourceLoader) {
    PemSslStoreDetails details = asPemSslStoreDetails(properties);
    PemSslStore pemSslStore = PemSslStore.load(details, resourceLoader);
    if (properties.isVerifyKeys()) {
        // Verification requires a fully loaded store: key and certificates must be present.
        Assert.state(pemSslStore != null, "'pemSslStore' must not be null");
        PrivateKey privateKey = pemSslStore.privateKey();
        Assert.state(privateKey != null, "'privateKey' must not be null");
        CertificateMatcher certificateMatcher = new CertificateMatcher(privateKey);
        List<X509Certificate> certificates = pemSslStore.certificates();
        Assert.state(certificates != null, "'certificates' must not be null");
        Assert.state(certificateMatcher.matchesAny(certificates),
                () -> "Private key in %s matches none of the certificates in the chain".formatted(propertyName));
    }
    return pemSslStore;
}
|
Get the {@link PemSslStore} for the given store properties, optionally verifying
that the private key matches one of the certificates in the chain.
@param propertyName the name of the source property (used in failure messages)
@param properties the store properties
@param resourceLoader the resource loader used to load content
@return the loaded store, or {@code null} if none could be loaded
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/PropertiesSslBundle.java
| 129
|
[
"propertyName",
"properties",
"resourceLoader"
] |
PemSslStore
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
definePackageForExploded
|
/**
 * Define a {@link Package} for an entry in an exploded JAR, using the root archive's
 * manifest (when available) so that package attributes are set.
 *
 * @param name the package name
 * @param sealBase the seal base URL
 * @param call the fallback that defines the package from attributes
 * @return the defined package
 */
private Package definePackageForExploded(String name, URL sealBase, Supplier<Package> call) {
    synchronized (this.definePackageLock) {
        if (this.definePackageCallType == null) {
            // We're not part of a call chain which means that the URLClassLoader
            // is trying to define a package for our exploded JAR. We use the
            // manifest version to ensure package attributes are set
            Manifest manifest = getManifest(this.rootArchive);
            if (manifest != null) {
                return definePackage(name, manifest, sealBase);
            }
        }
        // No manifest available (or we are re-entering): define from plain attributes.
        return definePackage(DefinePackageCallType.ATTRIBUTES, call);
    }
}
|
Define a {@link Package} for an entry in an exploded JAR, using the root archive's
manifest (when available) so that package attributes are set.
@param name the package name
@param sealBase the seal base URL
@param call the fallback that defines the package from attributes
@return the defined package
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/LaunchedClassLoader.java
| 136
|
[
"name",
"sealBase",
"call"
] |
Package
| true
| 3
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
deduceRelativeDir
|
/**
 * Deduce the path of {@code sourceDirectory} relative to {@code workingDir}.
 *
 * @param sourceDirectory the source directory
 * @param workingDir the working directory
 * @return the relative path, or {@code null} when the source directory equals or
 * lies outside the working directory
 */
private @Nullable String deduceRelativeDir(File sourceDirectory, File workingDir) {
    String sourcePath = sourceDirectory.getAbsolutePath();
    String workingPath = workingDir.getAbsolutePath();
    if (!sourcePath.startsWith(workingPath) || sourcePath.equals(workingPath)) {
        return null;
    }
    // Skip the working path plus the separator that follows it.
    String relativePath = sourcePath.substring(workingPath.length() + 1);
    return relativePath.isEmpty() ? null : relativePath;
}
|
Deduce the path of {@code sourceDirectory} relative to {@code workingDir}, or
{@code null} when the source directory equals or lies outside the working directory.
@param sourceDirectory the source directory
@param workingDir the working directory
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Context.java
| 110
|
[
"sourceDirectory",
"workingDir"
] |
String
| true
| 4
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
add_categories
|
def add_categories(self, new_categories) -> Self:
    """
    Add new categories.

    `new_categories` will be included at the last/highest place in the
    categories and will be unused directly after this call.

    Parameters
    ----------
    new_categories : category or list-like of category
        The new categories to be included.

    Returns
    -------
    Categorical
        Categorical with new categories added.

    Raises
    ------
    ValueError
        If the new categories include old categories or do not validate as
        categories

    See Also
    --------
    rename_categories : Rename categories.
    reorder_categories : Reorder categories.
    remove_categories : Remove the specified categories.
    remove_unused_categories : Remove categories which are not used.
    set_categories : Set the categories to the specified ones.

    Examples
    --------
    >>> c = pd.Categorical(["c", "b", "c"])
    >>> c
    ['c', 'b', 'c']
    Categories (2, str): ['b', 'c']

    >>> c.add_categories(["d", "a"])
    ['c', 'b', 'c']
    Categories (4, str): ['b', 'c', 'd', 'a']
    """
    # A scalar category is treated as a one-element list.
    if not is_list_like(new_categories):
        new_categories = [new_categories]
    already_included = set(new_categories) & set(self.dtype.categories)
    if len(already_included) != 0:
        raise ValueError(
            f"new categories must not include old categories: {already_included}"
        )

    if hasattr(new_categories, "dtype"):
        from pandas import Series

        # new_categories is array-like with its own dtype: combine via the
        # common type so the merged categories keep a sensible dtype.
        dtype = find_common_type(
            [self.dtype.categories.dtype, new_categories.dtype]
        )
        new_categories = Series(
            list(self.dtype.categories) + list(new_categories), dtype=dtype
        )
    else:
        new_categories = list(self.dtype.categories) + list(new_categories)

    new_dtype = CategoricalDtype(new_categories, self.ordered)
    cat = self.copy()
    # The codes are unchanged in meaning (new categories are unused), but may
    # need a wider integer dtype to index the enlarged category set.
    codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories)
    # Re-initialize the copy in place with the recoded codes and new dtype.
    NDArrayBacked.__init__(cat, codes, new_dtype)
    return cat
|
Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
Returns
-------
Categorical
Categorical with new categories added.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
>>> c = pd.Categorical(["c", "b", "c"])
>>> c
['c', 'b', 'c']
Categories (2, str): ['b', 'c']
>>> c.add_categories(["d", "a"])
['c', 'b', 'c']
Categories (4, str): ['b', 'c', 'd', 'a']
|
python
|
pandas/core/arrays/categorical.py
| 1,343
|
[
"self",
"new_categories"
] |
Self
| true
| 5
| 7.76
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
run_beam_command_async
|
async def run_beam_command_async(
    self,
    cmd: list[str],
    log: Logger,
    working_directory: str | None = None,
    process_line_callback: Callable[[str], None] | None = None,
) -> int:
    """
    Run pipeline command in subprocess.

    :param cmd: Parts of the command to be run in subprocess
    :param log: logger
    :param working_directory: Working directory
    :param process_line_callback: Optional callback which can be used to process
        stdout and stderr to detect job id
    :return: the subprocess return code (always ``0``; any non-zero code raises instead)
    :raises AirflowException: if the Apache Beam process exits with a non-zero return code
    """
    cmd_str_representation = " ".join(shlex.quote(c) for c in cmd)
    log.info("Running command: %s", cmd_str_representation)

    # Creating a separate asynchronous process.
    # NOTE: `shell=True` and `close_fds=True` were dropped from this call — both are
    # the defaults for `asyncio.create_subprocess_shell` (and `shell` is the only
    # value it accepts), so passing them was redundant.
    process = await asyncio.create_subprocess_shell(
        cmd_str_representation,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=working_directory,
    )
    # Waits for Apache Beam pipeline to complete.
    log.info("Start waiting for Apache Beam process to complete.")

    # Drain stdout and stderr concurrently so neither pipe fills up and blocks the child.
    stdout_task = asyncio.create_task(self.read_logs(process.stdout, process_line_callback))
    stderr_task = asyncio.create_task(self.read_logs(process.stderr, process_line_callback))
    await asyncio.gather(stdout_task, stderr_task)

    # Wait for the process to complete and fetch its return code.
    return_code = await process.wait()
    log.info("Process exited with return code: %s", return_code)

    if return_code != 0:
        raise AirflowException(f"Apache Beam process failed with return code {return_code}")
    return return_code
|
Run pipeline command in subprocess.
:param cmd: Parts of the command to be run in subprocess
:param working_directory: Working directory
:param log: logger
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id
|
python
|
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
| 614
|
[
"self",
"cmd",
"log",
"working_directory",
"process_line_callback"
] |
int
| true
| 2
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
toProtocolTextSpanWithContext
|
/**
 * Convert a span (and optional context span) within the given script to a protocol
 * text span, attaching `contextStart`/`contextEnd` when a context span is provided.
 */
function toProtocolTextSpanWithContext(span: TextSpan, contextSpan: TextSpan | undefined, scriptInfo: ScriptInfo): protocol.TextSpanWithContext {
    const textSpan = toProtocolTextSpan(span, scriptInfo);
    if (!contextSpan) {
        return textSpan;
    }
    const { start, end } = toProtocolTextSpan(contextSpan, scriptInfo);
    return { ...textSpan, contextStart: start, contextEnd: end };
}
|
Convert a span (and optional context span) within the given script to a protocol text span.
@param span The text span to convert
@param contextSpan The optional context span; when provided, `contextStart`/`contextEnd` are attached to the result
@param scriptInfo The script containing the spans
@returns The protocol text span, with context positions when a context span was given
|
typescript
|
src/server/session.ts
| 3,990
|
[
"span",
"contextSpan",
"scriptInfo"
] | true
| 3
| 7.12
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_get_changes_classified
|
def _get_changes_classified(
    changes: list[Change], with_breaking_changes: bool, maybe_with_new_features: bool
) -> ClassifiedChanges:
    """
    Pre-classifies changes based on their type_of_change attribute derived based on release manager's call.

    The classification is based on the decision made by the release manager when classifying the release.

    If we switch to semantic commits, this process could be automated. This list is still supposed to be
    manually reviewed and re-classified by the release manager if needed.

    :param changes: list of changes to be classified
    :param with_breaking_changes: whether to include breaking changes in the classification
    :param maybe_with_new_features: whether to include new features in the classification
    :return: ClassifiedChanges object containing changes classified into fixes, features, breaking changes,
        misc.
    """
    classified_changes = ClassifiedChanges()
    for change in changes:
        # Single dict lookup instead of an `in` check followed by indexing;
        # `None` (unknown hash) falls through to the `other` bucket.
        type_of_change = SHORT_HASH_TO_TYPE_DICT.get(change.short_hash)

        if type_of_change == TypeOfChange.BUGFIX:
            classified_changes.fixes.append(change)
        elif type_of_change in (TypeOfChange.MISC, TypeOfChange.MIN_AIRFLOW_VERSION_BUMP):
            classified_changes.misc.append(change)
        elif type_of_change == TypeOfChange.FEATURE and maybe_with_new_features:
            classified_changes.features.append(change)
        elif type_of_change == TypeOfChange.BREAKING_CHANGE and with_breaking_changes:
            classified_changes.breaking_changes.append(change)
        elif type_of_change == TypeOfChange.DOCUMENTATION:
            classified_changes.docs.append(change)
        else:
            # Unknown hashes and excluded features/breaking changes end up here.
            classified_changes.other.append(change)
    return classified_changes
|
Pre-classifies changes based on their type_of_change attribute derived based on release manager's call.
The classification is based on the decision made by the release manager when classifying the release.
If we switch to semantic commits, this process could be automated. This list is still supposed to be
manually reviewed and re-classified by the release manager if needed.
:param changes: list of changes to be classified
:param with_breaking_changes: whether to include breaking changes in the classification
:param maybe_with_new_features: whether to include new features in the classification
:return: ClassifiedChanges object containing changes classified into fixes, features, breaking changes,
misc.
|
python
|
dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py
| 1,011
|
[
"changes",
"with_breaking_changes",
"maybe_with_new_features"
] |
ClassifiedChanges
| true
| 12
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
populateFunctionNames
|
/// Read function names from \p FunctionNamesFile, one name per line, and append
/// them to \p FunctionNames. Does nothing when no file name was supplied.
/// NOTE(review): an unopenable file is silently treated as empty — getline on a
/// failed stream simply yields nothing; confirm this is intended.
static void populateFunctionNames(cl::opt<std::string> &FunctionNamesFile,
                                  cl::list<std::string> &FunctionNames) {
  if (FunctionNamesFile.empty())
    return;

  std::ifstream FuncsFile(FunctionNamesFile, std::ios::in);
  std::string FuncName;
  while (std::getline(FuncsFile, FuncName))
    FunctionNames.push_back(FuncName);
}
|
Read function names from \p FunctionNamesFile, one per line, and append them to \p FunctionNames. Does nothing when no file name was supplied.
|
cpp
|
bolt/lib/Rewrite/RewriteInstance.cpp
| 3,289
|
[] | true
| 3
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
write
|
/**
 * Writes a sequence of bytes to this channel from the given buffers.
 * Delegates to the offset/length variant covering all of {@code srcs}.
 *
 * @param srcs The buffers from which bytes are to be retrieved
 * @return the number of bytes written, possibly zero
 * @throws IOException If some other I/O error occurs
 */
@Override
public long write(ByteBuffer[] srcs) throws IOException {
    return write(srcs, 0, srcs.length);
}
|
Writes a sequence of bytes to this channel from the given buffers.
@param srcs The buffers from which bytes are to be retrieved
@return returns no.of bytes consumed by SSLEngine.wrap , possibly zero.
@throws IOException If some other I/O error occurs
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 784
|
[
"srcs"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
mean
|
def mean(self, numeric_only: bool = False, **kwargs):
    """
    Calculate the rolling weighted window mean.

    Parameters
    ----------
    numeric_only : bool, default False
        Include only float, int, boolean columns.

    **kwargs
        Keyword arguments to configure the ``SciPy`` weighted window type.

    Returns
    -------
    Series or DataFrame
        Return type is the same as the original object with ``np.float64`` dtype.

    See Also
    --------
    Series.rolling : Calling rolling with Series data.
    DataFrame.rolling : Calling rolling with DataFrames.
    Series.mean : Aggregating mean for Series.
    DataFrame.mean : Aggregating mean for DataFrame.

    Examples
    --------
    >>> ser = pd.Series([0, 1, 5, 2, 8])

    To get an instance of :class:`~pandas.core.window.rolling.Window` we need
    to pass the parameter `win_type`.

    >>> type(ser.rolling(2, win_type="gaussian"))
    <class 'pandas.api.typing.Window'>

    In order to use the `SciPy` Gaussian window we need to provide the parameters
    `M` and `std`. The parameter `M` corresponds to 2 in our example.
    We pass the second parameter `std` as a parameter of the following method:

    >>> ser.rolling(2, win_type="gaussian").mean(std=3)
    0    NaN
    1    0.5
    2    3.0
    3    3.5
    4    5.0
    dtype: float64
    """
    # error: Argument 1 to "_apply" of "Window" has incompatible type
    # "Callable[[ndarray, ndarray, int], ndarray]"; expected
    # "Callable[[ndarray, int, int], ndarray]"
    return self._apply(
        window_aggregations.roll_weighted_mean,  # type: ignore[arg-type]
        name="mean",
        numeric_only=numeric_only,
        **kwargs,
    )
|
Calculate the rolling weighted window mean.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
**kwargs
Keyword arguments to configure the ``SciPy`` weighted window type.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.mean : Aggregating mean for Series.
DataFrame.mean : Aggregating mean for DataFrame.
Examples
--------
>>> ser = pd.Series([0, 1, 5, 2, 8])
To get an instance of :class:`~pandas.core.window.rolling.Window` we need
to pass the parameter `win_type`.
>>> type(ser.rolling(2, win_type="gaussian"))
<class 'pandas.api.typing.Window'>
In order to use the `SciPy` Gaussian window we need to provide the parameters
`M` and `std`. The parameter `M` corresponds to 2 in our example.
We pass the second parameter `std` as a parameter of the following method:
>>> ser.rolling(2, win_type="gaussian").mean(std=3)
0 NaN
1 0.5
2 3.0
3 3.5
4 5.0
dtype: float64
|
python
|
pandas/core/window/rolling.py
| 1,357
|
[
"self",
"numeric_only"
] | true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
close
|
/**
 * Close this channel builder, releasing the underlying SSL factory.
 * The null check guards the case where the factory was never created —
 * presumably when configuration did not complete; TODO confirm from the
 * field's initialization.
 */
@Override
public void close() {
    if (sslFactory != null) sslFactory.close();
}
|
Close this channel builder, releasing the underlying SSL factory if one was created.
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java
| 115
|
[] |
void
| true
| 2
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
nop
|
/**
 * Gets the NOP singleton.
 *
 * @param <E> The kind of thrown exception or error.
 * @return The NOP singleton.
 */
@SuppressWarnings("unchecked")
static <E extends Throwable> FailableIntToFloatFunction<E> nop() {
    return NOP;
}
|
Gets the NOP singleton.
@param <E> The kind of thrown exception or error.
@return The NOP singleton.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableIntToFloatFunction.java
| 41
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
renameFile
|
/**
 * Rename the given file to the given destination.
 *
 * @param file the file to rename
 * @param dest the destination file
 * @throws IllegalStateException if the file cannot be renamed
 */
private void renameFile(File file, File dest) {
    // File.renameTo reports failure via its return value rather than an exception.
    if (!file.renameTo(dest)) {
        throw new IllegalStateException("Unable to rename '" + file + "' to '" + dest + "'");
    }
}
|
Rename the given file to the given destination.
@param file the file to rename
@param dest the destination file
@throws IllegalStateException if the file cannot be renamed
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Repackager.java
| 148
|
[
"file",
"dest"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
findResolvableAssignmentAndTriggerMetadataUpdate
|
/**
 * Build the set of target-assignment partitions whose topic names could be resolved
 * from the metadata cache or the local topic-name cache. Any topic IDs that remain
 * unresolved trigger a metadata update request so their names can be resolved later.
 *
 * @return the partitions (with resolved topic names) that are ready to reconcile
 */
private TopicIdPartitionSet findResolvableAssignmentAndTriggerMetadataUpdate() {
    final TopicIdPartitionSet assignmentReadyToReconcile = new TopicIdPartitionSet();
    // Work on a copy so entries can be removed as they resolve without touching
    // the target assignment itself.
    final HashMap<Uuid, SortedSet<Integer>> unresolved = new HashMap<>(currentTargetAssignment.partitions);

    // Try to resolve topic names from metadata cache or subscription cache, and move
    // assignments from the "unresolved" collection, to the "assignmentReadyToReconcile" one.
    Iterator<Map.Entry<Uuid, SortedSet<Integer>>> it = unresolved.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<Uuid, SortedSet<Integer>> e = it.next();
        Uuid topicId = e.getKey();
        SortedSet<Integer> topicPartitions = e.getValue();

        Optional<String> nameFromMetadata = findTopicNameInGlobalOrLocalCache(topicId);
        nameFromMetadata.ifPresent(resolvedTopicName -> {
            // Name resolved, so assignment is ready for reconciliation.
            assignmentReadyToReconcile.addAll(topicId, resolvedTopicName, topicPartitions);
            // Removing via the iterator is safe while iterating the entry set.
            it.remove();
        });
    }

    if (!unresolved.isEmpty()) {
        log.debug("Topic Ids {} received in target assignment were not found in metadata and " +
                "are not currently assigned. Requesting a metadata update now to resolve " +
                "topic names.", unresolved.keySet());
        metadata.requestUpdate(true);
    }

    return assignmentReadyToReconcile;
}
|
Build set of TopicIdPartition (topic ID, topic name and partition id) from the target assignment
received from the broker (topic IDs and list of partitions).
<p>
This will:
<ol type="1">
<li>Try to find topic names in the metadata cache</li>
<li>For topics not found in metadata, try to find names in the local topic names cache
(contains topic id and names currently assigned and resolved)</li>
<li>If there are topics that are not in metadata cache or in the local cache
of topic names assigned to this member, request a metadata update, and continue
resolving names as the cache is updated.
</li>
</ol>
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 1,067
|
[] |
TopicIdPartitionSet
| true
| 3
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
reorder
|
/**
 * Reorder `array` according to the specified indexes where the element at
 * the first index is assigned as the first element, the element at
 * the second index is assigned as the second element, and so on.
 *
 * @private
 * @param {Array} array The array to reorder.
 * @param {Array} indexes The arranged array indexes.
 * @returns {Array} Returns `array`.
 */
function reorder(array, indexes) {
  var arrLength = array.length;
  var oldArray = copyArray(array);
  var count = nativeMin(indexes.length, arrLength);

  // Walk positions from high to low, pulling each value from the snapshot so
  // earlier overwrites of `array` cannot corrupt later reads.
  for (var pos = count - 1; pos >= 0; pos--) {
    var sourceIndex = indexes[pos];
    array[pos] = isIndex(sourceIndex, arrLength) ? oldArray[sourceIndex] : undefined;
  }
  return array;
}
|
Reorder `array` according to the specified indexes where the element at
the first index is assigned as the first element, the element at
the second index is assigned as the second element, and so on.
@private
@param {Array} array The array to reorder.
@param {Array} indexes The arranged array indexes.
@returns {Array} Returns `array`.
|
javascript
|
lodash.js
| 6,692
|
[
"array",
"indexes"
] | false
| 3
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
findAdvisorBeans
|
public List<Advisor> findAdvisorBeans() {
	// Resolve (and cache) the advisor bean names. FactoryBeans are deliberately
	// left uninitialized here so the auto-proxy creator can still apply to them.
	String[] advisorNames = this.cachedAdvisorBeanNames;
	if (advisorNames == null) {
		advisorNames = BeanFactoryUtils.beanNamesForTypeIncludingAncestors(
				this.beanFactory, Advisor.class, true, false);
		this.cachedAdvisorBeanNames = advisorNames;
	}

	List<Advisor> advisors = new ArrayList<>();
	for (String name : advisorNames) {
		if (!isEligibleBean(name)) {
			continue;
		}
		if (this.beanFactory.isCurrentlyInCreation(name)) {
			if (logger.isTraceEnabled()) {
				logger.trace("Skipping currently created advisor '" + name + "'");
			}
			continue;
		}
		try {
			advisors.add(this.beanFactory.getBean(name, Advisor.class));
		}
		catch (BeanCreationException ex) {
			Throwable rootCause = ex.getMostSpecificCause();
			if (rootCause instanceof BeanCurrentlyInCreationException bce) {
				String bceBeanName = bce.getBeanName();
				if (bceBeanName != null && this.beanFactory.isCurrentlyInCreation(bceBeanName)) {
					if (logger.isTraceEnabled()) {
						logger.trace("Skipping advisor '" + name +
								"' with dependency on currently created bean: " + ex.getMessage());
					}
					// Ignore: indicates a reference back to the bean we're trying to advise.
					// We want to find advisors other than the currently created bean itself.
					continue;
				}
			}
			throw ex;
		}
	}
	return advisors;
}
|
Find all eligible Advisor beans in the current bean factory,
ignoring FactoryBeans and excluding beans that are currently in creation.
@return the list of {@link org.springframework.aop.Advisor} beans
@see #isEligibleBean
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/BeanFactoryAdvisorRetrievalHelper.java
| 66
|
[] | true
| 11
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
swapCase
|
public static String swapCase(final String str) {
    if (isEmpty(str)) {
        return str;
    }
    final int length = str.length();
    // One slot per char is an upper bound on the number of code points.
    final int[] swapped = new int[length];
    int count = 0;
    int index = 0;
    while (index < length) {
        final int codePoint = str.codePointAt(index);
        final int converted;
        if (Character.isLowerCase(codePoint)) {
            converted = Character.toUpperCase(codePoint);
        } else if (Character.isUpperCase(codePoint) || Character.isTitleCase(codePoint)) {
            converted = Character.toLowerCase(codePoint);
        } else {
            converted = codePoint;
        }
        swapped[count++] = converted;
        index += Character.charCount(converted);
    }
    return new String(swapped, 0, count);
}
|
Swaps the case of a String changing upper and title case to lower case, and lower case to upper case.
<ul>
<li>Upper case character converts to Lower case</li>
<li>Title case character converts to Lower case</li>
<li>Lower case character converts to Upper case</li>
</ul>
<p>
For a word based algorithm, see {@link org.apache.commons.text.WordUtils#swapCase(String)}. A {@code null} input String returns {@code null}.
</p>
<pre>
StringUtils.swapCase(null) = null
StringUtils.swapCase("") = ""
StringUtils.swapCase("The dog has a BONE") = "tHE DOG HAS A bone"
</pre>
<p>
NOTE: This method changed in Lang version 2.0. It no longer performs a word based algorithm. If you only use ASCII, you will notice no change. That
functionality is available in org.apache.commons.lang3.text.WordUtils.
</p>
@param str the String to swap case, may be null.
@return the changed String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,601
|
[
"str"
] |
String
| true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getExplicitBeanName
|
private @Nullable String getExplicitBeanName(AnnotationMetadata metadata) {
	// Gather every distinct, non-blank 'value' declared on @Component
	// stereotype annotations (alias resolution handled by MergedAnnotation).
	List<String> candidates = metadata.getAnnotations().stream(COMPONENT_ANNOTATION_CLASSNAME)
			.map(ann -> ann.getString(MergedAnnotation.VALUE))
			.filter(StringUtils::hasText)
			.map(String::trim)
			.distinct()
			.toList();
	if (candidates.isEmpty()) {
		return null;
	}
	if (candidates.size() > 1) {
		throw new IllegalStateException(
				"Stereotype annotations suggest inconsistent component names: " + candidates);
	}
	return candidates.get(0);
}
|
Get the explicit bean name for the underlying class, as configured via
{@link org.springframework.stereotype.Component @Component} and taking into
account {@link org.springframework.core.annotation.AliasFor @AliasFor}
semantics for annotation attribute overrides for {@code @Component}'s
{@code value} attribute.
@param metadata the {@link AnnotationMetadata} for the underlying class
@return the explicit bean name, or {@code null} if not found
@since 6.1
@see org.springframework.stereotype.Component#value()
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationBeanNameGenerator.java
| 191
|
[
"metadata"
] |
String
| true
| 3
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_compute_score_samples
|
def _compute_score_samples(self, X, subsample_features):
    """
    Compute the score of each sample in X going through the extra trees.

    Parameters
    ----------
    X : array-like or sparse matrix
        Data matrix.
    subsample_features : bool
        Whether features should be subsampled.

    Returns
    -------
    scores : ndarray of shape (n_samples,)
        The score of each sample in X.
    """
    depths = np.zeros(X.shape[0], order="f")
    expected_depth_max_samples = _average_path_length([self._max_samples])

    # Sequential by default (default n_jobs): for small inputs (< ~1k
    # samples) this tends to beat the parallelization overhead. Callers can
    # opt in to parallelism by wrapping this call in a
    # joblib.parallel_backend context manager; see
    # https://github.com/scikit-learn/scikit-learn/pull/28622 for details.
    write_lock = threading.Lock()
    tasks = (
        delayed(_parallel_compute_tree_depths)(
            tree,
            X,
            features if subsample_features else None,
            self._decision_path_lengths[idx],
            self._average_path_length_per_tree[idx],
            depths,
            write_lock,
        )
        for idx, (tree, features) in enumerate(
            zip(self.estimators_, self.estimators_features_)
        )
    )
    Parallel(verbose=self.verbose, require="sharedmem")(tasks)

    denominator = len(self.estimators_) * expected_depth_max_samples
    # For a single training sample, denominator and depth are both 0;
    # substituting ones where division is undefined pins those scores to 1.
    scores = 2 ** (
        -np.divide(depths, denominator, out=np.ones_like(depths), where=denominator != 0)
    )
    return scores
|
Compute the score of each samples in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
Returns
-------
scores : ndarray of shape (n_samples,)
The score of each sample in X.
|
python
|
sklearn/ensemble/_iforest.py
| 582
|
[
"self",
"X",
"subsample_features"
] | false
| 2
| 6.16
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
f_oneway
|
def f_oneway(*args):
    """Perform a 1-way ANOVA.

    Tests the null hypothesis that two or more groups have the same
    population mean; the groups may have differing sizes.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    *args : {array-like, sparse matrix}
        Sample1, sample2... The sample measurements should be given as
        arguments.

    Returns
    -------
    f_statistic : float
        The computed F-value of the test.
    p_value : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The returned p-value is only valid under the usual ANOVA assumptions:
    the samples are independent, each sample comes from a normally
    distributed population, and the population standard deviations of the
    groups are all equal (homoscedasticity). If these do not hold, the
    Kruskal-Wallis H-test (`scipy.stats.kruskal`_) may still be usable,
    with some loss of power.

    The algorithm is from Heiman[2], pp.394-7. ``scipy.stats.f_oneway``
    should give the same results while being less efficient.

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://vassarstats.net/textbook
    .. [2] Heiman, G.W. Research Methods in Statistics. 2002.
    """
    n_groups = len(args)
    samples = [as_float_array(sample) for sample in args]
    group_sizes = np.array([sample.shape[0] for sample in samples])
    n_total = np.sum(group_sizes)

    # Total and between-group sums of squares, computed feature-wise.
    ss_all = sum(safe_sqr(sample).sum(axis=0) for sample in samples)
    group_sums = [np.asarray(sample.sum(axis=0)) for sample in samples]
    sq_sum_all = sum(group_sums) ** 2
    sstot = ss_all - sq_sum_all / float(n_total)

    ssbn = 0.0
    for group_sum, group_size in zip(group_sums, group_sizes):
        ssbn += group_sum**2 / group_size
    ssbn -= sq_sum_all / float(n_total)

    sswn = sstot - ssbn
    dfbn = n_groups - 1
    dfwn = n_total - n_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)

    # Constant features produce a zero within-group mean square; warn when
    # that happens alongside non-trivial between-group variation.
    constant_features_idx = np.where(msw == 0.0)[0]
    if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size:
        warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)

    f = np.asarray(msb / msw).ravel()  # flatten matrix to vector in sparse case
    prob = special.fdtrc(dfbn, dfwn, f)
    return f, prob
|
Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : {array-like, sparse matrix}
Sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
f_statistic : float
The computed F-value of the test.
p_value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://vassarstats.net/textbook
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
|
python
|
sklearn/feature_selection/_univariate_selection.py
| 41
|
[] | false
| 4
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
fromParts
|
public static HostAndPort fromParts(String host, int port) {
    // Validate the port range first, then make sure the host string does
    // not itself smuggle in a second port.
    checkArgument(isValidPort(port), "Port out of range: %s", port);
    HostAndPort hostOnly = fromString(host);
    checkArgument(!hostOnly.hasPort(), "Host has a port: %s", host);
    return new HostAndPort(hostOnly.host, port, hostOnly.hasBracketlessColons);
}
|
Build a HostAndPort instance from separate host and port values.
<p>Note: Non-bracketed IPv6 literals are allowed. Use {@link #requireBracketsForIPv6()} to
prohibit these.
@param host the host string to parse. Must not contain a port number.
@param port a port number from [0..65535]
@return if parsing was successful, a populated HostAndPort object.
@throws IllegalArgumentException if {@code host} contains a port number, or {@code port} is out
of range.
|
java
|
android/guava/src/com/google/common/net/HostAndPort.java
| 133
|
[
"host",
"port"
] |
HostAndPort
| true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
strictLastIndexOf
|
function strictLastIndexOf(array, value, fromIndex) {
  // Scan backwards from `fromIndex`; strict equality (===) avoids the
  // implicit coercions a == comparison would perform.
  for (var index = fromIndex + 1; index--;) {
    if (array[index] === value) {
      return index;
    }
  }
  return index;
}
|
A specialized version of `_.lastIndexOf` which performs strict equality
comparisons of values, i.e. `===`.
@private
@param {Array} array The array to inspect.
@param {*} value The value to search for.
@param {number} fromIndex The index to search from.
@returns {number} Returns the index of the matched value, else `-1`.
|
javascript
|
lodash.js
| 1,320
|
[
"array",
"value",
"fromIndex"
] | false
| 3
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getCombinedPathLength
|
function getCombinedPathLength(error: EngineValidationError) {
  // Combined depth = selectionPath length + argumentPath length; either
  // path may be absent (non-array), in which case it contributes 0.
  const selection = error['selectionPath']
  const args = error['argumentPath']
  const selectionLength = Array.isArray(selection) ? selection.length : 0
  const argumentLength = Array.isArray(args) ? args.length : 0
  return selectionLength + argumentLength
}
|
Function that attempts to pick the best error from the list
by ranking them. In most cases, highest ranking error would be the one
which has the longest combined "selectionPath" + "argumentPath". Justification
for that is that type that made it deeper into validation tree before failing
is probably closer to the one user actually wanted to do.
However, if two errors are at the same depth level, we introduce additional ranking based
on error type. See `getErrorTypeScore` function for details
@param errors
@returns
|
typescript
|
packages/client/src/runtime/core/errorRendering/applyUnionError.ts
| 110
|
[
"error"
] | false
| 3
| 7.12
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
getCauseType
|
@SuppressWarnings("unchecked")
protected Class<? extends T> getCauseType() {
	// Resolve T from the concrete subclass's declaration of AbstractFailureAnalyzer<T>.
	ResolvableType resolvable = ResolvableType.forClass(AbstractFailureAnalyzer.class, getClass());
	Class<? extends T> causeType = (Class<? extends T>) resolvable.resolveGeneric();
	Assert.state(causeType != null, "Unable to resolve generic");
	return causeType;
}
|
Return the cause type being handled by the analyzer. By default the class generic
is used.
@return the cause type
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/diagnostics/AbstractFailureAnalyzer.java
| 54
|
[] | true
| 1
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
vsplit
|
def vsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays vertically (row-wise).

    Equivalent to calling ``split`` with ``axis=0`` (the default): the
    array is always split along its first axis, regardless of its
    dimensionality. Refer to the ``split`` documentation for the meaning
    of `indices_or_sections`.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.vsplit(x, 2)
    [array([[0., 1., 2., 3.],
           [4., 5., 6., 7.]]),
     array([[ 8.,  9., 10., 11.],
           [12., 13., 14., 15.]])]

    With a higher dimensional array the split is still along the first
    axis.
    """
    if _nx.ndim(ary) >= 2:
        return split(ary, indices_or_sections, 0)
    raise ValueError('vsplit only works on arrays of 2 or more dimensions')
|
Split an array into multiple sub-arrays vertically (row-wise).
Please refer to the ``split`` documentation. ``vsplit`` is equivalent
to ``split`` with `axis=0` (default), the array is always split along the
first axis regardless of the array dimension.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> import numpy as np
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]),
array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
[array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]]),
array([[12., 13., 14., 15.]]),
array([], shape=(0, 4), dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]),
array([[[4., 5.],
[6., 7.]]])]
|
python
|
numpy/lib/_shape_base_impl.py
| 935
|
[
"ary",
"indices_or_sections"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
toString
|
@Override
public String toString() {
    // Never print the actual password material; only report whether one is set.
    return "StoreTrustConfig{"
            + "path=" + truststorePath
            + ", password=" + (password.length == 0 ? "<empty>" : "<non-empty>")
            + ", type=" + type
            + ", algorithm=" + algorithm
            + '}';
}
|
Returns a string representation of this trust configuration; the password itself is never included, only whether it is empty or non-empty.
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/StoreTrustConfig.java
| 155
|
[] |
String
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
hermval3d
|
def hermval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series at points (x, y, z).
    This function returns the values:
    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.
    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.
    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.
    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.
    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermgrid3d
    Examples
    --------
    >>> from numpy.polynomial.hermite import hermval3d
    >>> x = [1, 2]
    >>> y = [4, 5]
    >>> z = [6, 7]
    >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
    >>> hermval3d(x, y, z, c)
    array([ 40077., 120131.])
    """
    # Delegate to the shared N-D evaluation helper, using hermval as the
    # 1-D kernel applied successively along each axis of c.
    return pu._valnd(hermval, c, x, y, z)
|
Evaluate a 3-D Hermite series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermgrid3d
Examples
--------
>>> from numpy.polynomial.hermite import hermval3d
>>> x = [1, 2]
>>> y = [4, 5]
>>> z = [6, 7]
>>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
>>> hermval3d(x, y, z, c)
array([ 40077., 120131.])
|
python
|
numpy/polynomial/hermite.py
| 1,003
|
[
"x",
"y",
"z",
"c"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
parsePropPath
|
function parsePropPath(name) {
  // Accepts any of the equivalent spellings below and yields the segments:
  //   foo[x][y][z]
  //   foo.x.y.z
  //   foo-x-y-z
  //   foo x y z
  return utils.matchAll(/\w+|\[(\w*)]/g, name).map(function (match) {
    // A bare `[]` (array-push syntax) maps to an empty-string segment.
    if (match[0] === '[]') {
      return '';
    }
    return match[1] || match[0];
  });
}
|
It takes a string like `foo[x][y][z]` and returns an array like `['foo', 'x', 'y', 'z']
@param {string} name - The name of the property to get.
@returns An array of strings.
|
javascript
|
lib/helpers/formDataToJSON.js
| 12
|
[
"name"
] | false
| 3
| 6.08
|
axios/axios
| 108,381
|
jsdoc
| false
|
|
_write_array_header
|
def _write_array_header(fp, d, version=None):
    """ Write the header for an array and returns the version used

    Parameters
    ----------
    fp : filelike object
    d : dict
        This has the appropriate entries for writing its string representation
        to the header of the file.
    version : tuple or None
        None means use oldest that works. Providing an explicit version will
        raise a ValueError if the format does not allow saving this data.
        Default: None
    """
    # repr() is required: the reader eval()s the header dict back.
    entries = "".join(f"'{key}': {value!r}, " for key, value in sorted(d.items()))
    header = "{" + entries + "}"

    # Pad with spare space so the header can later be rewritten in place
    # when the array grows (e.g. appending data along the growth axis).
    shape = d['shape']
    if len(shape) > 0:
        growth_axis_len = shape[-1 if d['fortran_order'] else 0]
        header += " " * (GROWTH_AXIS_MAX_DIGITS - len(repr(growth_axis_len)))

    if version is None:
        header = _wrap_header_guess_version(header)
    else:
        header = _wrap_header(header, version)
    fp.write(header)
|
Write the header for an array and returns the version used
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
version : tuple or None
None means use oldest that works. Providing an explicit version will
raise a ValueError if the format does not allow saving this data.
Default: None
|
python
|
numpy/lib/_format_impl.py
| 445
|
[
"fp",
"d",
"version"
] | false
| 6
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
get_data_home
|
def get_data_home(data_home=None) -> str:
    """Return the path of the scikit-learn data directory.

    This folder is used by some large dataset loaders to avoid downloading
    the data several times. By default it is a folder named
    'scikit_learn_data' in the user home folder; the location can also be
    set via the 'SCIKIT_LEARN_DATA' environment variable or by passing an
    explicit folder path. A leading '~' is expanded to the user home
    folder, and the folder is created if it does not already exist.

    Parameters
    ----------
    data_home : str or path-like, default=None
        The path to scikit-learn data directory. If `None`, the default path
        is `~/scikit_learn_data`.

    Returns
    -------
    data_home: str
        The path to scikit-learn data directory.

    Examples
    --------
    >>> import os
    >>> from sklearn.datasets import get_data_home
    >>> data_home_path = get_data_home()
    >>> os.path.exists(data_home_path)
    True
    """
    if data_home is None:
        # Environment variable wins over the built-in default location.
        data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data"))
    resolved = expanduser(data_home)
    makedirs(resolved, exist_ok=True)
    return resolved
|
Return the path of the scikit-learn data directory.
This folder is used by some large dataset loaders to avoid downloading the
data several times.
By default the data directory is set to a folder named 'scikit_learn_data' in the
user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The '~'
symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_home : str or path-like, default=None
The path to scikit-learn data directory. If `None`, the default path
is `~/scikit_learn_data`.
Returns
-------
data_home: str
The path to scikit-learn data directory.
Examples
--------
>>> import os
>>> from sklearn.datasets import get_data_home
>>> data_home_path = get_data_home()
>>> os.path.exists(data_home_path)
True
|
python
|
sklearn/datasets/_base.py
| 48
|
[
"data_home"
] |
str
| true
| 2
| 8.32
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
calculateLast
|
function calculateLast(field: Field, ignoreNulls: boolean, nullAsZero: boolean): FieldCalcs {
  // `last` is simply the final entry of the field's value array; the
  // ignoreNulls/nullAsZero flags are part of the shared reducer signature
  // and are intentionally unused here.
  const values = field.values;
  const lastValue = values[values.length - 1];
  return { last: lastValue };
}
|
@returns an object with a key for each selected stat
NOTE: This will also modify the 'field.state' object,
leaving values in a cache until cleared.
|
typescript
|
packages/grafana-data/src/transformations/fieldReducer.ts
| 622
|
[
"field",
"ignoreNulls",
"nullAsZero"
] | true
| 1
| 6.96
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
setAccessible
|
static boolean setAccessible(final AccessibleObject accessibleObject) {
    // Already accessible: skip the call (and its security-manager
    // permission check) entirely.
    if (isAccessible(accessibleObject)) {
        return false;
    }
    accessibleObject.setAccessible(true);
    return true;
}
|
Delegates to {@link AccessibleObject#setAccessible(boolean)} only if {@link AccessibleObject#isAccessible()} returns false. This avoid a
permission check if there is a security manager.
@param accessibleObject The accessible object.
@return Whether {@link AccessibleObject#setAccessible(boolean)} was called.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/AccessibleObjects.java
| 44
|
[
"accessibleObject"
] | true
| 2
| 7.28
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
initializeConnectionProvider
|
void initializeConnectionProvider() {
	final DataSource txDataSource = this.dataSource;
	Assert.state(txDataSource != null, "DataSource must not be null");
	// Fall back to the transactional DataSource when no dedicated
	// non-transactional one was configured.
	final DataSource nonTxDataSource =
			(this.nonTransactionalDataSource != null ? this.nonTransactionalDataSource : txDataSource);

	// Register transactional ConnectionProvider for Quartz.
	DBConnectionManager.getInstance().addConnectionProvider(
			TX_DATA_SOURCE_PREFIX + getInstanceName(),
			new ConnectionProvider() {
				@Override
				public Connection getConnection() throws SQLException {
					// Return a transactional Connection, if any.
					return DataSourceUtils.doGetConnection(txDataSource);
				}
				@Override
				public void shutdown() {
					// Do nothing - a Spring-managed DataSource has its own lifecycle.
				}
				@Override
				public void initialize() {
					// Do nothing - a Spring-managed DataSource has its own lifecycle.
				}
			}
	);

	// Register non-transactional ConnectionProvider for Quartz.
	DBConnectionManager.getInstance().addConnectionProvider(
			NON_TX_DATA_SOURCE_PREFIX + getInstanceName(),
			new ConnectionProvider() {
				@Override
				public Connection getConnection() throws SQLException {
					// Always return a non-transactional Connection.
					return nonTxDataSource.getConnection();
				}
				@Override
				public void shutdown() {
					// Do nothing - a Spring-managed DataSource has its own lifecycle.
				}
				@Override
				public void initialize() {
					// Do nothing - a Spring-managed DataSource has its own lifecycle.
				}
			}
	);
}
|
Name used for the non-transactional ConnectionProvider for Quartz.
This provider will delegate to the local Spring-managed DataSource.
@see org.quartz.utils.DBConnectionManager#addConnectionProvider
@see SchedulerFactoryBean#setDataSource
|
java
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/LocalDataSourceJobStore.java
| 132
|
[] |
void
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toScaledBigDecimal
|
public static BigDecimal toScaledBigDecimal(final BigDecimal value) {
    // Delegate with the documented defaults: scale of two, banker's
    // rounding (HALF_EVEN). Null handling happens in the delegate.
    return toScaledBigDecimal(value, INTEGER_TWO, RoundingMode.HALF_EVEN);
}
|
Converts a {@link BigDecimal} to a {@link BigDecimal} with a scale of two that has been rounded using {@code RoundingMode.HALF_EVEN}. If the supplied
{@code value} is null, then {@code BigDecimal.ZERO} is returned.
<p>
Note, the scale of a {@link BigDecimal} is the number of digits to the right of the decimal point.
</p>
@param value the {@link BigDecimal} to convert, may be null.
@return the scaled, with appropriate rounding, {@link BigDecimal}.
@since 3.8
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,629
|
[
"value"
] |
BigDecimal
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_commands
|
def get_commands():
    """
    Return a dictionary mapping command names to their callback applications.

    Look for a management.commands package in django.core, and in each
    installed application -- if a commands package exists, register all
    commands in that package.

    Core commands are always included. If a settings module has been
    specified, also include user-defined commands.

    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name).
    """
    # Core commands first; app commands may shadow them below.
    commands = {name: "django.core" for name in find_commands(__path__[0])}
    if settings.configured:
        # Iterate in reverse so that apps listed earlier in INSTALLED_APPS
        # end up overriding commands of the same name from later apps.
        for app_config in reversed(apps.get_app_configs()):
            management_dir = os.path.join(app_config.path, "management")
            for name in find_commands(management_dir):
                commands[name] = app_config.name
    return commands
|
Return a dictionary mapping command names to their callback applications.
Look for a management.commands package in django.core, and in each
installed application -- if a commands package exists, register all
commands in that package.
Core commands are always included. If a settings module has been
specified, also include user-defined commands.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
The dictionary is cached on the first call and reused on subsequent
calls.
|
python
|
django/core/management/__init__.py
| 53
|
[] | false
| 3
| 6.24
|
django/django
| 86,204
|
unknown
| false
|
|
fromValid
|
public static HostSpecifier fromValid(String specifier) {
    // Reject any port and normalize away optional IPv6 brackets.
    HostAndPort hostAndPort = HostAndPort.fromString(specifier);
    Preconditions.checkArgument(!hostAndPort.hasPort());
    String host = hostAndPort.getHost();

    // Prefer an IP-literal interpretation. The address object is built
    // (rather than merely validated) so InetAddresses.toUriString can
    // produce the canonical string form.
    InetAddress inetAddress = null;
    try {
        inetAddress = InetAddresses.forString(host);
    } catch (IllegalArgumentException e) {
        // Not an IPv4 or IPv6 literal; fall through to domain handling.
    }
    if (inetAddress != null) {
        return new HostSpecifier(InetAddresses.toUriString(inetAddress));
    }

    // Not any kind of IP address; must be a domain name or invalid.
    // TODO(user): different versions of this for different factories?
    InternetDomainName domain = InternetDomainName.from(host);
    if (domain.hasPublicSuffix()) {
        return new HostSpecifier(domain.toString());
    }
    throw new IllegalArgumentException(
        "Domain name does not have a recognized public suffix: " + host);
}
|
Returns a {@code HostSpecifier} built from the provided {@code specifier}, which is already
known to be valid. If the {@code specifier} might be invalid, use {@link #from(String)}
instead.
<p>The specifier must be in one of these formats:
<ul>
<li>A domain name, like {@code google.com}
<li>A IPv4 address string, like {@code 127.0.0.1}
<li>An IPv6 address string with or without brackets, like {@code [2001:db8::1]} or {@code
2001:db8::1}
</ul>
@throws IllegalArgumentException if the specifier is not valid.
|
java
|
android/guava/src/com/google/common/net/HostSpecifier.java
| 71
|
[
"specifier"
] |
HostSpecifier
| true
| 4
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
toBigInteger
|
public static BigInteger toBigInteger(InetAddress address) {
return new BigInteger(1, address.getAddress());
}
|
Returns a BigInteger representing the address.
<p>Unlike {@code coerceToInteger}, IPv6 addresses are not coerced to IPv4 addresses.
@param address {@link InetAddress} to convert
@return {@code BigInteger} representation of the address
@since 28.2
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 1,072
|
[
"address"
] |
BigInteger
| true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
__from_arrow__
|
def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray:
"""
Construct DatetimeArray from pyarrow Array/ChunkedArray.
Note: If the units in the pyarrow Array are the same as this
DatetimeDtype, then values corresponding to the integer representation
of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
are converted to ``NaT``, regardless of the null indicator in the
pyarrow array.
Parameters
----------
array : pyarrow.Array or pyarrow.ChunkedArray
The Arrow array to convert to DatetimeArray.
Returns
-------
extension array : DatetimeArray
"""
import pyarrow
from pandas.core.arrays import DatetimeArray
array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True)
if isinstance(array, pyarrow.Array):
np_arr = array.to_numpy(zero_copy_only=False)
else:
np_arr = array.to_numpy()
return DatetimeArray._simple_new(np_arr, dtype=self)
|
Construct DatetimeArray from pyarrow Array/ChunkedArray.
Note: If the units in the pyarrow Array are the same as this
DatetimeDtype, then values corresponding to the integer representation
of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`)
are converted to ``NaT``, regardless of the null indicator in the
pyarrow array.
Parameters
----------
array : pyarrow.Array or pyarrow.ChunkedArray
The Arrow array to convert to DatetimeArray.
Returns
-------
extension array : DatetimeArray
|
python
|
pandas/core/dtypes/dtypes.py
| 933
|
[
"self",
"array"
] |
DatetimeArray
| true
| 3
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
sum
|
@Override
public long sum() {
long sum = base;
Cell[] as = cells;
if (as != null) {
int n = as.length;
for (int i = 0; i < n; ++i) {
Cell a = as[i];
if (a != null) sum += a.value;
}
}
return sum;
}
|
Returns the current sum. The returned value is <em>NOT</em> an atomic snapshot; invocation in
the absence of concurrent updates returns an accurate result, but concurrent updates that occur
while the sum is being calculated might not be incorporated.
@return the sum
|
java
|
android/guava/src/com/google/common/cache/LongAdder.java
| 97
|
[] | true
| 4
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
on
|
public static Splitter on(String separator) {
checkArgument(separator.length() != 0, "The separator may not be the empty string.");
if (separator.length() == 1) {
return Splitter.on(separator.charAt(0));
}
return new Splitter(
(splitter, toSplit) ->
new SplittingIterator(splitter, toSplit) {
@Override
public int separatorStart(int start) {
int separatorLength = separator.length();
positions:
for (int p = start, last = toSplit.length() - separatorLength; p <= last; p++) {
for (int i = 0; i < separatorLength; i++) {
if (toSplit.charAt(i + p) != separator.charAt(i)) {
continue positions;
}
}
return p;
}
return -1;
}
@Override
public int separatorEnd(int separatorPosition) {
return separatorPosition + separator.length();
}
});
}
|
Returns a splitter that uses the given fixed string as a separator. For example, {@code
Splitter.on(", ").split("foo, bar,baz")} returns an iterable containing {@code ["foo",
"bar,baz"]}.
@param separator the literal, nonempty string to recognize as a separator
@return a splitter, with default settings, that recognizes that separator
|
java
|
android/guava/src/com/google/common/base/Splitter.java
| 166
|
[
"separator"
] |
Splitter
| true
| 5
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
nextGraph
|
public String nextGraph(final int count) {
return next(count, 33, 126, false, false);
}
|
Creates a random string whose length is the number of characters specified.
<p>
Characters will be chosen from the set of characters which match the POSIX [:graph:] regular expression character
class. This class contains all visible ASCII characters (i.e. anything except spaces and control characters).
</p>
@param count the length of random string to create.
@return the random string.
@throws IllegalArgumentException if {@code count} < 0.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 906
|
[
"count"
] |
String
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
combine
|
@Override
public double combine(List<Sample> samples, MetricConfig config, long now) {
return totalCount();
}
|
Return the computed frequency describing the number of occurrences of the values in the bucket for the given
center point, relative to the total number of occurrences in the samples.
@param config the metric configuration
@param now the current time in milliseconds
@param centerValue the value corresponding to the center point of the bucket
@return the frequency of the values in the bucket relative to the total number of samples
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java
| 156
|
[
"samples",
"config",
"now"
] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
selectReadReplica
|
Node selectReadReplica(final TopicPartition partition, final Node leaderReplica, final long currentTimeMs) {
Optional<Integer> nodeId = subscriptions.preferredReadReplica(partition, currentTimeMs);
if (nodeId.isPresent()) {
Optional<Node> node = nodeId.flatMap(id -> metadata.fetch().nodeIfOnline(partition, id));
if (node.isPresent()) {
return node.get();
} else {
log.trace("Not fetching from {} for partition {} since it is marked offline or is missing from our metadata," +
" using the leader instead.", nodeId, partition);
// Note that this condition may happen due to stale metadata, so we clear preferred replica and
// refresh metadata.
requestMetadataUpdate(metadata, subscriptions, partition);
return leaderReplica;
}
} else {
return leaderReplica;
}
}
|
Determine from which replica to read: the <i>preferred</i> or the <i>leader</i>. The preferred replica is used
iff:
<ul>
<li>A preferred replica was previously set</li>
<li>We're still within the lease time for the preferred replica</li>
<li>The replica is still online/available</li>
</ul>
If any of the above are not met, the leader node is returned.
@param partition {@link TopicPartition} for which we want to fetch data
@param leaderReplica {@link Node} for the leader of the given partition
@param currentTimeMs Current time in milliseconds; used to determine if we're within the optional lease window
@return Replica {@link Node node} from which to request the data
@see SubscriptionState#preferredReadReplica
@see SubscriptionState#updatePreferredReadReplica
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
| 374
|
[
"partition",
"leaderReplica",
"currentTimeMs"
] |
Node
| true
| 3
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
markAsUninitialized
|
private void markAsUninitialized(LoggerContext loggerContext) {
loggerContext.setExternalContext(null);
}
|
Return the configuration location. The result may be:
<ul>
<li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li>
<li>A file path: if provided explicitly by the user</li>
<li>A URI: if loaded from the classpath default or a custom location</li>
</ul>
@param configuration the source configuration
@return the config location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
| 497
|
[
"loggerContext"
] |
void
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
minimizeCapacity
|
public StrBuilder minimizeCapacity() {
if (buffer.length > length()) {
buffer = ArrayUtils.arraycopy(buffer, 0, 0, size, () -> new char[length()]);
}
return this;
}
|
Minimizes the capacity to the actual length of the string.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 2,475
|
[] |
StrBuilder
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
readString
|
static String readString(DataBlock data, long pos, long len) {
try {
if (len > Integer.MAX_VALUE) {
throw new IllegalStateException("String is too long to read");
}
ByteBuffer buffer = ByteBuffer.allocate((int) len);
buffer.order(ByteOrder.LITTLE_ENDIAN);
data.readFully(buffer, pos);
return new String(buffer.array(), StandardCharsets.UTF_8);
}
catch (IOException ex) {
throw new UncheckedIOException(ex);
}
}
|
Read a string value from the given data block.
@param data the source data
@param pos the position to read from
@param len the number of bytes to read
@return the contents as a string
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipString.java
| 261
|
[
"data",
"pos",
"len"
] |
String
| true
| 3
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
permitted_dag_filter_factory
|
def permitted_dag_filter_factory(
method: ResourceMethod, filter_class=PermittedDagFilter
) -> Callable[[BaseUser, BaseAuthManager], PermittedDagFilter]:
"""
Create a callable for Depends in FastAPI that returns a filter of the permitted dags for the user.
:param method: whether filter readable or writable.
:return: The callable that can be used as Depends in FastAPI.
"""
def depends_permitted_dags_filter(
user: GetUserDep,
auth_manager: AuthManagerDep,
) -> PermittedDagFilter:
authorized_dags: set[str] = auth_manager.get_authorized_dag_ids(user=user, method=method)
return filter_class(authorized_dags)
return depends_permitted_dags_filter
|
Create a callable for Depends in FastAPI that returns a filter of the permitted dags for the user.
:param method: whether filter readable or writable.
:return: The callable that can be used as Depends in FastAPI.
|
python
|
airflow-core/src/airflow/api_fastapi/core_api/security.py
| 212
|
[
"method",
"filter_class"
] |
Callable[[BaseUser, BaseAuthManager], PermittedDagFilter]
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
resolveBeforeInstantiation
|
@SuppressWarnings("deprecation")
protected @Nullable Object resolveBeforeInstantiation(String beanName, RootBeanDefinition mbd) {
Object bean = null;
if (!Boolean.FALSE.equals(mbd.beforeInstantiationResolved)) {
// Make sure bean class is actually resolved at this point.
if (!mbd.isSynthetic() && hasInstantiationAwareBeanPostProcessors()) {
Class<?> targetType = determineTargetType(beanName, mbd);
if (targetType != null) {
bean = applyBeanPostProcessorsBeforeInstantiation(targetType, beanName);
if (bean != null) {
bean = applyBeanPostProcessorsAfterInitialization(bean, beanName);
}
}
}
mbd.beforeInstantiationResolved = (bean != null);
}
return bean;
}
|
Apply before-instantiation post-processors, resolving whether there is a
before-instantiation shortcut for the specified bean.
@param beanName the name of the bean
@param mbd the bean definition for the bean
@return the shortcut-determined bean instance, or {@code null} if none
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
| 1,125
|
[
"beanName",
"mbd"
] |
Object
| true
| 6
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getCodePointSize
|
private static int getCodePointSize(byte[] bytes, int i) {
int b = Byte.toUnsignedInt(bytes[i]);
if ((b & 0b1_0000000) == 0b0_0000000) {
return 1;
}
if ((b & 0b111_00000) == 0b110_00000) {
return 2;
}
if ((b & 0b1111_0000) == 0b1110_0000) {
return 3;
}
return 4;
}
|
Read a string value from the given data block.
@param data the source data
@param pos the position to read from
@param len the number of bytes to read
@return the contents as a string
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipString.java
| 294
|
[
"bytes",
"i"
] | true
| 4
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
argsort
|
def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True,
fill_value=None, *, stable=False):
"""
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. If None, the default, the flattened array
is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
The sorting algorithm used.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : scalar or None, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
stable : bool, optional
Only for compatibility with ``np.argsort``. Ignored.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
ma.MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
numpy.ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> import numpy as np
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data=[3, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.argsort()
array([1, 0, 2])
"""
if stable:
raise ValueError(
"`stable` parameter is not supported for masked arrays."
)
# 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
if axis is np._NoValue:
axis = _deprecate_argsort_axis(self)
if fill_value is None:
if endwith:
# nan > inf
if np.issubdtype(self.dtype, np.floating):
fill_value = np.nan
else:
fill_value = minimum_fill_value(self)
else:
fill_value = maximum_fill_value(self)
filled = self.filled(fill_value)
return filled.argsort(axis=axis, kind=kind, order=order)
|
Return an ndarray of indices that sort the array along the
specified axis. Masked values are filled beforehand to
`fill_value`.
Parameters
----------
axis : int, optional
Axis along which to sort. If None, the default, the flattened array
is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
The sorting algorithm used.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
When the array contains unmasked values at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : scalar or None, optional
Value used internally for the masked values.
If ``fill_value`` is not None, it supersedes ``endwith``.
stable : bool, optional
Only for compatibility with ``np.argsort``. Ignored.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
ma.MaskedArray.sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
numpy.ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
Examples
--------
>>> import numpy as np
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
masked_array(data=[3, 2, --],
mask=[False, False, True],
fill_value=999999)
>>> a.argsort()
array([1, 0, 2])
|
python
|
numpy/ma/core.py
| 5,605
|
[
"self",
"axis",
"kind",
"order",
"endwith",
"fill_value",
"stable"
] | false
| 8
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
offload_chosen_sets
|
def offload_chosen_sets(
fwd_module: fx.GraphModule,
bwd_module: fx.GraphModule,
) -> None:
"""
Add offload and reload nodes to the forward and backward graphs.
This function adds device_put operations without any stream handling.
Args:
fwd_module: Forward module graph
bwd_module: Backward module graph
"""
# Add offload nodes in forward graph
offload_activation_fw(fwd_module.graph)
# Update backward graph inputs to be offloaded tensors
bwd_inputs: dict[str, fx.Node] = {
node.name: node for node in bwd_module.graph.find_nodes(op="placeholder")
}
for fwd_node in fwd_module.graph.find_nodes(op="output")[0].args[0]:
if CPU_OFFLOAD_PREFIX not in fwd_node.name:
continue
bwd_node: fx.Node = bwd_inputs[fwd_node.name.replace(CPU_OFFLOAD_PREFIX, "")]
with bwd_module.graph.inserting_after(bwd_node):
bwd_offload_node: fx.Node = bwd_module.graph.placeholder(name=fwd_node.name)
bwd_offload_node.meta.update(fwd_node.meta)
bwd_offload_node.meta["saved_for_offloading"] = True
bwd_offload_node.meta["original_device"] = bwd_node.meta["val"].device
bwd_node.replace_all_uses_with(bwd_offload_node)
bwd_module.graph.erase_node(bwd_node)
# Add reload nodes in backward graph
reload_activation_bw(bwd_module.graph)
|
Add offload and reload nodes to the forward and backward graphs.
This function adds device_put operations without any stream handling.
Args:
fwd_module: Forward module graph
bwd_module: Backward module graph
|
python
|
torch/_functorch/_activation_offloading/activation_offloading.py
| 306
|
[
"fwd_module",
"bwd_module"
] |
None
| true
| 3
| 6.4
|
pytorch/pytorch
| 96,034
|
google
| false
|
execute
|
private @Nullable Object execute(CacheOperationInvoker invoker, Method method, CacheOperationContexts contexts) {
if (contexts.isSynchronized()) {
// Special handling of synchronized invocation
return executeSynchronized(invoker, method, contexts);
}
// Process any early evictions
processCacheEvicts(contexts.get(CacheEvictOperation.class), true,
CacheOperationExpressionEvaluator.NO_RESULT);
// Check if we have a cached value matching the conditions
Object cacheHit = findCachedValue(invoker, method, contexts);
if (cacheHit == null || cacheHit instanceof Cache.ValueWrapper) {
return evaluate(cacheHit, invoker, method, contexts);
}
return cacheHit;
}
|
Execute the underlying operation (typically in case of cache miss) and return
the result of the invocation. If an exception occurs it will be wrapped in a
{@link CacheOperationInvoker.ThrowableWrapper}: the exception can be handled
or modified but it <em>must</em> be wrapped in a
{@link CacheOperationInvoker.ThrowableWrapper} as well.
@param invoker the invoker handling the operation being cached
@return the result of the invocation
@see CacheOperationInvoker#invoke()
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 427
|
[
"invoker",
"method",
"contexts"
] |
Object
| true
| 4
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
elementSet
|
@Override
public ImmutableSet<E> elementSet() {
ImmutableSet<E> result = elementSet;
return (result == null) ? elementSet = new ElementSet<>(Arrays.asList(entries), this) : result;
}
|
Maximum allowed length of a hash table bucket before falling back to a j.u.HashMap based
implementation. Experimentally determined.
|
java
|
guava/src/com/google/common/collect/RegularImmutableMultiset.java
| 182
|
[] | true
| 2
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
compress_rowcols
|
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(
data=[[--, 1, 2],
[--, 4, 5],
[6, 7, 8]],
mask=[[ True, False, False],
[ True, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
"""
if asarray(x).ndim != 2:
raise NotImplementedError("compress_rowcols works for 2D arrays only.")
return compress_nd(x, axis=axis)
|
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(
data=[[--, 1, 2],
[--, 4, 5],
[6, 7, 8]],
mask=[[ True, False, False],
[ True, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
|
python
|
numpy/ma/extras.py
| 899
|
[
"x",
"axis"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
synchronizedBiMap
|
@J2ktIncompatible // Synchronized
public static <K extends @Nullable Object, V extends @Nullable Object>
BiMap<K, V> synchronizedBiMap(BiMap<K, V> bimap) {
return Synchronized.biMap(bimap, null);
}
|
Returns a synchronized (thread-safe) bimap backed by the specified bimap. In order to guarantee
serial access, it is critical that <b>all</b> access to the backing bimap is accomplished
through the returned bimap.
<p>It is imperative that the user manually synchronize on the returned map when accessing any
of its collection views:
{@snippet :
BiMap<Long, String> map = Maps.synchronizedBiMap(HashBiMap.create());
...
Set<Long> set = map.keySet(); // Needn't be in synchronized block
...
synchronized (map) { // Synchronizing on map, not set!
Iterator<Long> it = set.iterator(); // Must be in synchronized block
while (it.hasNext()) {
foo(it.next());
}
}
}
<p>Failure to follow this advice may result in non-deterministic behavior.
<p>The returned bimap will be serializable if the specified bimap is serializable.
@param bimap the bimap to be wrapped in a synchronized view
@return a synchronized view of the specified bimap
|
java
|
android/guava/src/com/google/common/collect/Maps.java
| 1,623
|
[
"bimap"
] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
execute
|
def execute(self, context: Context) -> None:
"""
Transfers Google APIs json data to S3.
:param context: The context that is being provided when executing.
"""
self.log.info("Transferring data from %s to s3", self.google_api_service_name)
if self.google_api_endpoint_params_via_xcom:
self._update_google_api_endpoint_params_via_xcom(context["task_instance"])
data = self._retrieve_data_from_google_api()
self._load_data_to_s3(data)
if self.google_api_response_via_xcom:
self._expose_google_api_response_via_xcom(context["task_instance"], data)
|
Transfers Google APIs json data to S3.
:param context: The context that is being provided when executing.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/transfers/google_api_to_s3.py
| 140
|
[
"self",
"context"
] |
None
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
reset_option
|
def reset_option(pat: str) -> None:
"""
Reset one or more options to their default value.
This method resets the specified pandas option(s) back to their default
values. It allows partial string matching for convenience, but users should
exercise caution to avoid unintended resets due to changes in option names
in future versions.
Parameters
----------
pat : str/regex
If specified only options matching ``pat*`` will be reset.
Pass ``"all"`` as argument to reset all options.
.. warning::
Partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
No return value.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option or options.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the
:ref:`User Guide <options.available>`.
Examples
--------
>>> pd.reset_option("display.max_columns") # doctest: +SKIP
"""
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError(f"No such keys(s) for {pat=}")
if len(keys) > 1 and len(pat) < 4 and pat != "all":
raise ValueError(
"You must specify at least 4 characters when "
"resetting multiple keys, use the special keyword "
'"all" to reset all the options to their default value'
)
for k in keys:
set_option(k, _registered_options[k].defval)
|
Reset one or more options to their default value.
This method resets the specified pandas option(s) back to their default
values. It allows partial string matching for convenience, but users should
exercise caution to avoid unintended resets due to changes in option names
in future versions.
Parameters
----------
pat : str/regex
If specified only options matching ``pat*`` will be reset.
Pass ``"all"`` as argument to reset all options.
.. warning::
Partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
No return value.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option or options.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the
:ref:`User Guide <options.available>`.
Examples
--------
>>> pd.reset_option("display.max_columns") # doctest: +SKIP
|
python
|
pandas/_config/config.py
| 344
|
[
"pat"
] |
None
| true
| 6
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_maybe_convert_timedelta
|
def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:
"""
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
"""
if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):
if isinstance(self.freq, (Tick, Day)):
# _check_timedeltalike_freq_compat will raise if incompatible
delta = self._data._check_timedeltalike_freq_compat(other)
return delta
elif isinstance(other, BaseOffset):
if other.base == self.freq.base:
return other.n
raise raise_on_incompatible(self, other)
elif is_integer(other):
assert isinstance(other, int)
return other
# raise when input doesn't have freq
raise raise_on_incompatible(self, None)
|
Convert timedelta-like input to an integer multiple of self.freq
Parameters
----------
other : timedelta, np.timedelta64, DateOffset, int, np.ndarray
Returns
-------
converted : int, np.ndarray[int64]
Raises
------
IncompatibleFrequency : if the input cannot be written as a multiple
of self.freq. Note IncompatibleFrequency subclasses ValueError.
|
python
|
pandas/core/indexes/period.py
| 363
|
[
"self",
"other"
] |
int | npt.NDArray[np.int64]
| true
| 6
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
from_tuples
|
def from_tuples(
cls,
data,
closed: IntervalClosedType = "right",
name: Hashable | None = None,
copy: bool = False,
dtype: Dtype | None = None,
) -> IntervalIndex:
"""
Construct an IntervalIndex from an array-like of tuples.
Parameters
----------
data : array-like (1-dimensional)
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
name : str, optional
Name of the resulting IntervalIndex.
copy : bool, default False
By-default copy the data, this is compat only and ignored.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
IntervalIndex
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
right array.
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits.
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
dtype='interval[int64, right]')
"""
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
return cls._simple_new(arr, name=name)
|
Construct an IntervalIndex from an array-like of tuples.
Parameters
----------
data : array-like (1-dimensional)
Array of tuples.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
name : str, optional
Name of the resulting IntervalIndex.
copy : bool, default False
By-default copy the data, this is compat only and ignored.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
IntervalIndex
See Also
--------
interval_range : Function to create a fixed frequency IntervalIndex.
IntervalIndex.from_arrays : Construct an IntervalIndex from a left and
right array.
IntervalIndex.from_breaks : Construct an IntervalIndex from an array of
splits.
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
dtype='interval[int64, right]')
|
python
|
pandas/core/indexes/interval.py
| 397
|
[
"cls",
"data",
"closed",
"name",
"copy",
"dtype"
] |
IntervalIndex
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
match
|
public Optional<String> match() {
return this.match;
}
|
@return the optional match string, where:
if present, the name that's matched exactly
if empty, matches the default name
if null, matches any specified name
|
java
|
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaFilterComponent.java
| 88
|
[] | true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getAnnotation
|
AnnotationMirror getAnnotation(Element element, String type) {
if (element != null) {
for (AnnotationMirror annotation : element.getAnnotationMirrors()) {
if (type.equals(annotation.getAnnotationType().toString())) {
return annotation;
}
}
}
return null;
}
|
Resolve the {@link SourceMetadata} for the specified property.
@param field the field of the property (can be {@code null})
@param getter the getter of the property (can be {@code null})
@return the {@link SourceMetadata} for the specified property
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
| 258
|
[
"element",
"type"
] |
AnnotationMirror
| true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
requestUpdateForNewTopics
|
public synchronized int requestUpdateForNewTopics() {
// Override the timestamp of last refresh to let immediate update.
this.lastRefreshMs = 0;
this.needPartialUpdate = true;
this.equivalentResponseCount = 0;
this.requestVersion++;
return this.updateVersion;
}
|
Request an immediate update of the current cluster metadata info, because the caller is interested in
metadata that is being newly requested.
@return The current updateVersion before the update
|
java
|
clients/src/main/java/org/apache/kafka/clients/Metadata.java
| 213
|
[] | true
| 1
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
fix_invalid
|
def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
"""
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data=[--, -1.0, nan, inf],
mask=[ True, False, False, False],
fill_value=1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data=[--, -1.0, --, --],
mask=[ True, False, True, True],
fill_value=1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])
>>> x.data
array([ 1., -1., nan, inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
invalid = np.logical_not(np.isfinite(a._data))
if not invalid.any():
return a
a._mask |= invalid
if fill_value is None:
fill_value = a.fill_value
a._data[invalid] = fill_value
return a
|
Return input with invalid data masked and replaced by a fill value.
Invalid data means values of `nan`, `inf`, etc.
Parameters
----------
a : array_like
Input array, a (subclass of) ndarray.
mask : sequence, optional
Mask. Must be convertible to an array of booleans with the same
shape as `data`. True indicates a masked (i.e. invalid) data.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
fill_value : scalar, optional
Value used for fixing invalid data. Default is None, in which case
the ``a.fill_value`` is used.
Returns
-------
b : MaskedArray
The input array with invalid entries fixed.
Notes
-----
A copy is performed by default.
Examples
--------
>>> import numpy as np
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
masked_array(data=[--, -1.0, nan, inf],
mask=[ True, False, False, False],
fill_value=1e+20)
>>> np.ma.fix_invalid(x)
masked_array(data=[--, -1.0, --, --],
mask=[ True, False, True, True],
fill_value=1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])
>>> x.data
array([ 1., -1., nan, inf])
|
python
|
numpy/ma/core.py
| 763
|
[
"a",
"mask",
"copy",
"fill_value"
] | false
| 3
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
parseProperties
|
public Set<Property> parseProperties(@Nullable final List<String> propertyNames) {
if (propertyNames != null) {
final Set<Property> parsedProperties = new HashSet<>();
for (String propertyName : propertyNames) {
parsedProperties.add(Property.parseProperty(this.properties, propertyName)); // n.b. this throws if a property is invalid
}
return Set.copyOf(parsedProperties);
} else {
// if propertyNames is null, then use the default properties
return this.defaultProperties;
}
}
|
Parse the given list of property names.
@param propertyNames a list of property names to parse, or null to use the default properties for this database
@throws IllegalArgumentException if any of the property names are not valid
@return a set of parsed and validated properties
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/Database.java
| 232
|
[
"propertyNames"
] | true
| 2
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
mean
|
def mean(self, axis: Axis = 0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
nv.validate_mean(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
ct = len(valid_vals)
if self._null_fill_value:
return sp_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
|
Mean of non-NA/null values
Returns
-------
mean : float
|
python
|
pandas/core/arrays/sparse/array.py
| 1,581
|
[
"self",
"axis"
] | true
| 3
| 6.56
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
servicesByState
|
ImmutableSetMultimap<State, Service> servicesByState() {
ImmutableSetMultimap.Builder<State, Service> builder = ImmutableSetMultimap.builder();
monitor.enter();
try {
for (Entry<State, Service> entry : servicesByState.entries()) {
if (!(entry.getValue() instanceof NoOpService)) {
builder.put(entry);
}
}
} finally {
monitor.leave();
}
return builder.build();
}
|
Marks the {@link State} as ready to receive transitions. Returns true if no transitions have
been observed yet.
|
java
|
android/guava/src/com/google/common/util/concurrent/ServiceManager.java
| 630
|
[] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
has_fit_parameter
|
def has_fit_parameter(estimator, parameter):
"""Check whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter : str
The searched parameter.
Returns
-------
is_parameter : bool
Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.utils.validation import has_fit_parameter
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return (
# This is used during test collection in common tests. The
# hasattr(estimator, "fit") makes it so that we don't fail for an estimator
# that does not have a `fit` method during collection of checks. The right
# checks will fail later.
hasattr(estimator, "fit") and parameter in signature(estimator.fit).parameters
)
|
Check whether the estimator's fit method supports the given parameter.
Parameters
----------
estimator : object
An estimator to inspect.
parameter : str
The searched parameter.
Returns
-------
is_parameter : bool
Whether the parameter was found to be a named parameter of the
estimator's fit method.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.utils.validation import has_fit_parameter
>>> has_fit_parameter(SVC(), "sample_weight")
True
|
python
|
sklearn/utils/validation.py
| 1,472
|
[
"estimator",
"parameter"
] | false
| 2
| 7.2
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
visitForInStatement
|
function visitForInStatement(node: ForInStatement): VisitResult<Statement> {
if (isVariableDeclarationList(node.initializer) && !(node.initializer.flags & NodeFlags.BlockScoped)) {
const exportStatements = appendExportsOfVariableDeclarationList(/*statements*/ undefined, node.initializer, /*isForInOrOfInitializer*/ true);
if (some(exportStatements)) {
const initializer = visitNode(node.initializer, discardedValueVisitor, isForInitializer);
const expression = visitNode(node.expression, visitor, isExpression);
const body = visitIterationBody(node.statement, topLevelNestedVisitor, context);
const mergedBody = isBlock(body) ?
factory.updateBlock(body, [...exportStatements, ...body.statements]) :
factory.createBlock([...exportStatements, body], /*multiLine*/ true);
return factory.updateForInStatement(node, initializer, expression, mergedBody);
}
}
return factory.updateForInStatement(
node,
visitNode(node.initializer, discardedValueVisitor, isForInitializer),
visitNode(node.expression, visitor, isExpression),
visitIterationBody(node.statement, topLevelNestedVisitor, context),
);
}
|
Visits the body of a ForInStatement to hoist declarations.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/module.ts
| 932
|
[
"node"
] | true
| 5
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
check_status
|
def check_status(
self,
job_name: str,
key: str,
describe_function: Callable,
check_interval: int,
max_ingestion_time: int | None = None,
non_terminal_states: set | None = None,
) -> dict:
"""
Check status of a SageMaker resource.
:param job_name: name of the resource to check status, can be a job but
also pipeline for instance.
:param key: the key of the response dict that points to the state
:param describe_function: the function used to retrieve the status
:param args: the arguments for the function
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker resource
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker resources that run longer than this will fail. Setting
this to None implies no timeout for any SageMaker resource.
:param non_terminal_states: the set of nonterminal states
:return: response of describe call after resource is done
"""
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
while True:
time.sleep(check_interval)
sec += check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info("Resource still running for %s seconds... current status is %s", sec, status)
except KeyError:
raise AirflowException("Could not get status of the SageMaker resource")
except ClientError:
raise AirflowException("AWS request failed, check logs for more info")
if status in self.failed_states:
raise AirflowException(f"SageMaker resource failed because {response['FailureReason']}")
if status not in non_terminal_states:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the resource gets killed if the max ingestion time is exceeded
raise AirflowException(f"SageMaker resource took more than {max_ingestion_time} seconds")
self.log.info("SageMaker resource completed")
return response
|
Check status of a SageMaker resource.
:param job_name: name of the resource to check status, can be a job but
also pipeline for instance.
:param key: the key of the response dict that points to the state
:param describe_function: the function used to retrieve the status
:param args: the arguments for the function
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker resource
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker resources that run longer than this will fail. Setting
this to None implies no timeout for any SageMaker resource.
:param non_terminal_states: the set of nonterminal states
:return: response of describe call after resource is done
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 712
|
[
"self",
"job_name",
"key",
"describe_function",
"check_interval",
"max_ingestion_time",
"non_terminal_states"
] |
dict
| true
| 7
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
restrict
|
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : bool, default=False
Whether support is a list of indices.
Returns
-------
self : object
DictVectorizer class instance.
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names_out()
array(['bar', 'baz', 'foo'], ...)
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names_out()
array(['bar', 'foo'], ...)
"""
check_is_fitted(self, "feature_names_")
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [
f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
]
return self
|
Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : bool, default=False
Whether support is a list of indices.
Returns
-------
self : object
DictVectorizer class instance.
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names_out()
array(['bar', 'baz', 'foo'], ...)
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names_out()
array(['bar', 'foo'], ...)
|
python
|
sklearn/feature_extraction/_dict_vectorizer.py
| 403
|
[
"self",
"support",
"indices"
] | false
| 3
| 7.04
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
bucket_all_gather_by_mb
|
def bucket_all_gather_by_mb(
gm: torch.fx.GraphModule,
bucket_cap_mb_by_bucket_idx: Callable[[int], float],
filter_wait_node: Callable[[torch.fx.Node], bool] | None = None,
mode: BucketMode = "default",
) -> list[list[torch.fx.Node]]:
"""
Identifies all all_gather nodes and groups them into buckets,
based on size limit `bucket_cap_mb_by_bucket_idx`.
Args:
gm (torch.fx.GraphModule): GraphModule where to bucket all_gathers.
bucket_cap_mb_by_bucket_idx (Callable[[int], float]): Callable to specify cap of the bucket
in megabytes by bucket idx. The idea of `bucket_cap_mb_by_bucket_idx` is to allow
to specify different sizes of the buckets at the start,
as first all_gather is usually exposed. Interface of bucket_cap_mb_by_bucket_idx
is `bucket_cap_mb_by_bucket_idx_default` function that is default value for `bucket_cap_mb_by_bucket_idx`.
filter_wait_node (Callable[[torch.fx.Node], bool] | None): If specified,
only all_gather nodes with wait_node that satisfy `filter_wait_node` will be bucketed.
Returns:
list[list[torch.fx.Node]]: List of buckets, where each bucket is a list of all_gather nodes.
"""
group_key_fn = (
_ag_group_key_multidtype if mode and "multidtype" in mode else _ag_group_key
)
return greedy_bucket_collective_by_mb(
gm,
bucket_cap_mb_by_bucket_idx,
is_all_gather_into_tensor,
group_key_fn,
filter_wait_node,
)
|
Identifies all all_gather nodes and groups them into buckets,
based on size limit `bucket_cap_mb_by_bucket_idx`.
Args:
gm (torch.fx.GraphModule): GraphModule where to bucket all_gathers.
bucket_cap_mb_by_bucket_idx (Callable[[int], float]): Callable to specify cap of the bucket
in megabytes by bucket idx. The idea of `bucket_cap_mb_by_bucket_idx` is to allow
to specify different sizes of the buckets at the start,
as first all_gather is usually exposed. Interface of bucket_cap_mb_by_bucket_idx
is `bucket_cap_mb_by_bucket_idx_default` function that is default value for `bucket_cap_mb_by_bucket_idx`.
filter_wait_node (Callable[[torch.fx.Node], bool] | None): If specified,
only all_gather nodes with wait_node that satisfy `filter_wait_node` will be bucketed.
Returns:
list[list[torch.fx.Node]]: List of buckets, where each bucket is a list of all_gather nodes.
|
python
|
torch/_inductor/fx_passes/bucketing.py
| 350
|
[
"gm",
"bucket_cap_mb_by_bucket_idx",
"filter_wait_node",
"mode"
] |
list[list[torch.fx.Node]]
| true
| 3
| 7.44
|
pytorch/pytorch
| 96,034
|
google
| false
|
offsetsForTimes
|
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch, Duration timeout) {
return delegate.offsetsForTimes(timestampsToSearch, timeout);
}
|
Look up the offsets for the given partitions by timestamp. The returned offset for each partition is the
earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition.
This is a blocking call. The consumer does not have to be assigned the partitions.
If the message format version in a partition is before 0.10.0, i.e. the messages do not have timestamps, null
will be returned for that partition.
@param timestampsToSearch the mapping from partition to the timestamp to look up.
@param timeout The maximum amount of time to await retrieval of the offsets
@return a mapping from partition to the timestamp and offset of the first message with timestamp greater
than or equal to the target timestamp. If the timestamp and offset for a specific partition cannot be found within
timeout, and no corresponding message exists, the entry in the returned map will be {@code null}
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details
@throws IllegalArgumentException if the target timestamp is negative
@throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before
expiration of the passed timeout
@throws org.apache.kafka.common.errors.UnsupportedVersionException if the broker does not support looking up
the offsets by timestamp
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,613
|
[
"timestampsToSearch",
"timeout"
] | true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
argmax
|
def argmax(self, axis=None, fill_value=None, out=None, *,
keepdims=np._NoValue):
"""
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : scalar or None, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> import numpy as np
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
"""
if fill_value is None:
fill_value = maximum_fill_value(self._data)
d = self.filled(fill_value).view(ndarray)
keepdims = False if keepdims is np._NoValue else bool(keepdims)
return d.argmax(axis, out=out, keepdims=keepdims)
|
Returns array of indices of the maximum values along the given axis.
Masked values are treated as if they had the value fill_value.
Parameters
----------
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis
fill_value : scalar or None, optional
Value used to fill in the masked values. If None, the output of
maximum_fill_value(self._data) is used instead.
out : {None, array}, optional
Array into which the result can be placed. Its type is preserved
and it must be of the right shape to hold the output.
Returns
-------
index_array : {integer_array}
Examples
--------
>>> import numpy as np
>>> a = np.arange(6).reshape(2,3)
>>> a.argmax()
5
>>> a.argmax(0)
array([1, 1, 1])
>>> a.argmax(1)
array([2, 2])
|
python
|
numpy/ma/core.py
| 5,733
|
[
"self",
"axis",
"fill_value",
"out",
"keepdims"
] | false
| 3
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
make_sparse_coded_signal
|
def make_sparse_coded_signal(
n_samples,
*,
n_components,
n_features,
n_nonzero_coefs,
random_state=None,
):
"""Generate a signal as a sparse combination of dictionary elements.
Returns matrices `Y`, `D` and `X` such that `Y = XD` where `X` is of shape
`(n_samples, n_components)`, `D` is of shape `(n_components, n_features)`, and
each row of `X` has exactly `n_nonzero_coefs` non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
Number of samples to generate.
n_components : int
Number of components in the dictionary.
n_features : int
Number of features of the dataset to generate.
n_nonzero_coefs : int
Number of active (non-zero) coefficients in each sample.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : ndarray of shape (n_samples, n_features)
The encoded signal (Y).
dictionary : ndarray of shape (n_components, n_features)
The dictionary with normalized components (D).
code : ndarray of shape (n_samples, n_components)
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
Examples
--------
>>> from sklearn.datasets import make_sparse_coded_signal
>>> data, dictionary, code = make_sparse_coded_signal(
... n_samples=50,
... n_components=100,
... n_features=10,
... n_nonzero_coefs=4,
... random_state=0
... )
>>> data.shape
(50, 10)
>>> dictionary.shape
(100, 10)
>>> code.shape
(50, 100)
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.standard_normal(size=(n_features, n_components))
D /= np.sqrt(np.sum((D**2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.standard_normal(size=n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
# Transpose to have shapes consistent with the rest of the API
Y, D, X = Y.T, D.T, X.T
return map(np.squeeze, (Y, D, X))
|
Generate a signal as a sparse combination of dictionary elements.
Returns matrices `Y`, `D` and `X` such that `Y = XD` where `X` is of shape
`(n_samples, n_components)`, `D` is of shape `(n_components, n_features)`, and
each row of `X` has exactly `n_nonzero_coefs` non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
Number of samples to generate.
n_components : int
Number of components in the dictionary.
n_features : int
Number of features of the dataset to generate.
n_nonzero_coefs : int
Number of active (non-zero) coefficients in each sample.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : ndarray of shape (n_samples, n_features)
The encoded signal (Y).
dictionary : ndarray of shape (n_components, n_features)
The dictionary with normalized components (D).
code : ndarray of shape (n_samples, n_components)
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
Examples
--------
>>> from sklearn.datasets import make_sparse_coded_signal
>>> data, dictionary, code = make_sparse_coded_signal(
... n_samples=50,
... n_components=100,
... n_features=10,
... n_nonzero_coefs=4,
... random_state=0
... )
>>> data.shape
(50, 10)
>>> dictionary.shape
(100, 10)
>>> code.shape
(50, 100)
|
python
|
sklearn/datasets/_samples_generator.py
| 1,529
|
[
"n_samples",
"n_components",
"n_features",
"n_nonzero_coefs",
"random_state"
] | false
| 2
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_init_file
|
def _init_file(self, ti, *, identifier: str | None = None):
"""
Create log directory and give it permissions that are configured.
See above _prepare_log_folder method for more detailed explanation.
:param ti: task instance object
:return: relative log path of the given task instance
"""
new_file_permissions = int(
conf.get("logging", "file_task_handler_new_file_permissions", fallback="0o664"), 8
)
local_relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, local_relative_path)
if identifier:
full_path += f".{identifier}.log"
elif ti.is_trigger_log_context is True:
# if this is true, we're invoked via set_context in the context of
# setting up individual trigger logging. return trigger log path.
full_path = self.add_triggerer_suffix(full_path=full_path, job_id=ti.triggerer_job.id)
new_folder_permissions = int(
conf.get("logging", "file_task_handler_new_folder_permissions", fallback="0o775"), 8
)
self._prepare_log_folder(Path(full_path).parent, new_folder_permissions)
if not os.path.exists(full_path):
open(full_path, "a").close()
try:
os.chmod(full_path, new_file_permissions)
except OSError as e:
logger.warning("OSError while changing ownership of the log file. ", e)
return full_path
|
Create log directory and give it permissions that are configured.
See above _prepare_log_folder method for more detailed explanation.
:param ti: task instance object
:return: relative log path of the given task instance
|
python
|
airflow-core/src/airflow/utils/log/file_task_handler.py
| 821
|
[
"self",
"ti",
"identifier"
] | true
| 4
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
max
|
public static short max(short a, final short b, final short c) {
if (b > a) {
a = b;
}
if (c > a) {
a = c;
}
return a;
}
|
Gets the maximum of three {@code short} values.
@param a value 1.
@param b value 2.
@param c value 3.
@return the largest of the values.
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,084
|
[
"a",
"b",
"c"
] | true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
merge
|
def merge(*lists):
"""
Merge lists while trying to keep the relative order of the elements.
Warn if the lists have the same elements in a different relative order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
"""
ts = TopologicalSorter()
for head, *tail in filter(None, lists):
ts.add(head) # Ensure that the first items are included.
for item in tail:
if head != item: # Avoid circular dependency to self.
ts.add(item, head)
head = item
try:
return list(ts.static_order())
except CycleError:
warnings.warn(
"Detected duplicate Media files in an opposite order: {}".format(
", ".join(repr(list_) for list_ in lists)
),
MediaOrderConflictWarning,
)
return list(dict.fromkeys(chain.from_iterable(filter(None, lists))))
|
Merge lists while trying to keep the relative order of the elements.
Warn if the lists have the same elements in a different relative order.
For static assets it can be important to have them included in the DOM
in a certain order. In JavaScript you may not be able to reference a
global or in CSS you might want to override a style.
|
python
|
django/forms/widgets.py
| 201
|
[] | false
| 4
| 6.08
|
django/django
| 86,204
|
unknown
| false
|
|
prependIfMissing
|
@Deprecated
public static String prependIfMissing(final String str, final CharSequence prefix, final CharSequence... prefixes) {
return Strings.CS.prependIfMissing(str, prefix, prefixes);
}
|
Prepends the prefix to the start of the string if the string does not already start with any of the prefixes.
<pre>
StringUtils.prependIfMissing(null, null) = null
StringUtils.prependIfMissing("abc", null) = "abc"
StringUtils.prependIfMissing("", "xyz") = "xyz"
StringUtils.prependIfMissing("abc", "xyz") = "xyzabc"
StringUtils.prependIfMissing("xyzabc", "xyz") = "xyzabc"
StringUtils.prependIfMissing("XYZabc", "xyz") = "xyzXYZabc"
</pre>
<p>
With additional prefixes,
</p>
<pre>
StringUtils.prependIfMissing(null, null, null) = null
StringUtils.prependIfMissing("abc", null, null) = "abc"
StringUtils.prependIfMissing("", "xyz", null) = "xyz"
StringUtils.prependIfMissing("abc", "xyz", new CharSequence[]{null}) = "xyzabc"
StringUtils.prependIfMissing("abc", "xyz", "") = "abc"
StringUtils.prependIfMissing("abc", "xyz", "mno") = "xyzabc"
StringUtils.prependIfMissing("xyzabc", "xyz", "mno") = "xyzabc"
StringUtils.prependIfMissing("mnoabc", "xyz", "mno") = "mnoabc"
StringUtils.prependIfMissing("XYZabc", "xyz", "mno") = "xyzXYZabc"
StringUtils.prependIfMissing("MNOabc", "xyz", "mno") = "xyzMNOabc"
</pre>
@param str The string.
@param prefix The prefix to prepend to the start of the string.
@param prefixes Additional prefixes that are valid.
@return A new String if prefix was prepended, the same string otherwise.
@since 3.2
@deprecated Use {@link Strings#prependIfMissing(String, CharSequence, CharSequence...) Strings.CS.prependIfMissing(String, CharSequence,
CharSequence...)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,607
|
[
"str",
"prefix"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isTypeAnnotationContext
|
function isTypeAnnotationContext(context: FormattingContext): boolean {
const contextKind = context.contextNode.kind;
return contextKind === SyntaxKind.PropertyDeclaration ||
contextKind === SyntaxKind.PropertySignature ||
contextKind === SyntaxKind.Parameter ||
contextKind === SyntaxKind.VariableDeclaration ||
isFunctionLikeKind(contextKind);
}
|
A rule takes a two tokens (left/right) and a particular context
for which you're meant to look at them. You then declare what should the
whitespace annotation be between these tokens via the action param.
@param debugName Name to print
@param left The left side of the comparison
@param right The right side of the comparison
@param context A set of filters to narrow down the space in which this formatter rule applies
@param action a declaration of the expected whitespace
@param flags whether the rule deletes a line or not, defaults to no-op
|
typescript
|
src/services/formatting/rules.ts
| 558
|
[
"context"
] | true
| 5
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
close
|
@Override
public void close() {
lock.lock();
try {
idempotentCloser.close(
this::drainAll,
() -> log.warn("The fetch buffer was already closed")
);
} finally {
lock.unlock();
}
}
|
Return the set of {@link TopicIdPartition partitions} for which we have data in the buffer.
@return {@link TopicIdPartition Partition} set
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchBuffer.java
| 206
|
[] |
void
| true
| 1
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
AST_MATCHER
|
AST_MATCHER(Decl, declHasNoReturnAttr) {
return Node.hasAttr<NoReturnAttr>() || Node.hasAttr<CXX11NoReturnAttr>() ||
Node.hasAttr<C11NoReturnAttr>();
}
|
matches a Decl if it has a "no return" attribute of any kind
|
cpp
|
clang-tools-extra/clang-tidy/bugprone/InfiniteLoopCheck.cpp
| 25
|
[] | true
| 3
| 6.48
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
toString
|
@Override
public String toString() {
ToStringCreator creator = new ToStringCreator(this);
KeyStore keyStore = this.keyStore.get();
creator.append("keyStore.type", (keyStore != null) ? keyStore.getType() : "none");
String keyStorePassword = getKeyStorePassword();
creator.append("keyStorePassword", (keyStorePassword != null) ? "******" : null);
KeyStore trustStore = this.trustStore.get();
creator.append("trustStore.type", (trustStore != null) ? trustStore.getType() : "none");
return creator.toString();
}
|
Create a new {@link JksSslStoreBundle} instance.
@param keyStoreDetails the key store details
@param trustStoreDetails the trust store details
@param resourceLoader the resource loader used to load content
@since 3.3.5
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/jks/JksSslStoreBundle.java
| 146
|
[] |
String
| true
| 4
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
joinWith
|
public static String joinWith(final String delimiter, final Object... array) {
if (array == null) {
throw new IllegalArgumentException("Object varargs must not be null");
}
return join(array, delimiter);
}
|
Joins the elements of the provided varargs into a single String containing the provided elements.
<p>
No delimiter is added before or after the list. {@code null} elements and separator are treated as empty Strings ("").
</p>
<pre>
StringUtils.joinWith(",", {"a", "b"}) = "a,b"
StringUtils.joinWith(",", {"a", "b",""}) = "a,b,"
StringUtils.joinWith(",", {"a", null, "b"}) = "a,,b"
StringUtils.joinWith(null, {"a", "b"}) = "ab"
</pre>
@param delimiter the separator character to use, null treated as "".
@param array the varargs providing the values to join together. {@code null} elements are treated as "".
@return the joined String.
@throws IllegalArgumentException if a null varargs is provided.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 4,725
|
[
"delimiter"
] |
String
| true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
invokeExactMethod
|
public static Object invokeExactMethod(final Object object, final String methodName, final Object[] args, final Class<?>[] parameterTypes)
throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
final Class<?> cls = Objects.requireNonNull(object, "object").getClass();
final Class<?>[] paramTypes = ArrayUtils.nullToEmpty(parameterTypes);
final Method method = getAccessibleMethod(cls, methodName, paramTypes);
requireNonNull(method, cls, methodName, paramTypes);
return method.invoke(object, ArrayUtils.nullToEmpty(args));
}
|
Invokes a method whose parameter types match exactly the parameter types given.
<p>
This uses reflection to invoke the method obtained from a call to {@link #getAccessibleMethod(Class, String, Class[])}.
</p>
@param object Invokes a method on this object.
@param methodName Gets a method with this name.
@param args Method arguments - treat null as empty array.
@param parameterTypes Match these parameters - treat {@code null} as empty array.
@return The value returned by the invoked method.
@throws NoSuchMethodException Thrown if there is no such accessible method.
@throws IllegalAccessException Thrown if this found {@code Method} is enforcing Java language access control and the underlying method is
inaccessible.
@throws IllegalArgumentException Thrown if:
<ul>
<li>the found {@code Method} is an instance method and the specified {@code object} argument is not an instance of
the class or interface declaring the underlying method (or of a subclass or interface implementor);</li>
<li>the number of actual and formal parameters differ;</li>
<li>an unwrapping conversion for primitive arguments fails; or</li>
<li>after possible unwrapping, a parameter value can't be converted to the corresponding formal parameter type by a
method invocation conversion.</li>
</ul>
@throws InvocationTargetException Thrown if the underlying method throws an exception.
@throws NullPointerException Thrown if the specified {@code object} is null.
@throws ExceptionInInitializerError Thrown if the initialization provoked by this method fails.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 625
|
[
"object",
"methodName",
"args",
"parameterTypes"
] |
Object
| true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
visitor
|
function visitor(node: Node): VisitResult<Node | undefined> {
if (node.transformFlags & TransformFlags.ContainsJsx) {
return visitorWorker(node);
}
else {
return node;
}
}
|
Transform JSX-specific syntax in a SourceFile.
@param node A SourceFile node.
|
typescript
|
src/compiler/transformers/jsx.ts
| 207
|
[
"node"
] | true
| 3
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
nullToEmpty
|
public static String[] nullToEmpty(final String[] array) {
return nullTo(array, EMPTY_STRING_ARRAY);
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,622
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
registerJavaDate
|
private void registerJavaDate(DateTimeFormatters dateTimeFormatters) {
DateFormatterRegistrar dateFormatterRegistrar = new DateFormatterRegistrar();
String datePattern = dateTimeFormatters.getDatePattern();
if (datePattern != null) {
DateFormatter dateFormatter = new DateFormatter(datePattern);
dateFormatterRegistrar.setFormatter(dateFormatter);
}
dateFormatterRegistrar.registerFormatters(this);
}
|
Create a new WebConversionService that configures formatters with the provided
date, time, and date-time formats, or registers the default if no custom format is
provided.
@param dateTimeFormatters the formatters to use for date, time, and date-time
formatting
@since 2.3.0
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/format/WebConversionService.java
| 91
|
[
"dateTimeFormatters"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
channelBuilderConfigs
|
@SuppressWarnings("unchecked")
static Map<String, Object> channelBuilderConfigs(final AbstractConfig config, final ListenerName listenerName) {
Map<String, Object> parsedConfigs;
if (listenerName == null)
parsedConfigs = (Map<String, Object>) config.values();
else
parsedConfigs = config.valuesWithPrefixOverride(listenerName.configPrefix());
config.originals().entrySet().stream()
.filter(e -> !parsedConfigs.containsKey(e.getKey())) // exclude already parsed configs
// exclude already parsed listener prefix configs
.filter(e -> !(listenerName != null && e.getKey().startsWith(listenerName.configPrefix()) &&
parsedConfigs.containsKey(e.getKey().substring(listenerName.configPrefix().length()))))
// exclude keys like `{mechanism}.some.prop` if "listener.name." prefix is present and key `some.prop` exists in parsed configs.
.filter(e -> !(listenerName != null && parsedConfigs.containsKey(e.getKey().substring(e.getKey().indexOf('.') + 1))))
.forEach(e -> parsedConfigs.put(e.getKey(), e.getValue()));
return parsedConfigs;
}
|
@return a mutable RecordingMap. The elements got from RecordingMap are marked as "used".
|
java
|
clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java
| 195
|
[
"config",
"listenerName"
] | true
| 5
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
put
|
@CanIgnoreReturnValue
@Override
public boolean put(@ParametricNullness K key, @ParametricNullness V value) {
addNode(key, value, null);
return true;
}
|
Stores a key-value pair in the multimap.
@param key key to store in the multimap
@param value value to store in the multimap
@return {@code true} always
|
java
|
android/guava/src/com/google/common/collect/LinkedListMultimap.java
| 601
|
[
"key",
"value"
] | true
| 1
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
advisorsPreFiltered
|
protected boolean advisorsPreFiltered() {
return false;
}
|
Return whether the Advisors returned by the subclass are pre-filtered
to match the bean's target class already, allowing the ClassFilter check
to be skipped when building advisors chains for AOP invocations.
<p>Default is {@code false}. Subclasses may override this if they
will always return pre-filtered Advisors.
@return whether the Advisors are pre-filtered
@see #getAdvicesAndAdvisorsForBean
@see org.springframework.aop.framework.Advised#setPreFiltered
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/autoproxy/AbstractAutoProxyCreator.java
| 520
|
[] | true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
clientInstanceId
|
@Override
public Uuid clientInstanceId(Duration timeout) {
return delegate.clientInstanceId(timeout);
}
|
Determines the client's unique client instance ID used for telemetry. This ID is unique to
this specific client instance and will not change after it is initially generated.
The ID is useful for correlating client operations with telemetry sent to the broker and
to its eventual monitoring destinations.
<p>
If telemetry is enabled, this will first require a connection to the cluster to generate
the unique client instance ID. This method waits up to {@code timeout} for the consumer
client to complete the request.
<p>
Client telemetry is controlled by the {@link ConsumerConfig#ENABLE_METRICS_PUSH_CONFIG}
configuration option.
@param timeout The maximum time to wait for consumer client to determine its client instance ID.
The value must be non-negative. Specifying a timeout of zero means do not
wait for the initial request to complete if it hasn't already.
@throws InterruptException If the thread is interrupted while blocked.
@throws KafkaException If an unexpected error occurs while trying to determine the client
instance ID, though this error does not necessarily imply the
consumer client is otherwise unusable.
@throws IllegalArgumentException If the {@code timeout} is negative.
@throws IllegalStateException If telemetry is not enabled ie, config `{@code enable.metrics.push}`
is set to `{@code false}`.
@return The client's assigned instance id used for metrics collection.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,390
|
[
"timeout"
] |
Uuid
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
get_config
|
def get_config(self, key: str) -> str | None:
"""
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key, self.config_lookup_pattern)
|
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/secrets/secrets_manager.py
| 241
|
[
"self",
"key"
] |
str | None
| true
| 2
| 7.44
|
apache/airflow
| 43,597
|
sphinx
| false
|
createMap
|
protected Map<String, Object> createMap() {
Map<String, Object> result = new LinkedHashMap<>();
process((properties, map) -> merge(result, map));
return result;
}
|
Template method that subclasses may override to construct the object
returned by this factory.
<p>Invoked lazily the first time {@link #getObject()} is invoked in
case of a shared singleton; else, on each {@link #getObject()} call.
<p>The default implementation returns the merged {@code Map} instance.
@return the object returned by this factory
@see #process(MatchCallback)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/YamlMapFactoryBean.java
| 121
|
[] | true
| 1
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.