function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
moveToNextAddress
|
private void moveToNextAddress() {
if (addresses.isEmpty())
return; // Avoid div0. List will initialize on next currentAddress() call
addressIndex = (addressIndex + 1) % addresses.size();
if (addressIndex == 0)
clearAddresses(); // Exhausted list. Re-resolve on next currentAddress() call
}
|
Jumps to the next available resolved address for this node. If no other addresses are available, marks the
list to be refreshed on the next {@link #currentAddress()} call.
|
java
|
clients/src/main/java/org/apache/kafka/clients/ClusterConnectionStates.java
| 525
|
[] |
void
| true
| 3
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
createEntrySet
|
@Override
ImmutableSet<Entry<K, V>> createEntrySet() {
return new EntrySet<>(this, alternatingKeysAndValues, 0, size);
}
|
Returns a hash table for the specified keys and values, and ensures that neither keys nor
values are null. This method may update {@code alternatingKeysAndValues} if there are duplicate
keys. If so, the return value will indicate how many entries are still valid, and will also
include a {@link Builder.DuplicateKey} in case duplicate keys are not allowed now or will not
be allowed on a later {@link Builder#buildOrThrow()} call.
@param keyOffset 1 if this is the reverse direction of a BiMap, 0 otherwise.
@return an {@code Object} that is a {@code byte[]}, {@code short[]}, or {@code int[]}, the
smallest possible to fit {@code tableSize}; or an {@code Object[]} where [0] is one of
these; [1] indicates how many element pairs in {@code alternatingKeysAndValues} are valid;
and [2] is a {@link Builder.DuplicateKey} for the first duplicate key encountered.
|
java
|
android/guava/src/com/google/common/collect/RegularImmutableMap.java
| 375
|
[] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
maybeRecordMetrics
|
private void maybeRecordMetrics(TopicPartition partition) {
unrecordedPartitions.remove(partition);
if (!unrecordedPartitions.isEmpty())
return;
// Record the metrics aggregated at the fetch level.
metricsManager.recordBytesFetched(fetchFetchMetrics.bytes);
metricsManager.recordRecordsFetched(fetchFetchMetrics.records);
// Also record the metrics aggregated on a per-topic basis.
for (Map.Entry<String, FetchMetrics> entry: perTopicFetchMetrics.entrySet()) {
String topic = entry.getKey();
FetchMetrics fetchMetrics = entry.getValue();
metricsManager.recordBytesFetched(topic, fetchMetrics.bytes);
metricsManager.recordRecordsFetched(topic, fetchMetrics.records);
}
}
|
Once we've detected that all of the {@link TopicPartition partitions} for the fetch have been handled, we
can then record the aggregated metrics values. This is done at the fetch level and on a per-topic basis.
@param partition {@link TopicPartition}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsAggregator.java
| 64
|
[
"partition"
] |
void
| true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
frequency
|
public double frequency(MetricConfig config, long now, double centerValue) {
purgeObsoleteSamples(config, now);
long totalCount = 0;
for (Sample sample : samples) {
totalCount += sample.eventCount;
}
if (totalCount == 0) {
return 0.0d;
}
// Add up all of the counts in the bin corresponding to the center value
float count = 0.0f;
int binNum = binScheme.toBin(centerValue);
for (Sample s : samples) {
HistogramSample sample = (HistogramSample) s;
float[] hist = sample.histogram.counts();
count += hist[binNum];
}
// Compute the ratio of counts to total counts
return count / (double) totalCount;
}
|
Return the computed frequency describing the number of occurrences of the values in the bucket for the given
center point, relative to the total number of occurrences in the samples.
@param config the metric configuration
@param now the current time in milliseconds
@param centerValue the value corresponding to the center point of the bucket
@return the frequency of the values in the bucket relative to the total number of samples
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/stats/Frequencies.java
| 127
|
[
"config",
"now",
"centerValue"
] | true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
operation_graph_to_networkx
|
def operation_graph_to_networkx(graph: OperationGraph):
"""
Convert operation graph to NetworkX graph for Python visualization.
Requires: pip install networkx matplotlib
"""
try:
import matplotlib.pyplot as plt
import networkx as nx
except ImportError:
print(
"⚠️ NetworkX/Matplotlib not installed. Run: pip install networkx matplotlib"
)
return
# Create directed graph
G = nx.DiGraph()
# Add nodes
for node_id, node in graph.nodes.items():
label = f"{node_id}\n{node.op_name}\ndepth {node.depth}"
G.add_node(node_id, label=label, node=node)
# Add edges based on the graph structure
for node_id, node in graph.nodes.items():
for input_node_id in node.input_nodes:
if input_node_id in graph.nodes: # Only add edges to nodes in the graph
G.add_edge(input_node_id, node_id)
# Plot
plt.figure(figsize=(12, 8))
pos = nx.spring_layout(G, k=2, iterations=50)
# Draw nodes with colors based on operation type
node_colors = []
for node_id in G.nodes():
node = graph.nodes[node_id]
if node.op_name.startswith("arg_"):
node_colors.append("lightblue")
elif node.op_name == "constant":
node_colors.append("lightgreen")
elif "aten" in node.op_name:
node_colors.append("lightyellow")
else:
node_colors.append("lightgray")
# Highlight root node
node_sizes = []
for node_id in G.nodes():
if node_id == graph.root_node_id:
node_sizes.append(2000) # Larger size for root
else:
node_sizes.append(1500)
nx.draw_networkx_nodes(G, pos, node_color=node_colors, node_size=node_sizes)
nx.draw_networkx_edges(G, pos, edge_color="gray", arrows=True, arrowsize=20)
# Draw labels
labels = {
node_id: f"{node_id}\n{graph.nodes[node_id].op_name}" for node_id in G.nodes()
}
nx.draw_networkx_labels(G, pos, labels, font_size=8)
plt.title("Operation Graph Visualization")
plt.axis("off")
plt.tight_layout()
plt.savefig("operation_graph_networkx.png", dpi=300, bbox_inches="tight")
plt.show()
print("✓ NetworkX graph visualization saved as operation_graph_networkx.png")
|
Convert operation graph to NetworkX graph for Python visualization.
Requires: pip install networkx matplotlib
|
python
|
tools/experimental/torchfuzz/visualize_graph.py
| 159
|
[
"graph"
] | true
| 13
| 6.16
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
|
subName
|
public ConfigurationPropertyName subName(int offset) {
if (offset == 0) {
return this;
}
if (offset == getNumberOfElements()) {
return EMPTY;
}
if (offset < 0 || offset > getNumberOfElements()) {
throw new IndexOutOfBoundsException("Offset: " + offset + ", NumberOfElements: " + getNumberOfElements());
}
return new ConfigurationPropertyName(this.elements.subElements(offset));
}
|
Return a new {@link ConfigurationPropertyName} by based on this name offset by
specific element index. For example, {@code subName(1)} on the name {@code foo.bar}
will return {@code bar}.
@param offset the element offset
@return the sub name
@since 2.5.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 266
|
[
"offset"
] |
ConfigurationPropertyName
| true
| 5
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
position
|
@Override
public long position(TopicPartition partition, final Duration timeout) {
return delegate.position(partition, timeout);
}
|
Get the offset of the <i>next record</i> that will be fetched (if a record with that offset exists).
This method may issue a remote call to the server if there is no current position
for the given partition.
<p>
This call will block until the position can be determined, an unrecoverable error is
encountered (in which case it is thrown to the caller), or the timeout expires.
@param partition The partition to get the position for
@param timeout The maximum amount of time to await determination of the current position
@return The current position of the consumer (that is, the offset of the next record to be fetched)
@throws IllegalStateException if the provided TopicPartition is not assigned to this consumer
@throws org.apache.kafka.clients.consumer.InvalidOffsetException if no offset is currently defined for
the partition
@throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
function is called
@throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
this function is called
@throws org.apache.kafka.common.errors.TimeoutException if the position cannot be determined before the
passed timeout expires
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
configured groupId. See the exception for more details
@throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,300
|
[
"partition",
"timeout"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
writeDefaultBatchHeader
|
private int writeDefaultBatchHeader() {
ensureOpenForRecordBatchWrite();
ByteBuffer buffer = bufferStream.buffer();
int pos = buffer.position();
buffer.position(initialPosition);
int size = pos - initialPosition;
int writtenCompressed = size - DefaultRecordBatch.RECORD_BATCH_OVERHEAD;
int offsetDelta = (int) (lastOffset - baseOffset);
final long maxTimestamp;
if (timestampType == TimestampType.LOG_APPEND_TIME)
maxTimestamp = logAppendTime;
else
maxTimestamp = this.maxTimestamp;
DefaultRecordBatch.writeHeader(buffer, baseOffset, offsetDelta, size, magic, compression.type(), timestampType,
baseTimestamp, maxTimestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch,
hasDeleteHorizonMs(), partitionLeaderEpoch, numRecords);
buffer.position(pos);
return writtenCompressed;
}
|
Write the header to the default batch.
@return the written compressed bytes.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 408
|
[] | true
| 2
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
close
|
@Override
public void close() throws IOException {
if (generator.isClosed()) {
return;
}
JsonStreamContext context = generator.getOutputContext();
if ((context != null) && (context.inRoot() == false)) {
throw new IOException("Unclosed object or array found");
}
if (writeLineFeedAtEnd) {
flush();
// Bypass generator to always write the line feed
getLowLevelGenerator().writeRaw(LF);
}
generator.close();
}
|
Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}.
|
java
|
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java
| 593
|
[] |
void
| true
| 5
| 6.08
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
configKeys
|
public Map<String, ConfigKey> configKeys() {
return configKeys;
}
|
Get the configuration keys
@return a map containing all configuration keys
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 477
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
set24BitColor
|
function set24BitColor(styleCodes: number[], colorType: 'foreground' | 'background' | 'underline'): void {
if (styleCodes.length >= 5 &&
styleCodes[2] >= 0 && styleCodes[2] <= 255 &&
styleCodes[3] >= 0 && styleCodes[3] <= 255 &&
styleCodes[4] >= 0 && styleCodes[4] <= 255) {
const customColor = new RGBA(styleCodes[2], styleCodes[3], styleCodes[4]);
changeColor(colorType, customColor);
}
}
|
Calculate and set styling for complicated 24-bit ANSI color codes.
@param styleCodes Full list of integer codes that make up the full ANSI
sequence, including the two defining codes and the three RGB codes.
@param colorType If `'foreground'`, will set foreground color, if
`'background'`, will set background color, and if it is `'underline'`
will set the underline color.
@see {@link https://en.wikipedia.org/wiki/ANSI_escape_code#24-bit }
|
typescript
|
extensions/notebook-renderers/src/ansi.ts
| 304
|
[
"styleCodes",
"colorType"
] | true
| 8
| 6.56
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
staged_score
|
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
Labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Yields
------
z : float
"""
X = self._check_X(X)
for y_pred in self.staged_predict(X):
if is_classifier(self):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
|
Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
Labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Yields
------
z : float
|
python
|
sklearn/ensemble/_weight_boosting.py
| 244
|
[
"self",
"X",
"y",
"sample_weight"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
proxy_manager_for
|
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith("socks"):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs,
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs,
)
return manager
|
Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
|
python
|
src/requests/adapters.py
| 242
|
[
"self",
"proxy"
] | false
| 4
| 6.64
|
psf/requests
| 53,586
|
sphinx
| false
|
|
add
|
@Override
@CanIgnoreReturnValue
public boolean add(E e) {
checkNotNull(e); // check before removing
if (maxSize == 0) {
return true;
}
if (size() == maxSize) {
delegate.remove();
}
delegate.add(e);
return true;
}
|
Adds the given element to this queue. If the queue is currently full, the element at the head
of the queue is evicted to make room.
@return {@code true} always
|
java
|
android/guava/src/com/google/common/collect/EvictingQueue.java
| 103
|
[
"e"
] | true
| 3
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
transform
|
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations. `copy=False` will only be effective with CSR sparse matrix.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
copy=copy,
reset=False,
)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=X.dtype)
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1.0
if hasattr(self, "idf_"):
# the columns of X (CSR matrix) can be accessed with `X.indices `and
# multiplied with the corresponding `idf` value
X.data *= self.idf_[X.indices]
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
|
Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations. `copy=False` will only be effective with CSR sparse matrix.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
|
python
|
sklearn/feature_extraction/text.py
| 1,692
|
[
"self",
"X",
"copy"
] | false
| 5
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
execute_in_subprocess_with_kwargs
|
def execute_in_subprocess_with_kwargs(cmd: list[str], **kwargs) -> None:
"""
Execute a process and stream output to logger.
:param cmd: command and arguments to run
All other keyword args will be passed directly to subprocess.Popen
.. deprecated:: 3.2.0
This function is deprecated. Please implement your own subprocess execution logic,
reference the one present in standard / google providers.
"""
import warnings
warnings.warn(
"The function `execute_in_subprocess_with_kwargs` is deprecated and will be removed in a future version.",
DeprecatedImportWarning,
stacklevel=2,
)
log.info("Executing cmd: %s", " ".join(shlex.quote(c) for c in cmd))
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0, close_fds=True, **kwargs
) as proc:
log.info("Output:")
if proc.stdout:
with proc.stdout:
for line in iter(proc.stdout.readline, b""):
log.info("%s", line.decode().rstrip())
exit_code = proc.wait()
if exit_code != 0:
raise subprocess.CalledProcessError(exit_code, cmd)
|
Execute a process and stream output to logger.
:param cmd: command and arguments to run
All other keyword args will be passed directly to subprocess.Popen
.. deprecated:: 3.2.0
This function is deprecated. Please implement your own subprocess execution logic,
reference the one present in standard / google providers.
|
python
|
airflow-core/src/airflow/utils/process_utils.py
| 192
|
[
"cmd"
] |
None
| true
| 4
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
northPolarH3
|
public static long northPolarH3(int res) {
checkResolution(res);
return NORTH[res];
}
|
Find the h3 index containing the North Pole at the given resolution.
@param res the provided resolution.
@return the h3 index containing the North Pole.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 538
|
[
"res"
] | true
| 1
| 6.96
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
asSystemEnvironmentLegacyName
|
@Nullable ConfigurationPropertyName asSystemEnvironmentLegacyName() {
ConfigurationPropertyName name = this.systemEnvironmentLegacyName;
if (name == null) {
name = ConfigurationPropertyName
.ofIfValid(buildSimpleToString('.', (i) -> getElement(i, Form.DASHED).replace('-', '.')));
this.systemEnvironmentLegacyName = (name != null) ? name : EMPTY;
}
return (name != EMPTY) ? name : null;
}
|
Returns {@code true} if this element is an ancestor (immediate or nested parent) of
the specified name.
@param name the name to check
@return {@code true} if this name is an ancestor
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 540
|
[] |
ConfigurationPropertyName
| true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
waitForWebpack
|
async function waitForWebpack() {
while (true) {
try {
readFileSync(path.resolve(__dirname, '../build/main.js'));
return;
} catch (err) {
console.log(
'Could not find webpack build output. Will retry in a second...'
);
await new Promise(resolve => setTimeout(resolve, 1000));
}
}
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
|
javascript
|
fixtures/fizz/server/server.js
| 96
|
[] | false
| 3
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| true
|
|
abortIncompleteBatches
|
public void abortIncompleteBatches() {
// We need to keep aborting the incomplete batch until no thread is trying to append to
// 1. Avoid losing batches.
// 2. Free up memory in case appending threads are blocked on buffer full.
// This is a tight loop but should be able to get through very quickly.
do {
abortBatches();
} while (appendsInProgress());
// After this point, no thread will append any messages because they will see the close
// flag set. We need to do the last abort after no thread was appending in case there was a new
// batch appended by the last appending thread.
abortBatches();
this.topicInfoMap.clear();
}
|
This function is only called when sender is closed forcefully. It will fail all the
incomplete batches and return.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
| 1,102
|
[] |
void
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
timestampsToSearch
|
public Map<TopicPartition, Long> timestampsToSearch() {
return timestampsToSearch;
}
|
Build result representing that no offsets were found as part of the current event.
@return Map containing all the partitions the event was trying to get offsets for, and
null {@link OffsetAndTimestamp} as value
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ListOffsetsEvent.java
| 59
|
[] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
determineBeanType
|
@Override
public Class<?> determineBeanType(Class<?> beanClass, String beanName) {
if (this.advisor != null && isEligible(beanClass)) {
ProxyFactory proxyFactory = new ProxyFactory();
proxyFactory.copyFrom(this);
proxyFactory.setTargetClass(beanClass);
if (!proxyFactory.isProxyTargetClass()) {
evaluateProxyInterfaces(beanClass, proxyFactory);
}
proxyFactory.addAdvisor(this.advisor);
customizeProxyFactory(proxyFactory);
// Use original ClassLoader if bean class not locally loaded in overriding class loader
ClassLoader classLoader = getProxyClassLoader();
if (classLoader instanceof SmartClassLoader smartClassLoader &&
classLoader != beanClass.getClassLoader()) {
classLoader = smartClassLoader.getOriginalClassLoader();
}
return proxyFactory.getProxyClass(classLoader);
}
return beanClass;
}
|
Set whether this post-processor's advisor is supposed to apply before
existing advisors when encountering a pre-advised object.
<p>Default is "false", applying the advisor after existing advisors, i.e.
as close as possible to the target method. Switch this to "true" in order
for this post-processor's advisor to wrap existing advisors as well.
<p>Note: Check the concrete post-processor's javadoc whether it possibly
changes this flag by default, depending on the nature of its advisor.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AbstractAdvisingBeanPostProcessor.java
| 62
|
[
"beanClass",
"beanName"
] | true
| 6
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
tryEnterIf
|
public boolean tryEnterIf(Guard guard) {
if (guard.monitor != this) {
throw new IllegalMonitorStateException();
}
ReentrantLock lock = this.lock;
if (!lock.tryLock()) {
return false;
}
boolean satisfied = false;
try {
return satisfied = guard.isSatisfied();
} finally {
if (!satisfied) {
lock.unlock();
}
}
}
|
Enters this monitor if it is possible to do so immediately and the guard is satisfied. Does not
block acquiring the lock and does not wait for the guard to be satisfied.
<p><b>Note:</b> This method disregards the fairness setting of this monitor.
@return whether the monitor was entered, which guarantees that the guard is now satisfied
|
java
|
android/guava/src/com/google/common/util/concurrent/Monitor.java
| 806
|
[
"guard"
] | true
| 4
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
_optimal_path
|
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
"""
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
full_results = [(0, [], input_sets)]
for iteration in range(len(input_sets) - 1):
iter_results = []
# Compute all unique pairs
for curr in full_results:
cost, positions, remaining = curr
for con in itertools.combinations(
range(len(input_sets) - iteration), 2
):
# Find the contraction
cont = _find_contraction(con, remaining, output_set)
new_result, new_input_sets, idx_removed, idx_contract = cont
# Sieve the results based on memory_limit
new_size = _compute_size_by_dict(new_result, idx_dict)
if new_size > memory_limit:
continue
# Build (total_cost, positions, indices_remaining)
total_cost = cost + _flop_count(
idx_contract, idx_removed, len(con), idx_dict
)
new_pos = positions + [con]
iter_results.append((total_cost, new_pos, new_input_sets))
# Update combinatorial list, if we did not find anything return best
# path + remaining contractions
if iter_results:
full_results = iter_results
else:
path = min(full_results, key=lambda x: x[0])[1]
path += [tuple(range(len(input_sets) - iteration))]
return path
# If we have not found anything return single einsum contraction
if len(full_results) == 0:
return [tuple(range(len(input_sets)))]
path = min(full_results, key=lambda x: x[0])[1]
return path
|
Computes all possible pair contractions, sieves the results based
on ``memory_limit`` and returns the lowest cost path. This algorithm
scales factorial with respect to the elements in the list ``input_sets``.
Parameters
----------
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
idx_dict : dictionary
Dictionary of index sizes
memory_limit : int
The maximum number of elements in a temporary array
Returns
-------
path : list
The optimal contraction order within the memory limit constraint.
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
|
python
|
numpy/_core/einsumfunc.py
| 150
|
[
"input_sets",
"output_set",
"idx_dict",
"memory_limit"
] | false
| 8
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
configureProtocolVersion
|
void configureProtocolVersion(SSL_CTX* ctx, SSLContext::SSLVersion version) {
/*
* From the OpenSSL docs https://fburl.com/ii9k29qw:
* Setting the minimum or maximum version to 0, will enable protocol versions
* down to the lowest version, or up to the highest version supported by the
* library, respectively.
*
* We can use that as the default/fallback.
*/
int minVersion = 0;
switch (version) {
case SSLContext::SSLVersion::TLSv1:
minVersion = TLS1_VERSION;
break;
case SSLContext::SSLVersion::SSLv3:
minVersion = SSL3_VERSION;
break;
case SSLContext::SSLVersion::TLSv1_2:
minVersion = TLS1_2_VERSION;
break;
case SSLContext::SSLVersion::TLSv1_3:
minVersion = TLS1_3_VERSION;
break;
case SSLContext::SSLVersion::SSLv2:
default:
// do nothing
break;
}
const auto setMinProtoResult = SSL_CTX_set_min_proto_version(ctx, minVersion);
DCHECK(setMinProtoResult == 1)
<< sformat("unsupported min TLS protocol version: 0x{:04x}", minVersion);
}
|
Configure the given SSL context to use the given version.
|
cpp
|
folly/io/async/SSLContext.cpp
| 43
|
[
"version"
] | true
| 7
| 6
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
get_text_list
|
def get_text_list(list_, last_word=gettext_lazy("or")):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if not list_:
return ""
if len(list_) == 1:
return str(list_[0])
return "%s %s %s" % (
# Translators: This string is used as a separator between list elements
_(", ").join(str(i) for i in list_[:-1]),
str(last_word),
str(list_[-1]),
)
|
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
|
python
|
django/utils/text.py
| 287
|
[
"list_",
"last_word"
] | false
| 3
| 6.32
|
django/django
| 86,204
|
unknown
| false
|
|
convertElement
|
protected Object convertElement(Object element) {
return element;
}
|
Hook to convert each encountered Collection/array element.
The default implementation simply returns the passed-in element as-is.
<p>Can be overridden to perform conversion of certain elements,
for example String to Integer if a String array comes in and
should be converted to a Set of Integer objects.
<p>Only called if actually creating a new Collection!
This is by default not the case if the type of the passed-in Collection
already matches. Override {@link #alwaysCreateNewCollection()} to
enforce creating a new Collection in every case.
@param element the source element
@return the element to be used in the target Collection
@see #alwaysCreateNewCollection()
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomCollectionEditor.java
| 200
|
[
"element"
] |
Object
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
randomLong
|
public long randomLong(final long startInclusive, final long endExclusive) {
Validate.isTrue(endExclusive >= startInclusive, "Start value must be smaller or equal to end value.");
Validate.isTrue(startInclusive >= 0, "Both range values must be non-negative.");
if (startInclusive == endExclusive) {
return startInclusive;
}
return startInclusive + randomLong(endExclusive - startInclusive);
}
|
Generates a random long within the specified range.
@param startInclusive the smallest value that can be returned, must be non-negative.
@param endExclusive the upper bound (not included).
@throws IllegalArgumentException if {@code startInclusive > endExclusive} or if {@code startInclusive} is negative.
@return the random long.
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 445
|
[
"startInclusive",
"endExclusive"
] | true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
check_consistent_length
|
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
Examples
--------
>>> from sklearn.utils.validation import check_consistent_length
>>> a = [1, 2, 3]
>>> b = [2, 3, 4]
>>> check_consistent_length(a, b)
"""
lengths = [_num_samples(X) for X in arrays if X is not None]
if len(set(lengths)) > 1:
raise ValueError(
"Found input variables with inconsistent numbers of samples: %r"
% [int(l) for l in lengths]
)
|
Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
Examples
--------
>>> from sklearn.utils.validation import check_consistent_length
>>> a = [1, 2, 3]
>>> b = [2, 3, 4]
>>> check_consistent_length(a, b)
|
python
|
sklearn/utils/validation.py
| 445
|
[] | false
| 2
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
poll
|
CompletedFetch poll() {
try {
lock.lock();
return completedFetches.poll();
} finally {
lock.unlock();
}
}
|
Return whether we have any completed fetches pending return to the user. This method is thread-safe. Has
visibility for testing.
@return {@code true} if there are completed fetches that match the {@link Predicate}, {@code false} otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java
| 143
|
[] |
CompletedFetch
| true
| 1
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
adjustRes
|
private static int adjustRes(CoordIJK coord, int res) {
if (H3Index.isResolutionClassIII(res)) {
coord.downAp7r();
res += 1;
}
return res;
}
|
Generates the cell boundary in spherical coordinates for a cell given by this
FaceIJK address at a specified resolution.
@param res The H3 resolution of the cell.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java
| 549
|
[
"coord",
"res"
] | true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
andThen
|
default <V> FailableBiFunction<T, U, V, E> andThen(final FailableFunction<? super R, ? extends V, E> after) {
Objects.requireNonNull(after);
return (final T t, final U u) -> after.apply(apply(t, u));
}
|
Returns a composed {@link FailableBiFunction} that like {@link BiFunction#andThen(Function)}.
@param <V> the output type of the {@code after} function, and of the composed function.
@param after the operation to perform after this one.
@return a composed {@link FailableBiFunction} that like {@link BiFunction#andThen(Function)}.
@throws NullPointerException when {@code after} is null.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableBiFunction.java
| 62
|
[
"after"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
isEmpty
|
public boolean isEmpty() throws IOException {
Optional<Long> sizeIfKnown = sizeIfKnown();
if (sizeIfKnown.isPresent()) {
return sizeIfKnown.get() == 0L;
}
Closer closer = Closer.create();
try {
InputStream in = closer.register(openStream());
return in.read() == -1;
} catch (Throwable e) {
throw closer.rethrow(e);
} finally {
closer.close();
}
}
|
Returns whether the source has zero bytes. The default implementation first checks {@link
#sizeIfKnown}, returning true if it's known to be zero and false if it's known to be non-zero.
If the size is not known, it falls back to opening a stream and checking for EOF.
<p>Note that, in cases where {@code sizeIfKnown} returns zero, it is <i>possible</i> that bytes
are actually available for reading. (For example, some special files may return a size of 0
despite actually having content when read.) This means that a source may return {@code true}
from {@code isEmpty()} despite having readable content.
@throws IOException if an I/O error occurs
@since 15.0
|
java
|
android/guava/src/com/google/common/io/ByteSource.java
| 151
|
[] | true
| 3
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
set_closed
|
def set_closed(self, closed: IntervalClosedType) -> Self:
"""
Return an identical IntervalArray closed on the specified side.
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
IntervalArray
A new IntervalArray with the specified side closures.
See Also
--------
IntervalArray.closed : Returns inclusive side of the Interval.
arrays.IntervalArray.closed : Returns inclusive side of the IntervalArray.
Examples
--------
>>> index = pd.arrays.IntervalArray.from_breaks(range(4))
>>> index
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
>>> index.set_closed("both")
<IntervalArray>
[[0, 1], [1, 2], [2, 3]]
Length: 3, dtype: interval[int64, both]
"""
if closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {closed}"
raise ValueError(msg)
left, right = self._left, self._right
dtype = IntervalDtype(left.dtype, closed=closed)
return self._simple_new(left, right, dtype=dtype)
|
Return an identical IntervalArray closed on the specified side.
Parameters
----------
closed : {'left', 'right', 'both', 'neither'}
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
IntervalArray
A new IntervalArray with the specified side closures.
See Also
--------
IntervalArray.closed : Returns inclusive side of the Interval.
arrays.IntervalArray.closed : Returns inclusive side of the IntervalArray.
Examples
--------
>>> index = pd.arrays.IntervalArray.from_breaks(range(4))
>>> index
<IntervalArray>
[(0, 1], (1, 2], (2, 3]]
Length: 3, dtype: interval[int64, right]
>>> index.set_closed("both")
<IntervalArray>
[[0, 1], [1, 2], [2, 3]]
Length: 3, dtype: interval[int64, both]
|
python
|
pandas/core/arrays/interval.py
| 1,626
|
[
"self",
"closed"
] |
Self
| true
| 2
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
producerEpoch
|
short producerEpoch();
|
Get the producer epoch for this log record batch.
@return The producer epoch, or -1 if there is none
|
java
|
clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java
| 153
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
of
|
public static IntegerRange of(final int fromInclusive, final int toInclusive) {
return of(Integer.valueOf(fromInclusive), Integer.valueOf(toInclusive));
}
|
Creates a closed range with the specified minimum and maximum values (both inclusive).
<p>
The range uses the natural ordering of the elements to determine where values lie in the range.
</p>
<p>
The arguments may be passed in the order (min,max) or (max,min). The getMinimum and getMaximum methods will return the correct values.
</p>
@param fromInclusive the first value that defines the edge of the range, inclusive.
@param toInclusive the second value that defines the edge of the range, inclusive.
@return the range object, not null.
|
java
|
src/main/java/org/apache/commons/lang3/IntegerRange.java
| 50
|
[
"fromInclusive",
"toInclusive"
] |
IntegerRange
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
maybeThrowInvalidGroupIdException
|
private void maybeThrowInvalidGroupIdException() {
if (groupId == null || groupId.isEmpty()) {
throw new InvalidGroupIdException(
"You must provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration.");
}
}
|
Release the light lock protecting the consumer from multithreaded access.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,112
|
[] |
void
| true
| 3
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
resolveItemMetadata
|
final ItemMetadata resolveItemMetadata(String prefix, MetadataGenerationEnvironment environment) {
if (isNested(environment)) {
return resolveItemMetadataGroup(prefix, environment);
}
if (isProperty(environment)) {
return resolveItemMetadataProperty(prefix, environment);
}
return null;
}
|
Resolve the {@link ItemMetadata} for this property.
@param prefix the property prefix
@param environment the metadata generation environment
@return the item metadata or {@code null}
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/PropertyDescriptor.java
| 100
|
[
"prefix",
"environment"
] |
ItemMetadata
| true
| 3
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
appendPadding
|
public StrBuilder appendPadding(final int length, final char padChar) {
if (length >= 0) {
ensureCapacity(size + length);
for (int i = 0; i < length; i++) {
buffer[size++] = padChar;
}
}
return this;
}
|
Appends the pad character to the builder the specified number of times.
@param length the length to append, negative means no append
@param padChar the character to append
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,189
|
[
"length",
"padChar"
] |
StrBuilder
| true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_maybe_restore_index_levels
|
def _maybe_restore_index_levels(self, result: DataFrame) -> None:
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(
self.join_names, self.left_on, self.right_on, strict=True
):
if (
# Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
# type "Union[Hashable, ExtensionArray, Index, Series]"; expected
# "Hashable"
self.orig_left._is_level_reference(left_key) # type: ignore[arg-type]
# Argument 1 to "_is_level_reference" of "NDFrame" has incompatible
# type "Union[Hashable, ExtensionArray, Index, Series]"; expected
# "Hashable"
and self.orig_right._is_level_reference(
right_key # type: ignore[arg-type]
)
and left_key == right_key
and name not in result.index.names
):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
|
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
|
python
|
pandas/core/reshape/merge.py
| 1,229
|
[
"self",
"result"
] |
None
| true
| 7
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
fuzz_operation_graph
|
def fuzz_operation_graph(
target_spec: Spec,
max_depth: int = 7,
seed: int | None = None,
template: str = "default",
supported_ops: list[str] | None = None,
) -> OperationGraph:
"""
Generate a graph of operations that produces the target specification.
The graph-based approach allows for better visualization, debugging, and
potential optimizations like common subexpression elimination.
Args:
target_spec: The desired output specification (TensorSpec or ScalarSpec)
max_depth: Maximum depth of operations. At depth 0, only leaf operations (constant, arg) are used.
seed: Random seed for reproducible generation. If None, uses current random state.
template: Template name to determine configuration
Returns:
OperationGraph with nodes organized in a DAG structure
"""
# Set seed for reproducible generation
if seed is not None:
import random
random.seed(seed)
torch.manual_seed(seed)
# Reset global arg counter for deterministic behavior
global _next_arg_id
_next_arg_id = 0
# Global counter for unique node IDs - start from 0 for deterministic behavior
node_counter = 0
# Dictionary to store all nodes: node_id -> OperationNode
nodes: dict[str, OperationNode] = {}
def _generate_node(spec: Spec, depth: int, stack_size: int = 0) -> str:
"""
Generate a node for the given spec and return its node_id.
"""
nonlocal node_counter
# Generate new operation
op_name, input_specs = fuzz_op(spec, depth, stack_size, template, supported_ops)
# Create unique node ID
node_id = f"node_{node_counter}"
node_counter += 1
# Generate input nodes
input_node_ids = []
if input_specs: # Non-leaf operations
for input_spec in input_specs:
input_node_id = _generate_node(
input_spec, max(0, depth - 1), stack_size + len(input_node_ids) + 1
)
input_node_ids.append(input_node_id)
# Create the operation node
node = OperationNode(
node_id=node_id,
op_name=op_name,
input_specs=input_specs,
output_spec=spec,
input_nodes=input_node_ids,
depth=depth,
)
# Store the node
nodes[node_id] = node
return node_id
# Generate the root node
root_node_id = _generate_node(target_spec, max_depth, 0)
# Create and return the operation graph
graph = OperationGraph(
nodes=nodes, root_node_id=root_node_id, target_spec=target_spec
)
# Verify that the root node produces the target spec
root_node = nodes[root_node_id]
if not specs_compatible(root_node.output_spec, target_spec):
raise ValueError(
f"Generated graph root node produces {root_node.output_spec}, "
f"but target spec is {target_spec}"
)
return graph
|
Generate a graph of operations that produces the target specification.
The graph-based approach allows for better visualization, debugging, and
potential optimizations like common subexpression elimination.
Args:
target_spec: The desired output specification (TensorSpec or ScalarSpec)
max_depth: Maximum depth of operations. At depth 0, only leaf operations (constant, arg) are used.
seed: Random seed for reproducible generation. If None, uses current random state.
template: Template name to determine configuration
Returns:
OperationGraph with nodes organized in a DAG structure
|
python
|
tools/experimental/torchfuzz/ops_fuzzer.py
| 428
|
[
"target_spec",
"max_depth",
"seed",
"template",
"supported_ops"
] |
OperationGraph
| true
| 5
| 7.52
|
pytorch/pytorch
| 96,034
|
google
| false
|
difference
|
def difference(self, other, sort: bool | None = None):
"""
Return a new Index with elements of index not in `other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
Index object or an array-like object containing elements to be compared
with the elements of the original Index.
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Returns a new Index object containing elements that are in the original
Index but not in the `other` Index.
See Also
--------
Index.symmetric_difference : Compute the symmetric difference of two Index
objects.
Index.intersection : Form the intersection of two Index objects.
Examples
--------
>>> idx1 = pd.Index([2, 1, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Index([2, 1], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
# Note: we do NOT call _dti_setop_align_tzs here, as there
# is no requirement that .difference be commutative, so it does
# not cast to object.
if self.equals(other):
# Note: we do not (yet) sort even if sort=None GH#24959
return self[:0].rename(result_name)
if len(other) == 0:
# Note: we do not (yet) sort even if sort=None GH#24959
result = self.unique().rename(result_name)
if sort is True:
return result.sort_values()
return result
if not self._should_compare(other):
# Nothing matches -> difference is everything
result = self.unique().rename(result_name)
if sort is True:
return result.sort_values()
return result
result = self._difference(other, sort=sort)
return self._wrap_difference_result(other, result)
|
Return a new Index with elements of index not in `other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
Index object or an array-like object containing elements to be compared
with the elements of the original Index.
sort : bool or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
* True : Sort the result (which may raise TypeError).
Returns
-------
Index
Returns a new Index object containing elements that are in the original
Index but not in the `other` Index.
See Also
--------
Index.symmetric_difference : Compute the symmetric difference of two Index
objects.
Index.intersection : Form the intersection of two Index objects.
Examples
--------
>>> idx1 = pd.Index([2, 1, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Index([2, 1], dtype='int64')
|
python
|
pandas/core/indexes/base.py
| 3,403
|
[
"self",
"other",
"sort"
] | true
| 6
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
tryGetModuleNameFromAmbientModule
|
function tryGetModuleNameFromAmbientModule(moduleSymbol: Symbol, checker: TypeChecker): string | undefined {
const decl = moduleSymbol.declarations?.find(
d => isNonGlobalAmbientModule(d) && (!isExternalModuleAugmentation(d) || !isExternalModuleNameRelative(getTextOfIdentifierOrLiteral(d.name))),
) as (ModuleDeclaration & { name: StringLiteral; }) | undefined;
if (decl) {
return decl.name.text;
}
// the module could be a namespace, which is export through "export=" from an ambient module.
/**
* declare module "m" {
* namespace ns {
* class c {}
* }
* export = ns;
* }
*/
// `import {c} from "m";` is valid, in which case, `moduleSymbol` is "ns", but the module name should be "m"
const ambientModuleDeclareCandidates = mapDefined(moduleSymbol.declarations, d => {
if (!isModuleDeclaration(d)) return;
const topNamespace = getTopNamespace(d);
if (
!(topNamespace?.parent?.parent
&& isModuleBlock(topNamespace.parent)
&& isAmbientModule(topNamespace.parent.parent)
&& isSourceFile(topNamespace.parent.parent.parent))
) return;
const exportAssignment = (topNamespace.parent.parent.symbol.exports?.get("export=" as __String)?.valueDeclaration as ExportAssignment)?.expression as PropertyAccessExpression | Identifier;
if (!exportAssignment) return;
const exportSymbol = checker.getSymbolAtLocation(exportAssignment);
if (!exportSymbol) return;
const originalExportSymbol = exportSymbol?.flags & SymbolFlags.Alias ? checker.getAliasedSymbol(exportSymbol) : exportSymbol;
if (originalExportSymbol === d.symbol) return topNamespace.parent.parent;
function getTopNamespace(namespaceDeclaration: ModuleDeclaration) {
while (namespaceDeclaration.flags & NodeFlags.NestedNamespace) {
namespaceDeclaration = namespaceDeclaration.parent as ModuleDeclaration;
}
return namespaceDeclaration;
}
});
const ambientModuleDeclare = ambientModuleDeclareCandidates[0] as (AmbientModuleDeclaration & { name: StringLiteral; }) | undefined;
if (ambientModuleDeclare) {
return ambientModuleDeclare.name.text;
}
}
|
Looks for existing imports that use symlinks to this module.
Symlinks will be returned first so they are preferred over the real path.
|
typescript
|
src/compiler/moduleSpecifiers.ts
| 879
|
[
"moduleSymbol",
"checker"
] | true
| 15
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
isMatchingExecutable
|
private static boolean isMatchingExecutable(final Executable method, final Class<?>[] parameterTypes) {
final Class<?>[] methodParameterTypes = method.getParameterTypes();
if (ClassUtils.isAssignable(parameterTypes, methodParameterTypes, true)) {
return true;
}
if (method.isVarArgs()) {
int i;
for (i = 0; i < methodParameterTypes.length - 1 && i < parameterTypes.length; i++) {
if (!ClassUtils.isAssignable(parameterTypes[i], methodParameterTypes[i], true)) {
return false;
}
}
final Class<?> varArgParameterType = methodParameterTypes[methodParameterTypes.length - 1].getComponentType();
for (; i < parameterTypes.length; i++) {
if (!ClassUtils.isAssignable(parameterTypes[i], varArgParameterType, true)) {
return false;
}
}
return true;
}
return false;
}
|
Tests whether a {@link Member} is accessible.
@param member Member to test, may be null.
@return {@code true} if {@code m} is accessible.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MemberUtils.java
| 250
|
[
"method",
"parameterTypes"
] | true
| 8
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkExcludedClasses
|
private void checkExcludedClasses(List<String> configurations, Set<String> exclusions) {
List<String> invalidExcludes = new ArrayList<>(exclusions.size());
ClassLoader classLoader = (this.beanClassLoader != null) ? this.beanClassLoader : getClass().getClassLoader();
for (String exclusion : exclusions) {
if (ClassUtils.isPresent(exclusion, classLoader) && !configurations.contains(exclusion)) {
invalidExcludes.add(exclusion);
}
}
if (!invalidExcludes.isEmpty()) {
handleInvalidExcludes(invalidExcludes);
}
}
|
Return the auto-configuration class names that should be considered. By default,
this method will load candidates using {@link ImportCandidates}.
@param metadata the source metadata
@param attributes the {@link #getAttributes(AnnotationMetadata) annotation
attributes}
@return a list of candidate configurations
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
| 212
|
[
"configurations",
"exclusions"
] |
void
| true
| 5
| 7.28
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
inverse_transform
|
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
X_original : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
"""
check_is_fitted(self, "feature_names_")
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=["csr", "csc"])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in range(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
|
Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
X_original : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
|
python
|
sklearn/feature_extraction/_dict_vectorizer.py
| 320
|
[
"self",
"X",
"dict_type"
] | false
| 7
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_safe_parse_datetime
|
def _safe_parse_datetime(date_to_check: str) -> datetime:
"""
Parse datetime and raise error for invalid dates.
:param date_to_check: the string value to be parsed
"""
if not date_to_check:
raise ValueError(f"{date_to_check} cannot be None.")
return _safe_parse_datetime_optional(date_to_check)
|
Parse datetime and raise error for invalid dates.
:param date_to_check: the string value to be parsed
|
python
|
airflow-core/src/airflow/api_fastapi/common/parameters.py
| 521
|
[
"date_to_check"
] |
datetime
| true
| 2
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
glob
|
function glob(pattern, options, callback) {
if (typeof options === 'function') {
callback = options;
options = undefined;
}
callback = makeCallback(callback);
const Glob = lazyGlob();
PromisePrototypeThen(
ArrayFromAsync(new Glob(pattern, options).glob()),
(res) => callback(null, res),
callback,
);
}
|
Creates a write stream.
@param {string | Buffer | URL} path
@param {string | {
flags?: string;
encoding?: string;
fd?: number | FileHandle;
mode?: number;
autoClose?: boolean;
emitClose?: boolean;
start: number;
fs?: object | null;
signal?: AbortSignal | null;
highWaterMark?: number;
flush?: boolean;
}} [options]
@returns {WriteStream}
|
javascript
|
lib/fs.js
| 3,184
|
[
"pattern",
"options",
"callback"
] | false
| 2
| 6.96
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
getExceptionReporters
|
private Collection<SpringBootExceptionReporter> getExceptionReporters(
@Nullable ConfigurableApplicationContext context) {
try {
ArgumentResolver argumentResolver = (context != null)
? ArgumentResolver.of(ConfigurableApplicationContext.class, context) : ArgumentResolver.none();
return getSpringFactoriesInstances(SpringBootExceptionReporter.class, argumentResolver);
}
catch (Throwable ex) {
return Collections.emptyList();
}
}
|
Called after the context has been refreshed.
@param context the application context
@param args the application arguments
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 830
|
[
"context"
] | true
| 3
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getTypeArguments
|
private static Map<TypeVariable<?>, Type> getTypeArguments(Class<?> cls, final Class<?> toClass, final Map<TypeVariable<?>, Type> subtypeVarAssigns) {
// make sure they're assignable
if (!isAssignable(cls, toClass)) {
return null;
}
// can't work with primitives
if (cls.isPrimitive()) {
// both classes are primitives?
if (toClass.isPrimitive()) {
// dealing with widening here. No type arguments to be
// harvested with these two types.
return new HashMap<>();
}
// work with wrapper the wrapper class instead of the primitive
cls = ClassUtils.primitiveToWrapper(cls);
}
// create a copy of the incoming map, or an empty one if it's null
final HashMap<TypeVariable<?>, Type> typeVarAssigns = subtypeVarAssigns == null ? new HashMap<>() : new HashMap<>(subtypeVarAssigns);
// has target class been reached?
if (toClass.equals(cls)) {
return typeVarAssigns;
}
// walk the inheritance hierarchy until the target class is reached
return getTypeArguments(getClosestParentType(cls, toClass), toClass, typeVarAssigns);
}
|
Gets a map of the type arguments of a class in the context of {@code toClass}.
@param cls the class in question.
@param toClass the context class.
@param subtypeVarAssigns a map with type variables.
@return the {@link Map} with type arguments.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 768
|
[
"cls",
"toClass",
"subtypeVarAssigns"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
lastOrdinalIndexOf
|
public static int lastOrdinalIndexOf(final CharSequence str, final CharSequence searchStr, final int ordinal) {
return ordinalIndexOf(str, searchStr, ordinal, true);
}
|
Finds the n-th last index within a String, handling {@code null}. This method uses {@link String#lastIndexOf(String)}.
<p>
A {@code null} String will return {@code -1}.
</p>
<pre>
StringUtils.lastOrdinalIndexOf(null, *, *) = -1
StringUtils.lastOrdinalIndexOf(*, null, *) = -1
StringUtils.lastOrdinalIndexOf("", "", *) = 0
StringUtils.lastOrdinalIndexOf("aabaabaa", "a", 1) = 7
StringUtils.lastOrdinalIndexOf("aabaabaa", "a", 2) = 6
StringUtils.lastOrdinalIndexOf("aabaabaa", "b", 1) = 5
StringUtils.lastOrdinalIndexOf("aabaabaa", "b", 2) = 2
StringUtils.lastOrdinalIndexOf("aabaabaa", "ab", 1) = 4
StringUtils.lastOrdinalIndexOf("aabaabaa", "ab", 2) = 1
StringUtils.lastOrdinalIndexOf("aabaabaa", "", 1) = 8
StringUtils.lastOrdinalIndexOf("aabaabaa", "", 2) = 8
</pre>
<p>
Note that 'tail(CharSequence str, int n)' may be implemented as:
</p>
<pre>
str.substring(lastOrdinalIndexOf(str, "\n", n) + 1)
</pre>
@param str the CharSequence to check, may be null.
@param searchStr the CharSequence to find, may be null.
@param ordinal the n-th last {@code searchStr} to find.
@return the n-th last index of the search CharSequence, {@code -1} ({@code INDEX_NOT_FOUND}) if no match or {@code null} string input.
@since 2.5
@since 3.0 Changed signature from lastOrdinalIndexOf(String, String, int) to lastOrdinalIndexOf(CharSequence, CharSequence, int)
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,033
|
[
"str",
"searchStr",
"ordinal"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
get_run_data_interval
|
def get_run_data_interval(timetable: Timetable, run: DagRun) -> DataInterval:
"""
Get the data interval of this run.
For compatibility, this method infers the data interval from the DAG's
schedule if the run does not have an explicit one set, which is possible for
runs created prior to AIP-39.
This function is private to Airflow core and should not be depended on as a
part of the Python API.
:meta private:
"""
if (
data_interval := _get_model_data_interval(run, "data_interval_start", "data_interval_end")
) is not None:
return data_interval
if (
data_interval := timetable.infer_manual_data_interval(run_after=pendulum.instance(run.run_after))
) is not None:
return data_interval
# Compatibility: runs created before AIP-39 implementation don't have an
# explicit data interval. Try to infer from the logical date.
if TYPE_CHECKING:
assert run.logical_date is not None
return infer_automated_data_interval(timetable, run.logical_date)
|
Get the data interval of this run.
For compatibility, this method infers the data interval from the DAG's
schedule if the run does not have an explicit one set, which is possible for
runs created prior to AIP-39.
This function is private to Airflow core and should not be depended on as a
part of the Python API.
:meta private:
|
python
|
airflow-core/src/airflow/models/dag.py
| 131
|
[
"timetable",
"run"
] |
DataInterval
| true
| 4
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
add_perf_start_date_env_to_conf
|
def add_perf_start_date_env_to_conf(performance_dag_conf: dict[str, str]) -> None:
    """
    Calculate start date based on configuration.

    Calculates value for PERF_START_DATE environment variable and adds it to the
    performance_dag_conf if it is not already present there.

    :param performance_dag_conf: dict with environment variables as keys and their values as values
    """
    if "PERF_START_DATE" not in performance_dag_conf:
        start_ago = get_performance_dag_environment_variable(performance_dag_conf, "PERF_START_AGO")
        # Bug fix: utcnow must be CALLED -- subtracting a timedelta from the
        # function object itself raises a TypeError at runtime.
        perf_start_date = airflow.utils.timezone.utcnow() - check_and_parse_time_delta(
            "PERF_START_AGO", start_ago
        )
        performance_dag_conf["PERF_START_DATE"] = str(perf_start_date)
|
Calculate start date based on configuration.
Calculates value for PERF_START_DATE environment variable and adds it to the performance_dag_conf
if it is not already present there.
:param performance_dag_conf: dict with environment variables as keys and their values as values
|
python
|
performance/src/performance_dags/performance_dag/performance_dag_utils.py
| 71
|
[
"performance_dag_conf"
] |
None
| true
| 2
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
substituteExpressionIdentifier
|
/**
 * Substitution for an Identifier expression that may contain an imported or exported symbol.
 *
 * @param node The identifier to substitute.
 * @returns A qualified property/element access when the identifier resolves to an import,
 *          otherwise the node unchanged.
 */
function substituteExpressionIdentifier(node: Identifier): Expression {
    // Helper names (e.g. __extends) are rewritten against the external helpers module, if one exists.
    if (getEmitFlags(node) & EmitFlags.HelperName) {
        const externalHelpersModuleName = getExternalHelpersModuleName(currentSourceFile);
        if (externalHelpersModuleName) {
            return factory.createPropertyAccessExpression(externalHelpersModuleName, node);
        }
        return node;
    }
    // When we see an identifier in an expression position that
    // points to an imported symbol, we should substitute a qualified
    // reference to the imported symbol if one is needed.
    //
    // - We do not substitute generated identifiers for any reason.
    // - We do not substitute identifiers tagged with the LocalName flag.
    if (!isGeneratedIdentifier(node) && !isLocalName(node)) {
        const importDeclaration = resolver.getReferencedImportDeclaration(node);
        if (importDeclaration) {
            if (isImportClause(importDeclaration)) {
                // Default import: rewrite to <generatedModuleName>.default
                return setTextRange(
                    factory.createPropertyAccessExpression(
                        factory.getGeneratedNameForNode(importDeclaration.parent),
                        factory.createIdentifier("default"),
                    ),
                    /*location*/ node,
                );
            }
            else if (isImportSpecifier(importDeclaration)) {
                // Named import: rewrite to <generatedModuleName>.<importedName>,
                // using element access when the imported name is a string literal.
                const importedName = importDeclaration.propertyName || importDeclaration.name;
                const target = factory.getGeneratedNameForNode(importDeclaration.parent?.parent?.parent || importDeclaration);
                return setTextRange(
                    importedName.kind === SyntaxKind.StringLiteral
                        ? factory.createElementAccessExpression(target, factory.cloneNode(importedName))
                        : factory.createPropertyAccessExpression(target, factory.cloneNode(importedName)),
                    /*location*/ node,
                );
            }
        }
    }
    return node;
}
|
Substitution for an Identifier expression that may contain an imported or exported symbol.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/module/system.ts
| 1,897
|
[
"node"
] | true
| 12
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
whenNotNull
|
/**
 * Only include this member when its value is not {@code null}.
 * @return a {@link Member} which may be configured further
 */
public Member<T> whenNotNull() {
	return when((value) -> value != null);
}
|
Only include this member when its value is not {@code null}.
@return a {@link Member} which may be configured further
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 391
|
[] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
compact
|
/**
 * Creates an array with all falsey values removed. The values `false`,
 * `null`, `0`, `-0`, `0n`, `""`, `undefined`, and `NaN` are falsey.
 *
 * @static
 * @memberOf _
 * @since 0.1.0
 * @category Array
 * @param {Array} array The array to compact.
 * @returns {Array} Returns the new array of filtered values.
 * @example
 *
 * _.compact([0, 1, false, 2, '', 3]);
 * // => [1, 2, 3]
 */
function compact(array) {
  var result = [];
  var length = array == null ? 0 : array.length;
  for (var index = 0; index < length; index++) {
    var value = array[index];
    if (value) {
      result.push(value);
    }
  }
  return result;
}
|
Creates an array with all falsey values removed. The values `false`, `null`,
`0`, `-0`, `0n`, `""`, `undefined`, and `NaN` are falsey.
@static
@memberOf _
@since 0.1.0
@category Array
@param {Array} array The array to compact.
@returns {Array} Returns the new array of filtered values.
@example
_.compact([0, 1, false, 2, '', 3]);
// => [1, 2, 3]
|
javascript
|
lodash.js
| 6,977
|
[
"array"
] | false
| 4
| 7.68
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
issubclass_
|
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    `issubclass_` is equivalent to the Python built-in ``issubclass``,
    except that it returns False instead of raising a TypeError if one
    of the arguments is not a class.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype

    Examples
    --------
    >>> np.issubclass_(np.int32, int)
    False
    >>> np.issubclass_(np.int32, float)
    False
    >>> np.issubclass_(np.float64, float)
    True
    """
    # Delegate to the built-in, but swallow the TypeError raised when
    # either argument is not actually a class.
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        result = False
    return result
|
Determine if a class is a subclass of a second class.
`issubclass_` is equivalent to the Python built-in ``issubclass``,
except that it returns False instead of raising a TypeError if one
of the arguments is not a class.
Parameters
----------
arg1 : class
Input class. True is returned if `arg1` is a subclass of `arg2`.
arg2 : class or tuple of classes.
Input class. If a tuple of classes, True is returned if `arg1` is a
subclass of any of the tuple elements.
Returns
-------
out : bool
Whether `arg1` is a subclass of `arg2` or not.
See Also
--------
issubsctype, issubdtype, issctype
Examples
--------
>>> np.issubclass_(np.int32, int)
False
>>> np.issubclass_(np.int32, float)
False
>>> np.issubclass_(np.float64, float)
True
|
python
|
numpy/_core/numerictypes.py
| 230
|
[
"arg1",
"arg2"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
addAll
|
public void addAll(Iterator<? extends Number> values) {
while (values.hasNext()) {
add(values.next().doubleValue());
}
}
|
Adds the given values to the dataset.
@param values a series of values, which will be converted to {@code double} values (this may
cause loss of precision)
|
java
|
android/guava/src/com/google/common/math/StatsAccumulator.java
| 96
|
[
"values"
] |
void
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
findAutowiredAnnotation
|
/**
 * Find the first 'autowired' annotation present on the given field or method,
 * checking the configured annotation types in declaration order.
 * @param ao the field or method to inspect
 * @return the first matching merged annotation, or {@code null} if none is present
 */
private @Nullable MergedAnnotation<?> findAutowiredAnnotation(AccessibleObject ao) {
	MergedAnnotations mergedAnnotations = MergedAnnotations.from(ao);
	for (Class<? extends Annotation> annotationType : this.autowiredAnnotationTypes) {
		MergedAnnotation<?> candidate = mergedAnnotations.get(annotationType);
		if (!candidate.isPresent()) {
			continue;
		}
		return candidate;
	}
	return null;
}
|
<em>Native</em> processing method for direct calls with an arbitrary target
instance, resolving all of its fields and methods which are annotated with
one of the configured 'autowired' annotation types.
@param bean the target instance to process
@throws BeanCreationException if autowiring failed
@see #setAutowiredAnnotationTypes(Set)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessor.java
| 608
|
[
"ao"
] | true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
codegen_call_resume
|
def codegen_call_resume(
    resume_codes: list[types.CodeType], resume_names: list[str], cg: PyCodegen
) -> None:
    """
    Calls the provided resume functions.
    Expects the TOS to be in the state:
    [frame N cells, ..., frame 1 cells],
    [
        frame N stack + locals,
        frame N-1 stack + locals,
        ...,
        frame 1 stack + locals
    ]
    Pops the cells and frame values, leaving the result of calling the resume functions on TOS.
    Args:
    - resume_codes: list of resume function code objects to call
    - resume_names: list of the corresponding names of the resume functions
    - cg: PyCodegen object to output instructions to
    """
    # NOTE: We will load cells as we load resume functions
    # load resume functions except the root's
    cg.extend_output(create_copy(2))
    for i, (name, code) in enumerate(zip(resume_names, resume_codes)):
        if i == len(resume_names) - 1:
            break
        # stack: cells, frames, *(resume 1, ...), cells
        if code.co_freevars:
            # Closure case: index into the cells list copied above.
            cg.extend_output(
                [
                    create_dup_top(),
                    cg.create_load_const(i),
                    cg.create_binary_subscr(),
                ]
            )
            cg.make_function_with_closure(name, code)
        else:
            cg.extend_output(cg.load_function_name(name, False, 0))
        cg.extend_output(create_swap(2))
    # Drop the extra cells copy, then collect the N-1 non-root resume
    # functions loaded in the loop into a single list.
    cg.extend_output(
        [
            create_instruction("POP_TOP"),
            create_instruction("BUILD_LIST", arg=len(resume_codes) - 1),
        ]
    )
    # stack: cells, frames, [resume 1, ..., resume N - 1]
    # load root resume function
    cg.extend_output(create_swap(3))
    if resume_codes[-1].co_freevars:
        # Root resume function is a closure: its cells are the last entry.
        cg.extend_output(
            [
                cg.create_load_const(-1),
                cg.create_binary_subscr(),
            ]
        )
        cg.make_function_with_closure(resume_names[-1], resume_codes[-1])
        cg.extend_output(
            [
                *create_rot_n(3),
            ]
        )
    else:
        cg.extend_output(
            [
                create_instruction("POP_TOP"),
                *cg.load_function_name(resume_names[-1], False),
                *create_rot_n(3),
            ]
        )
    # resume 1, [resume N, ..., resume 2], frames
    # load top level-frame; final stack state should be:
    # first resume function (+ NULL),
    # [
    #     [resume N, ..., resume 2],
    #     [
    #         frame N stack + locals,
    #         ...,
    #         frame 2 stack + locals,
    #     ], *(frame 1 stack + locals)
    # ]
    cg.extend_output(
        [
            create_dup_top(),
            create_dup_top(),
            # frames, frames, frames
            cg.create_load_const(-1),
            cg.create_binary_subscr(),
            # frames, frames, frames[-1]
            *create_swap(2),
            # frames, frames[-1], frames
            cg.create_load_const(-1),
            create_instruction("DELETE_SUBSCR"),
        ]
    )
    # TOS: resume 1, remaining resumes, frames (popped), frame 1 stack + locals
    cg.extend_output(
        [
            *create_rot_n(3),
            create_instruction("BUILD_LIST", arg=2),
            *create_swap(2),
            # [resumes, frames (popped)], frame 1 stack + locals
            create_instruction("LIST_EXTEND", arg=1),
        ]
    )
    # TOS: resume 1, [remaining resumes, frames, *(frame 1 stack + locals)]
    cg.extend_output(create_call_function_ex(False, True))
|
Calls the provided resume functions.
Expects the TOS to be in the state:
[frame N cells, ..., frame 1 cells],
[
frame N stack + locals,
frame N-1 stack + locals,
...,
frame 1 stack + locals
]
Pops the cells and frame values, leaving the result of calling the resume functions on TOS.
Args:
- resume_codes: list of resume function code objects to call
- resume_names: list of the corresponding names of the resume functions
- cg: PyCodegen object to output instructions to
|
python
|
torch/_dynamo/symbolic_convert.py
| 3,118
|
[
"resume_codes",
"resume_names",
"cg"
] |
None
| true
| 7
| 6.8
|
pytorch/pytorch
| 96,034
|
google
| false
|
membershipManager
|
/**
 * Returns the {@link StreamsMembershipManager} used by this request manager.
 *
 * @return the membership manager
 */
public StreamsMembershipManager membershipManager() {
    return membershipManager;
}
|
Generate a heartbeat request to leave the group if the state is still LEAVING when this is
called to close the consumer.
<p/>
Note that when closing the consumer, even though an event to Unsubscribe is generated
(triggers callbacks and sends leave group), it could be the case that the Unsubscribe event
processing does not complete in time and moves on to close the managers (ex. calls to
close with zero timeout). So we could end up on this pollOnClose with the member in
{@link MemberState#PREPARE_LEAVING} (ex. app thread did not have the time to process the
event to execute callbacks), or {@link MemberState#LEAVING} (ex. the leave request could
not be sent due to coordinator not available at that time). In all cases, the pollOnClose
will be triggered right before sending the final requests, so we ensure that we generate
the request to leave if needed.
@param currentTimeMs The current system time in milliseconds at which the method was called
@return PollResult containing the request to send
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java
| 420
|
[] |
StreamsMembershipManager
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
rowKeySet
|
/**
 * Returns an immutable set of the valid row keys, including those that are associated with
 * null values only.
 *
 * @return immutable set of row keys
 */
@Override
public ImmutableSet<R> rowKeySet() {
  return rowKeyToIndex.keySet();
}
|
Returns an immutable set of the valid row keys, including those that are associated with null
values only.
@return immutable set of row keys
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 727
|
[] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
isStringConversionBetter
|
/**
 * Return whether String based conversion is better based on the target type. This is
 * required when ObjectTo... conversion produces incorrect results.
 * @param sourceType the source type to test
 * @param targetType the target type to test
 * @return if string conversion is better
 */
private boolean isStringConversionBetter(TypeDescriptor sourceType, TypeDescriptor targetType) {
	// If an ObjectTo... converter would be used then there might be a
	// better StringTo... version
	if (this.conversionService instanceof ApplicationConversionService applicationConversionService
			&& applicationConversionService.isConvertViaObjectSourceType(sourceType, targetType)) {
		return true;
	}
	// StringToArrayConverter / StringToCollectionConverter are better than
	// ObjectToArrayConverter / ObjectToCollectionConverter
	if (!targetType.isArray() && !targetType.isCollection()) {
		return false;
	}
	return !targetType.equals(BYTE_ARRAY);
}
|
Return if String based conversion is better based on the target type. This is
required when ObjectTo... conversion produces incorrect results.
@param sourceType the source type to test
@param targetType the target type to test
@return if string conversion is better
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesCharSequenceToObjectConverter.java
| 87
|
[
"sourceType",
"targetType"
] | true
| 5
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
load
|
def load(fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
    """Deserialize data as JSON read from a file.

    If :data:`~flask.current_app` is available, its
    :meth:`app.json.load() <flask.json.provider.JSONProvider.load>`
    method is used, otherwise :func:`json.load` is used.

    :param fp: A file opened for reading text or UTF-8 bytes.
    :param kwargs: Arguments passed to the ``load`` implementation.

    .. versionchanged:: 2.3
        The ``app`` parameter was removed.

    .. versionchanged:: 2.2
        Calls ``current_app.json.load``, allowing an app to override
        the behavior.

    .. versionchanged:: 2.2
        The ``app`` parameter will be removed in Flask 2.3.

    .. versionchanged:: 2.0
        ``encoding`` will be removed in Flask 2.1. The file must be text
        mode, or binary mode with UTF-8 bytes.
    """
    # Fall back to the stdlib json module outside of an app context.
    if not current_app:
        return _json.load(fp, **kwargs)
    return current_app.json.load(fp, **kwargs)
|
Deserialize data as JSON read from a file.
If :data:`~flask.current_app` is available, it will use its
:meth:`app.json.load() <flask.json.provider.JSONProvider.load>`
method, otherwise it will use :func:`json.load`.
:param fp: A file opened for reading text or UTF-8 bytes.
:param kwargs: Arguments passed to the ``load`` implementation.
.. versionchanged:: 2.3
The ``app`` parameter was removed.
.. versionchanged:: 2.2
Calls ``current_app.json.load``, allowing an app to override
the behavior.
.. versionchanged:: 2.2
The ``app`` parameter will be removed in Flask 2.3.
.. versionchanged:: 2.0
``encoding`` will be removed in Flask 2.1. The file must be text
mode, or binary mode with UTF-8 bytes.
|
python
|
src/flask/json/__init__.py
| 108
|
[
"fp"
] |
t.Any
| true
| 2
| 6.4
|
pallets/flask
| 70,946
|
sphinx
| false
|
forInitializerClasses
|
/**
 * Factory method to create a new {@link AotApplicationContextInitializer}
 * instance that delegates to other initializers loaded from the given set
 * of class names.
 * @param <C> the application context type
 * @param initializerClassNames the class names of the initializers to load
 * @return a new {@link AotApplicationContextInitializer} instance
 */
static <C extends ConfigurableApplicationContext> AotApplicationContextInitializer<C> forInitializerClasses(
		String... initializerClassNames) {

	Assert.noNullElements(initializerClassNames, "'initializerClassNames' must not contain null elements");
	return context -> initialize(context, initializerClassNames);
}
|
Factory method to create a new {@link AotApplicationContextInitializer}
instance that delegates to other initializers loaded from the given set
of class names.
@param <C> the application context type
@param initializerClassNames the class names of the initializers to load
@return a new {@link AotApplicationContextInitializer} instance
|
java
|
spring-context/src/main/java/org/springframework/context/aot/AotApplicationContextInitializer.java
| 57
|
[] | true
| 1
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
argBinding
|
/**
 * Take the arguments at the method execution join point and output a set of arguments
 * to the advice method.
 * @param jp the current JoinPoint
 * @param jpMatch the join point match that matched this execution join point
 * @param returnValue the return value from the method execution (may be null)
 * @param ex the exception thrown by the method execution (may be null)
 * @return the empty array if there are no arguments
 */
protected @Nullable Object[] argBinding(JoinPoint jp, @Nullable JoinPointMatch jpMatch,
		@Nullable Object returnValue, @Nullable Throwable ex) {

	calculateArgumentBindings();

	// AMC start
	@Nullable Object[] adviceInvocationArgs = new Object[this.parameterTypes.length];
	// Tracks how many advice parameters have been bound so far; checked at the end.
	int numBound = 0;

	if (this.joinPointArgumentIndex != -1) {
		adviceInvocationArgs[this.joinPointArgumentIndex] = jp;
		numBound++;
	}
	else if (this.joinPointStaticPartArgumentIndex != -1) {
		adviceInvocationArgs[this.joinPointStaticPartArgumentIndex] = jp.getStaticPart();
		numBound++;
	}

	if (!CollectionUtils.isEmpty(this.argumentBindings)) {
		// binding from pointcut match
		if (jpMatch != null) {
			PointcutParameter[] parameterBindings = jpMatch.getParameterBindings();
			for (PointcutParameter parameter : parameterBindings) {
				String name = parameter.getName();
				Integer index = this.argumentBindings.get(name);
				Assert.state(index != null, "Index must not be null");
				adviceInvocationArgs[index] = parameter.getBinding();
				numBound++;
			}
		}
		// binding from returning clause
		if (this.returningName != null) {
			Integer index = this.argumentBindings.get(this.returningName);
			Assert.state(index != null, "Index must not be null");
			adviceInvocationArgs[index] = returnValue;
			numBound++;
		}
		// binding from thrown exception
		if (this.throwingName != null) {
			Integer index = this.argumentBindings.get(this.throwingName);
			Assert.state(index != null, "Index must not be null");
			adviceInvocationArgs[index] = ex;
			numBound++;
		}
	}

	// Fail fast if any advice parameter was left unbound.
	if (numBound != this.parameterTypes.length) {
		throw new IllegalStateException("Required to bind " + this.parameterTypes.length +
				" arguments, but only bound " + numBound + " (JoinPointMatch " +
				(jpMatch == null ? "was NOT" : "WAS") + " bound in invocation)");
	}

	return adviceInvocationArgs;
}
|
Take the arguments at the method execution join point and output a set of arguments
to the advice method.
@param jp the current JoinPoint
@param jpMatch the join point match that matched this execution join point
@param returnValue the return value from the method execution (may be null)
@param ex the exception thrown by the method execution (may be null)
@return the empty array if there are no arguments
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AbstractAspectJAdvice.java
| 556
|
[
"jp",
"jpMatch",
"returnValue",
"ex"
] | true
| 9
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
retrieveApplicationListeners
|
/**
 * Actually retrieve the application listeners for the given event and source type.
 * @param eventType the event type
 * @param sourceType the event source type
 * @param retriever the ListenerRetriever, if supposed to populate one (for caching purposes)
 * @return the pre-filtered list of application listeners for the given event and source type
 */
@SuppressWarnings("NullAway") // Dataflow analysis limitation
private Collection<ApplicationListener<?>> retrieveApplicationListeners(
		ResolvableType eventType, @Nullable Class<?> sourceType, @Nullable CachedListenerRetriever retriever) {

	List<ApplicationListener<?>> allListeners = new ArrayList<>();
	// The filtered* collections are only populated when a cache retriever is supplied.
	Set<ApplicationListener<?>> filteredListeners = (retriever != null ? new LinkedHashSet<>() : null);
	Set<String> filteredListenerBeans = (retriever != null ? new LinkedHashSet<>() : null);

	Set<ApplicationListener<?>> listeners;
	Set<String> listenerBeans;
	// Snapshot the registered listeners under the lock, then work on the copies.
	synchronized (this.defaultRetriever) {
		listeners = new LinkedHashSet<>(this.defaultRetriever.applicationListeners);
		listenerBeans = new LinkedHashSet<>(this.defaultRetriever.applicationListenerBeans);
	}

	// Add programmatically registered listeners, including ones coming
	// from ApplicationListenerDetector (singleton beans and inner beans).
	for (ApplicationListener<?> listener : listeners) {
		if (supportsEvent(listener, eventType, sourceType)) {
			if (retriever != null) {
				filteredListeners.add(listener);
			}
			allListeners.add(listener);
		}
	}

	// Add listeners by bean name, potentially overlapping with programmatically
	// registered listeners above - but here potentially with additional metadata.
	if (!listenerBeans.isEmpty()) {
		ConfigurableBeanFactory beanFactory = getBeanFactory();
		for (String listenerBeanName : listenerBeans) {
			try {
				if (supportsEvent(beanFactory, listenerBeanName, eventType)) {
					ApplicationListener<?> listener =
							beanFactory.getBean(listenerBeanName, ApplicationListener.class);
					// Despite best efforts to avoid it, unwrapped proxies (singleton targets) can end up in the
					// list of programmatically registered listeners. In order to avoid duplicates, we need to find
					// and replace them by their proxy counterparts, because if both a proxy and its target end up
					// in 'allListeners', listeners will fire twice.
					ApplicationListener<?> unwrappedListener =
							(ApplicationListener<?>) AopProxyUtils.getSingletonTarget(listener);
					if (listener != unwrappedListener) {
						if (filteredListeners != null && filteredListeners.contains(unwrappedListener)) {
							filteredListeners.remove(unwrappedListener);
							filteredListeners.add(listener);
						}
						if (allListeners.contains(unwrappedListener)) {
							allListeners.remove(unwrappedListener);
							allListeners.add(listener);
						}
					}
					if (!allListeners.contains(listener) && supportsEvent(listener, eventType, sourceType)) {
						if (retriever != null) {
							if (beanFactory.isSingleton(listenerBeanName)) {
								filteredListeners.add(listener);
							}
							else {
								// Non-singleton beans are cached by name, not by instance.
								filteredListenerBeans.add(listenerBeanName);
							}
						}
						allListeners.add(listener);
					}
				}
				else {
					// Remove non-matching listeners that originally came from
					// ApplicationListenerDetector, possibly ruled out by additional
					// BeanDefinition metadata (for example, factory method generics) above.
					Object listener = beanFactory.getSingleton(listenerBeanName);
					if (retriever != null) {
						filteredListeners.remove(listener);
					}
					allListeners.remove(listener);
				}
			}
			catch (NoSuchBeanDefinitionException ex) {
				// Singleton listener instance (without backing bean definition) disappeared -
				// probably in the middle of the destruction phase
			}
		}
	}

	AnnotationAwareOrderComparator.sort(allListeners);
	if (retriever != null) {
		if (CollectionUtils.isEmpty(filteredListenerBeans)) {
			retriever.applicationListeners = new LinkedHashSet<>(allListeners);
			retriever.applicationListenerBeans = filteredListenerBeans;
		}
		else {
			retriever.applicationListeners = filteredListeners;
			retriever.applicationListenerBeans = filteredListenerBeans;
		}
	}
	return allListeners;
}
|
Actually retrieve the application listeners for the given event and source type.
@param eventType the event type
@param sourceType the event source type
@param retriever the ListenerRetriever, if supposed to populate one (for caching purposes)
@return the pre-filtered list of application listeners for the given event and source type
|
java
|
spring-context/src/main/java/org/springframework/context/event/AbstractApplicationEventMulticaster.java
| 232
|
[
"eventType",
"sourceType",
"retriever"
] | true
| 19
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
isConvertViaObjectSourceType
|
/**
 * Return {@code true} if objects of {@code sourceType} can be converted to the
 * {@code targetType} and the converter has {@code Object.class} as a supported source
 * type.
 * @param sourceType the source type to test
 * @param targetType the target type to test
 * @return if conversion happens through an {@code ObjectTo...} converter
 * @since 2.4.3
 */
public boolean isConvertViaObjectSourceType(TypeDescriptor sourceType, TypeDescriptor targetType) {
	GenericConverter converter = getConverter(sourceType, targetType);
	if (converter == null) {
		return false;
	}
	Set<ConvertiblePair> convertibleTypes = converter.getConvertibleTypes();
	if (convertibleTypes == null) {
		return false;
	}
	for (ConvertiblePair convertiblePair : convertibleTypes) {
		if (Object.class.equals(convertiblePair.getSourceType())) {
			return true;
		}
	}
	return false;
}
|
Return {@code true} if objects of {@code sourceType} can be converted to the
{@code targetType} and the converter has {@code Object.class} as a supported source
type.
@param sourceType the source type to test
@param targetType the target type to test
@return if conversion happens through an {@code ObjectTo...} converter
@since 2.4.3
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/convert/ApplicationConversionService.java
| 179
|
[
"sourceType",
"targetType"
] | true
| 4
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
hasName
|
/**
 * Return whether the central directory entry at the given position has the given name,
 * after applying any stored name offset and optional required name prefix.
 * @param lookupIndex the index used to look up the stored name offset
 * @param centralRecord the central directory file header record for the entry
 * @param pos the position of the central directory record within the data
 * @param namePrefix a required name prefix, or {@code null} if none
 * @param name the name to match (matched case-insensitively per the final argument below)
 * @return {@code true} if the entry name matches
 */
private boolean hasName(int lookupIndex, ZipCentralDirectoryFileHeaderRecord centralRecord, long pos,
		CharSequence namePrefix, CharSequence name) {
	// Skip past the fixed header to the file name field, plus any recorded offset.
	int offset = this.nameOffsetLookups.get(lookupIndex);
	pos += ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET + offset;
	int len = centralRecord.fileNameLength() - offset;
	ByteBuffer buffer = ByteBuffer.allocate(ZipString.BUFFER_SIZE);
	if (namePrefix != null) {
		// startsWith returns the number of bytes consumed, or -1 when the prefix is absent.
		int startsWithNamePrefix = ZipString.startsWith(buffer, this.data, pos, len, namePrefix);
		if (startsWithNamePrefix == -1) {
			return false;
		}
		pos += startsWithNamePrefix;
		len -= startsWithNamePrefix;
	}
	return ZipString.matches(buffer, this.data, pos, len, name, true);
}
|
Return the entry at the specified index.
@param index the entry index
@return the entry
@throws IndexOutOfBoundsException if the index is out of bounds
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 293
|
[
"lookupIndex",
"centralRecord",
"pos",
"namePrefix",
"name"
] | true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
handle_event_submit
|
def handle_event_submit(event: TriggerEvent, *, task_instance: TaskInstance, session: Session) -> None:
    """
    Handle the submit event for a given task instance.

    This function sets the next method and next kwargs of the task instance,
    as well as its state to scheduled. It also adds the event's payload
    into the kwargs for the task.

    :param task_instance: The task instance to handle the submit event for.
    :param session: The session to be used for the database callback sink.
    """
    from airflow.utils.state import TaskInstanceState

    # Merge the event's payload into the kwargs the task will resume with.
    resume_kwargs = task_instance.next_kwargs or {}
    resume_kwargs["event"] = event.payload
    task_instance.next_kwargs = resume_kwargs
    # This trigger no longer owns the task instance.
    task_instance.trigger_id = None
    # Hand the task back to the scheduler.
    task_instance.state = TaskInstanceState.SCHEDULED
    task_instance.scheduled_dttm = timezone.utcnow()
    session.flush()
|
Handle the submit event for a given task instance.
This function sets the next method and next kwargs of the task instance,
as well as its state to scheduled. It also adds the event's payload
into the kwargs for the task.
:param task_instance: The task instance to handle the submit event for.
:param session: The session to be used for the database callback sink.
|
python
|
airflow-core/src/airflow/models/trigger.py
| 409
|
[
"event",
"task_instance",
"session"
] |
None
| true
| 2
| 7.2
|
apache/airflow
| 43,597
|
sphinx
| false
|
lastIndexOf
|
/**
 * Returns the index of the last appearance of the value {@code target} in {@code array}.
 *
 * @param array an array of {@code boolean} values, possibly empty
 * @param target a primitive {@code boolean} value
 * @return the greatest index {@code i} for which {@code array[i] == target}, or {@code -1} if no
 *     such index exists.
 */
public static int lastIndexOf(boolean[] array, boolean target) {
  return lastIndexOf(array, target, 0, array.length);
}
|
Returns the index of the last appearance of the value {@code target} in {@code array}.
@param array an array of {@code boolean} values, possibly empty
@param target a primitive {@code boolean} value
@return the greatest index {@code i} for which {@code array[i] == target}, or {@code -1} if no
such index exists.
|
java
|
android/guava/src/com/google/common/primitives/Booleans.java
| 212
|
[
"array",
"target"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
getLast
|
/**
 * Returns the last element of {@code iterable}. For a {@link List}, delegates to a helper;
 * for a {@link SortedSet}, uses {@code last()}; otherwise iterates to the end.
 *
 * @return the last element of {@code iterable}
 * @throws NoSuchElementException if the iterable is empty
 */
@ParametricNullness
public static <T extends @Nullable Object> T getLast(Iterable<T> iterable) {
  // TODO(kevinb): Support a concurrently modified collection?
  if (iterable instanceof List) {
    List<T> list = (List<T>) iterable;
    if (list.isEmpty()) {
      throw new NoSuchElementException();
    }
    return getLastInNonemptyList(list);
  } else if (iterable instanceof SortedSet) {
    // A SortedSet knows its last element directly.
    return ((SortedSet<T>) iterable).last();
  }
  // General case: advance the iterator to the final element.
  return Iterators.getLast(iterable.iterator());
}
|
Returns the last element of {@code iterable}. If {@code iterable} is a {@link List} with {@link
RandomAccess} support, then this operation is guaranteed to be {@code O(1)}.
<p><b>{@code Stream} equivalent:</b> {@link Streams#findLast Streams.findLast(stream).get()}
<p><b>Java 21+ users:</b> if {@code iterable} is a {@code SequencedCollection} (e.g., any list),
consider using {@code collection.getLast()} instead.
@return the last element of {@code iterable}
@throws NoSuchElementException if the iterable is empty
|
java
|
android/guava/src/com/google/common/collect/Iterables.java
| 854
|
[
"iterable"
] |
T
| true
| 4
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
insecure
|
/**
 * Gets the singleton instance based on {@link ThreadLocalRandom#current()}; <b>which is not
 * cryptographically secure</b>; use {@link #secure()} for a cryptographically secure source.
 *
 * @return the singleton instance based on {@link ThreadLocalRandom#current()}.
 * @see ThreadLocalRandom#current()
 * @see #secure()
 * @since 3.16.0
 */
public static RandomStringUtils insecure() {
    return INSECURE;
}
|
Gets the singleton instance based on {@link ThreadLocalRandom#current()}; <b>which is not cryptographically
secure</b>; use {@link #secure()} to use an algorithms/providers specified in the
{@code securerandom.strongAlgorithms} {@link Security} property.
<p>
The method {@link ThreadLocalRandom#current()} is called on-demand.
</p>
@return the singleton instance based on {@link ThreadLocalRandom#current()}.
@see ThreadLocalRandom#current()
@see #secure()
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomStringUtils.java
| 118
|
[] |
RandomStringUtils
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
__call__
|
def __call__(self, label='fast', verbose=1, extra_argv=None,
             doctests=False, coverage=False, durations=-1, tests=None):
    """
    Run tests for module using pytest.

    Parameters
    ----------
    label : {'fast', 'full'}, optional
        Identifies the tests to run. When set to 'fast', tests decorated
        with `pytest.mark.slow` are skipped, when 'full', the slow marker
        is ignored.
    verbose : int, optional
        Verbosity value for test outputs, in the range 1-3. Default is 1.
    extra_argv : list, optional
        List with any extra arguments to pass to pytests.
    doctests : bool, optional
        .. note:: Not supported
    coverage : bool, optional
        If True, report coverage of NumPy code. Default is False.
        Requires installation of (pip) pytest-cov.
    durations : int, optional
        If < 0, do nothing, If 0, report time of all tests, if > 0,
        report the time of the slowest `timer` tests. Default is -1.
    tests : test or list of tests
        Tests to be executed with pytest '--pyargs'

    Returns
    -------
    result : bool
        Return True on success, false otherwise.

    Notes
    -----
    Each NumPy module exposes `test` in its namespace to run all tests for
    it. For example, to run all tests for numpy.lib:

    >>> np.lib.test() #doctest: +SKIP

    Examples
    --------
    >>> result = np.lib.test() #doctest: +SKIP
    ...
    1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
    >>> result
    True
    """
    import pytest
    module = sys.modules[self.module_name]
    module_path = os.path.abspath(module.__path__[0])
    # setup the pytest arguments
    pytest_args = ["-l"]
    # offset verbosity. The "-q" cancels a "-v".
    pytest_args += ["-q"]
    # Filter out annoying import messages. Want these in both develop and
    # release mode.
    pytest_args += [
        "-W ignore:Not importing directory",
        "-W ignore:numpy.dtype size changed",
        "-W ignore:numpy.ufunc size changed",
        "-W ignore::UserWarning:cpuinfo",
    ]
    # When testing matrices, ignore their PendingDeprecationWarnings
    pytest_args += [
        "-W ignore:the matrix subclass is not",
        "-W ignore:Importing from numpy.matlib is",
    ]
    if doctests:
        pytest_args += ["--doctest-modules"]
    if extra_argv:
        pytest_args += list(extra_argv)
    if verbose > 1:
        # verbose=2 -> "-v", verbose=3 -> "-vv", etc.
        pytest_args += ["-" + "v" * (verbose - 1)]
    if coverage:
        pytest_args += ["--cov=" + module_path]
    if label == "fast":
        # not importing at the top level to avoid circular import of module
        from numpy.testing import IS_PYPY
        if IS_PYPY:
            pytest_args += ["-m", "not slow and not slow_pypy"]
        else:
            pytest_args += ["-m", "not slow"]
    elif label != "full":
        # Any other label is treated as a pytest marker expression.
        pytest_args += ["-m", label]
    if durations >= 0:
        pytest_args += [f"--durations={durations}"]
    if tests is None:
        tests = [self.module_name]
    pytest_args += ["--pyargs"] + list(tests)
    # run tests.
    _show_numpy_info()
    try:
        code = pytest.main(pytest_args)
    except SystemExit as exc:
        # pytest.main normally returns an exit code, but guard against
        # a SystemExit escaping (e.g. raised by a plugin).
        code = exc.code
    return code == 0
|
Run tests for module using pytest.
Parameters
----------
label : {'fast', 'full'}, optional
Identifies the tests to run. When set to 'fast', tests decorated
with `pytest.mark.slow` are skipped, when 'full', the slow marker
is ignored.
verbose : int, optional
Verbosity value for test outputs, in the range 1-3. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to pytests.
doctests : bool, optional
.. note:: Not supported
coverage : bool, optional
If True, report coverage of NumPy code. Default is False.
Requires installation of (pip) pytest-cov.
durations : int, optional
If < 0, do nothing, If 0, report time of all tests, if > 0,
report the time of the slowest `timer` tests. Default is -1.
tests : test or list of tests
Tests to be executed with pytest '--pyargs'
Returns
-------
result : bool
Return True on success, false otherwise.
Notes
-----
Each NumPy module exposes `test` in its namespace to run all tests for
it. For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
...
1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
>>> result
True
|
python
|
numpy/_pytesttester.py
| 79
|
[
"self",
"label",
"verbose",
"extra_argv",
"doctests",
"coverage",
"durations",
"tests"
] | false
| 11
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
compareTo
|
@Override
public int compareTo(ConfigurationPropertyName other) {
return compare(this, other);
}
|
Returns {@code true} if this element is an ancestor (immediate or nested parent) of
the specified name.
@param name the name to check
@return {@code true} if this name is an ancestor
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 306
|
[
"other"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
run
|
public static ConfigurableApplicationContext run(Class<?>[] primarySources, String[] args) {
return new SpringApplication(primarySources).run(args);
}
|
Static helper that can be used to run a {@link SpringApplication} from the
specified sources using default settings and user supplied arguments.
@param primarySources the primary sources to load
@param args the application arguments (usually passed from a Java main method)
@return the running {@link ApplicationContext}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 1,364
|
[
"primarySources",
"args"
] |
ConfigurableApplicationContext
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getRootCauseStackTraceList
|
public static List<String> getRootCauseStackTraceList(final Throwable throwable) {
if (throwable == null) {
return Collections.emptyList();
}
final Throwable[] throwables = getThrowables(throwable);
final int count = throwables.length;
final List<String> frames = new ArrayList<>();
List<String> nextTrace = getStackFrameList(throwables[count - 1]);
for (int i = count; --i >= 0;) {
final List<String> trace = nextTrace;
if (i != 0) {
nextTrace = getStackFrameList(throwables[i - 1]);
removeCommonFrames(trace, nextTrace);
}
if (i == count - 1) {
frames.add(throwables[i].toString());
} else {
frames.add(WRAPPED_MARKER + throwables[i].toString());
}
frames.addAll(trace);
}
return frames;
}
|
Gets a compact stack trace for the root cause of the supplied {@link Throwable}.
<p>
The output of this method is consistent across JDK versions. It consists of the root exception followed by each of
its wrapping exceptions separated by '[wrapped]'. Note that this is the opposite order to the JDK1.4 display.
</p>
@param throwable the throwable to examine, may be null.
@return a list of stack trace frames, never null.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
| 366
|
[
"throwable"
] | true
| 5
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
equals
|
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
QuorumInfo that = (QuorumInfo) o;
return leaderId == that.leaderId
&& leaderEpoch == that.leaderEpoch
&& highWatermark == that.highWatermark
&& Objects.equals(voters, that.voters)
&& Objects.equals(observers, that.observers)
&& Objects.equals(nodes, that.nodes);
}
|
@return The voter nodes in the Raft cluster, or an empty map if KIP-853 is not enabled.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/QuorumInfo.java
| 80
|
[
"o"
] | true
| 9
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
column
|
@Override
public Map<R, @Nullable V> column(C columnKey) {
checkNotNull(columnKey);
Integer columnIndex = columnKeyToIndex.get(columnKey);
if (columnIndex == null) {
return emptyMap();
} else {
return new Column(columnIndex);
}
}
|
Returns a view of all mappings that have the given column key. If the column key isn't in
{@link #columnKeySet()}, an empty immutable map is returned.
<p>Otherwise, for each row key in {@link #rowKeySet()}, the returned map associates the row key
with the corresponding value in the table. Changes to the returned map will update the
underlying table, and vice versa.
@param columnKey key of column to search for in the table
@return the corresponding map from row keys to values
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 594
|
[
"columnKey"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
put
|
public JSONArray put(int index, boolean value) throws JSONException {
return put(index, (Boolean) value);
}
|
Sets the value at {@code index} to {@code value}, null padding this array to the
required length if necessary. If a value already exists at {@code
index}, it will be replaced.
@param index the index to set the value to
@param value the value
@return this array.
@throws JSONException if processing of json failed
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 192
|
[
"index",
"value"
] |
JSONArray
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
runAndHandleErrors
|
public int runAndHandleErrors(String... args) {
String[] argsWithoutDebugFlags = removeDebugFlags(args);
boolean debug = argsWithoutDebugFlags.length != args.length;
if (debug) {
System.setProperty("debug", "true");
}
try {
ExitStatus result = run(argsWithoutDebugFlags);
// The caller will hang up if it gets a non-zero status
if (result != null && result.isHangup()) {
return (result.getCode() > 0) ? result.getCode() : 0;
}
return 0;
}
catch (NoArgumentsException ex) {
showUsage();
return 1;
}
catch (Exception ex) {
return handleError(debug, ex);
}
}
|
Run the appropriate and handle and errors.
@param args the input arguments
@return a return status code (non boot is used to indicate an error)
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/CommandRunner.java
| 166
|
[] | true
| 7
| 8.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
select_proxy
|
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The url being for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get(urlparts.scheme, proxies.get("all"))
proxy_keys = [
urlparts.scheme + "://" + urlparts.hostname,
urlparts.scheme,
"all://" + urlparts.hostname,
"all",
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
|
Select a proxy for the url, if applicable.
:param url: The url being for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
|
python
|
src/requests/utils.py
| 828
|
[
"url",
"proxies"
] | false
| 5
| 6.24
|
psf/requests
| 53,586
|
sphinx
| false
|
|
isAssignable
|
private static boolean isAssignable(final Type type, final Class<?> toClass) {
if (type == null) {
// consistency with ClassUtils.isAssignable() behavior
return toClass == null || !toClass.isPrimitive();
}
// only a null type can be assigned to null type which
// would have cause the previous to return true
if (toClass == null) {
return false;
}
// all types are assignable to themselves
if (toClass.equals(type)) {
return true;
}
if (type instanceof Class<?>) {
// just comparing two classes
return ClassUtils.isAssignable((Class<?>) type, toClass);
}
if (type instanceof ParameterizedType) {
// only have to compare the raw type to the class
return isAssignable(getRawType((ParameterizedType) type), toClass);
}
// *
if (type instanceof TypeVariable<?>) {
// if any of the bounds are assignable to the class, then the
// type is assignable to the class.
for (final Type bound : ((TypeVariable<?>) type).getBounds()) {
if (isAssignable(bound, toClass)) {
return true;
}
}
return false;
}
// the only classes to which a generic array type can be assigned
// are class Object and array classes
if (type instanceof GenericArrayType) {
return toClass.equals(Object.class)
|| toClass.isArray() && isAssignable(((GenericArrayType) type).getGenericComponentType(), toClass.getComponentType());
}
// wildcard types are not assignable to a class (though one would think
// "? super Object" would be assignable to Object)
if (type instanceof WildcardType) {
return false;
}
throw new IllegalStateException("found an unhandled type: " + type);
}
|
Tests if the subject type may be implicitly cast to the target class following the Java generics rules.
@param type the subject type to be assigned to the target type.
@param toClass the target class.
@return {@code true} if {@code type} is assignable to {@code toClass}.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 935
|
[
"type",
"toClass"
] | true
| 13
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
loadBeanDefinitions
|
protected void loadBeanDefinitions(XmlBeanDefinitionReader reader) throws BeansException, IOException {
Resource[] configResources = getConfigResources();
if (configResources != null) {
reader.loadBeanDefinitions(configResources);
}
String[] configLocations = getConfigLocations();
if (configLocations != null) {
reader.loadBeanDefinitions(configLocations);
}
}
|
Load the bean definitions with the given XmlBeanDefinitionReader.
<p>The lifecycle of the bean factory is handled by the {@link #refreshBeanFactory}
method; hence this method is just supposed to load and/or register bean definitions.
@param reader the XmlBeanDefinitionReader to use
@throws BeansException in case of bean registration errors
@throws IOException if the required XML document isn't found
@see #refreshBeanFactory
@see #getConfigLocations
@see #getResources
@see #getResourcePatternResolver
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractXmlApplicationContext.java
| 124
|
[
"reader"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
onClientRequestBodySent
|
function onClientRequestBodySent({ request }) {
if (typeof request[kInspectorRequestId] !== 'string') {
return;
}
Network.dataSent({
requestId: request[kInspectorRequestId],
finished: true,
});
}
|
Mark a request body as fully sent.
@param {{request: undici.Request}} event
|
javascript
|
lib/internal/inspector/network_undici.js
| 133
|
[] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
get_subgraph_by_path
|
def get_subgraph_by_path(
graph_view: GraphView, paths: Union[str, list[str]]
) -> list[fx.Node]:
"""
Get subgraph by path(s).
Args:
graph_view (object): Root graph view object.
paths (str or list of str): Path(s) to subgraph.
Returns:
list[fx.Node]: fx nodes belong to the subgraph
"""
def get_node_by_path(node: GraphView, path: str) -> GraphView:
for p in path.split("."):
if p in node.children:
node = node.children[p]
else:
return GraphView("", object)
return node
if isinstance(paths, list):
nodes = list(
itertools.chain.from_iterable(
get_node_by_path(graph_view, p).data for p in paths
)
)
return nodes
else:
node = get_node_by_path(graph_view, paths)
return node.data
|
Get subgraph by path(s).
Args:
graph_view (object): Root graph view object.
paths (str or list of str): Path(s) to subgraph.
Returns:
list[fx.Node]: fx nodes belong to the subgraph
|
python
|
torch/_inductor/fx_passes/graph_view.py
| 211
|
[
"graph_view",
"paths"
] |
list[fx.Node]
| true
| 6
| 7.92
|
pytorch/pytorch
| 96,034
|
google
| false
|
of
|
static Option of(String name, String valueDescription, String description, boolean optionalValue) {
return new Option(name, valueDescription, description, optionalValue);
}
|
Factory method to create value option.
@param name the name of the option
@param valueDescription a description of the expected value
@param description a description of the option
@param optionalValue whether the value is optional
@return a new {@link Option} instance
|
java
|
loader/spring-boot-jarmode-tools/src/main/java/org/springframework/boot/jarmode/tools/Command.java
| 381
|
[
"name",
"valueDescription",
"description",
"optionalValue"
] |
Option
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getAndDecrement
|
public short getAndDecrement() {
final short last = value;
value--;
return last;
}
|
Decrements this instance's value by 1; this method returns the value associated with the instance
immediately prior to the decrement operation. This method is not thread safe.
@return the value associated with the instance before it was decremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableShort.java
| 234
|
[] | true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
updateFetchPositions
|
private boolean updateFetchPositions(final Timer timer) {
try {
CheckAndUpdatePositionsEvent checkAndUpdatePositionsEvent = new CheckAndUpdatePositionsEvent(calculateDeadlineMs(timer));
wakeupTrigger.setActiveTask(checkAndUpdatePositionsEvent.future());
applicationEventHandler.addAndGet(checkAndUpdatePositionsEvent);
} catch (TimeoutException e) {
return false;
} finally {
wakeupTrigger.clearTask();
}
return true;
}
|
Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.
@return true iff the operation completed without timing out
@throws AuthenticationException If authentication fails. See the exception for more details
@throws NoOffsetForPartitionException If no offset is stored for a given partition and no offset reset policy is
defined
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 1,960
|
[
"timer"
] | true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
splitByWholeSeparatorWorker
|
private static String[] splitByWholeSeparatorWorker(final String str, final String separator, final int max, final boolean preserveAllTokens) {
if (str == null) {
return null;
}
final int len = str.length();
if (len == 0) {
return ArrayUtils.EMPTY_STRING_ARRAY;
}
if (separator == null || EMPTY.equals(separator)) {
// Split on whitespace.
return splitWorker(str, null, max, preserveAllTokens);
}
final int separatorLength = separator.length();
final ArrayList<String> substrings = new ArrayList<>();
int numberOfSubstrings = 0;
int beg = 0;
int end = 0;
while (end < len) {
end = str.indexOf(separator, beg);
if (end > -1) {
if (end > beg) {
numberOfSubstrings += 1;
if (numberOfSubstrings == max) {
end = len;
substrings.add(str.substring(beg));
} else {
// The following is OK, because String.substring( beg, end ) excludes
// the character at the position 'end'.
substrings.add(str.substring(beg, end));
// Set the starting point for the next search.
// The following is equivalent to beg = end + (separatorLength - 1) + 1,
// which is the right calculation:
beg = end + separatorLength;
}
} else {
// We found a consecutive occurrence of the separator, so skip it.
if (preserveAllTokens) {
numberOfSubstrings += 1;
if (numberOfSubstrings == max) {
end = len;
substrings.add(str.substring(beg));
} else {
substrings.add(EMPTY);
}
}
beg = end + separatorLength;
}
} else {
// String.substring( beg ) goes from 'beg' to the end of the String.
substrings.add(str.substring(beg));
end = len;
}
}
return substrings.toArray(ArrayUtils.EMPTY_STRING_ARRAY);
}
|
Performs the logic for the {@code splitByWholeSeparatorPreserveAllTokens} methods.
@param str the String to parse, may be {@code null}.
@param separator String containing the String to be used as a delimiter, {@code null} splits on whitespace.
@param max the maximum number of elements to include in the returned array. A zero or negative value implies no limit.
@param preserveAllTokens if {@code true}, adjacent separators are treated as empty token separators; if {@code false}, adjacent separators are treated as
one separator.
@return an array of parsed Strings, {@code null} if null String input.
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,354
|
[
"str",
"separator",
"max",
"preserveAllTokens"
] | true
| 11
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
load_app
|
def load_app(self) -> Flask:
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
if self._loaded_app is not None:
return self._loaded_app
app: Flask | None = None
if self.create_app is not None:
app = self.create_app()
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, maxsplit=1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(import_name, None, raise_if_not_found=False)
if app is not None:
break
if app is None:
raise NoAppException(
"Could not locate a Flask application. Use the"
" 'flask --app' option, 'FLASK_APP' environment"
" variable, or a 'wsgi.py' or 'app.py' file in the"
" current directory."
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
|
Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
|
python
|
src/flask/cli.py
| 333
|
[
"self"
] |
Flask
| true
| 10
| 6
|
pallets/flask
| 70,946
|
unknown
| false
|
committed
|
@Override
public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
return delegate.committed(partitions);
}
|
Get the last committed offsets for the given partitions (whether the commit happened by this process or
another). The returned offsets will be used as the position for the consumer in the event of a failure.
<p>
If any of the partitions requested do not exist, an exception would be thrown.
<p>
This call will do a remote call to get the latest committed offsets from the server, and will block until the
committed offsets are gotten successfully, an unrecoverable error is encountered (in which case it is thrown to
the caller), or the timeout specified by {@code default.api.timeout.ms} expires (in which case a
{@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller).
@param partitions The partitions to check
@return The latest committed offsets for the given partitions; {@code null} will be returned for the
partition if there is no such message.
@throws org.apache.kafka.common.errors.WakeupException if {@link #wakeup()} is called before or while this
function is called
@throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted before or while
this function is called
@throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details
@throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic or to the
configured groupId. See the exception for more details
@throws org.apache.kafka.common.errors.UnsupportedVersionException if the consumer attempts to fetch stable offsets
when the broker doesn't support this feature
@throws org.apache.kafka.common.KafkaException for any other unrecoverable errors
@throws org.apache.kafka.common.errors.TimeoutException if the committed offset cannot be found before
the timeout specified by {@code default.api.timeout.ms} expires.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java
| 1,332
|
[
"partitions"
] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
and
|
default FailableDoublePredicate<E> and(final FailableDoublePredicate<E> other) {
Objects.requireNonNull(other);
return t -> test(t) && other.test(t);
}
|
Returns a composed {@link FailableDoublePredicate} like {@link DoublePredicate#and(DoublePredicate)}.
@param other a predicate that will be logically-ANDed with this predicate.
@return a composed {@link FailableDoublePredicate} like {@link DoublePredicate#and(DoublePredicate)}.
@throws NullPointerException if other is null
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableDoublePredicate.java
| 69
|
[
"other"
] | true
| 2
| 7.36
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
maybeAddAcknowledgements
|
private boolean maybeAddAcknowledgements(ShareSessionHandler handler,
Node node,
TopicIdPartition tip,
Acknowledgements acknowledgements) {
if (handler.isNewSession()) {
// Failing the acknowledgements as we cannot have piggybacked acknowledgements in the initial ShareFetchRequest.
log.debug("Cannot send acknowledgements on initial epoch for ShareSession for partition {}", tip);
acknowledgements.complete(Errors.INVALID_SHARE_SESSION_EPOCH.exception());
maybeSendShareAcknowledgementEvent(Map.of(tip, acknowledgements), true, Optional.empty());
return false;
} else {
metricsManager.recordAcknowledgementSent(acknowledgements.size());
fetchAcknowledgementsInFlight.computeIfAbsent(node.id(), k -> new HashMap<>()).put(tip, acknowledgements);
return true;
}
}
|
Add acknowledgements for a topic-partition to the node's in-flight acknowledgements.
@return True if we can add acknowledgements to the share session.
If we cannot add acknowledgements, they are completed with {@link Errors#INVALID_SHARE_SESSION_EPOCH} exception.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 295
|
[
"handler",
"node",
"tip",
"acknowledgements"
] | true
| 2
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
createNetworkClient
|
public static NetworkClient createNetworkClient(AbstractConfig config,
String clientId,
Metrics metrics,
String metricsGroupPrefix,
LogContext logContext,
ApiVersions apiVersions,
Time time,
int maxInFlightRequestsPerConnection,
int requestTimeoutMs,
Metadata metadata,
MetadataUpdater metadataUpdater,
HostResolver hostResolver,
Sensor throttleTimeSensor,
ClientTelemetrySender clientTelemetrySender) {
ChannelBuilder channelBuilder = null;
Selector selector = null;
try {
channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext);
selector = new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG),
metrics,
time,
metricsGroupPrefix,
channelBuilder,
logContext);
return new NetworkClient(metadataUpdater,
metadata,
selector,
clientId,
maxInFlightRequestsPerConnection,
config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
requestTimeoutMs,
config.getLong(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG),
config.getLong(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG),
time,
true,
apiVersions,
throttleTimeSensor,
logContext,
hostResolver,
clientTelemetrySender,
config.getLong(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG),
MetadataRecoveryStrategy.forName(config.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG))
);
} catch (Throwable t) {
closeQuietly(selector, "Selector");
closeQuietly(channelBuilder, "ChannelBuilder");
throw new KafkaException("Failed to create new NetworkClient", t);
}
}
|
Return a list containing the first address in `allAddresses` and subsequent addresses
that are a subtype of the first address.
The outcome is that all returned addresses are either IPv4 or IPv6 (InetAddress has two
subclasses: Inet4Address and Inet6Address).
|
java
|
clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
| 206
|
[
"config",
"clientId",
"metrics",
"metricsGroupPrefix",
"logContext",
"apiVersions",
"time",
"maxInFlightRequestsPerConnection",
"requestTimeoutMs",
"metadata",
"metadataUpdater",
"hostResolver",
"throttleTimeSensor",
"clientTelemetrySender"
] |
NetworkClient
| true
| 2
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
afterPropertiesSet
|
@Override
public void afterPropertiesSet() {
if (this.target == null) {
throw new IllegalArgumentException("Property 'target' is required");
}
if (this.target instanceof String) {
throw new IllegalArgumentException("'target' needs to be a bean reference, not a bean name as value");
}
if (this.proxyClassLoader == null) {
this.proxyClassLoader = ClassUtils.getDefaultClassLoader();
}
ProxyFactory proxyFactory = new ProxyFactory();
if (this.preInterceptors != null) {
for (Object interceptor : this.preInterceptors) {
proxyFactory.addAdvisor(this.advisorAdapterRegistry.wrap(interceptor));
}
}
// Add the main interceptor (typically an Advisor).
proxyFactory.addAdvisor(this.advisorAdapterRegistry.wrap(createMainInterceptor()));
if (this.postInterceptors != null) {
for (Object interceptor : this.postInterceptors) {
proxyFactory.addAdvisor(this.advisorAdapterRegistry.wrap(interceptor));
}
}
proxyFactory.copyFrom(this);
TargetSource targetSource = createTargetSource(this.target);
proxyFactory.setTargetSource(targetSource);
if (this.proxyInterfaces != null) {
proxyFactory.setInterfaces(this.proxyInterfaces);
}
else if (!isProxyTargetClass()) {
// Rely on AOP infrastructure to tell us what interfaces to proxy.
Class<?> targetClass = targetSource.getTargetClass();
if (targetClass != null) {
proxyFactory.setInterfaces(ClassUtils.getAllInterfacesForClass(targetClass, this.proxyClassLoader));
}
}
postProcessProxyFactory(proxyFactory);
this.proxy = proxyFactory.getProxy(this.proxyClassLoader);
}
|
Set the ClassLoader to generate the proxy class in.
<p>Default is the bean ClassLoader, i.e. the ClassLoader used by the
containing BeanFactory for loading all bean classes. This can be
overridden here for specific proxies.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AbstractSingletonProxyFactoryBean.java
| 135
|
[] |
void
| true
| 9
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
loadKeyAndValue
|
private void loadKeyAndValue(boolean expandLists, Document document, CharacterReader reader, StringBuilder buffer)
throws IOException {
String key = loadKey(buffer, reader).trim();
if (expandLists && key.endsWith("[]")) {
key = key.substring(0, key.length() - 2);
int index = 0;
do {
OriginTrackedValue value = loadValue(buffer, reader, true);
document.put(key + "[" + (index++) + "]", value);
if (!reader.isEndOfLine()) {
reader.read();
}
}
while (!reader.isEndOfLine());
}
else {
OriginTrackedValue value = loadValue(buffer, reader, false);
document.put(key, value);
}
}
|
Load {@code .properties} data and return a map of {@code String} ->
{@link OriginTrackedValue}.
@param expandLists if list {@code name[]=a,b,c} shortcuts should be expanded
@return the loaded properties
@throws IOException on read error
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/env/OriginTrackedPropertiesLoader.java
| 111
|
[
"expandLists",
"document",
"reader",
"buffer"
] |
void
| true
| 4
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
containsAny
|
public static boolean containsAny(final Object[] array, final Object... objectsToFind) {
return Streams.of(objectsToFind).anyMatch(e -> contains(array, e));
}
|
Checks if any of the objects are in the given array.
<p>
The method returns {@code false} if a {@code null} array is passed in.
</p>
<p>
If the {@code array} elements you are searching implement {@link Comparator}, consider whether it is worth using
{@link Arrays#sort(Object[], Comparator)} and {@link Arrays#binarySearch(Object[], Object)}.
</p>
@param array the array to search, may be {@code null}.
@param objectsToFind any of the objects to find, may be {@code null}.
@return {@code true} if the array contains any of the objects.
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,780
|
[
"array"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "Constructor-arg" + (this.index >= 0 ? " #" + this.index : "");
}
|
Creates a new instance of the {@link ConstructorArgumentEntry} class
representing a constructor argument at the supplied {@code index}.
@param index the index of the constructor argument
@throws IllegalArgumentException if the supplied {@code index}
is less than zero
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/parsing/ConstructorArgumentEntry.java
| 55
|
[] |
String
| true
| 2
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
readFileAfterOpen
|
function readFileAfterOpen(err, fd) {
const context = this.context;
if (err) {
context.callback(err);
return;
}
context.fd = fd;
const req = new FSReqCallback();
req.oncomplete = readFileAfterStat;
req.context = context;
binding.fstat(fd, false, req);
}
|
Synchronously tests whether or not the given path exists.
@param {string | Buffer | URL} path
@returns {boolean}
|
javascript
|
lib/fs.js
| 289
|
[
"err",
"fd"
] | false
| 2
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.