function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
components
|
def components(self) -> DataFrame:
"""
Return a Dataframe of the components of the Timedeltas.
Each row of the DataFrame corresponds to a Timedelta in the original
Series and contains the individual components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedelta.
Returns
-------
DataFrame
See Also
--------
TimedeltaIndex.components : Return a DataFrame of the individual resolution
components of the Timedeltas.
Series.dt.total_seconds : Return the total number of seconds in the duration.
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="s"))
>>> s
0 0 days 00:00:00
1 0 days 00:00:01
2 0 days 00:00:02
3 0 days 00:00:03
4 0 days 00:00:04
dtype: timedelta64[ns]
>>> s.dt.components
days hours minutes seconds milliseconds microseconds nanoseconds
0 0 0 0 0 0 0 0
1 0 0 0 1 0 0 0
2 0 0 0 2 0 0 0
3 0 0 0 3 0 0 0
4 0 0 0 4 0 0 0
"""
return (
self._get_values()
.components.set_index(self._parent.index)
.__finalize__(self._parent)
)
|
Return a Dataframe of the components of the Timedeltas.
Each row of the DataFrame corresponds to a Timedelta in the original
Series and contains the individual components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedelta.
Returns
-------
DataFrame
See Also
--------
TimedeltaIndex.components : Return a DataFrame of the individual resolution
components of the Timedeltas.
Series.dt.total_seconds : Return the total number of seconds in the duration.
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="s"))
>>> s
0 0 days 00:00:00
1 0 days 00:00:01
2 0 days 00:00:02
3 0 days 00:00:03
4 0 days 00:00:04
dtype: timedelta64[ns]
>>> s.dt.components
days hours minutes seconds milliseconds microseconds nanoseconds
0 0 0 0 0 0 0 0
1 0 0 0 1 0 0 0
2 0 0 0 2 0 0 0
3 0 0 0 3 0 0 0
4 0 0 0 4 0 0 0
|
python
|
pandas/core/indexes/accessors.py
| 511
|
[
"self"
] |
DataFrame
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
atAll
|
public ConditionMessage atAll() {
return items(Collections.emptyList());
}
|
Used when no items are available. For example
{@code didNotFind("any beans").atAll()} results in the message "did not find
any beans".
@return a built {@link ConditionMessage}
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 337
|
[] |
ConditionMessage
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
tolist
|
def tolist(self) -> list:
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
Python list of values in array.
See Also
--------
Index.to_list: Return a list of the values in the Index.
Series.to_list: Return a list of the values in the Series.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr.tolist()
[1, 2, 3]
"""
if self.ndim > 1:
return [x.tolist() for x in self]
return list(self)
|
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
Python list of values in array.
See Also
--------
Index.to_list: Return a list of the values in the Index.
Series.to_list: Return a list of the values in the Series.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr.tolist()
[1, 2, 3]
|
python
|
pandas/core/arrays/base.py
| 2,414
|
[
"self"
] |
list
| true
| 2
| 7.28
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
of
|
@SafeVarargs
@SuppressWarnings("varargs")
public static <E> ManagedSet<E> of(E... elements) {
ManagedSet<E> set = new ManagedSet<>();
Collections.addAll(set, elements);
return set;
}
|
Create a new instance containing an arbitrary number of elements.
@param elements the elements to be contained in the set
@param <E> the {@code Set}'s element type
@return a {@code ManagedSet} containing the specified elements
@since 5.3.16
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/ManagedSet.java
| 64
|
[] | true
| 1
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
timezoneToOffset
|
function timezoneToOffset(timezone: string, fallback: number): number {
// Support: IE 11 only, Edge 13-15+
// IE/Edge do not "understand" colon (`:`) in timezone
timezone = timezone.replace(/:/g, '');
const requestedTimezoneOffset = Date.parse('Jan 01, 1970 00:00:00 ' + timezone) / 60000;
return isNaN(requestedTimezoneOffset) ? fallback : requestedTimezoneOffset;
}
|
Returns a date formatter that provides the week-numbering year for the input date.
|
typescript
|
packages/common/src/i18n/format_date.ts
| 877
|
[
"timezone",
"fallback"
] | true
| 2
| 6
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
dispatch
|
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception
void dispatch() {
boolean scheduleEventRunner = false;
synchronized (this) {
if (!isThreadScheduled) {
isThreadScheduled = true;
scheduleEventRunner = true;
}
}
if (scheduleEventRunner) {
try {
executor.execute(this);
} catch (Exception e) { // sneaky checked exception
// reset state in case of an error so that later dispatch calls will actually do something
synchronized (this) {
isThreadScheduled = false;
}
// Log it and keep going.
logger
.get()
.log(
Level.SEVERE,
"Exception while running callbacks for " + listener + " on " + executor,
e);
throw e;
}
}
}
|
Dispatches all listeners {@linkplain #enqueue enqueued} prior to this call, serially and in
order.
|
java
|
android/guava/src/com/google/common/util/concurrent/ListenerCallQueue.java
| 160
|
[] |
void
| true
| 4
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
getNotebookCellMetadata
|
function getNotebookCellMetadata(cell: nbformat.ICell): {
[key: string]: any;
} {
// We put this only for VSC to display in diff view.
// Else we don't use this.
const cellMetadata: CellMetadata = {};
if (cell.cell_type === 'code') {
if (typeof cell['execution_count'] === 'number') {
cellMetadata.execution_count = cell['execution_count'];
} else {
cellMetadata.execution_count = null;
}
}
if (cell['metadata']) {
cellMetadata['metadata'] = JSON.parse(JSON.stringify(cell['metadata']));
}
if (typeof cell.id === 'string') {
cellMetadata.id = cell.id;
}
if (cell['attachments']) {
cellMetadata.attachments = JSON.parse(JSON.stringify(cell['attachments']));
}
return cellMetadata;
}
|
Concatenates a multiline string or an array of strings into a single string.
Also normalizes line endings to use LF (`\n`) instead of CRLF (`\r\n`).
Same is done in serializer as well.
|
typescript
|
extensions/ipynb/src/deserializers.ts
| 153
|
[
"cell"
] | true
| 7
| 6
|
microsoft/vscode
| 179,840
|
jsdoc
| false
|
|
wrapIfMissing
|
public static String wrapIfMissing(final String str, final String wrapWith) {
if (isEmpty(str) || isEmpty(wrapWith)) {
return str;
}
final boolean wrapStart = !str.startsWith(wrapWith);
final boolean wrapEnd = !str.endsWith(wrapWith);
if (!wrapStart && !wrapEnd) {
return str;
}
final StringBuilder builder = new StringBuilder(str.length() + wrapWith.length() + wrapWith.length());
if (wrapStart) {
builder.append(wrapWith);
}
builder.append(str);
if (wrapEnd) {
builder.append(wrapWith);
}
return builder.toString();
}
|
Wraps a string with a string if that string is missing from the start or end of the given string.
<p>
A new {@link String} will not be created if {@code str} is already wrapped.
</p>
<pre>
StringUtils.wrapIfMissing(null, *) = null
StringUtils.wrapIfMissing("", *) = ""
StringUtils.wrapIfMissing("ab", null) = "ab"
StringUtils.wrapIfMissing("ab", "x") = "xabx"
StringUtils.wrapIfMissing("ab", "\"") = "\"ab\""
StringUtils.wrapIfMissing("\"ab\"", "\"") = "\"ab\""
StringUtils.wrapIfMissing("ab", "'") = "'ab'"
StringUtils.wrapIfMissing("'abcd'", "'") = "'abcd'"
StringUtils.wrapIfMissing("\"abcd\"", "'") = "'\"abcd\"'"
StringUtils.wrapIfMissing("'abcd'", "\"") = "\"'abcd'\""
StringUtils.wrapIfMissing("/", "/") = "/"
StringUtils.wrapIfMissing("a/b/c", "/") = "/a/b/c/"
StringUtils.wrapIfMissing("/a/b/c", "/") = "/a/b/c/"
StringUtils.wrapIfMissing("a/b/c/", "/") = "/a/b/c/"
</pre>
@param str the string to be wrapped, may be {@code null}.
@param wrapWith the string that will wrap {@code str}.
@return the wrapped string, or {@code null} if {@code str == null}.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 9,183
|
[
"str",
"wrapWith"
] |
String
| true
| 7
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
ProtoBufferReader
|
explicit ProtoBufferReader(ByteBuffer* buffer)
: byte_count_(0), backup_count_(0), status_() {
/// Implemented through a grpc_byte_buffer_reader which iterates
/// over the slices that make up a byte buffer
if (!buffer->Valid() ||
!grpc_byte_buffer_reader_init(&reader_, buffer->c_buffer())) {
status_ = Status(StatusCode::INTERNAL,
"Couldn't initialize byte buffer reader");
}
}
|
if \a buffer is invalid (the internal buffer has not been initialized).
|
cpp
|
include/grpcpp/support/proto_buffer_reader.h
| 51
|
[] | true
| 3
| 7.04
|
grpc/grpc
| 44,113
|
doxygen
| false
|
|
destroyBean
|
protected void destroyBean(String beanName, @Nullable DisposableBean bean) {
// Trigger destruction of dependent beans first...
Set<String> dependentBeanNames;
synchronized (this.dependentBeanMap) {
// Within full synchronization in order to guarantee a disconnected Set
dependentBeanNames = this.dependentBeanMap.remove(beanName);
}
if (dependentBeanNames != null) {
if (logger.isTraceEnabled()) {
logger.trace("Retrieved dependent beans for bean '" + beanName + "': " + dependentBeanNames);
}
for (String dependentBeanName : dependentBeanNames) {
destroySingleton(dependentBeanName);
}
}
// Actually destroy the bean now...
if (bean != null) {
try {
bean.destroy();
}
catch (Throwable ex) {
if (logger.isWarnEnabled()) {
logger.warn("Destruction of bean with name '" + beanName + "' threw an exception", ex);
}
}
}
// Trigger destruction of contained beans...
Set<String> containedBeans;
synchronized (this.containedBeanMap) {
// Within full synchronization in order to guarantee a disconnected Set
containedBeans = this.containedBeanMap.remove(beanName);
}
if (containedBeans != null) {
for (String containedBeanName : containedBeans) {
destroySingleton(containedBeanName);
}
}
// Remove destroyed bean from other beans' dependencies.
synchronized (this.dependentBeanMap) {
for (Iterator<Map.Entry<String, Set<String>>> it = this.dependentBeanMap.entrySet().iterator(); it.hasNext();) {
Map.Entry<String, Set<String>> entry = it.next();
Set<String> dependenciesToClean = entry.getValue();
dependenciesToClean.remove(beanName);
if (dependenciesToClean.isEmpty()) {
it.remove();
}
}
}
// Remove destroyed bean's prepared dependency information.
this.dependenciesForBeanMap.remove(beanName);
}
|
Destroy the given bean. Must destroy beans that depend on the given
bean before the bean itself. Should not throw any exceptions.
@param beanName the name of the bean
@param bean the bean instance to destroy
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 776
|
[
"beanName",
"bean"
] |
void
| true
| 9
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
RWCursor
|
RWCursor(IOBufQueue& queue, AtEnd) noexcept : RWCursor(queue) {
if (!queue.options().cacheChainLength) {
this->advanceToEnd();
} else {
this->crtBuf_ = this->buffer_->prev();
this->crtBegin_ = this->crtBuf_->data();
this->crtEnd_ = this->crtBuf_->tail();
this->crtPos_ = this->crtEnd_;
this->absolutePos_ =
queue.chainLength() - (this->crtPos_ - this->crtBegin_);
DCHECK_EQ(this->getCurrentPosition(), queue.chainLength());
}
}
|
Create the cursor initially pointing to the end of queue.
|
cpp
|
folly/io/Cursor.h
| 1,281
|
[] | true
| 3
| 7.04
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
toMap
|
public static Map<Object, Object> toMap(final Object[] array) {
if (array == null) {
return null;
}
final Map<Object, Object> map = new HashMap<>((int) (array.length * 1.5));
for (int i = 0; i < array.length; i++) {
final Object object = array[i];
if (object instanceof Map.Entry<?, ?>) {
final Map.Entry<?, ?> entry = (Map.Entry<?, ?>) object;
map.put(entry.getKey(), entry.getValue());
} else if (object instanceof Object[]) {
final Object[] entry = (Object[]) object;
if (entry.length < 2) {
throw new IllegalArgumentException("Array element " + i + ", '"
+ object
+ "', has a length less than 2");
}
map.put(entry[0], entry[1]);
} else {
throw new IllegalArgumentException("Array element " + i + ", '"
+ object
+ "', is neither of type Map.Entry nor an Array");
}
}
return map;
}
|
Converts the given array into a {@link java.util.Map}. Each element of the array must be either a {@link java.util.Map.Entry} or an Array, containing at
least two elements, where the first element is used as key and the second as value.
<p>
This method can be used to initialize:
</p>
<pre>
// Create a Map mapping colors.
Map colorMap = ArrayUtils.toMap(new String[][] { { "RED", "#FF0000" }, { "GREEN", "#00FF00" }, { "BLUE", "#0000FF" } });
</pre>
<p>
This method returns {@code null} for a {@code null} input array.
</p>
@param array an array whose elements are either a {@link java.util.Map.Entry} or an Array containing at least two elements, may be {@code null}.
@return a {@link Map} that was created from the array.
@throws IllegalArgumentException if one element of this Array is itself an Array containing less than two elements.
@throws IllegalArgumentException if the array contains elements other than {@link java.util.Map.Entry} and an Array.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,637
|
[
"array"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
from
|
public static <T extends @Nullable Object> Ordering<T> from(Comparator<T> comparator) {
return (comparator instanceof Ordering)
? (Ordering<T>) comparator
: new ComparatorOrdering<T>(comparator);
}
|
Returns an ordering based on an <i>existing</i> comparator instance. Note that it is
unnecessary to create a <i>new</i> anonymous inner class implementing {@code Comparator} just
to pass it in here. Instead, simply subclass {@code Ordering} and implement its {@code compare}
method directly.
<p>The returned object is serializable if {@code comparator} is serializable.
<p><b>Java 8+ users:</b> this class is now obsolete as explained in the class documentation, so
there is no need to use this method.
@param comparator the comparator that defines the order
@return comparator itself if it is already an {@code Ordering}; otherwise an ordering that
wraps that comparator
|
java
|
android/guava/src/com/google/common/collect/Ordering.java
| 192
|
[
"comparator"
] | true
| 2
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
add
|
@Deprecated
public static short[] add(final short[] array, final int index, final short element) {
return (short[]) add(array, index, Short.valueOf(element), Short.TYPE);
}
|
Inserts the specified element at the specified position in the array.
Shifts the element currently at that position (if any) and any subsequent
elements to the right (adds one to their indices).
<p>
This method returns a new array with the same elements of the input
array plus the given element on the specified position. The component
type of the returned array is always the same as that of the input
array.
</p>
<p>
If the input array is {@code null}, a new one element array is returned
whose component type is the same as the element.
</p>
<pre>
ArrayUtils.add([1], 0, 2) = [2, 1]
ArrayUtils.add([2, 6], 2, 10) = [2, 6, 10]
ArrayUtils.add([2, 6], 0, -4) = [-4, 2, 6]
ArrayUtils.add([2, 6, 3], 2, 1) = [2, 6, 1, 3]
</pre>
@param array the array to add the element to, may be {@code null}.
@param index the position of the new object.
@param element the object to add.
@return A new array containing the existing elements and the new element.
@throws IndexOutOfBoundsException if the index is out of range
(index < 0 || index > array.length).
@deprecated this method has been superseded by {@link #insert(int, short[], short...)} and
may be removed in a future release. Please note the handling of {@code null} input arrays differs
in the new method: inserting {@code X} into a {@code null} array results in {@code null} not {@code X}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 711
|
[
"array",
"index",
"element"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
callWithTimeout
|
@CanIgnoreReturnValue
@Override
@ParametricNullness
public <T extends @Nullable Object> T callWithTimeout(
Callable<T> callable, long timeoutDuration, TimeUnit timeoutUnit)
throws TimeoutException, InterruptedException, ExecutionException {
checkNotNull(callable);
checkNotNull(timeoutUnit);
checkPositiveTimeout(timeoutDuration);
Future<T> future = executor.submit(callable);
try {
return future.get(timeoutDuration, timeoutUnit);
} catch (InterruptedException | TimeoutException e) {
future.cancel(true /* mayInterruptIfRunning */);
throw e;
} catch (ExecutionException e) {
wrapAndThrowExecutionExceptionOrError(e.getCause());
throw new AssertionError();
}
}
|
Creates a TimeLimiter instance using the given executor service to execute method calls.
<p><b>Warning:</b> using a bounded executor may be counterproductive! If the thread pool fills
up, any time callers spend waiting for a thread may count toward their time limit, and in this
case the call may even time out before the target method is ever invoked.
@param executor the ExecutorService that will execute the method calls on the target objects;
for example, a {@link Executors#newCachedThreadPool()}.
@since 22.0
|
java
|
android/guava/src/com/google/common/util/concurrent/SimpleTimeLimiter.java
| 135
|
[
"callable",
"timeoutDuration",
"timeoutUnit"
] |
T
| true
| 3
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
isEndOfDecoratorContextOnSameLine
|
function isEndOfDecoratorContextOnSameLine(context: FormattingContext): boolean {
return context.TokensAreOnSameLine() &&
hasDecorators(context.contextNode) &&
nodeIsInDecoratorContext(context.currentTokenParent) &&
!nodeIsInDecoratorContext(context.nextTokenParent);
}
|
A rule takes a two tokens (left/right) and a particular context
for which you're meant to look at them. You then declare what should the
whitespace annotation be between these tokens via the action param.
@param debugName Name to print
@param left The left side of the comparison
@param right The right side of the comparison
@param context A set of filters to narrow down the space in which this formatter rule applies
@param action a declaration of the expected whitespace
@param flags whether the rule deletes a line or not, defaults to no-op
|
typescript
|
src/services/formatting/rules.ts
| 805
|
[
"context"
] | true
| 4
| 6.24
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
records
|
@Override
public Iterable<Record> records() {
return records;
}
|
Get an iterator over the deep records.
@return An iterator over the records
|
java
|
clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java
| 63
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
intersection
|
public ComposablePointcut intersection(Pointcut other) {
this.classFilter = ClassFilters.intersection(this.classFilter, other.getClassFilter());
this.methodMatcher = MethodMatchers.intersection(this.methodMatcher, other.getMethodMatcher());
return this;
}
|
Apply an intersection with the given Pointcut.
@param other the Pointcut to apply an intersection with
@return this composable pointcut (for call chaining)
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/ComposablePointcut.java
| 172
|
[
"other"
] |
ComposablePointcut
| true
| 1
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
throwableMembers
|
private static void throwableMembers(Members<LogEvent> members, Extractor extractor) {
members.add("full_message", extractor::messageAndStackTrace);
members.add("_error_type", LogEvent::getThrown).whenNotNull().as(ObjectUtils::nullSafeClassName);
members.add("_error_stack_trace", extractor::stackTrace);
members.add("_error_message", (event) -> event.getThrown().getMessage());
}
|
Converts the log4j2 event level to the Syslog event level code.
@param event the log event
@return an integer representing the syslog log level code
@see Severity class from Log4j2 which contains the conversion logic
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/GraylogExtendedLogFormatStructuredLogFormatter.java
| 135
|
[
"members",
"extractor"
] |
void
| true
| 1
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
nullToEmpty
|
public static Class<?>[] nullToEmpty(final Class<?>[] array) {
return nullTo(array, EMPTY_CLASS_ARRAY);
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,394
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
generateSetBeanInstanceSupplierCode
|
@Override
public CodeBlock generateSetBeanInstanceSupplierCode(
GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode,
CodeBlock instanceSupplierCode, List<MethodReference> postProcessors) {
CodeBlock.Builder code = CodeBlock.builder();
if (postProcessors.isEmpty()) {
code.addStatement("$L.setInstanceSupplier($L)", BEAN_DEFINITION_VARIABLE, instanceSupplierCode);
return code.build();
}
code.addStatement("$T $L = $L",
ParameterizedTypeName.get(InstanceSupplier.class, this.registeredBean.getBeanClass()),
INSTANCE_SUPPLIER_VARIABLE, instanceSupplierCode);
for (MethodReference postProcessor : postProcessors) {
code.addStatement("$L = $L.andThen($L)", INSTANCE_SUPPLIER_VARIABLE,
INSTANCE_SUPPLIER_VARIABLE, postProcessor.toCodeBlock());
}
code.addStatement("$L.setInstanceSupplier($L)", BEAN_DEFINITION_VARIABLE,
INSTANCE_SUPPLIER_VARIABLE);
return code.build();
}
|
Extract the target class of a public {@link FactoryBean} based on its
constructor. If the implementation does not resolve the target class
because it itself uses a generic, attempt to extract it from the bean type.
@param factoryBeanType the factory bean type
@param beanType the bean type
@return the target class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 203
|
[
"generationContext",
"beanRegistrationCode",
"instanceSupplierCode",
"postProcessors"
] |
CodeBlock
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
asConsumer
|
public static <I> Consumer<I> asConsumer(final FailableConsumer<I, ?> consumer) {
return input -> accept(consumer, input);
}
|
Converts the given {@link FailableConsumer} into a standard {@link Consumer}.
@param <I> the type used by the consumers
@param consumer a {@link FailableConsumer}
@return a standard {@link Consumer}
@since 3.10
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 403
|
[
"consumer"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
from
|
static SslManagerBundle from(TrustManagerFactory trustManagerFactory) {
Assert.notNull(trustManagerFactory, "'trustManagerFactory' must not be null");
KeyManagerFactory defaultKeyManagerFactory = createDefaultKeyManagerFactory();
return of(defaultKeyManagerFactory, trustManagerFactory);
}
|
Factory method to create a new {@link SslManagerBundle} using the given
{@link TrustManagerFactory} and the default {@link KeyManagerFactory}.
@param trustManagerFactory the trust manager factory
@return a new {@link SslManagerBundle} instance
@since 3.5.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/SslManagerBundle.java
| 136
|
[
"trustManagerFactory"
] |
SslManagerBundle
| true
| 1
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
leaderEpochFor
|
public OptionalInt leaderEpochFor(TopicPartition tp) {
PartitionMetadata partitionMetadata = metadataByPartition.get(tp);
if (partitionMetadata == null || partitionMetadata.leaderEpoch.isEmpty()) {
return OptionalInt.empty();
} else {
return OptionalInt.of(partitionMetadata.leaderEpoch.get());
}
}
|
Get leader-epoch for partition.
@param tp partition
@return leader-epoch if known, else return OptionalInt.empty()
|
java
|
clients/src/main/java/org/apache/kafka/clients/MetadataSnapshot.java
| 134
|
[
"tp"
] |
OptionalInt
| true
| 3
| 7.28
|
apache/kafka
| 31,560
|
javadoc
| false
|
getTypeForFactoryBeanFromAttributes
|
ResolvableType getTypeForFactoryBeanFromAttributes(AttributeAccessor attributes) {
Object attribute = attributes.getAttribute(FactoryBean.OBJECT_TYPE_ATTRIBUTE);
if (attribute == null) {
return ResolvableType.NONE;
}
if (attribute instanceof ResolvableType resolvableType) {
return resolvableType;
}
if (attribute instanceof Class<?> clazz) {
return ResolvableType.forClass(clazz);
}
throw new IllegalArgumentException("Invalid value type for attribute '" +
FactoryBean.OBJECT_TYPE_ATTRIBUTE + "': " + attribute.getClass().getName());
}
|
Determine the bean type for a FactoryBean by inspecting its attributes for a
{@link FactoryBean#OBJECT_TYPE_ATTRIBUTE} value.
@param attributes the attributes to inspect
@return a {@link ResolvableType} extracted from the attributes or
{@code ResolvableType.NONE}
@since 5.2
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/FactoryBeanRegistrySupport.java
| 75
|
[
"attributes"
] |
ResolvableType
| true
| 4
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
iterations
|
public int iterations() {
return iterations;
}
|
@return the number of iterations used when creating the credential
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ScramCredentialInfo.java
| 53
|
[] | true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
createInstance
|
protected abstract T createInstance() throws Exception;
|
Template method that subclasses must override to construct
the object returned by this factory.
<p>Invoked on initialization of this FactoryBean in case of
a singleton; else, on each {@link #getObject()} call.
@return the object returned by this factory
@throws Exception if an exception occurred during object creation
@see #getObject()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/AbstractFactoryBean.java
| 216
|
[] |
T
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
format
|
String format(Date date);
|
Formats a {@link Date} object using a {@link GregorianCalendar}.
@param date the date to format
@return the formatted string
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 83
|
[
"date"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
reorder_categories
|
def reorder_categories(self, new_categories, ordered=None) -> Self:
"""
Reorder categories as specified in new_categories.
``new_categories`` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
Returns
-------
Categorical
Categorical with reordered categories.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
For :class:`pandas.Series`:
>>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser = ser.cat.reorder_categories(["c", "b", "a"], ordered=True)
>>> ser
0 a
1 b
2 c
3 a
dtype: category
Categories (3, str): ['c' < 'b' < 'a']
>>> ser.sort_values()
2 c
1 b
0 a
3 a
dtype: category
Categories (3, str): ['c' < 'b' < 'a']
For :class:`pandas.CategoricalIndex`:
>>> ci = pd.CategoricalIndex(["a", "b", "c", "a"])
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> ci.reorder_categories(["c", "b", "a"], ordered=True)
CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'],
ordered=True, dtype='category')
"""
if (
len(self.categories) != len(new_categories)
or not self.categories.difference(new_categories).empty
):
raise ValueError(
"items in new_categories are not the same as in old categories"
)
return self.set_categories(new_categories, ordered=ordered)
|
Reorder categories as specified in new_categories.
``new_categories`` need to include all old categories and no new category
items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
Returns
-------
Categorical
Categorical with reordered categories.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
See Also
--------
rename_categories : Rename categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
Examples
--------
For :class:`pandas.Series`:
>>> ser = pd.Series(["a", "b", "c", "a"], dtype="category")
>>> ser = ser.cat.reorder_categories(["c", "b", "a"], ordered=True)
>>> ser
0 a
1 b
2 c
3 a
dtype: category
Categories (3, str): ['c' < 'b' < 'a']
>>> ser.sort_values()
2 c
1 b
0 a
3 a
dtype: category
Categories (3, str): ['c' < 'b' < 'a']
For :class:`pandas.CategoricalIndex`:
>>> ci = pd.CategoricalIndex(["a", "b", "c", "a"])
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> ci.reorder_categories(["c", "b", "a"], ordered=True)
CategoricalIndex(['a', 'b', 'c', 'a'], categories=['c', 'b', 'a'],
ordered=True, dtype='category')
|
python
|
pandas/core/arrays/categorical.py
| 1,268
|
[
"self",
"new_categories",
"ordered"
] |
Self
| true
| 3
| 7.76
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_format_argument_list
|
def _format_argument_list(allow_args: list[str]) -> str:
"""
Convert the allow_args argument (either string or integer) of
`deprecate_nonkeyword_arguments` function to a string describing
it to be inserted into warning message.
Parameters
----------
allowed_args : list, tuple or int
The `allowed_args` argument for `deprecate_nonkeyword_arguments`,
but None value is not allowed.
Returns
-------
str
The substring describing the argument list in best way to be
inserted to the warning message.
Examples
--------
`format_argument_list([])` -> ''
`format_argument_list(['a'])` -> "except for the arguments 'a'"
`format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'"
`format_argument_list(['a', 'b', 'c'])` ->
"except for the arguments 'a', 'b' and 'c'"
"""
if "self" in allow_args:
allow_args.remove("self")
if not allow_args:
return ""
elif len(allow_args) == 1:
return f" except for the argument '{allow_args[0]}'"
else:
last = allow_args[-1]
args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
return f" except for the arguments {args} and '{last}'"
|
Convert the allow_args argument (either string or integer) of
`deprecate_nonkeyword_arguments` function to a string describing
it to be inserted into warning message.
Parameters
----------
allowed_args : list, tuple or int
The `allowed_args` argument for `deprecate_nonkeyword_arguments`,
but None value is not allowed.
Returns
-------
str
The substring describing the argument list in best way to be
inserted to the warning message.
Examples
--------
`format_argument_list([])` -> ''
`format_argument_list(['a'])` -> "except for the arguments 'a'"
`format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'"
`format_argument_list(['a', 'b', 'c'])` ->
"except for the arguments 'a', 'b' and 'c'"
|
python
|
pandas/util/_decorators.py
| 227
|
[
"allow_args"
] |
str
| true
| 5
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
formatDuration
|
public static String formatDuration(final long durationMillis, final String format, final boolean padWithZeros) {
Validate.inclusiveBetween(0, Long.MAX_VALUE, durationMillis, "durationMillis must not be negative");
final Token[] tokens = lexx(format);
long days = 0;
long hours = 0;
long minutes = 0;
long seconds = 0;
long milliseconds = durationMillis;
if (Token.containsTokenWithValue(tokens, d)) {
days = milliseconds / DateUtils.MILLIS_PER_DAY;
milliseconds -= days * DateUtils.MILLIS_PER_DAY;
}
if (Token.containsTokenWithValue(tokens, H)) {
hours = milliseconds / DateUtils.MILLIS_PER_HOUR;
milliseconds -= hours * DateUtils.MILLIS_PER_HOUR;
}
if (Token.containsTokenWithValue(tokens, m)) {
minutes = milliseconds / DateUtils.MILLIS_PER_MINUTE;
milliseconds -= minutes * DateUtils.MILLIS_PER_MINUTE;
}
if (Token.containsTokenWithValue(tokens, s)) {
seconds = milliseconds / DateUtils.MILLIS_PER_SECOND;
milliseconds -= seconds * DateUtils.MILLIS_PER_SECOND;
}
return format(tokens, 0, 0, days, hours, minutes, seconds, milliseconds, padWithZeros);
}
|
Formats the time gap as a string, using the specified format.
Padding the left-hand side side of numbers with zeroes is optional.
<p>This method formats durations using the days and lower fields of the
format pattern. Months and larger are not used.</p>
@param durationMillis the duration to format
@param format the way in which to format the duration, not null
@param padWithZeros whether to pad the left-hand side side of numbers with 0's
@return the formatted duration, not null
@throws IllegalArgumentException if durationMillis is negative
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 358
|
[
"durationMillis",
"format",
"padWithZeros"
] |
String
| true
| 5
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
_need_to_fix_layout
|
def _need_to_fix_layout(
self,
adjusted_choices: list[KernelTemplateChoice],
op_name: str,
) -> bool:
"""
Check if we need to fix the layout instead of keeping it flexible
Args:
ktc: KernelTemplateChoice object
Returns:
True if we need to fix the layout, False otherwise
"""
# TODO: debug and fix
# NOTE: on mps, we see issues with flexible layouts on baddmm. This check just makes sure
# that for mps, everything stays as it was before this optimization
if len(adjusted_choices) > 0:
if adjusted_choices[0].inputs.device_type == "mps" and op_name not in [
"mm",
"addmm",
]:
return True
# Since the following backends are not using get_mm_configs yet through the singular call,
if not (config.max_autotune or config.max_autotune_gemm):
# no danger of using other backends than ATEN
if not config.max_autotune_allow_flexible_layouts and op_name not in [
# The historical implementation for mm and addmm allowed had flexible layouts in the
# not max-autotune world
"mm",
"addmm",
]:
# TODO: deprecate this by migrating users to the new behavior
return True
return False
if not config.max_autotune_allow_flexible_layouts:
# we always need to fix the layout
return True
# Since the following backends are not using get_template_configs yet through the singular call,
# we don't know if they are a valid choice or not. Instead, just skip the optimization
# defensively.
# TODO(coconutruben): remove this once CPP,CK,CUTLASS are supported
if _use_autotune_backend("CUTLASS"):
return True
if _use_autotune_backend("CK") or _use_autotune_backend("CKTILE"):
return True
if _use_autotune_backend("CPP"):
return True
return any(
not isinstance(ktc.template, ExternKernelChoice) for ktc in adjusted_choices
)
|
Check if we need to fix the layout instead of keeping it flexible
Args:
ktc: KernelTemplateChoice object
Returns:
True if we need to fix the layout, False otherwise
|
python
|
torch/_inductor/choices.py
| 214
|
[
"self",
"adjusted_choices",
"op_name"
] |
bool
| true
| 13
| 7.44
|
pytorch/pytorch
| 96,034
|
google
| false
|
equals
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Bindable<?> other = (Bindable<?>) obj;
boolean result = true;
result = result && nullSafeEquals(this.type.resolve(), other.type.resolve());
result = result && nullSafeEquals(this.annotations, other.annotations);
result = result && nullSafeEquals(this.bindRestrictions, other.bindRestrictions);
result = result && nullSafeEquals(this.bindMethod, other.bindMethod);
return result;
}
|
Returns the {@link BindMethod method} to be used to bind this bindable, or
{@code null} if no specific binding method is required.
@return the bind method or {@code null}
@since 3.0.8
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 142
|
[
"obj"
] | true
| 8
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
socketOnClose
|
function socketOnClose() {
const session = this[kBoundSession];
if (session !== undefined) {
debugSessionObj(session, 'socket closed');
const err = session.connecting ? new ERR_SOCKET_CLOSED() : null;
const state = session[kState];
state.streams.forEach((stream) => stream.close(NGHTTP2_CANCEL));
state.pendingStreams.forEach((stream) => stream.close(NGHTTP2_CANCEL));
session.close();
closeSession(session, NGHTTP2_NO_ERROR, err);
}
}
|
This function closes all active sessions gracefully.
@param {*} server the underlying server whose sessions to be closed
|
javascript
|
lib/internal/http2/core.js
| 3,513
|
[] | false
| 3
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
fit
|
def fit(self, X, y=None):
"""Fit a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, ensure_min_samples=2, estimator="MinCovDet")
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn(
"The covariance matrix associated to your dataset is not full rank"
)
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X,
support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state,
)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(
X[raw_support], assume_centered=True
)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
|
Fit a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
|
python
|
sklearn/covariance/_robust_covariance.py
| 771
|
[
"self",
"X",
"y"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
determineImports
|
Set<Object> determineImports(AnnotationMetadata metadata);
|
Return a set of objects that represent the imports. Objects within the returned
{@code Set} must implement a valid {@link Object#hashCode() hashCode} and
{@link Object#equals(Object) equals}.
<p>
Imports from multiple {@link DeterminableImports} instances may be combined by the
caller to create a complete set.
<p>
Unlike {@link ImportSelector} and {@link ImportBeanDefinitionRegistrar} any
{@link Aware} callbacks will not be invoked before this method is called.
@param metadata the source meta-data
@return a key representing the annotations that actually drive the import
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/annotation/DeterminableImports.java
| 59
|
[
"metadata"
] | true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
builder
|
public static <T> Builder<LazyInitializer<T>, T> builder() {
return new Builder<>();
}
|
Creates a new builder.
@param <T> the type of object to build.
@return a new builder.
@since 3.14.0
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/LazyInitializer.java
| 110
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkArgument
|
public static void checkArgument(boolean expression) {
if (!expression) {
throw new IllegalArgumentException();
}
}
|
Ensures the truth of an expression involving one or more parameters to the calling method.
@param expression a boolean expression
@throws IllegalArgumentException if {@code expression} is false
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 125
|
[
"expression"
] |
void
| true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
stopForRestart
|
void stopForRestart() {
if (this.running) {
this.stoppedBeans = ConcurrentHashMap.newKeySet();
stopBeans(false);
this.running = false;
}
}
|
Stop all registered beans that implement {@link Lifecycle} and <i>are</i>
currently running. Any bean that implements {@link SmartLifecycle} will be
stopped within its 'phase', and all phases will be ordered from highest to
lowest value. All beans that do not implement {@link SmartLifecycle} will be
stopped in the default phase 0. A bean declared as dependent on another bean
will be stopped before the dependency bean regardless of the declared phase.
|
java
|
spring-context/src/main/java/org/springframework/context/support/DefaultLifecycleProcessor.java
| 349
|
[] |
void
| true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
measure
|
double measure(MetricConfig config, long now);
|
Measure this quantity and return the result as a double.
@param config The configuration for this metric
@param now The POSIX time in milliseconds the measurement is being taken
@return The measured value
|
java
|
clients/src/main/java/org/apache/kafka/common/metrics/Measurable.java
| 31
|
[
"config",
"now"
] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
stripComment
|
private static String stripComment(String line) {
int commentStart = line.indexOf(COMMENT_START);
if (commentStart == -1) {
return line;
}
return line.substring(0, commentStart);
}
|
Loads the names of import candidates from the classpath. The names of the import
candidates are stored in files named
{@code META-INF/spring/full-qualified-annotation-name.imports} on the classpath.
Every line contains the full qualified name of the candidate class. Comments are
supported using the # character.
@param annotation annotation to load
@param classLoader class loader to use for loading
@return list of names of annotated classes
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/annotation/ImportCandidates.java
| 130
|
[
"line"
] |
String
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
saturatedCast
|
public static byte saturatedCast(long value) {
if (value > toUnsignedInt(MAX_VALUE)) {
return MAX_VALUE; // -1
}
if (value < 0) {
return (byte) 0;
}
return (byte) value;
}
|
Returns the {@code byte} value that, when treated as unsigned, is nearest in value to {@code
value}.
@param value any {@code long} value
@return {@code (byte) 255} if {@code value >= 255}, {@code (byte) 0} if {@code value <= 0}, and
{@code value} cast to {@code byte} otherwise
|
java
|
android/guava/src/com/google/common/primitives/UnsignedBytes.java
| 110
|
[
"value"
] | true
| 3
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
copyDefault
|
public void copyDefault(ProxyConfig other) {
Assert.notNull(other, "Other ProxyConfig object must not be null");
if (this.proxyTargetClass == null) {
this.proxyTargetClass = other.proxyTargetClass;
}
if (this.optimize == null) {
this.optimize = other.optimize;
}
if (this.opaque == null) {
this.opaque = other.opaque;
}
if (this.exposeProxy == null) {
this.exposeProxy = other.exposeProxy;
}
if (this.frozen == null) {
this.frozen = other.frozen;
}
}
|
Copy default settings from the other config object,
for settings that have not been locally set.
@param other object to copy configuration from
@since 7.0
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyConfig.java
| 169
|
[
"other"
] |
void
| true
| 6
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
splitRecordsIntoBatches
|
private Deque<ProducerBatch> splitRecordsIntoBatches(RecordBatch recordBatch, int splitBatchSize) {
Deque<ProducerBatch> batches = new ArrayDeque<>();
Iterator<Thunk> thunkIter = thunks.iterator();
// We always allocate batch size because we are already splitting a big batch.
// And we also Retain the create time of the original batch.
ProducerBatch batch = null;
for (Record record : recordBatch) {
assert thunkIter.hasNext();
Thunk thunk = thunkIter.next();
if (batch == null)
batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
// A newly created batch can always host the first message.
if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
batches.add(batch);
batch.closeForRecordAppends();
batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
}
}
// Close the last batch and add it to the batch list after split.
if (batch != null) {
batches.add(batch);
batch.closeForRecordAppends();
}
return batches;
}
|
Finalize the state of a batch. Final state, once set, is immutable. This function may be called
once or twice on a batch. It may be called twice if
1. An inflight batch expires before a response from the broker is received. The batch's final
state is set to FAILED. But it could succeed on the broker and second time around batch.done() may
try to set SUCCEEDED final state.
2. If a transaction abortion happens or if the producer is closed forcefully, the final state is
ABORTED but again it could succeed if broker responds with a success.
Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged.
Attempted transitions from one failure state to the same or a different failed state are ignored.
Attempted transitions from SUCCEEDED to the same or a failed state throw an exception.
@param baseOffset The base offset of the messages assigned by the server
@param logAppendTime The log append time or -1 if CreateTime is being used
@param topLevelException The exception that occurred (or null if the request was successful)
@param recordExceptions Record exception function mapping batchIndex to the respective record exception
@return true if the batch was completed successfully and false if the batch was previously aborted
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
| 348
|
[
"recordBatch",
"splitBatchSize"
] | true
| 4
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
nullTriple
|
@SuppressWarnings("unchecked")
public static <L, M, R> ImmutableTriple<L, M, R> nullTriple() {
return NULL;
}
|
Gets the immutable triple of nulls singleton.
@param <L> the left element of this triple. Value is {@code null}.
@param <M> the middle element of this triple. Value is {@code null}.
@param <R> the right element of this triple. Value is {@code null}.
@return an immutable triple of nulls.
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/tuple/ImmutableTriple.java
| 80
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
estimateSizeInBytes
|
public static int estimateSizeInBytes(byte magic,
CompressionType compressionType,
Iterable<SimpleRecord> records) {
int size = 0;
if (magic <= RecordBatch.MAGIC_VALUE_V1) {
for (SimpleRecord record : records)
size += Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, record.key(), record.value());
} else {
size = DefaultRecordBatch.sizeInBytes(records);
}
return estimateCompressedSizeInBytes(size, compressionType);
}
|
Get an iterator over the deep records.
@return An iterator over the records
|
java
|
clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java
| 107
|
[
"magic",
"compressionType",
"records"
] | true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
hermegauss
|
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
with the weight function :math:`f(x) = \\exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = pu._as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0] * deg + [1])
m = hermecompanion(c)
x = np.linalg.eigvalsh(m)
# improve roots by one application of Newton
dy = _normed_hermite_e_n(x, ideg)
df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy / df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1 / (fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1]) / 2
x = (x - x[::-1]) / 2
# scale w to get the right value
w *= np.sqrt(2 * np.pi) / w.sum()
return x, w
|
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]`
with the weight function :math:`f(x) = \\exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
|
python
|
numpy/polynomial/hermite_e.py
| 1,508
|
[
"deg"
] | false
| 2
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
CONST
|
public static int CONST(final int v) {
return v;
}
|
Returns the provided value unchanged. This can prevent javac from inlining a constant field, e.g.,
<pre>
public final static int MAGIC_INT = ObjectUtils.CONST(123);
</pre>
This way any jars that refer to this field do not have to recompile themselves if the field's value changes at some future date.
@param v the int value to return.
@return the int v, unchanged.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 420
|
[
"v"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
proceed
|
@Override
public @Nullable Object proceed() throws Throwable {
// We start with an index of -1 and increment early.
if (this.currentInterceptorIndex == this.interceptorsAndDynamicMethodMatchers.size() - 1) {
return invokeJoinpoint();
}
Object interceptorOrInterceptionAdvice =
this.interceptorsAndDynamicMethodMatchers.get(++this.currentInterceptorIndex);
if (interceptorOrInterceptionAdvice instanceof InterceptorAndDynamicMethodMatcher dm) {
// Evaluate dynamic method matcher here: static part will already have
// been evaluated and found to match.
Class<?> targetClass = (this.targetClass != null ? this.targetClass : this.method.getDeclaringClass());
if (dm.matcher().matches(this.method, targetClass, this.arguments)) {
return dm.interceptor().invoke(this);
}
else {
// Dynamic matching failed.
// Skip this interceptor and invoke the next in the chain.
return proceed();
}
}
else {
// It's an interceptor, so we just invoke it: The pointcut will have
// been evaluated statically before this object was constructed.
return ((MethodInterceptor) interceptorOrInterceptionAdvice).invoke(this);
}
}
|
Return the method invoked on the proxied interface.
May or may not correspond with a method invoked on an underlying
implementation of that interface.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ReflectiveMethodInvocation.java
| 154
|
[] |
Object
| true
| 5
| 7.04
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
buildRequest
|
UnsentRequest buildRequest() {
// If this is the closing request, close the share session by setting the final epoch
if (isCloseRequest()) {
sessionHandler.notifyClose();
}
Map<TopicIdPartition, Acknowledgements> finalAcknowledgementsToSend = new HashMap<>(
incompleteAcknowledgements.isEmpty() ? acknowledgementsToSend : incompleteAcknowledgements);
for (Map.Entry<TopicIdPartition, Acknowledgements> entry : finalAcknowledgementsToSend.entrySet()) {
sessionHandler.addPartitionToFetch(entry.getKey(), entry.getValue());
}
ShareAcknowledgeRequest.Builder requestBuilder = sessionHandler.newShareAcknowledgeBuilder(groupId, shareFetchConfig);
isProcessed = false;
Node nodeToSend = metadata.fetch().nodeById(nodeId);
if (requestBuilder == null) {
handleAcknowledgeShareSessionNotFound();
return null;
} else if (nodeToSend != null) {
nodesWithPendingRequests.add(nodeId);
log.trace("Building acknowledgements to send : {}", finalAcknowledgementsToSend);
inFlightAcknowledgements.putAll(finalAcknowledgementsToSend);
if (incompleteAcknowledgements.isEmpty()) {
acknowledgementsToSend.clear();
} else {
incompleteAcknowledgements.clear();
}
UnsentRequest unsentRequest = new UnsentRequest(requestBuilder, Optional.of(nodeToSend));
BiConsumer<ClientResponse, Throwable> responseHandler = (clientResponse, error) -> {
if (error != null) {
handleShareAcknowledgeFailure(nodeToSend, requestBuilder.data(), this, error, unsentRequest.handler().completionTimeMs());
} else {
handleShareAcknowledgeSuccess(nodeToSend, requestBuilder.data(), this, clientResponse, unsentRequest.handler().completionTimeMs());
}
};
return unsentRequest.whenComplete(responseHandler);
}
return null;
}
|
Timeout in milliseconds indicating how long the request would be retried if it fails with a retriable exception.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,226
|
[] |
UnsentRequest
| true
| 7
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
forceTerminateTransaction
|
default TerminateTransactionResult forceTerminateTransaction(String transactionalId) {
return forceTerminateTransaction(transactionalId, new TerminateTransactionOptions());
}
|
Force terminate a transaction for the given transactional ID with the default options.
<p>
This is a convenience method for {@link #forceTerminateTransaction(String, TerminateTransactionOptions)}
with default options.
@param transactionalId The ID of the transaction to terminate.
@return The TerminateTransactionResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 2,131
|
[
"transactionalId"
] |
TerminateTransactionResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
print_outputs_on_timeout
|
def print_outputs_on_timeout(
outputs: list[Output], results: list[ApplyResult], include_success_outputs: bool
):
"""
Print outputs of the tasks that were terminated on timeout.
This function is called when some tasks were terminated on timeout.
It prints the outputs of the tasks that were terminated on timeout,
and the outputs of the tasks that were successful if `include_success_outputs` is True.
:param outputs: list of Output objects containing file names and titles
:param results: list of ApplyResult objects containing the results of the tasks
:param include_success_outputs: whether to include outputs of successful tasks
"""
get_console().print(
"\n[warning]Some tasks were terminated on timeout. "
"Please check the logs of the tasks (below) for more details.[/]\n"
)
for i, result in enumerate(results):
try:
exit_code = result.get(timeout=0)[0]
except Exception:
exit_code = -1
if exit_code != 0:
message_type = MessageType.ERROR
else:
message_type = MessageType.SUCCESS
output = outputs[i]
if message_type == MessageType.ERROR or include_success_outputs:
from airflow_breeze.utils.ci_group import ci_group
with ci_group(f"{output.escaped_title}", message_type):
os.write(1, Path(output.file_name).read_bytes())
else:
get_console().print(f"[success]{outputs[i].escaped_title} OK[/]")
get_console().print(
"\n[warning]Some tasks were terminated on timeout. "
"Please check the logs of the tasks (above) for more details.[/]\n"
)
from airflow_breeze.utils.docker_command_utils import fix_ownership_using_docker
fix_ownership_using_docker()
|
Print outputs of the tasks that were terminated on timeout.
This function is called when some tasks were terminated on timeout.
It prints the outputs of the tasks that were terminated on timeout,
and the outputs of the tasks that were successful if `include_success_outputs` is True.
:param outputs: list of Output objects containing file names and titles
:param results: list of ApplyResult objects containing the results of the tasks
:param include_success_outputs: whether to include outputs of successful tasks
|
python
|
dev/breeze/src/airflow_breeze/utils/parallel.py
| 522
|
[
"outputs",
"results",
"include_success_outputs"
] | true
| 7
| 6.72
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
fit_transform
|
def fit_transform(self, X, y=None):
"""Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = validate_data(self, X, accept_sparse=["csr", "csc"], ensure_min_features=2)
random_state = check_random_state(self.random_state)
if self.algorithm == "arpack":
v0 = _init_arpack_v0(min(X.shape), random_state)
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
# u_based_decision=False is needed to be consistent with PCA.
U, VT = svd_flip(U[:, ::-1], VT[::-1], u_based_decision=False)
elif self.algorithm == "randomized":
if self.n_components > X.shape[1]:
raise ValueError(
f"n_components({self.n_components}) must be <="
f" n_features({X.shape[1]})."
)
U, Sigma, VT = _randomized_svd(
X,
self.n_components,
n_iter=self.n_iter,
n_oversamples=self.n_oversamples,
power_iteration_normalizer=self.power_iteration_normalizer,
random_state=random_state,
flip_sign=False,
)
U, VT = svd_flip(U, VT, u_based_decision=False)
self.components_ = VT
# As a result of the SVD approximation error on X ~ U @ Sigma @ V.T,
# X @ V is not the same as U @ Sigma
if self.algorithm == "randomized" or (
self.algorithm == "arpack" and self.tol > 0
):
X_transformed = safe_sparse_dot(X, self.components_.T)
else:
X_transformed = U * Sigma
# Calculate explained variance & explained variance ratio
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = Sigma # Store the singular values.
return X_transformed
|
Fit model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
|
python
|
sklearn/decomposition/_truncated_svd.py
| 210
|
[
"self",
"X",
"y"
] | false
| 10
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
lagmulx
|
def lagmulx(c):
    """Multiply a Laguerre series by x.

    Multiply the Laguerre series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Laguerre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    See Also
    --------
    lagadd, lagsub, lagmul, lagdiv, lagpow

    Notes
    -----
    The multiplication uses the recursion relationship for Laguerre
    polynomials in the form

    .. math::
        xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.laguerre import lagmulx
    >>> lagmulx([1, 2, 3])
    array([-1., -1., 11., -9.])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # The zero series maps to itself and needs no recursion.
    if len(c) == 1 and c[0] == 0:
        return c
    # The result has one more coefficient than the input.
    out = np.zeros(len(c) + 1, dtype=c.dtype)
    # Seed with the degree-0 term: x*P_0 = P_0 - P_1.
    out[0] = c[0]
    out[1] = -c[0]
    # Scatter each remaining coefficient according to the recursion.
    for k, coef in enumerate(c[1:], start=1):
        out[k + 1] = -coef * (k + 1)
        out[k] += coef * (2 * k + 1)
        out[k - 1] -= coef * k
    return out
|
Multiply a Laguerre series by x.
Multiply the Laguerre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
See Also
--------
lagadd, lagsub, lagmul, lagdiv, lagpow
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([-1., -1., 11., -9.])
|
python
|
numpy/polynomial/laguerre.py
| 387
|
[
"c"
] | false
| 4
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
leftPad
|
/**
 * Left pads a String with a specified character up to the given size.
 *
 * <p>Returns the original String when it is already at least {@code size}
 * characters long, and {@code null} for {@code null} input.</p>
 *
 * @param str the String to pad out, may be null.
 * @param size the size to pad to.
 * @param padChar the character to pad with.
 * @return left padded String or original String if no padding is necessary, {@code null} if null String input.
 */
public static String leftPad(final String str, final int size, final char padChar) {
    if (str == null) {
        return null;
    }
    final int padCount = size - str.length();
    if (padCount <= 0) {
        // Already at or beyond the requested size; return the original.
        return str;
    }
    if (padCount > PAD_LIMIT) {
        // Large pads are delegated to the String-based overload.
        return leftPad(str, size, String.valueOf(padChar));
    }
    return repeat(padChar, padCount).concat(str);
}
|
Left pad a String with a specified character.
<p>
Pad to a size of {@code size}.
</p>
<pre>
StringUtils.leftPad(null, *, *) = null
StringUtils.leftPad("", 3, 'z') = "zzz"
StringUtils.leftPad("bat", 3, 'z') = "bat"
StringUtils.leftPad("bat", 5, 'z') = "zzbat"
StringUtils.leftPad("bat", 1, 'z') = "bat"
StringUtils.leftPad("bat", -1, 'z') = "bat"
</pre>
@param str the String to pad out, may be null.
@param size the size to pad to.
@param padChar the character to pad with.
@return left padded String or original String if no padding is necessary, {@code null} if null String input.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 5,117
|
[
"str",
"size",
"padChar"
] |
String
| true
| 4
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
get_pytorch_path
|
def get_pytorch_path() -> str:
    """
    Retrieves the installation path of PyTorch in the current environment.

    Returns:
        str: The directory of the PyTorch installation.

    Exits:
        If PyTorch is not installed in the current Python environment, the script will exit.
    """
    try:
        import torch
        # torch.__path__ is a package path object; cast to list[str] for the
        # type checker, then take the first entry (the install directory,
        # e.g. .../site-packages/torch).
        torch_paths: list[str] = cast(list[str], torch.__path__)
        torch_path: str = torch_paths[0]
        # The parent directory (e.g. site-packages) is what gets patched.
        parent_path: str = os.path.dirname(torch_path)
        print(f"PyTorch is installed at: {torch_path}")
        print(f"Parent directory for patching: {parent_path}")
        return parent_path
    except ImportError:
        # NOTE(review): presumably exits the process (per the docstring);
        # otherwise this function implicitly returns None despite the
        # `-> str` annotation -- confirm handle_import_error's behavior.
        handle_import_error()
|
Retrieves the installation path of PyTorch in the current environment.
Returns:
str: The directory of the PyTorch installation.
Exits:
If PyTorch is not installed in the current Python environment, the script will exit.
|
python
|
tools/nightly_hotpatch.py
| 64
|
[] |
str
| true
| 1
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
fetchRecords
|
/**
 * Drains up to {@code maxRecords} records from this completed fetch, deserializing each raw
 * record into a {@link ConsumerRecord}. Updates {@code recordsRead}, {@code bytesRead} and
 * {@code nextFetchOffset} as records are consumed, and caches any deserialization failure so
 * the same record can be retried (or skipped by seeking) on the next call.
 *
 * @param fetchConfig {@link FetchConfig Configuration} used when advancing to the next raw record
 * @param deserializers {@link Deserializers} used to convert raw bytes into typed key/value pairs
 * @param maxRecords upper bound on the number of records returned; {@code 0 <= maxRecords}
 * @return the parsed {@link ConsumerRecord consumer records}; may be empty
 * @throws KafkaException if a prior attempt left a corrupt record, or if an error occurs before
 *         any record could be read in this call
 */
<K, V> List<ConsumerRecord<K, V>> fetchRecords(FetchConfig fetchConfig,
Deserializers<K, V> deserializers,
int maxRecords) {
// Error when fetching the next record before deserialization.
if (corruptLastRecord)
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", cachedRecordException);
if (isConsumed)
return Collections.emptyList();
List<ConsumerRecord<K, V>> records = new ArrayList<>();
try {
for (int i = 0; i < maxRecords; i++) {
// Only move to next record if there was no exception in the last fetch. Otherwise, we should
// use the last record to do deserialization again.
if (cachedRecordException == null) {
// corruptLastRecord brackets nextFetchedRecord so an exception thrown
// there is attributed to a corrupt record on the next call.
corruptLastRecord = true;
lastRecord = nextFetchedRecord(fetchConfig);
corruptLastRecord = false;
}
// null signals the batch is exhausted.
if (lastRecord == null)
break;
Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
TimestampType timestampType = currentBatch.timestampType();
ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord);
records.add(record);
recordsRead++;
bytesRead += lastRecord.sizeInBytes();
nextFetchOffset = lastRecord.offset() + 1;
// In some cases, the deserialization may have thrown an exception and the retry may succeed,
// we allow user to move forward in this case.
cachedRecordException = null;
}
} catch (SerializationException se) {
// Partial progress is returned; the failure is cached and re-raised on the next call.
cachedRecordException = se;
if (records.isEmpty())
throw se;
} catch (KafkaException e) {
cachedRecordException = e;
if (records.isEmpty())
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", e);
}
return records;
}
|
The {@link RecordBatch batch} of {@link Record records} is converted to a {@link List list} of
{@link ConsumerRecord consumer records} and returned. {@link BufferSupplier Decompression} and
{@link Deserializer deserialization} of the {@link Record record's} key and value are performed in
this step.
@param fetchConfig {@link FetchConfig Configuration} to use
@param deserializers {@link Deserializer}s to use to convert the raw bytes to the expected key and value types
@param maxRecords The number of records to return; the number returned may be {@code 0 <= maxRecords}
@return {@link ConsumerRecord Consumer records}
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java
| 252
|
[
"fetchConfig",
"deserializers",
"maxRecords"
] | true
| 10
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
get_locales
|
def get_locales(
    prefix: str | None = None,
    normalize: bool = True,
) -> list[str]:
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

        On error will return an empty list (no locale available, e.g. Windows)
    """
    # Only Linux/macOS provide the `locale -a` command.
    if platform.system() in ("Linux", "Darwin"):
        raw_locales = subprocess.check_output(["locale", "-a"])
    else:
        # Other platforms e.g. windows platforms don't define "locale -a"
        # Note: is_platform_windows causes circular import here
        return []
    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        split_raw_locales = raw_locales.split(b"\n")
        out_locales = []
        for x in split_raw_locales:
            try:
                # Decode with the configured display encoding first.
                out_locales.append(str(x, encoding=cast(str, options.display.encoding)))
            except UnicodeError:
                # 'locale -a' is used to populated 'raw_locales' and on
                # Redhat 7 Linux (and maybe others) prints locale names
                # using windows-1252 encoding. Bug only triggered by
                # a few special characters and when there is an
                # extensive list of installed locales.
                out_locales.append(str(x, encoding="windows-1252"))
    except TypeError:
        # NOTE(review): if TypeError fires before `out_locales` is bound,
        # the code below raises NameError -- confirm whether this silent
        # pass is intended only for the str() conversion failing.
        pass
    if prefix is None:
        return _valid_locales(out_locales, normalize)
    # Filter to locales starting with the given prefix.
    pattern = re.compile(f"{prefix}.*")
    found = pattern.findall("\n".join(out_locales))
    return _valid_locales(found, normalize)
|
Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return an empty list (no locale available, e.g. Windows)
|
python
|
pandas/_config/localization.py
| 115
|
[
"prefix",
"normalize"
] |
list[str]
| true
| 5
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
completeNext
|
/**
 * Completes the earliest uncompleted call successfully.
 *
 * <p>Delegates to {@code errorNext(null)}; a {@code null} error marks the call as successful.</p>
 *
 * @return true if there was an uncompleted call to complete
 */
public synchronized boolean completeNext() {
    return errorNext(null);
}
|
Complete the earliest uncompleted call successfully.
@return true if there was an uncompleted call to complete
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java
| 537
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
of
|
/**
 * Creates a closed (inclusive) range between the two values.
 *
 * <p>The arguments may be passed in either order; the constructed range exposes the correct
 * minimum and maximum.</p>
 *
 * @param fromInclusive one inclusive edge of the range.
 * @param toInclusive the other inclusive edge of the range.
 * @return the range object, not null.
 * @throws IllegalArgumentException if either element is null.
 */
public static LongRange of(final Long fromInclusive, final Long toInclusive) {
    return new LongRange(fromInclusive, toInclusive);
}
|
Creates a closed range with the specified minimum and maximum values (both inclusive).
<p>
The range uses the natural ordering of the elements to determine where values lie in the range.
</p>
<p>
The arguments may be passed in the order (min,max) or (max,min). The getMinimum and getMaximum methods will return the correct values.
</p>
@param fromInclusive the first value that defines the edge of the range, inclusive.
@param toInclusive the second value that defines the edge of the range, inclusive.
@return the range object, not null.
@throws IllegalArgumentException if either element is null.
|
java
|
src/main/java/org/apache/commons/lang3/LongRange.java
| 70
|
[
"fromInclusive",
"toInclusive"
] |
LongRange
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
process
|
/**
 * Parses each configured resource with a single Yaml instance, delegating matching documents
 * to the callback.
 *
 * @param callback receives each document that matches the configured document matchers
 * @see #createYaml()
 */
protected void process(MatchCallback callback) {
    Yaml yaml = createYaml();
    for (Resource resource : this.resources) {
        boolean found = process(callback, yaml, resource);
        // FIRST_FOUND: stop after the first resource that produced a match.
        if (this.resolutionMethod == ResolutionMethod.FIRST_FOUND && found) {
            return;
        }
    }
}
|
Provide an opportunity for subclasses to process the Yaml parsed from the supplied
resources. Each resource is parsed in turn and the documents inside checked against
the {@link #setDocumentMatchers(DocumentMatcher...) matchers}. If a document
matches it is passed into the callback, along with its representation as Properties.
Depending on the {@link #setResolutionMethod(ResolutionMethod)} not all the
documents will be parsed.
@param callback a callback to delegate to once matching documents are found
@see #createYaml()
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/YamlProcessor.java
| 166
|
[
"callback"
] |
void
| true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fit_predict
|
def fit_predict(self, X, y=None, **kwargs):
    """Perform fit on X and returns labels for X.

    Returns -1 for outliers and 1 for inliers.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples.

    y : Ignored
        Not used, present for API consistency by convention.

    **kwargs : dict
        Arguments to be passed to ``fit``.

        .. versionadded:: 1.4

    Returns
    -------
    y : ndarray of shape (n_samples,)
        1 for inliers, -1 for outliers.
    """
    # we do not route parameters here, since consumers don't route. But
    # since it's possible for a `predict` method to also consume
    # metadata, we check if that's the case, and we raise a warning telling
    # users that they should implement a custom `fit_predict` method
    # to forward metadata to `predict` as well.
    #
    # For that, we calculate routing and check if anything would be routed
    # to `predict` if we were to route them.
    if _routing_enabled():
        transform_params = self.get_metadata_routing().consumes(
            method="predict", params=kwargs.keys()
        )
        if transform_params:
            # BUGFIX: the original concatenated message lacked spaces between
            # sentence fragments ("as well.Alternatively" and
            # "`set_predict_request`and").
            warnings.warn(
                (
                    f"This object ({self.__class__.__name__}) has a `predict` "
                    "method which consumes metadata, but `fit_predict` does not "
                    "forward metadata to `predict`. Please implement a custom "
                    "`fit_predict` method to forward metadata to `predict` as "
                    "well. Alternatively, you can explicitly do "
                    "`set_predict_request` and set all values to `False` to "
                    "disable metadata routed to `predict`, if that's an option."
                ),
                UserWarning,
            )
    # override for transductive outlier detectors like LocalOutlierFactor
    return self.fit(X, **kwargs).predict(X)
|
Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
|
python
|
sklearn/base.py
| 1,090
|
[
"self",
"X",
"y"
] | false
| 3
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
baseClamp
|
/**
 * The base implementation of `_.clamp` which doesn't coerce arguments.
 *
 * @private
 * @param {number} number The number to clamp.
 * @param {number} [lower] The lower bound.
 * @param {number} upper The upper bound.
 * @returns {number} Returns the clamped number.
 */
function baseClamp(number, lower, upper) {
  // NaN fails the self-equality check and is passed through unchanged.
  if (number !== number) {
    return number;
  }
  // Apply the upper bound first, then the lower bound, preserving the
  // original order (the lower bound wins when the bounds are inverted).
  if (upper !== undefined && number > upper) {
    number = upper;
  }
  if (lower !== undefined && number < lower) {
    number = lower;
  }
  return number;
}
|
The base implementation of `_.clamp` which doesn't coerce arguments.
@private
@param {number} number The number to clamp.
@param {number} [lower] The lower bound.
@param {number} upper The upper bound.
@returns {number} Returns the clamped number.
|
javascript
|
lodash.js
| 2,634
|
[
"number",
"lower",
"upper"
] | false
| 6
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
_get_level_values
|
def _get_level_values(self, level) -> Index:
    """
    Return an Index of values for requested level.

    This is primarily useful to get an individual level of values from a
    MultiIndex, but is provided on Index as well for compatibility.

    Parameters
    ----------
    level : int or str
        Either the integer position or the name of the level.

    Returns
    -------
    Index
        The calling object itself, since a flat Index holds exactly one
        level of values.

    See Also
    --------
    MultiIndex.get_level_values : Get values for a level of a MultiIndex.

    Notes
    -----
    For Index, level should be 0, since there are no multiple levels.

    Examples
    --------
    >>> idx = pd.Index(list("abc"))
    >>> idx
    Index(['a', 'b', 'c'], dtype='object')

    Get level values by supplying `level` as integer:

    >>> idx.get_level_values(0)
    Index(['a', 'b', 'c'], dtype='object')
    """
    # Validate the requested level (raises on a mismatch), then hand back
    # the single-level index itself.
    self._validate_index_level(level)
    return self
|
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
MultiIndex.get_level_values : Get values for a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> idx = pd.Index(list("abc"))
>>> idx
Index(['a', 'b', 'c'], dtype='object')
Get level values by supplying `level` as integer:
>>> idx.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object')
|
python
|
pandas/core/indexes/base.py
| 2,204
|
[
"self",
"level"
] |
Index
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
checkCyclicSubstitution
|
/**
 * Checks whether the given variable already appears in the stack of variables being resolved,
 * and fails with a descriptive message if so (an interpolation cycle).
 *
 * @param varName the variable name to check.
 * @param priorVariables the list of prior variables; its head is removed when a cycle is reported.
 * @throws IllegalStateException if {@code varName} is already on the stack.
 */
private void checkCyclicSubstitution(final String varName, final List<String> priorVariables) {
    if (!priorVariables.contains(varName)) {
        return;
    }
    // The first entry is the root property; the rest trace the cycle.
    final String root = priorVariables.remove(0);
    throw new IllegalStateException(
            "Infinite loop in property interpolation of " + root + ": "
                    + String.join("->", priorVariables));
}
|
Checks if the specified variable is already in the stack (list) of variables.
@param varName the variable name to check.
@param priorVariables the list of prior variables.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 407
|
[
"varName",
"priorVariables"
] |
void
| true
| 2
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
visitForStatement
|
/**
 * Visits a ForStatement, making it the enclosing block-scoped container while its
 * children are visited so hoisted declarations are attributed to this loop.
 *
 * @param node The ForStatement to visit.
 * @param isTopLevel Whether the statement is at the top level; selects the initializer
 *        and body visitors accordingly.
 */
function visitForStatement(node: ForStatement, isTopLevel: boolean): VisitResult<Statement> {
    // Save/restore so nested containers don't leak past this statement.
    const savedEnclosingBlockScopedContainer = enclosingBlockScopedContainer;
    enclosingBlockScopedContainer = node;
    node = factory.updateForStatement(
        node,
        // Initializer and condition keep their value; the incrementor's value is discarded.
        visitNode(node.initializer, isTopLevel ? visitForInitializer : discardedValueVisitor, isForInitializer),
        visitNode(node.condition, visitor, isExpression),
        visitNode(node.incrementor, discardedValueVisitor, isExpression),
        visitIterationBody(node.statement, isTopLevel ? topLevelNestedVisitor : visitor, context),
    );
    enclosingBlockScopedContainer = savedEnclosingBlockScopedContainer;
    return node;
}
|
Visits the body of a ForStatement to hoist declarations.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/module/system.ts
| 1,297
|
[
"node",
"isTopLevel"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_ensure_ndmin_ndarray
|
def _ensure_ndmin_ndarray(a, *, ndmin: int):
"""This is a helper function of loadtxt and genfromtxt to ensure
proper minimum dimension as requested
ndim : int. Supported values 1, 2, 3
^^ whenever this changes, keep in sync with
_ensure_ndmin_ndarray_check_param
"""
# Verify that the array has at least dimensions `ndmin`.
# Tweak the size and shape of the arrays - remove extraneous dimensions
if a.ndim > ndmin:
a = np.squeeze(a)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
if a.ndim < ndmin:
if ndmin == 1:
a = np.atleast_1d(a)
elif ndmin == 2:
a = np.atleast_2d(a).T
return a
|
This is a helper function of loadtxt and genfromtxt to ensure
proper minimum dimension as requested
ndim : int. Supported values 1, 2, 3
^^ whenever this changes, keep in sync with
_ensure_ndmin_ndarray_check_param
|
python
|
numpy/lib/_npyio_impl.py
| 803
|
[
"a",
"ndmin"
] | true
| 5
| 6.4
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
ensure_index
|
def ensure_index(index_like: Axes, copy: bool = False) -> Index:
    """
    Ensure that we have an index from some index-like object.

    Parameters
    ----------
    index_like : sequence
        An Index or other sequence
    copy : bool, default False

    Returns
    -------
    index : Index or MultiIndex

    See Also
    --------
    ensure_index_from_sequences

    Examples
    --------
    >>> ensure_index(["a", "b"])
    Index(['a', 'b'], dtype='str')

    >>> ensure_index([("a", "a"), ("b", "c")])
    Index([('a', 'a'), ('b', 'c')], dtype='object')

    >>> ensure_index([["a", "a"], ["b", "c"]])
    MultiIndex([('a', 'b'),
            ('a', 'c')],
           )
    """
    # Already an Index: optionally copy and return unchanged otherwise.
    if isinstance(index_like, Index):
        if copy:
            index_like = index_like.copy()
        return index_like
    # A Series becomes an Index carrying the Series' name.
    if isinstance(index_like, ABCSeries):
        name = index_like.name
        return Index(index_like, name=name, copy=copy)
    # Materialize one-shot iterators before inspecting/constructing.
    if is_iterator(index_like):
        index_like = list(index_like)
    if isinstance(index_like, list):
        if type(index_like) is not list:
            # must check for exactly list here because of strict type
            # check in clean_index_list
            index_like = list(index_like)
        # A non-empty list of array-likes is treated as per-level arrays.
        if index_like and lib.is_all_arraylike(index_like):
            from pandas.core.indexes.multi import MultiIndex
            return MultiIndex.from_arrays(index_like)
        else:
            # tupleize_cols=False so a list of tuples stays a flat Index.
            return Index(index_like, copy=copy, tupleize_cols=False)
    else:
        return Index(index_like, copy=copy)
|
Ensure that we have an index from some index-like object.
Parameters
----------
index_like : sequence
An Index or other sequence
copy : bool, default False
Returns
-------
index : Index or MultiIndex
See Also
--------
ensure_index_from_sequences
Examples
--------
>>> ensure_index(["a", "b"])
Index(['a', 'b'], dtype='str')
>>> ensure_index([("a", "a"), ("b", "c")])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([["a", "a"], ["b", "c"]])
MultiIndex([('a', 'b'),
('a', 'c')],
)
|
python
|
pandas/core/indexes/base.py
| 7,737
|
[
"index_like",
"copy"
] |
Index
| true
| 11
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
forEachRight
|
/**
 * This method is like `_.forEach` except that it iterates over elements of
 * `collection` from right to left.
 *
 * @param {Array|Object} collection The collection to iterate over.
 * @param {Function} [iteratee=_.identity] The function invoked per iteration.
 * @returns {Array|Object} Returns `collection`.
 */
function forEachRight(collection, iteratee) {
  // Dispatch to the array-specialized iterator when possible.
  if (isArray(collection)) {
    return arrayEachRight(collection, getIteratee(iteratee, 3));
  }
  return baseEachRight(collection, getIteratee(iteratee, 3));
}
|
This method is like `_.forEach` except that it iterates over elements of
`collection` from right to left.
@static
@memberOf _
@since 2.0.0
@alias eachRight
@category Collection
@param {Array|Object} collection The collection to iterate over.
@param {Function} [iteratee=_.identity] The function invoked per iteration.
@returns {Array|Object} Returns `collection`.
@see _.forEach
@example
_.forEachRight([1, 2], function(value) {
console.log(value);
});
// => Logs `2` then `1`.
|
javascript
|
lodash.js
| 9,472
|
[
"collection",
"iteratee"
] | false
| 2
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
inclusiveBetween
|
/**
 * Validates that the specified argument lies within the inclusive range
 * {@code [start, end]}; otherwise, throws an exception.
 *
 * <pre>Validate.inclusiveBetween(0, 2, 1);</pre>
 *
 * @param <T> the type of the argument object.
 * @param start the inclusive start value, not null.
 * @param end the inclusive end value, not null.
 * @param value the object to validate, not null.
 * @throws IllegalArgumentException if the value falls outside the boundaries.
 */
public static <T> void inclusiveBetween(final T start, final T end, final Comparable<T> value) {
    // TODO when breaking BC, consider returning value
    if (value.compareTo(start) >= 0 && value.compareTo(end) <= 0) {
        return;
    }
    throw new IllegalArgumentException(String.format(DEFAULT_INCLUSIVE_BETWEEN_EX_MESSAGE, value, start, end));
}
|
Validate that the specified argument object fall between the two
inclusive values specified; otherwise, throws an exception.
<pre>Validate.inclusiveBetween(0, 2, 1);</pre>
@param <T> the type of the argument object.
@param start the inclusive start value, not null.
@param end the inclusive end value, not null.
@param value the object to validate, not null.
@throws IllegalArgumentException if the value falls outside the boundaries.
@see #inclusiveBetween(Object, Object, Comparable, String, Object...)
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/Validate.java
| 353
|
[
"start",
"end",
"value"
] |
void
| true
| 3
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
fillna
|
def fillna(
    self,
    value,
    limit: int | None = None,
    copy: bool = True,
) -> Self:
    """
    Fill missing values with `value`.

    Parameters
    ----------
    value : scalar
    limit : int, optional
        Not supported for SparseArray, must be None.
    copy: bool, default True
        Ignored for SparseArray.

    Returns
    -------
    SparseArray

    Notes
    -----
    When `value` is specified, the result's ``fill_value`` depends on
    ``self.fill_value``. The goal is to maintain low-memory use.

    If ``self.fill_value`` is NA, the result dtype will be
    ``SparseDtype(self.dtype, fill_value=value)``. This will preserve
    amount of memory used before and after filling.

    When ``self.fill_value`` is not NA, the result dtype will be
    ``self.dtype``. Again, this preserves the amount of memory used.
    """
    if limit is not None:
        raise ValueError("limit must be None")
    # Only the stored (non-fill) values are filled here; the implicit
    # fill positions are handled via the dtype's fill_value below.
    new_values = np.where(isna(self.sp_values), value, self.sp_values)
    if self._null_fill_value:
        # This is essentially just updating the dtype: the NA fill positions
        # now read as `value` through the new dtype's fill_value.
        new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
    else:
        new_dtype = self.dtype
    # Sparse index is unchanged; only values/dtype are replaced.
    return self._simple_new(new_values, self._sparse_index, new_dtype)
|
Fill missing values with `value`.
Parameters
----------
value : scalar
limit : int, optional
Not supported for SparseArray, must be None.
copy: bool, default True
Ignored for SparseArray.
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
|
python
|
pandas/core/arrays/sparse/array.py
| 795
|
[
"self",
"value",
"limit",
"copy"
] |
Self
| true
| 4
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isAlpha
|
/**
 * Tests whether the CharSequence is non-null, non-empty and consists only of Unicode letters.
 *
 * @param cs the CharSequence to check, may be null.
 * @return {@code true} if only contains letters, and is non-null.
 */
public static boolean isAlpha(final CharSequence cs) {
    if (isEmpty(cs)) {
        return false;
    }
    final int length = cs.length();
    int index = 0;
    // Bail out on the first non-letter character.
    while (index < length) {
        if (!Character.isLetter(cs.charAt(index))) {
            return false;
        }
        index++;
    }
    return true;
}
|
Tests if the CharSequence contains only Unicode letters.
<p>
{@code null} will return {@code false}. An empty CharSequence (length()=0) will return {@code false}.
</p>
<pre>
StringUtils.isAlpha(null) = false
StringUtils.isAlpha("") = false
StringUtils.isAlpha(" ") = false
StringUtils.isAlpha("abc") = true
StringUtils.isAlpha("ab2c") = false
StringUtils.isAlpha("ab-c") = false
</pre>
@param cs the CharSequence to check, may be null.
@return {@code true} if only contains letters, and is non-null.
@since 3.0 Changed signature from isAlpha(String) to isAlpha(CharSequence)
@since 3.0 Changed "" to return false and not true
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 3,264
|
[
"cs"
] | true
| 4
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
needToTriggerEpochBumpFromClient
|
/**
 * Determines whether the client must trigger an epoch bump itself: the coordinator must
 * support epoch bumping, and transactions V2 must be disabled (with V2 the server handles
 * epoch bumping, so no client-side trigger is needed).
 *
 * <b>NOTE:</b> only meaningful for transactional producers.
 *
 * @return true if a client-triggered epoch bump is required, otherwise false
 */
boolean needToTriggerEpochBumpFromClient() {
    return coordinatorSupportsBumpingEpoch && !isTransactionV2Enabled;
}
|
Determines if an epoch bump can be triggered manually based on the api versions.
<b>NOTE:</b>
This method should only be used for transactional producers.
For non-transactional producers epoch bumping is always allowed.
<ol>
<li><b>Client-Triggered Epoch Bump</b>:
If the coordinator supports epoch bumping (initProducerIdVersion.maxVersion() >= 3),
client-triggered epoch bumping is allowed, returns true.
<code>clientSideEpochBumpTriggerRequired</code> must be set to true in this case.</li>
<li><b>No Epoch Bump Allowed</b>:
If the coordinator does not support epoch bumping, returns false.</li>
<li><b>Server-Triggered Only</b>:
When TransactionV2 is enabled, epoch bumping is handled automatically
by the server in EndTxn, so manual epoch bumping is not required, returns false.</li>
</ol>
@return true if a client-triggered epoch bump is allowed, otherwise false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,311
|
[] | true
| 2
| 7.36
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
nullToEmpty
|
/**
 * Defensive programming technique to change a {@code null} array reference to the shared
 * empty {@code Object[]} instance.
 *
 * <p>Delegates to {@code nullTo} with {@code EMPTY_OBJECT_ARRAY}; per the documented
 * contract, empty input arrays are presumably also normalized to the shared instance as a
 * memory optimization -- see {@code nullTo}.</p>
 *
 * @param array the array to check for {@code null} or empty.
 * @return the same array, {@code public static} empty array if {@code null} or empty input.
 */
public static Object[] nullToEmpty(final Object[] array) {
    return nullTo(array, EMPTY_OBJECT_ARRAY);
}
|
Defensive programming technique to change a {@code null}
reference to an empty one.
<p>
This method returns an empty array for a {@code null} input array.
</p>
<p>
As a memory optimizing technique an empty array passed in will be overridden with
the empty {@code public static} references in this class.
</p>
@param array the array to check for {@code null} or empty.
@return the same array, {@code public static} empty array if {@code null} or empty input.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,565
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getIgnoreDeprecationsVersion
|
/**
 * Parses the `ignoreDeprecations` compiler option into a `Version`.
 * Only the values "5.0" and "6.0" are accepted; any other non-empty value is
 * reported as invalid. Returns `Version.zero` when the option is unset or invalid.
 */
function getIgnoreDeprecationsVersion(): Version {
    const ignoreDeprecations = options.ignoreDeprecations;
    if (ignoreDeprecations) {
        if (ignoreDeprecations === "5.0" || ignoreDeprecations === "6.0") {
            return new Version(ignoreDeprecations);
        }
        // Non-empty but unsupported value: report, then fall through to zero.
        reportInvalidIgnoreDeprecations();
    }
    return Version.zero;
}
|
Parse the `ignoreDeprecations` compiler option into a `Version`, reporting an error for unsupported values and returning `Version.zero` when unset or invalid
|
typescript
|
src/compiler/program.ts
| 4,397
|
[] | true
| 4
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
fromfunction
|
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
    """
    Construct an array by executing a function over each coordinate.

    The resulting array therefore has a value ``fn(x, y, z)`` at
    coordinate ``(x, y, z)``.

    Parameters
    ----------
    function : callable
        The function is called with N parameters, where N is the rank of
        `shape`. Each parameter represents the coordinates of the array
        varying along a specific axis. For example, if `shape`
        were ``(2, 2)``, then the parameters would be
        ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
    shape : (N,) tuple of ints
        Shape of the output array, which also determines the shape of
        the coordinate arrays passed to `function`.
    dtype : data-type, optional
        Data-type of the coordinate arrays passed to `function`.
        By default, `dtype` is float.
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    fromfunction : any
        The result of the call to `function` is passed back directly.
        Therefore the shape of `fromfunction` is completely determined by
        `function`. If `function` returns a scalar value, the shape of
        `fromfunction` would not match the `shape` parameter.

    See Also
    --------
    indices, meshgrid

    Notes
    -----
    Keywords other than `dtype` and `like` are passed to `function`.

    Examples
    --------
    >>> import numpy as np
    >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64)
    array([[0., 0.],
           [1., 1.]])

    >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=np.float64)
    array([[0., 1.],
           [0., 1.]])

    >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_)
    array([[ True, False, False],
           [False,  True, False],
           [False, False,  True]])

    >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_)
    array([[0, 1, 2],
           [1, 2, 3],
           [2, 3, 4]])
    """
    # Defer to the like=-aware implementation when a reference array is given.
    if like is not None:
        return _fromfunction_with_like(
            like, function, shape, dtype=dtype, **kwargs)

    # One coordinate array per axis, each of the requested shape; the
    # function is applied to all of them at once.
    coords = indices(shape, dtype=dtype)
    return function(*coords, **kwargs)
|
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would not match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` and `like` are passed to `function`.
Examples
--------
>>> import numpy as np
>>> np.fromfunction(lambda i, j: i, (2, 2), dtype=np.float64)
array([[0., 0.],
[1., 1.]])
>>> np.fromfunction(lambda i, j: j, (2, 2), dtype=np.float64)
array([[0., 1.],
[0., 1.]])
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=np.int_)
array([[ True, False, False],
[False, True, False],
[False, False, True]])
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=np.int_)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
|
python
|
numpy/_core/numeric.py
| 1,821
|
[
"function",
"shape",
"dtype",
"like"
] | false
| 2
| 7.44
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
parseQuery
|
/**
 * Parses a query string (including its leading "?") into an object of
 * decoded key/value pairs. Keys without an "=" get an empty-string value.
 *
 * @param {string} qstr The query string, e.g. "?version=15.4.1".
 * @returns {Object} Map of decoded parameter names to decoded values.
 */
function parseQuery(qstr) {
  const query = {};
  // Drop the leading "?" and split into key=value pairs.
  const pairs = qstr.slice(1).split('&');
  for (const pair of pairs) {
    const parts = pair.split('=');
    query[decodeURIComponent(parts[0])] = decodeURIComponent(parts[1] || '');
  }
  return query;
}
|
Take a version from the window query string and load a specific
version of React.
@example
http://localhost:3000?version=15.4.1
(Loads React 15.4.1)
|
javascript
|
fixtures/dom/src/react-loader.js
| 12
|
[
"qstr"
] | false
| 3
| 6.32
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
setFetchAction
|
public void setFetchAction(final FetchBuffer fetchBuffer) {
final AtomicBoolean throwWakeupException = new AtomicBoolean(false);
pendingTask.getAndUpdate(task -> {
if (task == null) {
return new FetchAction(fetchBuffer);
} else if (task instanceof WakeupFuture) {
throwWakeupException.set(true);
return null;
} else if (task instanceof DisabledWakeups) {
return task;
}
// last active state is still active
throw new IllegalStateException("Last active task is still active");
});
if (throwWakeupException.get()) {
throw new WakeupException();
}
}
|
If there is no pending task, set the pending task active.
If wakeup was called before setting an active task, the current task will complete exceptionally with
WakeupException right away.
If there is an active task, throw exception.
@param currentTask
@param <T>
@return
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java
| 92
|
[
"fetchBuffer"
] |
void
| true
| 5
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
outer
|
def outer(x1, x2, /):
"""
Compute the outer product of two vectors.
This function is Array API compatible. Compared to ``np.outer``
it accepts 1-dimensional inputs only.
Parameters
----------
x1 : (M,) array_like
One-dimensional input array of size ``N``.
Must have a numeric data type.
x2 : (N,) array_like
One-dimensional input array of size ``M``.
Must have a numeric data type.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
outer
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=np.object_)
>>> np.linalg.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
x1 = asanyarray(x1)
x2 = asanyarray(x2)
if x1.ndim != 1 or x2.ndim != 1:
raise ValueError(
"Input arrays must be one-dimensional, but they are "
f"{x1.ndim=} and {x2.ndim=}."
)
return _core_outer(x1, x2, out=None)
|
Compute the outer product of two vectors.
This function is Array API compatible. Compared to ``np.outer``
it accepts 1-dimensional inputs only.
Parameters
----------
x1 : (M,) array_like
One-dimensional input array of size ``N``.
Must have a numeric data type.
x2 : (N,) array_like
One-dimensional input array of size ``M``.
Must have a numeric data type.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
outer
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=np.object_)
>>> np.linalg.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
|
python
|
numpy/linalg/_linalg.py
| 888
|
[
"x1",
"x2"
] | false
| 3
| 6.4
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
open
|
function open(path, flags, mode, callback) {
path = getValidatedPath(path);
if (arguments.length < 3) {
callback = flags;
flags = 'r';
mode = 0o666;
} else if (typeof mode === 'function') {
callback = mode;
mode = 0o666;
} else {
mode = parseFileMode(mode, 'mode', 0o666);
}
const flagsNumber = stringToFlags(flags);
callback = makeCallback(callback);
const req = new FSReqCallback();
req.oncomplete = callback;
binding.open(path, flagsNumber, mode, req);
}
|
Asynchronously opens a file.
@param {string | Buffer | URL} path
@param {string | number} [flags]
@param {string | number} [mode]
@param {(
err?: Error,
fd?: number
) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 527
|
[
"path",
"flags",
"mode",
"callback"
] | false
| 5
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
_has_kubernetes
|
def _has_kubernetes(attempt_import: bool = False) -> bool:
"""
Check if kubernetes libraries are available.
:param attempt_import: If true, attempt to import kubernetes libraries if not already loaded. If
False, only check if already in sys.modules (avoids expensive import).
:return: True if kubernetes libraries are available, False otherwise.
"""
# Check if kubernetes is already imported before triggering expensive import
if "kubernetes.client" not in sys.modules and not attempt_import:
return False
# Loading kube modules is expensive, so delay it until the last moment
try:
from kubernetes.client import models as k8s
from airflow.providers.cncf.kubernetes.pod_generator import PodGenerator
globals()["k8s"] = k8s
globals()["PodGenerator"] = PodGenerator
return True
except ImportError:
return False
|
Check if kubernetes libraries are available.
:param attempt_import: If true, attempt to import kubernetes libraries if not already loaded. If
False, only check if already in sys.modules (avoids expensive import).
:return: True if kubernetes libraries are available, False otherwise.
|
python
|
airflow-core/src/airflow/serialization/serialized_objects.py
| 3,745
|
[
"attempt_import"
] |
bool
| true
| 3
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
getattr_with_deprecation
|
def getattr_with_deprecation(
imports: dict[str, str],
module: str,
override_deprecated_classes: dict[str, str],
extra_message: str,
name: str,
):
"""
Retrieve the imported attribute from the redirected module and raises a deprecation warning.
:param imports: dict of imports and their redirection for the module
:param module: name of the module in the package to get the attribute from
:param override_deprecated_classes: override target attributes with deprecated ones. If target attribute is
found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message
:param name: attribute name
:return:
"""
target_class_full_name = imports.get(name)
# Handle wildcard pattern "*" - redirect all attributes to target module
# Skip Python special attributes (dunder attributes) as they shouldn't be redirected
if not target_class_full_name and "*" in imports and not (name.startswith("__") and name.endswith("__")):
target_class_full_name = f"{imports['*']}.{name}"
if not target_class_full_name:
raise AttributeError(f"The module `{module!r}` has no attribute `{name!r}`")
# Determine the warning class name (may be overridden)
warning_class_name = target_class_full_name
if override_deprecated_classes and name in override_deprecated_classes:
warning_class_name = override_deprecated_classes[name]
message = f"The `{module}.{name}` attribute is deprecated. Please use `{warning_class_name!r}`."
if extra_message:
message += f" {extra_message}."
warnings.warn(message, DeprecatedImportWarning, stacklevel=2)
# Import and return the target attribute
new_module, new_class_name = target_class_full_name.rsplit(".", 1)
try:
return getattr(importlib.import_module(new_module), new_class_name)
except ImportError as e:
error_message = (
f"Could not import `{new_module}.{new_class_name}` while trying to import `{module}.{name}`."
)
if extra_message:
error_message += f" {extra_message}."
raise ImportError(error_message) from e
|
Retrieve the imported attribute from the redirected module and raises a deprecation warning.
:param imports: dict of imports and their redirection for the module
:param module: name of the module in the package to get the attribute from
:param override_deprecated_classes: override target attributes with deprecated ones. If target attribute is
found in the dictionary, it will be displayed in the warning message.
:param extra_message: extra message to display in the warning or import error message
:param name: attribute name
:return:
|
python
|
airflow-core/src/airflow/utils/deprecation_tools.py
| 37
|
[
"imports",
"module",
"override_deprecated_classes",
"extra_message",
"name"
] | true
| 10
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
toSafeInteger
|
function toSafeInteger(value) {
return value
? baseClamp(toInteger(value), -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER)
: (value === 0 ? value : 0);
}
|
Converts `value` to a safe integer. A safe integer can be compared and
represented correctly.
@static
@memberOf _
@since 4.0.0
@category Lang
@param {*} value The value to convert.
@returns {number} Returns the converted integer.
@example
_.toSafeInteger(3.2);
// => 3
_.toSafeInteger(Number.MIN_VALUE);
// => 0
_.toSafeInteger(Infinity);
// => 9007199254740991
_.toSafeInteger('3.2');
// => 3
|
javascript
|
lodash.js
| 12,637
|
[
"value"
] | false
| 3
| 7.04
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
getAsBoolean
|
private static <T extends Throwable> boolean getAsBoolean(final FailableBooleanSupplier<T> supplier) {
try {
return supplier.getAsBoolean();
} catch (final Throwable t) {
throw rethrow(t);
}
}
|
Invokes a boolean supplier, and returns the result.
@param supplier The boolean supplier to invoke.
@param <T> The type of checked exception, which the supplier can throw.
@return The boolean, which has been created by the supplier
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 491
|
[
"supplier"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
check_cv
|
def check_cv(cv=5, y=None, *, classifier=False):
"""Input checker utility for building a cross-validator.
Parameters
----------
cv : int, cross-validation generator, iterable or None, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
Examples
--------
>>> from sklearn.model_selection import check_cv
>>> check_cv(cv=5, y=None, classifier=False)
KFold(...)
>>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True)
StratifiedKFold(...)
"""
cv = 5 if cv is None else cv
if isinstance(cv, numbers.Integral):
if (
classifier
and (y is not None)
and (type_of_target(y, input_name="y") in ("binary", "multiclass"))
):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, "split") or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError(
"Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv
)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
|
Input checker utility for building a cross-validator.
Parameters
----------
cv : int, cross-validation generator, iterable or None, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable that generates (train, test) splits as arrays of indices.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value changed from 3-fold to 5-fold.
y : array-like, default=None
The target variable for supervised learning problems.
classifier : bool, default=False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
Examples
--------
>>> from sklearn.model_selection import check_cv
>>> check_cv(cv=5, y=None, classifier=False)
KFold(...)
>>> check_cv(cv=5, y=[1, 1, 0, 0, 0, 0], classifier=True)
StratifiedKFold(...)
|
python
|
sklearn/model_selection/_split.py
| 2,690
|
[
"cv",
"y",
"classifier"
] | false
| 11
| 6.96
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_decode_comment
|
def _decode_comment(self, s):
'''(INTERNAL) Decodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded comment.
'''
res = re.sub(r'^\%( )?', '', s)
return res
|
(INTERNAL) Decodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded comment.
|
python
|
sklearn/externals/_arff.py
| 674
|
[
"self",
"s"
] | false
| 1
| 6.24
|
scikit-learn/scikit-learn
| 64,340
|
sphinx
| false
|
|
evalScript
|
function evalScript(name, body, breakFirstLine, print, shouldLoadESM = false) {
const origModule = globalThis.module; // Set e.g. when called from the REPL.
const module = createModule(name);
const baseUrl = pathToFileURL(module.filename).href;
if (shouldUseModuleEntryPoint(name, body)) {
return getOptionValue('--strip-types') ?
evalTypeScriptModuleEntryPoint(body, print) :
evalModuleEntryPoint(body, print);
}
const evalFunction = () => runScriptInContext(name,
body,
breakFirstLine,
print,
module,
baseUrl,
undefined,
origModule);
if (shouldLoadESM) {
return require('internal/modules/run_main').runEntryPointWithESMLoader(evalFunction);
}
evalFunction();
}
|
Evaluate an ESM entry point and return the promise that gets fulfilled after
it finishes evaluation.
@param {string} source Source code the ESM
@param {boolean} print Whether the result should be printed.
@returns {Promise}
|
javascript
|
lib/internal/process/execution.js
| 81
|
[
"name",
"body",
"breakFirstLine",
"print"
] | false
| 4
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
index
|
public static <K, V> ImmutableListMultimap<K, V> index(
Iterator<V> values, Function<? super V, K> keyFunction) {
checkNotNull(keyFunction);
ImmutableListMultimap.Builder<K, V> builder = ImmutableListMultimap.builder();
while (values.hasNext()) {
V value = values.next();
checkNotNull(value, values);
builder.put(keyFunction.apply(value), value);
}
return builder.build();
}
|
Creates an index {@code ImmutableListMultimap} that contains the results of applying a
specified function to each item in an {@code Iterator} of values. Each value will be stored as
a value in the resulting multimap, yielding a multimap with the same size as the input
iterator. The key used to store that value in the multimap will be the result of calling the
function on that value. The resulting multimap is created as an immutable snapshot. In the
returned multimap, keys appear in the order they are first encountered, and the values
corresponding to each key appear in the same order as they are encountered.
<p>For example,
{@snippet :
List<String> badGuys =
Arrays.asList("Inky", "Blinky", "Pinky", "Pinky", "Clyde");
Function<String, Integer> stringLengthFunction = ...;
Multimap<Integer, String> index =
Multimaps.index(badGuys.iterator(), stringLengthFunction);
System.out.println(index);
}
<p>prints
{@snippet :
{4=[Inky], 6=[Blinky], 5=[Pinky, Pinky, Clyde]}
}
<p>The returned multimap is serializable if its keys and values are all serializable.
@param values the values to use when constructing the {@code ImmutableListMultimap}
@param keyFunction the function used to produce the key for each value
@return {@code ImmutableListMultimap} mapping the result of evaluating the function {@code
keyFunction} on each value in the input collection to that value
@throws NullPointerException if any element of {@code values} is {@code null}, or if {@code
keyFunction} produces {@code null} for any key
@since 10.0
|
java
|
android/guava/src/com/google/common/collect/Multimaps.java
| 1,712
|
[
"values",
"keyFunction"
] | true
| 2
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
incrementAndGet
|
public int incrementAndGet() {
value++;
return value;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately after the increment operation. This method is not thread safe.
@return the value associated with the instance after it is incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableInt.java
| 291
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
readAdditionalMetadata
|
ConfigurationMetadata readAdditionalMetadata() {
return readAdditionalMetadata(ADDITIONAL_METADATA_PATH);
}
|
Read additional {@link ConfigurationMetadata} for the current module or
{@code null}.
@return additional metadata or {@code null} if none is present
|
java
|
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataStore.java
| 136
|
[] |
ConfigurationMetadata
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getVisualListRange
|
function getVisualListRange(node: Node, list: TextRange, sourceFile: SourceFile): TextRange {
const children = node.getChildren(sourceFile);
for (let i = 1; i < children.length - 1; i++) {
if (children[i].pos === list.pos && children[i].end === list.end) {
return { pos: children[i - 1].end, end: children[i + 1].getStart(sourceFile) };
}
}
return list;
}
|
@param assumeNewLineBeforeCloseBrace
`false` when called on text from a real source file.
`true` when we need to assume `position` is on a newline.
This is useful for codefixes. Consider
```
function f() {
|}
```
with `position` at `|`.
When inserting some text after an open brace, we would like to get indentation as if a newline was already there.
By default indentation at `position` will be 0 so 'assumeNewLineBeforeCloseBrace' overrides this behavior.
|
typescript
|
src/services/formatting/smartIndenter.ts
| 535
|
[
"node",
"list",
"sourceFile"
] | true
| 4
| 8.32
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
indexer_between_time
|
def indexer_between_time(
self, start_time, end_time, include_start: bool = True, include_end: bool = True
) -> npt.NDArray[np.intp]:
"""
Return index locations of values between particular times of day.
Parameters
----------
start_time, end_time : datetime.time, str
Time passed either as object (datetime.time) or as string in
appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
include_start : bool, default True
Include boundaries; whether to set start bound as closed or open.
include_end : bool, default True
Include boundaries; whether to set end bound as closed or open.
Returns
-------
np.ndarray[np.intp]
Index locations of values between particular times of day.
See Also
--------
indexer_at_time : Get index locations of values at particular time of day.
DataFrame.between_time : Select values between particular times of day.
Examples
--------
>>> idx = pd.date_range("2023-01-01", periods=4, freq="h")
>>> idx
DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00',
'2023-01-01 02:00:00', '2023-01-01 03:00:00'],
dtype='datetime64[us]', freq='h')
>>> idx.indexer_between_time("00:00", "2:00", include_end=False)
array([0, 1])
"""
start_time = to_time(start_time)
end_time = to_time(end_time)
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
return mask.nonzero()[0]
|
Return index locations of values between particular times of day.
Parameters
----------
start_time, end_time : datetime.time, str
Time passed either as object (datetime.time) or as string in
appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
include_start : bool, default True
Include boundaries; whether to set start bound as closed or open.
include_end : bool, default True
Include boundaries; whether to set end bound as closed or open.
Returns
-------
np.ndarray[np.intp]
Index locations of values between particular times of day.
See Also
--------
indexer_at_time : Get index locations of values at particular time of day.
DataFrame.between_time : Select values between particular times of day.
Examples
--------
>>> idx = pd.date_range("2023-01-01", periods=4, freq="h")
>>> idx
DatetimeIndex(['2023-01-01 00:00:00', '2023-01-01 01:00:00',
'2023-01-01 02:00:00', '2023-01-01 03:00:00'],
dtype='datetime64[us]', freq='h')
>>> idx.indexer_between_time("00:00", "2:00", include_end=False)
array([0, 1])
|
python
|
pandas/core/indexes/datetimes.py
| 1,181
|
[
"self",
"start_time",
"end_time",
"include_start",
"include_end"
] |
npt.NDArray[np.intp]
| true
| 8
| 7.92
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
errorCounts
|
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new EnumMap<>(Errors.class);
data.topics().forEach(metadata -> {
metadata.partitions().forEach(p -> updateErrorCounts(errorCounts, Errors.forCode(p.errorCode())));
updateErrorCounts(errorCounts, Errors.forCode(metadata.errorCode()));
});
return errorCounts;
}
|
Get a map of the topicIds which had metadata errors
@return the map
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java
| 127
|
[] | true
| 1
| 6.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
to_feather
|
def to_feather(
df: DataFrame,
path: FilePath | WriteBuffer[bytes],
storage_options: StorageOptions | None = None,
**kwargs: Any,
) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
df : DataFrame
path : str, path object, or file-like object
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
**kwargs :
Additional keywords passed to `pyarrow.feather.write_feather`.
"""
import_optional_dependency("pyarrow")
from pyarrow import feather
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
with get_handle(
path, "wb", storage_options=storage_options, is_text=False
) as handles:
feather.write_feather(df, handles.handle, **kwargs)
|
Write a DataFrame to the binary Feather format.
Parameters
----------
df : DataFrame
path : str, path object, or file-like object
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc. For HTTP(S) URLs the key-value pairs
are forwarded to ``urllib.request.Request`` as header options. For other
URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are
forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more
details, and for more examples on storage options refer `here
<https://pandas.pydata.org/docs/user_guide/io.html?
highlight=storage_options#reading-writing-remote-files>`_.
**kwargs :
Additional keywords passed to `pyarrow.feather.write_feather`.
|
python
|
pandas/io/feather_format.py
| 39
|
[
"df",
"path",
"storage_options"
] |
None
| true
| 2
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
bind
|
@Contract("_, _, _, _, _, true -> !null")
private <T> @Nullable T bind(ConfigurationPropertyName name, Bindable<T> target, BindHandler handler,
Context context, boolean allowRecursiveBinding, boolean create) {
try (ConfigurationPropertyCaching.CacheOverride cacheOverride = this.configurationPropertyCaching.override()) {
try {
Bindable<T> replacementTarget = handler.onStart(name, target, context);
if (replacementTarget == null) {
return handleBindResult(name, target, handler, context, null, create);
}
target = replacementTarget;
Object bound = bindObject(name, target, handler, context, allowRecursiveBinding);
return handleBindResult(name, target, handler, context, bound, create);
}
catch (Exception ex) {
return handleBindError(name, target, handler, context, ex);
}
}
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 363
|
[
"name",
"target",
"handler",
"context",
"allowRecursiveBinding",
"create"
] |
T
| true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
angle
|
def angle(z, deg=False):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on the complex
plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Notes
-----
This function passes the imaginary and real parts of the argument to
`arctan2` to compute the result; consequently, it follows the convention
of `arctan2` when the magnitude of the argument is zero. See example.
Examples
--------
>>> import numpy as np
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816]) # may vary
>>> np.angle(1+1j, deg=True) # in degrees
45.0
>>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention
array([ 0. , 3.14159265, -0. , -3.14159265])
"""
z = asanyarray(z)
if issubclass(z.dtype.type, _nx.complexfloating):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
a = arctan2(zimag, zreal)
if deg:
a *= 180 / pi
return a
|
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on the complex
plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Notes
-----
This function passes the imaginary and real parts of the argument to
`arctan2` to compute the result; consequently, it follows the convention
of `arctan2` when the magnitude of the argument is zero. See example.
Examples
--------
>>> import numpy as np
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816]) # may vary
>>> np.angle(1+1j, deg=True) # in degrees
45.0
>>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention
array([ 0. , 3.14159265, -0. , -3.14159265])
|
python
|
numpy/lib/_function_base_impl.py
| 1,683
|
[
"z",
"deg"
] | false
| 4
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
toArray
|
public static char[] toArray(Collection<Character> collection) {
if (collection instanceof CharArrayAsList) {
return ((CharArrayAsList) collection).toCharArray();
}
Object[] boxedArray = collection.toArray();
int len = boxedArray.length;
char[] array = new char[len];
for (int i = 0; i < len; i++) {
// checkNotNull for GWT (do not optimize)
array[i] = (Character) checkNotNull(boxedArray[i]);
}
return array;
}
|
Copies a collection of {@code Character} instances into a new array of primitive {@code char}
values.
<p>Elements are copied from the argument collection as if by {@code collection.toArray()}.
Calling this method is as thread-safe as calling that method.
@param collection a collection of {@code Character} objects
@return an array containing the same values as {@code collection}, in the same order, converted
to primitives
@throws NullPointerException if {@code collection} or any of its elements is null
|
java
|
android/guava/src/com/google/common/primitives/Chars.java
| 436
|
[
"collection"
] | true
| 3
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
isConfigurationCandidate
|
static boolean isConfigurationCandidate(AnnotationMetadata metadata) {
// Do not consider an interface or an annotation...
if (metadata.isInterface()) {
return false;
}
// Any of the typical annotations found?
for (String indicator : candidateIndicators) {
if (metadata.isAnnotated(indicator)) {
return true;
}
}
// Finally, let's look for @Bean methods...
return hasBeanMethods(metadata);
}
|
Check the given metadata for a configuration class candidate
(or nested component class declared within a configuration/component class).
@param metadata the metadata of the annotated class
@return {@code true} if the given class is to be registered for
configuration class processing; {@code false} otherwise
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassUtils.java
| 174
|
[
"metadata"
] | true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
remove_extra_translations
|
def remove_extra_translations(language: str, summary: dict[str, LocaleSummary]):
    """
    Remove extra translations for the selected language.

    Removes keys that are present in the language file but missing in the
    English file, prunes any dictionaries left empty by the removal, and
    rewrites each affected file with naturally sorted keys.
    """
    console = get_console()

    # Helpers are defined once here instead of inside the loop below, and
    # ``prune_keys`` takes the key set as a parameter rather than closing over
    # a loop-local variable.
    def prune_keys(dst: dict, extra_keys: set[str], prefix: str = "") -> None:
        """Recursively delete dotted keys in ``extra_keys`` plus emptied sub-dicts."""
        keys_to_remove = []
        for key, value in list(dst.items()):
            full_key = f"{prefix}.{key}" if prefix else key
            if full_key in extra_keys:
                keys_to_remove.append(key)
            elif isinstance(value, dict):
                prune_keys(value, extra_keys, full_key)
                # Remove dictionaries that became empty after recursion
                if not value:
                    keys_to_remove.append(key)
        for key in keys_to_remove:
            del dst[key]

    def sort_dict_keys(obj):
        """Return a copy of ``obj`` with dict keys recursively in natural order."""
        if isinstance(obj, dict):
            return {k: sort_dict_keys(obj[k]) for k in sorted(obj, key=natural_sort_key)}
        return obj

    for filename, diff in summary.items():
        extra_keys = set(diff.extra_keys.get(language, []))
        if not extra_keys:
            continue
        lang_path = LOCALES_DIR / language / filename
        try:
            lang_data = load_json(lang_path)
        except Exception as e:
            # Best-effort: report and move on to the next locale file.
            console.print(f"[yellow]Failed to load {language} file {lang_path}: {e}[/yellow]")
            continue
        prune_keys(lang_data, extra_keys)
        lang_data = sort_dict_keys(lang_data)
        with open(lang_path, "w", encoding="utf-8") as f:
            json.dump(lang_data, f, ensure_ascii=False, indent=2)
            f.write("\n")  # Ensure newline at the end of the file
        console.print(f"[green]Removed {len(extra_keys)} extra translations from {lang_path}[/green]")
|
Remove extra translations for the selected language.
Removes keys that are present in the language file but missing in the English file.
|
python
|
dev/breeze/src/airflow_breeze/commands/ui_commands.py
| 515
|
[
"language",
"summary"
] | true
| 10
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
|
secureRandom
|
// Returns the shared SecureRandom instance held by SECURE_RANDOM_STRONG.
// The holder's get() presumably creates the instance lazily on first
// access — NOTE(review): confirm against the SECURE_RANDOM_STRONG field.
static SecureRandom secureRandom() {
    return SECURE_RANDOM_STRONG.get();
}
|
Gets the singleton instance based on {@link SecureRandom#SecureRandom()} which uses the default algorithm
and provider of {@link SecureRandom}.
<p>
The method {@link SecureRandom#SecureRandom()} is called on-demand.
</p>
@return the singleton instance based on {@link SecureRandom#SecureRandom()}.
@see SecureRandom#SecureRandom()
@since 3.16.0
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 254
|
[] |
SecureRandom
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.