function_name
stringlengths
1
57
function_code
stringlengths
20
4.99k
documentation
stringlengths
50
2k
language
stringclasses
5 values
file_path
stringlengths
8
166
line_number
int32
4
16.7k
parameters
listlengths
0
20
return_type
stringlengths
0
131
has_type_hints
bool
2 classes
complexity
int32
1
51
quality_score
float32
6
9.68
repo_name
stringclasses
34 values
repo_stars
int32
2.9k
242k
docstring_style
stringclasses
7 values
is_async
bool
2 classes
above
public static NumericEntityEscaper above(final int codePoint) { return outsideOf(0, codePoint); }
Constructs a {@link NumericEntityEscaper} above the specified value (exclusive). @param codePoint above which to escape. @return the newly created {@link NumericEntityEscaper} instance.
java
src/main/java/org/apache/commons/lang3/text/translate/NumericEntityEscaper.java
39
[ "codePoint" ]
NumericEntityEscaper
true
1
6.16
apache/commons-lang
2,896
javadoc
false
handleSpecificExceptionInResponse
public boolean handleSpecificExceptionInResponse(final R response, final long currentTimeMs) { return false; }
Error handling specific response exception to a group type. @param response The heartbeat response @param currentTimeMs Current time @return true if the error was handled, else false
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java
478
[ "response", "currentTimeMs" ]
true
1
6.48
apache/kafka
31,560
javadoc
false
subscribeInternal
private void subscribeInternal(Collection<String> topics, Optional<ConsumerRebalanceListener> listener) { acquireAndEnsureOpen(); try { throwIfGroupIdNotDefined(); if (topics == null) throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); if (topics.isEmpty()) { // treat subscribing to empty topic list as the same as unsubscribing unsubscribe(); } else { for (String topic : topics) { if (isBlank(topic)) throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic"); } // Clear the buffered data which are not a part of newly assigned topics final Set<TopicPartition> currentTopicPartitions = new HashSet<>(); for (TopicPartition tp : subscriptions.assignedPartitions()) { if (topics.contains(tp.topic())) currentTopicPartitions.add(tp); } fetchBuffer.retainAll(currentTopicPartitions); log.info("Subscribed to topic(s): {}", String.join(", ", topics)); applicationEventHandler.addAndGet(new TopicSubscriptionChangeEvent( new HashSet<>(topics), listener, defaultApiTimeoutDeadlineMs() )); } } finally { release(); } }
Subscribe to the RE2/J pattern. This will generate an event to update the pattern in the subscription state, so it's included in the next heartbeat request sent to the broker. No validation of the pattern is performed by the client (other than null/empty checks).
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
2,155
[ "topics", "listener" ]
void
true
5
6
apache/kafka
31,560
javadoc
false
commitAsync
public CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets) { if (offsets.isEmpty()) { log.debug("Skipping commit of empty offsets"); return CompletableFuture.completedFuture(Map.of()); } maybeUpdateLastSeenEpochIfNewer(offsets); OffsetCommitRequestState commitRequest = createOffsetCommitRequest(offsets, Long.MAX_VALUE); pendingRequests.addOffsetCommitRequest(commitRequest); CompletableFuture<Map<TopicPartition, OffsetAndMetadata>> asyncCommitResult = new CompletableFuture<>(); commitRequest.future.whenComplete((committedOffsets, error) -> { if (error != null) { asyncCommitResult.completeExceptionally(commitAsyncExceptionForError(error)); } else { asyncCommitResult.complete(offsets); } }); return asyncCommitResult; }
Generate a request to commit offsets without retrying, even if it fails with a retriable error. The generated request will be added to the queue to be sent on the next call to {@link #poll(long)}. @param offsets Offsets to commit per partition. @return Future that will complete when a response is received, successfully or exceptionally depending on the response. If the request fails with a retriable error, the future will be completed with a {@link RetriableCommitFailedException}.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
401
[ "offsets" ]
true
3
7.92
apache/kafka
31,560
javadoc
false
_validate_broker_urls
def _validate_broker_urls(self, broker_urls: Union[str, List[str]]) -> Set[str]: """Validate and split broker URLs. Args: broker_urls: Broker URLs, either as a semicolon-separated string or as a list of strings Returns: Set of valid broker URLs Raises: ValueError: If no valid broker URLs are found or if invalid URLs are provided """ if not broker_urls: raise ValueError("broker_url configuration is empty") if isinstance(broker_urls, str): brokers = broker_urls.split(";") elif isinstance(broker_urls, list): if not all(isinstance(url, str) for url in broker_urls): raise ValueError("All broker URLs must be strings") brokers = broker_urls else: raise ValueError(f"broker_url must be a string or list, got {broker_urls!r}") valid_urls = {url for url in brokers} if not valid_urls: raise ValueError("No valid broker URLs found in configuration") return valid_urls
Validate and split broker URLs. Args: broker_urls: Broker URLs, either as a semicolon-separated string or as a list of strings Returns: Set of valid broker URLs Raises: ValueError: If no valid broker URLs are found or if invalid URLs are provided
python
celery/worker/consumer/delayed_delivery.py
226
[ "self", "broker_urls" ]
Set[str]
true
7
7.76
celery/celery
27,741
google
false
as
public <E extends ZipEntry> E as(BiFunction<Entry, String, E> factory) { try { E result = factory.apply(this, getName()); long pos = getCentralDirectoryFileHeaderRecordPos(this.lookupIndex); this.centralRecord.copyTo(ZipContent.this.data, pos, result); return result; } catch (IOException ex) { throw new UncheckedIOException(ex); } }
Adapt the raw entry into a {@link ZipEntry} or {@link ZipEntry} subclass. @param <E> the entry type @param factory the factory used to create the {@link ZipEntry} @return a fully populated zip entry
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
834
[ "factory" ]
E
true
2
7.76
spring-projects/spring-boot
79,428
javadoc
false
factorize_from_iterables
def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]: """ A higher-level wrapper over `factorize_from_iterable`. Parameters ---------- iterables : list-like of list-likes Returns ------- codes : list of ndarrays categories : list of Indexes Notes ----- See `factorize_from_iterable` for more info. """ if len(iterables) == 0: # For consistency, it should return two empty lists. return [], [] codes, categories = zip( *(factorize_from_iterable(it) for it in iterables), strict=True, ) return list(codes), list(categories)
A higher-level wrapper over `factorize_from_iterable`. Parameters ---------- iterables : list-like of list-likes Returns ------- codes : list of ndarrays categories : list of Indexes Notes ----- See `factorize_from_iterable` for more info.
python
pandas/core/arrays/categorical.py
3,157
[ "iterables" ]
tuple[list[np.ndarray], list[Index]]
true
2
6.56
pandas-dev/pandas
47,362
numpy
false
get_instance
def get_instance(self, instance_id: str, filters: list | None = None): """ Get EC2 instance by id and return it. :param instance_id: id of the AWS EC2 instance :param filters: List of filters to specify instances to get :return: Instance object """ if self._api_type == "client_type": return self.get_instances(filters=filters, instance_ids=[instance_id])[0] return self.conn.Instance(id=instance_id)
Get EC2 instance by id and return it. :param instance_id: id of the AWS EC2 instance :param filters: List of filters to specify instances to get :return: Instance object
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/ec2.py
84
[ "self", "instance_id", "filters" ]
true
2
8.24
apache/airflow
43,597
sphinx
false
translate
public abstract boolean translate(int codePoint, Writer out) throws IOException;
Translate the specified code point into another. @param codePoint int character input to translate. @param out Writer to optionally push the translated output to. @return boolean as to whether translation occurred or not. @throws IOException if and only if the Writer produces an IOException.
java
src/main/java/org/apache/commons/lang3/text/translate/CodePointTranslator.java
60
[ "codePoint", "out" ]
true
1
6.32
apache/commons-lang
2,896
javadoc
false
rate_limit
def rate_limit(state, task_name, rate_limit, **kwargs): """Tell worker(s) to modify the rate limit for a task by type. See Also: :attr:`celery.app.task.Task.rate_limit`. Arguments: task_name (str): Type of task to set rate limit for. rate_limit (int, str): New rate limit. """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. try: rate(rate_limit) except ValueError as exc: return nok(f'Invalid rate limit string: {exc!r}') try: state.app.tasks[task_name].rate_limit = rate_limit except KeyError: logger.error('Rate limit attempt for unknown task %s', task_name, exc_info=True) return nok('unknown task') state.consumer.reset_rate_limits() if not rate_limit: logger.info('Rate limits disabled for tasks of type %s', task_name) return ok('rate limit disabled successfully') logger.info('New rate limit for tasks of type %s: %s.', task_name, rate_limit) return ok('new rate limit set successfully')
Tell worker(s) to modify the rate limit for a task by type. See Also: :attr:`celery.app.task.Task.rate_limit`. Arguments: task_name (str): Type of task to set rate limit for. rate_limit (int, str): New rate limit.
python
celery/worker/control.py
252
[ "state", "task_name", "rate_limit" ]
false
2
6.24
celery/celery
27,741
google
false
getAll
public static Map<String, ConfigurationPropertiesBean> getAll(ApplicationContext applicationContext) { Assert.notNull(applicationContext, "'applicationContext' must not be null"); if (applicationContext instanceof ConfigurableApplicationContext configurableContext) { return getAll(configurableContext); } Map<String, ConfigurationPropertiesBean> propertiesBeans = new LinkedHashMap<>(); applicationContext.getBeansWithAnnotation(ConfigurationProperties.class).forEach((name, instance) -> { ConfigurationPropertiesBean propertiesBean = get(applicationContext, instance, name); if (propertiesBean != null) { propertiesBeans.put(name, propertiesBean); } }); return propertiesBeans; }
Return all {@link ConfigurationProperties @ConfigurationProperties} beans contained in the given application context. Both directly annotated beans, as well as beans that have {@link ConfigurationProperties @ConfigurationProperties} annotated factory methods are included. @param applicationContext the source application context @return a map of all configuration properties beans keyed by the bean name
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/ConfigurationPropertiesBean.java
137
[ "applicationContext" ]
true
3
7.28
spring-projects/spring-boot
79,428
javadoc
false
partitionChanged
private boolean partitionChanged(String topic, TopicInfo topicInfo, BuiltInPartitioner.StickyPartitionInfo partitionInfo, Deque<ProducerBatch> deque, long nowMs, Cluster cluster) { if (topicInfo.builtInPartitioner.isPartitionChanged(partitionInfo)) { log.trace("Partition {} for topic {} switched by a concurrent append, retrying", partitionInfo.partition(), topic); return true; } // We might have disabled partition switch if the queue had incomplete batches. // Check if all batches are full now and switch . if (allBatchesFull(deque)) { topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, 0, cluster, true); if (topicInfo.builtInPartitioner.isPartitionChanged(partitionInfo)) { log.trace("Completed previously disabled switch for topic {} partition {}, retrying", topic, partitionInfo.partition()); return true; } } return false; }
Check if partition concurrently changed, or we need to complete previously disabled partition change. @param topic The topic @param topicInfo The topic info @param partitionInfo The built-in partitioner's partition info @param deque The partition queue @param nowMs The current time, in milliseconds @param cluster THe cluster metadata @return 'true' if partition changed and we need to get new partition info and retry, 'false' otherwise
java
clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java
232
[ "topic", "topicInfo", "partitionInfo", "deque", "nowMs", "cluster" ]
true
4
7.76
apache/kafka
31,560
javadoc
false
sizeInBytes
public static int sizeInBytes(long baseOffset, Iterable<Record> records) { Iterator<Record> iterator = records.iterator(); if (!iterator.hasNext()) return 0; int size = RECORD_BATCH_OVERHEAD; Long baseTimestamp = null; while (iterator.hasNext()) { Record record = iterator.next(); int offsetDelta = (int) (record.offset() - baseOffset); if (baseTimestamp == null) baseTimestamp = record.timestamp(); long timestampDelta = record.timestamp() - baseTimestamp; size += DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(), record.headers()); } return size; }
Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas. @return The base timestamp
java
clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java
510
[ "baseOffset", "records" ]
true
4
6.88
apache/kafka
31,560
javadoc
false
bindCallExpressionFlow
function bindCallExpressionFlow(node: CallExpression | CallChain) { if (isOptionalChain(node)) { bindOptionalChainFlow(node); } else { // If the target of the call expression is a function expression or arrow function we have // an immediately invoked function expression (IIFE). Initialize the flowNode property to // the current control flow (which includes evaluation of the IIFE arguments). const expr = skipParentheses(node.expression); if (expr.kind === SyntaxKind.FunctionExpression || expr.kind === SyntaxKind.ArrowFunction) { bindEach(node.typeArguments); bindEach(node.arguments); bind(node.expression); } else { bindEachChild(node); if (node.expression.kind === SyntaxKind.SuperKeyword) { currentFlow = createFlowCall(currentFlow, node); } } } if (node.expression.kind === SyntaxKind.PropertyAccessExpression) { const propertyAccess = node.expression as PropertyAccessExpression; if (isIdentifier(propertyAccess.name) && isNarrowableOperand(propertyAccess.expression) && isPushOrUnshiftIdentifier(propertyAccess.name)) { currentFlow = createFlowMutation(FlowFlags.ArrayMutation, currentFlow, node); } } }
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names. @param symbolTable - The symbol table which node will be added to. @param parent - node's parent declaration. @param node - The declaration to be added to the symbol table @param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.) @param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
typescript
src/compiler/binder.ts
2,226
[ "node" ]
false
11
6.08
microsoft/TypeScript
107,154
jsdoc
false
compute_sample_weight
def compute_sample_weight(class_weight, y, *, indices=None): """Estimate sample weights by class for unbalanced datasets. Parameters ---------- class_weight : dict, list of dicts, "balanced", or None Weights associated with classes in the form `{class_label: weight}`. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. Note that for multioutput (including multilabel) weights should be defined for each class of every column in its own dict. For example, for four-class multilabel classification weights should be `[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}]` instead of `[{1:1}, {2:5}, {3:1}, {4:1}]`. The `"balanced"` mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data: `n_samples / (n_classes * np.bincount(y))`. For multi-output, the weights of each column of y will be multiplied. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) Array of original class labels per sample. indices : array-like of shape (n_subsample,), default=None Array of indices to be used in a subsample. Can be of length less than `n_samples` in the case of a subsample, or equal to `n_samples` in the case of a bootstrap subsample with repeated indices. If `None`, the sample weight will be calculated over the full sample. Only `"balanced"` is supported for `class_weight` if this is provided. Returns ------- sample_weight_vect : ndarray of shape (n_samples,) Array with sample weights as applied to the original `y`. Examples -------- >>> from sklearn.utils.class_weight import compute_sample_weight >>> y = [1, 1, 1, 1, 0, 0] >>> compute_sample_weight(class_weight="balanced", y=y) array([0.75, 0.75, 0.75, 0.75, 1.5 , 1.5 ]) """ # Ensure y is 2D. Sparse matrices are already 2D. 
if not sparse.issparse(y): y = np.atleast_1d(y) if y.ndim == 1: y = np.reshape(y, (-1, 1)) n_outputs = y.shape[1] if indices is not None and class_weight != "balanced": raise ValueError( "The only valid class_weight for subsampling is 'balanced'. " f"Given {class_weight}." ) elif n_outputs > 1: if class_weight is None or isinstance(class_weight, dict): raise ValueError( "For multi-output, class_weight should be a list of dicts, or the " "string 'balanced'." ) elif isinstance(class_weight, list) and len(class_weight) != n_outputs: raise ValueError( "For multi-output, number of elements in class_weight should match " f"number of outputs. Got {len(class_weight)} element(s) while having " f"{n_outputs} outputs." ) expanded_class_weight = [] for k in range(n_outputs): if sparse.issparse(y): # Ok to densify a single column at a time y_full = y[:, [k]].toarray().flatten() else: y_full = y[:, k] classes_full = np.unique(y_full) classes_missing = None if class_weight == "balanced" or n_outputs == 1: class_weight_k = class_weight else: class_weight_k = class_weight[k] if indices is not None: # Get class weights for the subsample, covering all classes in # case some labels that were present in the original data are # missing from the sample. y_subsample = y_full[indices] classes_subsample = np.unique(y_subsample) weight_k = np.take( compute_class_weight( class_weight_k, classes=classes_subsample, y=y_subsample ), np.searchsorted(classes_subsample, classes_full), mode="clip", ) classes_missing = set(classes_full) - set(classes_subsample) else: weight_k = compute_class_weight( class_weight_k, classes=classes_full, y=y_full ) weight_k = weight_k[np.searchsorted(classes_full, y_full)] if classes_missing: # Make missing classes' weight zero weight_k[np.isin(y_full, list(classes_missing))] = 0.0 expanded_class_weight.append(weight_k) expanded_class_weight = np.prod(expanded_class_weight, axis=0, dtype=np.float64) return expanded_class_weight
Estimate sample weights by class for unbalanced datasets. Parameters ---------- class_weight : dict, list of dicts, "balanced", or None Weights associated with classes in the form `{class_label: weight}`. If not given, all classes are supposed to have weight one. For multi-output problems, a list of dicts can be provided in the same order as the columns of y. Note that for multioutput (including multilabel) weights should be defined for each class of every column in its own dict. For example, for four-class multilabel classification weights should be `[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}]` instead of `[{1:1}, {2:5}, {3:1}, {4:1}]`. The `"balanced"` mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data: `n_samples / (n_classes * np.bincount(y))`. For multi-output, the weights of each column of y will be multiplied. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs) Array of original class labels per sample. indices : array-like of shape (n_subsample,), default=None Array of indices to be used in a subsample. Can be of length less than `n_samples` in the case of a subsample, or equal to `n_samples` in the case of a bootstrap subsample with repeated indices. If `None`, the sample weight will be calculated over the full sample. Only `"balanced"` is supported for `class_weight` if this is provided. Returns ------- sample_weight_vect : ndarray of shape (n_samples,) Array with sample weights as applied to the original `y`. Examples -------- >>> from sklearn.utils.class_weight import compute_sample_weight >>> y = [1, 1, 1, 1, 0, 0] >>> compute_sample_weight(class_weight="balanced", y=y) array([0.75, 0.75, 0.75, 0.75, 1.5 , 1.5 ])
python
sklearn/utils/class_weight.py
114
[ "class_weight", "y", "indices" ]
false
19
6.4
scikit-learn/scikit-learn
64,340
numpy
false
send_mass_mail
def send_mass_mail( datatuple, *, fail_silently=False, auth_user=None, auth_password=None, connection=None, ): """ Given a datatuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. If auth_user and auth_password are set, use them to log in. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly. """ connection = connection or get_connection( username=auth_user, password=auth_password, fail_silently=fail_silently, ) messages = [ EmailMessage(subject, message, sender, recipient, connection=connection) for subject, message, sender, recipient in datatuple ] return connection.send_messages(messages)
Given a datatuple of (subject, message, from_email, recipient_list), send each message to each recipient list. Return the number of emails sent. If from_email is None, use the DEFAULT_FROM_EMAIL setting. If auth_user and auth_password are set, use them to log in. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly.
python
django/core/mail/__init__.py
120
[ "datatuple", "fail_silently", "auth_user", "auth_password", "connection" ]
false
2
6.08
django/django
86,204
unknown
false
ndim
def ndim(self) -> int: """ Number of dimensions of the underlying data, by definition 1. See Also -------- Series.size: Return the number of elements in the underlying data. Series.shape: Return a tuple of the shape of the underlying data. Series.dtype: Return the dtype object of the underlying data. Series.values: Return Series as ndarray or ndarray-like depending on the dtype. Examples -------- >>> s = pd.Series(["Ant", "Bear", "Cow"]) >>> s 0 Ant 1 Bear 2 Cow dtype: str >>> s.ndim 1 For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.ndim 1 """ return 1
Number of dimensions of the underlying data, by definition 1. See Also -------- Series.size: Return the number of elements in the underlying data. Series.shape: Return a tuple of the shape of the underlying data. Series.dtype: Return the dtype object of the underlying data. Series.values: Return Series as ndarray or ndarray-like depending on the dtype. Examples -------- >>> s = pd.Series(["Ant", "Bear", "Cow"]) >>> s 0 Ant 1 Bear 2 Cow dtype: str >>> s.ndim 1 For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.ndim 1
python
pandas/core/base.py
368
[ "self" ]
int
true
1
7.12
pandas-dev/pandas
47,362
unknown
false
downgrade
def downgrade(*, to_revision, from_revision=None, show_sql_only=False, session: Session = NEW_SESSION): """ Downgrade the airflow metastore schema to a prior version. :param to_revision: The alembic revision to downgrade *to*. :param show_sql_only: if True, print sql statements but do not run them :param from_revision: if supplied, alembic revision to dawngrade *from*. This may only be used in conjunction with ``sql=True`` because if we actually run the commands, we should only downgrade from the *current* revision. :param session: sqlalchemy session for connection to airflow metadata database """ if from_revision and not show_sql_only: raise ValueError( "`from_revision` can't be combined with `sql=False`. When actually " "applying a downgrade (instead of just generating sql), we always " "downgrade from current revision." ) if not settings.SQL_ALCHEMY_CONN: raise RuntimeError("The settings.SQL_ALCHEMY_CONN not set.") # alembic adds significant import time, so we import it lazily from alembic import command log.info("Attempting downgrade to revision %s", to_revision) config = _get_alembic_config() # If downgrading to less than 3.0.0, we need to handle the FAB provider if _revision_greater(config, _REVISION_HEADS_MAP["2.10.3"], to_revision): _handle_fab_downgrade(session=session) with create_global_lock(session=session, lock=DBLocks.MIGRATIONS): if show_sql_only: log.warning("Generating sql scripts for manual migration.") if not from_revision: from_revision = _get_current_revision(session) revision_range = f"{from_revision}:{to_revision}" _offline_migration(command.downgrade, config=config, revision=revision_range) else: log.info("Applying downgrade migrations to Airflow database.") command.downgrade(config, revision=to_revision, sql=show_sql_only)
Downgrade the airflow metastore schema to a prior version. :param to_revision: The alembic revision to downgrade *to*. :param show_sql_only: if True, print sql statements but do not run them :param from_revision: if supplied, alembic revision to dawngrade *from*. This may only be used in conjunction with ``sql=True`` because if we actually run the commands, we should only downgrade from the *current* revision. :param session: sqlalchemy session for connection to airflow metadata database
python
airflow-core/src/airflow/utils/db.py
1,180
[ "to_revision", "from_revision", "show_sql_only", "session" ]
true
8
6.88
apache/airflow
43,597
sphinx
false
getGenericInterfaces
final ImmutableList<TypeToken<? super T>> getGenericInterfaces() { if (runtimeType instanceof TypeVariable) { return boundsAsInterfaces(((TypeVariable<?>) runtimeType).getBounds()); } if (runtimeType instanceof WildcardType) { return boundsAsInterfaces(((WildcardType) runtimeType).getUpperBounds()); } ImmutableList.Builder<TypeToken<? super T>> builder = ImmutableList.builder(); for (Type interfaceType : getRawType().getGenericInterfaces()) { @SuppressWarnings("unchecked") // interface of T TypeToken<? super T> resolvedInterface = (TypeToken<? super T>) resolveSupertype(interfaceType); builder.add(resolvedInterface); } return builder.build(); }
Returns the generic interfaces that this type directly {@code implements}. This method is similar but different from {@link Class#getGenericInterfaces()}. For example, {@code new TypeToken<List<String>>() {}.getGenericInterfaces()} will return a list that contains {@code new TypeToken<Iterable<String>>() {}}; while {@code List.class.getGenericInterfaces()} will return an array that contains {@code Iterable<T>}, where the {@code T} is the type variable declared by interface {@code Iterable}. <p>If this type is a type variable or wildcard, its upper bounds are examined and those that are either an interface or upper-bounded only by interfaces are returned. This means that the returned types could include type variables too.
java
android/guava/src/com/google/common/reflect/TypeToken.java
355
[]
true
3
6.24
google/guava
51,352
javadoc
false
flatten
def flatten(self) -> Series: """ Flatten list values. Returns ------- pandas.Series The data from all lists in the series flattened. See Also -------- ListAccessor.__getitem__ : Index or slice values in the Series. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... [1, 2, 3], ... [3], ... ], ... dtype=pd.ArrowDtype(pa.list_(pa.int64())), ... ) >>> s.list.flatten() 0 1 0 2 0 3 1 3 dtype: int64[pyarrow] """ from pandas import Series counts = pa.compute.list_value_length(self._pa_array) flattened = pa.compute.list_flatten(self._pa_array) index = self._data.index.repeat(counts.fill_null(pa.scalar(0, counts.type))) return Series( flattened, dtype=ArrowDtype(flattened.type), index=index, name=self._data.name, )
Flatten list values. Returns ------- pandas.Series The data from all lists in the series flattened. See Also -------- ListAccessor.__getitem__ : Index or slice values in the Series. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... [1, 2, 3], ... [3], ... ], ... dtype=pd.ArrowDtype(pa.list_(pa.int64())), ... ) >>> s.list.flatten() 0 1 0 2 0 3 1 3 dtype: int64[pyarrow]
python
pandas/core/arrays/arrow/accessors.py
194
[ "self" ]
Series
true
1
7.28
pandas-dev/pandas
47,362
unknown
false
cp
function cp(src, dest, options, callback) { if (typeof options === 'function') { callback = options; options = undefined; } callback = makeCallback(callback); options = validateCpOptions(options); src = getValidatedPath(src, 'src'); dest = getValidatedPath(dest, 'dest'); lazyLoadCp(); cpFn(src, dest, options, callback); }
Asynchronously copies `src` to `dest`. `src` can be a file, directory, or symlink. The contents of directories will be copied recursively. @param {string | URL} src @param {string | URL} dest @param {object} [options] @param {(err?: Error) => any} callback @returns {void}
javascript
lib/fs.js
3,098
[ "src", "dest", "options", "callback" ]
false
2
6.24
nodejs/node
114,839
jsdoc
false
getTempDirectory
private Path getTempDirectory() { String property = System.getProperty("java.io.tmpdir"); Assert.state(StringUtils.hasLength(property), "No 'java.io.tmpdir' property set"); Path tempDirectory = Paths.get(property); Assert.state(Files.exists(tempDirectory), () -> "Temp directory '" + tempDirectory + "' does not exist"); Assert.state(Files.isDirectory(tempDirectory), () -> "Temp location '" + tempDirectory + "' is not a directory"); return tempDirectory; }
Return a subdirectory of the application temp. @param subDir the subdirectory name @return a subdirectory
java
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationTemp.java
135
[]
Path
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
identityHashCodeHex
public static String identityHashCodeHex(final Object object) { return Integer.toHexString(System.identityHashCode(object)); }
Returns the hexadecimal hash code for the given object per {@link System#identityHashCode(Object)}. <p> Short hand for {@code Integer.toHexString(System.identityHashCode(object))}. </p> @param object object for which the hashCode is to be calculated. @return Hash code in hexadecimal format. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/ObjectUtils.java
774
[ "object" ]
String
true
1
6.16
apache/commons-lang
2,896
javadoc
false
putBytes
@CanIgnoreReturnValue PrimitiveSink putBytes(ByteBuffer bytes);
Puts the remaining bytes of a byte buffer into this sink. {@code bytes.position()} is the first byte written, {@code bytes.limit() - 1} is the last. The position of the buffer will be equal to the limit when this method returns. @param bytes a byte buffer @return this instance @since 23.0
java
android/guava/src/com/google/common/hash/PrimitiveSink.java
71
[ "bytes" ]
PrimitiveSink
true
1
6.8
google/guava
51,352
javadoc
false
symbolFlagsHaveMeaning
function symbolFlagsHaveMeaning(flags: SymbolFlags, meaning: SemanticMeaning): boolean { return meaning === SemanticMeaning.All ? true : meaning & SemanticMeaning.Value ? !!(flags & SymbolFlags.Value) : meaning & SemanticMeaning.Type ? !!(flags & SymbolFlags.Type) : meaning & SemanticMeaning.Namespace ? !!(flags & SymbolFlags.Namespace) : false; }
Checks whether a symbol's flags are relevant for the requested semantic meaning (value, type, or namespace); `SemanticMeaning.All` matches any symbol. @internal
typescript
src/services/codefixes/importFixes.ts
2,130
[ "flags", "meaning" ]
true
5
6.56
microsoft/TypeScript
107,154
jsdoc
false
estimateNNDescentMemory
/**
 * Estimates the GPU memory, in bytes, needed to build an index with the NN_DESCENT
 * algorithm over {@code numVectors} vectors of {@code dims} dimensions.
 *
 * @param numVectors the number of vectors
 * @param dims the dimensionality of each vector
 * @param dataType the element type of the vectors
 * @return the estimated memory in bytes
 */
static long estimateNNDescentMemory(int numVectors, int dims, CuVSMatrix.DataType dataType) {
    // Bytes per vector component for the given element type.
    int bytesPerElement = switch (dataType) {
        case FLOAT -> Float.BYTES;
        case INT, UINT -> Integer.BYTES;
        case BYTE -> Byte.BYTES;
    };
    return (long) (GPU_COMPUTATION_MEMORY_FACTOR * numVectors * dims * bytesPerElement);
}
Estimates the required GPU memory for building an index using the NN_DESCENT algorithm. @param numVectors the number of vectors @param dims the dimensionality of vectors @param dataType the data type of the vectors @return the estimated memory in bytes needed for NN_DESCENT
java
libs/gpu-codec/src/main/java/org/elasticsearch/gpu/codec/CuVSResourceManager.java
74
[ "numVectors", "dims", "dataType" ]
true
1
6.4
elastic/elasticsearch
75,680
javadoc
false
memoizeWithExpiration
@J2ktIncompatible @GwtIncompatible // java.time.Duration @IgnoreJRERequirement public static <T extends @Nullable Object> Supplier<T> memoizeWithExpiration( Supplier<T> delegate, Duration duration) { checkNotNull(delegate); // The alternative of `duration.compareTo(Duration.ZERO) > 0` causes J2ObjC trouble. checkArgument( !duration.isNegative() && !duration.isZero(), "duration (%s) must be > 0", duration); return new ExpiringMemoizingSupplier<>(delegate, toNanosSaturated(duration)); }
Returns a supplier that caches the instance supplied by the delegate and removes the cached value after the specified time has passed. Subsequent calls to {@code get()} return the cached value if the expiration time has not passed. After the expiration time, a new value is retrieved, cached, and returned. See: <a href="http://en.wikipedia.org/wiki/Memoization">memoization</a> <p>The returned supplier is thread-safe. The supplier's serialized form does not contain the cached value, which will be recalculated when {@code get()} is called on the reserialized instance. The actual memoization does not happen when the underlying delegate throws an exception. <p>When the underlying delegate throws an exception then this memoizing supplier will keep delegating calls until it returns valid data. @param duration the length of time after a value is created that it should stop being returned by subsequent {@code get()} calls @throws IllegalArgumentException if {@code duration} is not positive @since 33.1.0
java
android/guava/src/com/google/common/base/Suppliers.java
272
[ "delegate", "duration" ]
true
2
6.88
google/guava
51,352
javadoc
false
isAccessible
/**
 * Tests whether {@code type} and every class enclosing it are declared {@code public},
 * i.e. whether the class is accessible from any context.
 *
 * @param type the class to check.
 * @return {@code true} if {@code type} and all enclosing classes are {@code public}.
 */
private static boolean isAccessible(final Class<?> type) {
    for (Class<?> current = type; current != null; current = current.getEnclosingClass()) {
        if (!ClassUtils.isPublic(current)) {
            return false;
        }
    }
    return true;
}
Tests whether the specified class is generally accessible, i.e. is declared in an entirely {@code public} manner. @param type to check. @return {@code true} if {@code type} and any enclosing classes are {@code public}. @throws SecurityException Thrown if a security manager is present and a caller's class loader is not the same as or an ancestor of the class loader for a class and invocation of {@link SecurityManager#checkPackageAccess(String)} denies access to the package of the class.
java
src/main/java/org/apache/commons/lang3/reflect/ConstructorUtils.java
312
[ "type" ]
true
3
7.92
apache/commons-lang
2,896
javadoc
false
update_providers_with_next_version_comment
def update_providers_with_next_version_comment() -> dict[str, dict[str, Any]]:
    """
    Scan all provider pyproject.toml files for "# use next version" comments and update
    the version of the referenced provider to the current version from that provider's
    pyproject.toml.

    Returns a dictionary with information about updated providers.
    """
    updates_made: dict[str, dict[str, Any]] = {}

    for pyproject_file in AIRFLOW_PROVIDERS_ROOT_PATH.glob("**/pyproject.toml"):
        original_lines = pyproject_file.read_text().split("\n")
        rewritten_lines: list[str] = []
        changed = False

        for line in original_lines:
            # Only touch lines carrying the marker comment, skipping the
            # dependencies declaration line itself.
            if "# use next version" in line and "dependencies = [" not in line:
                new_line, modified = _process_line_with_next_version_comment(
                    line, pyproject_file, updates_made
                )
                rewritten_lines.append(new_line)
                changed = changed or modified
            else:
                rewritten_lines.append(line)

        if changed:
            pyproject_file.write_text("\n".join(rewritten_lines))
            get_console().print(
                f"[success]Updated {pyproject_file.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH)}\n"
            )

    return updates_made
Scan all provider pyproject.toml files for "# use next version" comments and update the version of the referenced provider to the current version from that provider's pyproject.toml. Returns a dictionary with information about updated providers.
python
dev/breeze/src/airflow_breeze/utils/packages.py
1,300
[]
dict[str, dict[str, Any]]
true
8
6.88
apache/airflow
43,597
unknown
false
canShortcutWithSource
/**
 * Returns whether the element source can be used as a shortcut for an operation such
 * as {@code equals} or {@code toString}: the name must be unresolved, every element
 * must be of one of the two given types, and the elements must be contiguous.
 *
 * @param requiredType the required type
 * @param alternativeType an alternative acceptable type
 * @return {@code true} if all elements match at least one of the types
 */
boolean canShortcutWithSource(ElementType requiredType, ElementType alternativeType) {
    if (this.resolved != null) {
        return false;
    }
    for (int index = 0; index < this.size; index++) {
        ElementType elementType = this.type[index];
        boolean typeMatches = (elementType == requiredType) || (elementType == alternativeType);
        if (!typeMatches) {
            return false;
        }
        // Elements must be adjacent in the source with no gaps between them.
        boolean contiguous = (index == 0) || (this.end[index - 1] + 1 == this.start[index]);
        if (!contiguous) {
            return false;
        }
    }
    return true;
}
Returns if the element source can be used as a shortcut for an operation such as {@code equals} or {@code toString}. @param requiredType the required type @param alternativeType and alternative required type @return {@code true} if all elements match at least one of the types
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
962
[ "requiredType", "alternativeType" ]
true
7
7.76
spring-projects/spring-boot
79,428
javadoc
false
xp_assert_close
def xp_assert_close(
    actual: Array,
    desired: Array,
    *,
    rtol: float | None = None,
    atol: float = 0,
    err_msg: str = "",
    check_dtype: bool = True,
    check_shape: bool = True,
    check_scalar: bool = False,
) -> None:
    """
    Array-API compatible version of `np.testing.assert_allclose`.

    Parameters
    ----------
    actual : Array
        The array produced by the tested function.
    desired : Array
        The expected array (typically hardcoded).
    rtol : float, optional
        Relative tolerance. Default: dtype-dependent.
    atol : float, optional
        Absolute tolerance. Default: 0.
    err_msg : str, optional
        Error message to display on failure.
    check_dtype, check_shape : bool, default: True
        Whether to check agreement between actual and desired dtypes and shapes.
    check_scalar : bool, default: False
        NumPy only: whether to check agreement between actual and desired types -
        0d array vs scalar.

    Notes
    -----
    The default `atol` and `rtol` differ from `xp.all(xpx.isclose(a, b))`.
    """
    xp = _check_ns_shape_dtype(actual, desired, check_dtype, check_shape, check_scalar)
    if not _is_materializable(actual):
        return

    if rtol is None:
        if xp.isdtype(actual.dtype, ("real floating", "complex floating")):
            # A multiplier of 4 puts the default rtol for float64 roughly half way
            # between sqrt(eps) and numpy.testing.assert_allclose's default of 1e-7.
            rtol = xp.finfo(actual.dtype).eps ** 0.5 * 4
        else:
            rtol = 1e-7

    np.testing.assert_allclose(
        as_numpy_array(actual, xp=xp),
        as_numpy_array(desired, xp=xp),
        rtol=rtol,
        atol=atol,
        err_msg=err_msg,
    )
Array-API compatible version of `np.testing.assert_allclose`. Parameters ---------- actual : Array The array produced by the tested function. desired : Array The expected array (typically hardcoded). rtol : float, optional Relative tolerance. Default: dtype-dependent. atol : float, optional Absolute tolerance. Default: 0. err_msg : str, optional Error message to display on failure. check_dtype, check_shape : bool, default: True Whether to check agreement between actual and desired dtypes and shapes check_scalar : bool, default: False NumPy only: whether to check agreement between actual and desired types - 0d array vs scalar. See Also -------- xp_assert_equal : Similar function for exact equality checks. isclose : Public function for checking closeness. numpy.testing.assert_allclose : Similar function for NumPy arrays. Notes ----- The default `atol` and `rtol` differ from `xp.all(xpx.isclose(a, b))`.
python
sklearn/externals/array_api_extra/_lib/_testing.py
213
[ "actual", "desired", "rtol", "atol", "err_msg", "check_dtype", "check_shape", "check_scalar" ]
None
true
5
6.32
scikit-learn/scikit-learn
64,340
numpy
false
createTargetSource
/**
 * Determine the {@link TargetSource} to use for the given target object: a
 * {@code TargetSource} is returned as-is, anything else is wrapped in a
 * {@code SingletonTargetSource}.
 *
 * @param target the target object (or TargetSource)
 * @return a TargetSource for this object
 */
protected TargetSource createTargetSource(Object target) {
    return (target instanceof TargetSource targetSource)
            ? targetSource
            : new SingletonTargetSource(target);
}
Determine a TargetSource for the given target (or TargetSource). @param target the target. If this is an implementation of TargetSource it is used as our TargetSource; otherwise it is wrapped in a SingletonTargetSource. @return a TargetSource for this object
java
spring-aop/src/main/java/org/springframework/aop/framework/AbstractSingletonProxyFactoryBean.java
191
[ "target" ]
TargetSource
true
2
7.92
spring-projects/spring-framework
59,386
javadoc
false
isTopLevelLogicalExpression
/**
 * Walks out of enclosing parentheses and `!` prefix operators, then reports whether
 * the resulting node is not a statement condition, not nested inside a larger logical
 * expression, and not the receiver of an optional chain.
 */
function isTopLevelLogicalExpression(node: Node): boolean {
    let current = node;
    while (
        isParenthesizedExpression(current.parent) ||
        isPrefixUnaryExpression(current.parent) && current.parent.operator === SyntaxKind.ExclamationToken
    ) {
        current = current.parent;
    }
    const parent = current.parent;
    return !isStatementCondition(current)
        && !isLogicalExpression(parent)
        && !(isOptionalChain(parent) && parent.expression === current);
}
Determines whether the node (after unwrapping enclosing parentheses and `!` prefix operators) is a top-level logical expression: not a statement condition, not nested inside a larger logical expression, and not the receiver of an optional chain.
typescript
src/compiler/binder.ts
1,464
[ "node" ]
true
7
6.72
microsoft/TypeScript
107,154
jsdoc
false
matchesNoneOf
/**
 * Returns {@code true} if {@code sequence} contains no BMP characters matched by this
 * matcher; equivalent to {@code !matchesAnyOf(sequence)}. An empty sequence matches
 * none.
 *
 * @param sequence the character sequence to examine, possibly empty
 * @return {@code true} if this matcher matches no characters in the sequence
 */
public boolean matchesNoneOf(CharSequence sequence) {
    return indexIn(sequence) < 0;
}
Returns {@code true} if a character sequence contains no matching BMP characters. Equivalent to {@code !matchesAnyOf(sequence)}. <p>The default implementation iterates over the sequence, invoking {@link #matches} for each character, until this returns {@code true} or the end is reached. @param sequence the character sequence to examine, possibly empty @return {@code true} if this matcher matches no characters in the sequence, including when the sequence is empty
java
android/guava/src/com/google/common/base/CharMatcher.java
530
[ "sequence" ]
true
1
6.32
google/guava
51,352
javadoc
false
create
/**
 * Create an {@link Archive} from the code source of the given class.
 *
 * @param target the class whose protection domain locates the archive
 * @return a new {@link Archive} instance
 * @throws Exception if the archive cannot be created
 */
static Archive create(Class<?> target) throws Exception {
    var protectionDomain = target.getProtectionDomain();
    return create(protectionDomain);
}
Factory method to create an appropriate {@link Archive} from the given {@link Class} target. @param target a target class that will be used to find the archive code source @return an new {@link Archive} instance @throws Exception if the archive cannot be created
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/launch/Archive.java
104
[ "target" ]
Archive
true
1
6.8
spring-projects/spring-boot
79,428
javadoc
false
equals
/**
 * Two {@code ErrorPage} instances are equal when their exception name, path and
 * status all match.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    // instanceof is false for null, covering the original null check as well.
    if (!(obj instanceof ErrorPage other)) {
        return false;
    }
    return ObjectUtils.nullSafeEquals(getExceptionName(), other.getExceptionName())
            && ObjectUtils.nullSafeEquals(this.path, other.path)
            && this.status == other.status;
}
Compares this error page with another for equality based on exception name, path and status. @param obj the object to compare with @return {@code true} if the error pages are equal
java
core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorPage.java
109
[ "obj" ]
true
6
7.04
spring-projects/spring-boot
79,428
javadoc
false
drainKeyReferenceQueue
/**
 * Drains the key reference queue, reclaiming internal entries whose keys have been
 * garbage collected. At most {@code DRAIN_MAX} references are processed per call to
 * bound the time spent under the lock.
 */
@GuardedBy("this")
void drainKeyReferenceQueue() {
    for (int drained = 0; drained < DRAIN_MAX; drained++) {
        Reference<? extends K> ref = keyReferenceQueue.poll();
        if (ref == null) {
            break;
        }
        @SuppressWarnings("unchecked")
        ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref;
        map.reclaimKey(entry);
    }
}
Drain the key and value reference queues, cleaning up internal entries containing garbage collected keys or values.
java
android/guava/src/com/google/common/cache/LocalCache.java
2,389
[]
void
true
3
6.88
google/guava
51,352
javadoc
false
endCodePath
/**
 * Finalizes the current code path for `node`: marks its state final, leaves the
 * active segments, emits `onCodePathEnd`, and pops back to the enclosing code path.
 * @returns {void}
 */
function endCodePath() {
    const endedPath = analyzer.codePath;

    // Mark the path as complete before notifying listeners.
    CodePath.getState(endedPath).makeFinal();

    // Emits onCodePathSegmentEnd for the current segments.
    leaveFromCurrentSegment(analyzer, node);

    // Emits onCodePathEnd for this code path, then restore the enclosing one.
    analyzer.emitter.emit('onCodePathEnd', endedPath, node);
    analyzer.codePath = endedPath.upper;
}
Ends the code path for the current node. @returns {void}
javascript
packages/eslint-plugin-react-hooks/src/code-path-analysis/code-path-analyzer.js
655
[]
false
1
6.08
facebook/react
241,750
jsdoc
false
getProvider
/**
 * Get the provider that can be used to render the given view, consulting the cache
 * unless the {@code spring.template.provider.cache} property is {@code false}.
 * @param view the view to render
 * @param environment the environment
 * @param classLoader the class loader
 * @param resourceLoader the resource loader
 * @return a {@link TemplateAvailabilityProvider} or {@code null}
 */
public @Nullable TemplateAvailabilityProvider getProvider(String view, Environment environment, ClassLoader classLoader, ResourceLoader resourceLoader) {
	Assert.notNull(view, "'view' must not be null");
	Assert.notNull(environment, "'environment' must not be null");
	Assert.notNull(classLoader, "'classLoader' must not be null");
	Assert.notNull(resourceLoader, "'resourceLoader' must not be null");
	// Caching defaults to on; it can be disabled for development-time reloading.
	Boolean useCache = environment.getProperty("spring.template.provider.cache", Boolean.class, true);
	if (!useCache) {
		return findProvider(view, environment, classLoader, resourceLoader);
	}
	TemplateAvailabilityProvider provider = this.resolved.get(view);
	if (provider == null) {
		synchronized (this.cache) {
			// "No provider" is cached too, using the NONE sentinel so a miss is
			// distinguishable from a not-yet-resolved view.
			provider = findProvider(view, environment, classLoader, resourceLoader);
			provider = (provider != null) ? provider : NONE;
			// Written to both maps; presumably 'cache' is size-bounded while
			// 'resolved' mirrors it — TODO confirm against the field declarations.
			this.resolved.put(view, provider);
			this.cache.put(view, provider);
		}
	}
	return (provider != NONE) ? provider : null;
}
Get the provider that can be used to render the given view. @param view the view to render @param environment the environment @param classLoader the class loader @param resourceLoader the resource loader @return a {@link TemplateAvailabilityProvider} or null
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/template/TemplateAvailabilityProviders.java
134
[ "view", "environment", "classLoader", "resourceLoader" ]
TemplateAvailabilityProvider
true
5
7.76
spring-projects/spring-boot
79,428
javadoc
false
check_allowed_values
def check_allowed_values(env_name: str, env_value: str) -> None:
    """
    Check that the environment variable's value is within the allowed ``values``.

    :param env_name: name of the environment variable which is being checked.
    :param env_value: value of the variable.
    :raises ValueError: if env_value is not within the allowed set of values
    """
    if env_value in values:
        return
    raise ValueError(
        f"{env_name} value must be one of the following: {values}. Received: '{env_value}'."
    )
Check if value of provided environment variable is within a specified set of values. :param env_name: name of the environment variable which is being checked. :param env_value: value of the variable. :raises: ValueError: if env_value is not within a specified set of values
python
performance/src/performance_dags/performance_dag/performance_dag_utils.py
299
[ "env_name", "env_value" ]
None
true
2
6.56
apache/airflow
43,597
sphinx
false
count
def count(a, sub, start=0, end=None):
    """
    Return an array with the number of non-overlapping occurrences of substring
    ``sub`` in the range [``start``, ``end``).

    Parameters
    ----------
    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
    sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
        The substring to search for.
    start, end : array_like, with any integer dtype
        The range to look in, interpreted as in slice notation.

    Returns
    -------
    y : ndarray
        Output array of ints

    See Also
    --------
    str.count

    Examples
    --------
    >>> import numpy as np
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> np.strings.count(c, 'A')
    array([3, 1, 1])
    >>> np.strings.count(c, 'A', start=1, end=4)
    array([2, 1, 1])
    """
    # An omitted end bound means "search to the end of the string".
    if end is None:
        end = MAX
    return _count_ufunc(a, sub, start, end)
Returns an array with the number of non-overlapping occurrences of substring ``sub`` in the range [``start``, ``end``). Parameters ---------- a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype The range to look in, interpreted as in slice notation. Returns ------- y : ndarray Output array of ints See Also -------- str.count Examples -------- >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') >>> np.strings.count(c, 'A') array([3, 1, 1]) >>> np.strings.count(c, 'aA') array([3, 1, 0]) >>> np.strings.count(c, 'A', start=1, end=4) array([2, 1, 1]) >>> np.strings.count(c, 'A', start=1, end=3) array([1, 0, 0])
python
numpy/_core/strings.py
405
[ "a", "sub", "start", "end" ]
false
2
7.68
numpy/numpy
31,054
numpy
false
isQuote
/**
 * Checks whether the characters at {@code pos} match the quote sequence that was
 * matched in readNextToken(), starting at {@code quoteStart} and spanning
 * {@code quoteLen} characters ({@code 0} when not quoting).
 *
 * @param srcChars the character array being tokenized.
 * @param pos the position to check for a quote.
 * @param len the length of the character array being tokenized.
 * @param quoteStart the start position of the matched quote, 0 if no quoting.
 * @param quoteLen the length of the matched quote, 0 if no quoting.
 * @return true if a quote is matched.
 */
private boolean isQuote(final char[] srcChars, final int pos, final int len, final int quoteStart, final int quoteLen) {
    for (int offset = 0; offset < quoteLen; offset++) {
        final int index = pos + offset;
        if (index >= len || srcChars[index] != srcChars[quoteStart + offset]) {
            return false;
        }
    }
    return true;
}
Checks if the characters at the index specified match the quote already matched in readNextToken(). @param srcChars the character array being tokenized. @param pos the position to check for a quote. @param len the length of the character array being tokenized. @param quoteStart the start position of the matched quote, 0 if no quoting. @param quoteLen the length of the matched quote, 0 if no quoting. @return true if a quote is matched.
java
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
615
[ "srcChars", "pos", "len", "quoteStart", "quoteLen" ]
true
4
8.08
apache/commons-lang
2,896
javadoc
false
rebalanceInProgress
/**
 * Returns whether the group is currently rebalancing, i.e. the member state is
 * {@code PREPARING_REBALANCE} or {@code COMPLETING_REBALANCE}.
 */
protected synchronized boolean rebalanceInProgress() {
    switch (this.state) {
        case PREPARING_REBALANCE:
        case COMPLETING_REBALANCE:
            return true;
        default:
            return false;
    }
}
Check whether a group rebalance is in progress, i.e. the member state is PREPARING_REBALANCE or COMPLETING_REBALANCE @return true if a rebalance is in progress
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
1,053
[]
true
2
7.52
apache/kafka
31,560
javadoc
false
obtainMethodParameter
/**
 * Return the wrapped MethodParameter, asserting that it is present.
 * @return the MethodParameter (never {@code null})
 * @throws IllegalStateException if no MethodParameter is available
 * @since 5.0
 */
protected final MethodParameter obtainMethodParameter() {
    MethodParameter parameter = this.methodParameter;
    Assert.state(parameter != null, "MethodParameter is not available");
    return parameter;
}
Return the wrapped MethodParameter, assuming it is present. @return the MethodParameter (never {@code null}) @throws IllegalStateException if no MethodParameter is available @since 5.0
java
spring-beans/src/main/java/org/springframework/beans/factory/InjectionPoint.java
113
[]
MethodParameter
true
1
6
spring-projects/spring-framework
59,386
javadoc
false
diag_indices
def diag_indices(n, ndim=2):
    """
    Return the indices to access the main diagonal of an array.

    The result is a tuple of ``ndim`` index arrays that select
    ``a[i, i, ..., i]`` for ``i = 0..n-1`` in an array of shape
    ``(n, n, ..., n)``.

    Parameters
    ----------
    n : int
        The size, along each dimension, of the arrays for which the returned
        indices can be used.
    ndim : int, optional
        The number of dimensions.

    See Also
    --------
    diag_indices_from

    Examples
    --------
    >>> import numpy as np
    >>> np.diag_indices(4)
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
    >>> a = np.zeros((2, 2, 2), dtype=np.int_)
    >>> a[np.diag_indices(2, 3)] = 1
    >>> a
    array([[[1, 0],
            [0, 0]],
           [[0, 0],
            [0, 1]]])
    """
    diagonal = np.arange(n)
    # The same index array is shared across all ndim axes.
    return tuple(diagonal for _ in range(ndim))
Return the indices to access the main diagonal of an array. This returns a tuple of indices that can be used to access the main diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``. Parameters ---------- n : int The size, along each dimension, of the arrays for which the returned indices can be used. ndim : int, optional The number of dimensions. See Also -------- diag_indices_from Examples -------- >>> import numpy as np Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) >>> di (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> a[di] = 100 >>> a array([[100, 1, 2, 3], [ 4, 100, 6, 7], [ 8, 9, 100, 11], [ 12, 13, 14, 100]]) Now, we create indices to manipulate a 3-D array: >>> d3 = np.diag_indices(2, 3) >>> d3 (array([0, 1]), array([0, 1]), array([0, 1])) And use it to set the diagonal of an array of zeros to 1: >>> a = np.zeros((2, 2, 2), dtype=np.int_) >>> a[d3] = 1 >>> a array([[[1, 0], [0, 0]], [[0, 0], [0, 1]]])
python
numpy/lib/_index_tricks_impl.py
927
[ "n", "ndim" ]
false
1
6.4
numpy/numpy
31,054
numpy
false
selectImports
/**
 * Determine which classes should be imported based on the given {@code AdviceMode}.
 * <p>Returning {@code null} indicates that the {@code AdviceMode} could not be
 * handled or was unknown and that an {@code IllegalArgumentException} should be
 * thrown.
 * @param adviceMode the value of the advice mode attribute for the annotation
 * specified via generics
 * @return array containing classes to import (empty array if none;
 * {@code null} if the given {@code AdviceMode} is unknown)
 */
protected abstract String @Nullable [] selectImports(AdviceMode adviceMode);
Determine which classes should be imported based on the given {@code AdviceMode}. <p>Returning {@code null} from this method indicates that the {@code AdviceMode} could not be handled or was unknown and that an {@code IllegalArgumentException} should be thrown. @param adviceMode the value of the {@linkplain #getAdviceModeAttributeName() advice mode attribute} for the annotation specified via generics. @return array containing classes to import (empty array if none; {@code null} if the given {@code AdviceMode} is unknown)
java
spring-context/src/main/java/org/springframework/context/annotation/AdviceModeImportSelector.java
96
[ "adviceMode" ]
true
1
6.16
spring-projects/spring-framework
59,386
javadoc
false
drain
@CanIgnoreReturnValue @J2ktIncompatible @GwtIncompatible // BlockingQueue @IgnoreJRERequirement // Users will use this only if they're already using Duration public static <E> int drain( BlockingQueue<E> q, Collection<? super E> buffer, int numElements, Duration timeout) throws InterruptedException { return drain(q, buffer, numElements, toNanosSaturated(timeout), NANOSECONDS); }
Drains the queue as {@link BlockingQueue#drainTo(Collection, int)}, but if the requested {@code numElements} elements are not available, it will wait for them up to the specified timeout. @param q the blocking queue to be drained @param buffer where to add the transferred elements @param numElements the number of elements to be waited for @param timeout how long to wait before giving up @return the number of elements transferred @throws InterruptedException if interrupted while waiting @since 33.4.0 (but since 28.0 in the JRE flavor)
java
android/guava/src/com/google/common/collect/Queues.java
292
[ "q", "buffer", "numElements", "timeout" ]
true
1
6.72
google/guava
51,352
javadoc
false
add
public void add(TopicIdPartition partition, ShareInFlightBatch<K, V> batch) { Objects.requireNonNull(batch); ShareInFlightBatch<K, V> currentBatch = this.batches.get(partition); if (currentBatch == null) { this.batches.put(partition, batch); } else { // This case shouldn't usually happen because we only send one fetch at a time per partition, // but it might conceivably happen in some rare cases (such as partition leader changes). currentBatch.merge(batch); } if (batch.getAcquisitionLockTimeoutMs().isPresent()) { acquisitionLockTimeoutMs = batch.getAcquisitionLockTimeoutMs(); } }
Add another {@link ShareInFlightBatch} to this one; all of its records will be added to this object's {@link #records() records}. @param partition the topic-partition @param batch the batch to add; may not be null
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java
65
[ "partition", "batch" ]
void
true
3
6.88
apache/kafka
31,560
javadoc
false
createJarFileForStream
/**
 * Open the URL's stream and delegate to the stream-based factory, ensuring the stream
 * is closed once the {@link JarFile} has been created.
 *
 * @param url the jar file URL
 * @param version the release version
 * @param closeAction the action to call when the file is closed
 * @return a new {@link JarFile} instance
 * @throws IOException on I/O error
 */
private JarFile createJarFileForStream(URL url, Version version, Consumer<JarFile> closeAction) throws IOException {
    try (InputStream inputStream = url.openStream()) {
        return createJarFileForStream(inputStream, version, closeAction);
    }
}
Create a new {@link UrlJarFile} or {@link UrlNestedJarFile} instance. @param jarFileUrl the jar file URL @param closeAction the action to call when the file is closed @return a new {@link JarFile} instance @throws IOException on I/O error
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/jar/UrlJarFileFactory.java
89
[ "url", "version", "closeAction" ]
JarFile
true
1
6.72
spring-projects/spring-boot
79,428
javadoc
false
mapdomain
def mapdomain(x, old, new):
    """
    Apply linear map to input points.

    The linear map ``offset + scale*x`` that maps the domain `old` to the
    domain `new` is applied to the points `x`.

    Parameters
    ----------
    x : array_like
        Points to be mapped. If `x` is a subtype of ndarray the subtype
        will be preserved.
    old, new : array_like
        The two domains that determine the map. Each must (successfully)
        convert to 1-d arrays containing precisely two values.

    Returns
    -------
    x_out : ndarray
        Array of points of the same shape as `x`, after application of the
        linear map between the two domains.

    See Also
    --------
    getdomain, mapparms

    Notes
    -----
    Effectively, this implements:

    .. math::
        x\\_out = new[0] + m(x - old[0])

    where

    .. math::
        m = \\frac{new[1]-new[0]}{old[1]-old[0]}

    Examples
    --------
    >>> import numpy as np
    >>> from numpy.polynomial import polyutils as pu
    >>> x = np.linspace(-1, 1, 6)
    >>> x_out = pu.mapdomain(x, (-1, 1), (0, 2*np.pi))
    >>> x - pu.mapdomain(x_out, (0, 2*np.pi), (-1, 1))
    array([0., 0., 0., 0., 0., 0.])
    """
    # Plain Python scalars and numpy scalars are passed through unchanged so the
    # arithmetic below can return a scalar; everything else becomes an array.
    scalar_types = (int, float, complex)
    if type(x) not in scalar_types and not isinstance(x, np.generic):
        x = np.asanyarray(x)
    offset, scale = mapparms(old, new)
    return offset + scale * x
Apply linear map to input points. The linear map ``offset + scale*x`` that maps the domain `old` to the domain `new` is applied to the points `x`. Parameters ---------- x : array_like Points to be mapped. If `x` is a subtype of ndarray the subtype will be preserved. old, new : array_like The two domains that determine the map. Each must (successfully) convert to 1-d arrays containing precisely two values. Returns ------- x_out : ndarray Array of points of the same shape as `x`, after application of the linear map between the two domains. See Also -------- getdomain, mapparms Notes ----- Effectively, this implements: .. math:: x\\_out = new[0] + m(x - old[0]) where .. math:: m = \\frac{new[1]-new[0]}{old[1]-old[0]} Examples -------- >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> old_domain = (-1,1) >>> new_domain = (0,2*np.pi) >>> x = np.linspace(-1,1,6); x array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary 6.28318531]) >>> x - pu.mapdomain(x_out, new_domain, old_domain) array([0., 0., 0., 0., 0., 0.]) Also works for complex numbers (and thus can be used to map any line in the complex plane to any other line therein). >>> i = complex(0,1) >>> old = (-1 - i, 1 + i) >>> new = (-1 + i, 1 - i) >>> z = np.linspace(old[0], old[1], 6); z array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ]) >>> new_z = pu.mapdomain(z, old, new); new_z array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary
python
numpy/polynomial/polyutils.py
288
[ "x", "old", "new" ]
false
3
7.6
numpy/numpy
31,054
numpy
false
of
/**
 * Creates a range with the specified minimum and maximum values (both inclusive),
 * using the natural ordering of the elements. The arguments may be passed in either
 * order; {@code getMinimum()} and {@code getMaximum()} return the correct values.
 *
 * @param <T> the type of the elements in this range.
 * @param fromInclusive the first value that defines the edge of the range, inclusive.
 * @param toInclusive the second value that defines the edge of the range, inclusive.
 * @return the range object, not null.
 * @throws NullPointerException if either element is null.
 * @throws ClassCastException if the elements are not {@link Comparable}.
 * @since 3.13.0
 */
public static <T extends Comparable<? super T>> Range<T> of(final T fromInclusive, final T toInclusive) { return of(fromInclusive, toInclusive, null); }
Creates a range with the specified minimum and maximum values (both inclusive). <p>The range uses the natural ordering of the elements to determine where values lie in the range.</p> <p>The arguments may be passed in the order (min,max) or (max,min). The getMinimum and getMaximum methods will return the correct values.</p> @param <T> the type of the elements in this range. @param fromInclusive the first value that defines the edge of the range, inclusive. @param toInclusive the second value that defines the edge of the range, inclusive. @return the range object, not null. @throws NullPointerException if either element is null. @throws ClassCastException if the elements are not {@link Comparable}. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/Range.java
161
[ "fromInclusive", "toInclusive" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
getNewImports
function getNewImports( moduleSpecifier: string, quotePreference: QuotePreference, defaultImport: Import | undefined, namedImports: readonly Import[] | undefined, namespaceLikeImport: Import & { importKind: ImportKind.CommonJS | ImportKind.Namespace; } | undefined, compilerOptions: CompilerOptions, preferences: UserPreferences, ): AnyImportSyntax | readonly AnyImportSyntax[] { const quotedModuleSpecifier = makeStringLiteral(moduleSpecifier, quotePreference); let statements: AnyImportSyntax | readonly AnyImportSyntax[] | undefined; if (defaultImport !== undefined || namedImports?.length) { // `verbatimModuleSyntax` should prefer top-level `import type` - // even though it's not an error, it would add unnecessary runtime emit. const topLevelTypeOnly = (!defaultImport || needsTypeOnly(defaultImport)) && every(namedImports, needsTypeOnly) || (compilerOptions.verbatimModuleSyntax || preferences.preferTypeOnlyAutoImports) && defaultImport?.addAsTypeOnly !== AddAsTypeOnly.NotAllowed && !some(namedImports, i => i.addAsTypeOnly === AddAsTypeOnly.NotAllowed); statements = combine( statements, makeImport( defaultImport && factory.createIdentifier(defaultImport.name), namedImports?.map(namedImport => factory.createImportSpecifier( !topLevelTypeOnly && shouldUseTypeOnly(namedImport, preferences), namedImport.propertyName === undefined ? undefined : factory.createIdentifier(namedImport.propertyName), factory.createIdentifier(namedImport.name), ) ), moduleSpecifier, quotePreference, topLevelTypeOnly, ), ); } if (namespaceLikeImport) { const declaration = namespaceLikeImport.importKind === ImportKind.CommonJS ? factory.createImportEqualsDeclaration( /*modifiers*/ undefined, shouldUseTypeOnly(namespaceLikeImport, preferences), factory.createIdentifier(namespaceLikeImport.name), factory.createExternalModuleReference(quotedModuleSpecifier), ) : factory.createImportDeclaration( /*modifiers*/ undefined, factory.createImportClause( shouldUseTypeOnly(namespaceLikeImport, preferences) ? 
SyntaxKind.TypeKeyword : undefined, /*name*/ undefined, factory.createNamespaceImport(factory.createIdentifier(namespaceLikeImport.name)), ), quotedModuleSpecifier, /*attributes*/ undefined, ); statements = combine(statements, declaration); } return Debug.checkDefined(statements); }
@param forceImportKeyword Indicates that the user has already typed `import`, so the result must start with `import`. (In other words, do not allow `const x = require("...")` for JS files.) @internal
typescript
src/services/codefixes/importFixes.ts
2,037
[ "moduleSpecifier", "quotePreference", "defaultImport", "namedImports", "namespaceLikeImport", "compilerOptions", "preferences" ]
true
15
6.64
microsoft/TypeScript
107,154
jsdoc
false
readDeclaredStaticField
public static Object readDeclaredStaticField(final Class<?> cls, final String fieldName, final boolean forceAccess) throws IllegalAccessException { final Field field = getDeclaredField(cls, fieldName, forceAccess); Validate.notNull(field, "Cannot locate declared field %s.%s", cls.getName(), fieldName); // already forced access above, don't repeat it here: return readStaticField(field, false); }
Gets the value of a {@code static} {@link Field} by name. Only the specified class will be considered. @param cls the {@link Class} to reflect, must not be {@code null}. @param fieldName the field name to obtain. @param forceAccess whether to break scope restrictions using the {@link AccessibleObject#setAccessible(boolean)} method. {@code false} will only match {@code public} fields. @return the Field object @throws NullPointerException if the class is {@code null}, or the field could not be found. @throws IllegalArgumentException if the field name is blank or empty, is not {@code static}. @throws IllegalAccessException if the field is not made accessible @throws SecurityException if an underlying accessible object's method denies the request. @see SecurityManager#checkPermission
java
src/main/java/org/apache/commons/lang3/reflect/FieldUtils.java
354
[ "cls", "fieldName", "forceAccess" ]
Object
true
1
6.56
apache/commons-lang
2,896
javadoc
false
fromUri
public static NestedLocation fromUri(URI uri) { if (uri == null || !"nested".equalsIgnoreCase(uri.getScheme())) { throw new IllegalArgumentException("'uri' must not be null and must use 'nested' scheme"); } return parse(uri.getSchemeSpecificPart()); }
Create a new {@link NestedLocation} from the given URI. @param uri the nested URI @return a new {@link NestedLocation} instance @throws IllegalArgumentException if the URI is not valid
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/nested/NestedLocation.java
87
[ "uri" ]
NestedLocation
true
3
7.6
spring-projects/spring-boot
79,428
javadoc
false
parse_args
def parse_args() -> argparse.Namespace: """ Parse command line arguments. Returns: argparse.Namespace: Parsed arguments. """ parser = argparse.ArgumentParser(description=" System-level Usage Logger ") # debug mode used in local to gracefully exit the script when ctrl+c is # pressed,and print out the json output in a pretty format. parser.add_argument("--debug", action="store_true", help="Enable debug mode") parser.add_argument( "--log-interval", type=float, default=5, help="set time interval for logging utilization data, default is 5 seconds", ) parser.add_argument( "--data-collect-interval", type=float, default=1, help="set time interval to collect data, default is 1 second, this should not longer than log_interval", ) args = parser.parse_args() return args
Parse command line arguments. Returns: argparse.Namespace: Parsed arguments.
python
tools/stats/monitor.py
101
[]
argparse.Namespace
true
1
6.4
pytorch/pytorch
96,034
unknown
false
flatten
@SuppressWarnings("unchecked") private void flatten(Properties properties, Map<String, Object> input, @Nullable String path) { input.forEach((key, value) -> { String name = getPropertyName(path, key); if (value instanceof Map) { // Need a compound key flatten(properties, (Map<String, Object>) value, name); } else if (value instanceof Collection<?> collection) { // Need a compound key properties.put(name, StringUtils.collectionToCommaDelimitedString(collection)); int count = 0; for (Object item : collection) { String itemKey = "[" + (count++) + "]"; flatten(properties, Collections.singletonMap(itemKey, item), name); } } else if (value instanceof String) { properties.put(name, value); } else if (value instanceof Number || value instanceof Boolean) { properties.put(name, value.toString()); } else { properties.put(name, (value != null) ? value : ""); } }); }
Create a new {@link CloudFoundryVcapEnvironmentPostProcessor} instance. @param logFactory the log factory to use @since 3.0.0
java
core/spring-boot/src/main/java/org/springframework/boot/cloud/CloudFoundryVcapEnvironmentPostProcessor.java
193
[ "properties", "input", "path" ]
void
true
7
6.4
spring-projects/spring-boot
79,428
javadoc
false
initialize
public static void initialize(Class<?>... classes) { for (Class<?> clazz : classes) { try { Class.forName(clazz.getName(), true, clazz.getClassLoader()); } catch (ClassNotFoundException e) { throw new AssertionError(e); } } }
Ensures that the given classes are initialized, as described in <a href="http://java.sun.com/docs/books/jls/third_edition/html/execution.html#12.4.2">JLS Section 12.4.2</a>. <p>WARNING: Normally it's a smell if a class needs to be explicitly initialized, because static state hurts system maintainability and testability. In cases when you have no choice while interoperating with a legacy framework, this method helps to keep the code less ugly. @throws ExceptionInInitializerError if an exception is thrown during initialization of a class
java
android/guava/src/com/google/common/reflect/Reflection.java
60
[]
void
true
2
6.24
google/guava
51,352
javadoc
false
clamp
function clamp(value: number, min = 0, max = 1) { if (process.env.NODE_ENV !== 'production') { if (value < min || value > max) { console.error(`The value provided ${value} is out of range [${min}, ${max}].`); } } return Math.min(Math.max(min, value), max); }
Returns a number whose value is limited to the given range. @param value The value to be clamped @param min The lower boundary of the output range @param max The upper boundary of the output range @returns A number in the range [min, max] @beta
typescript
packages/grafana-data/src/themes/colorManipulator.ts
15
[ "value", "min", "max" ]
false
4
7.44
grafana/grafana
71,362
jsdoc
false
timeToNextPoll
public long timeToNextPoll(long now) { if (!autoCommitEnabled) return timeToNextHeartbeat(now); return Math.min(nextAutoCommitTimer.remainingMs(), timeToNextHeartbeat(now)); }
Return the time to the next needed invocation of {@link ConsumerNetworkClient#poll(Timer)}. @param now current time in milliseconds @return the maximum time in milliseconds the caller should wait before the next invocation of poll()
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java
584
[ "now" ]
true
2
7.28
apache/kafka
31,560
javadoc
false
equals
public static boolean equals(final Type type1, final Type type2) { if (Objects.equals(type1, type2)) { return true; } if (type1 instanceof ParameterizedType) { return equals((ParameterizedType) type1, type2); } if (type1 instanceof GenericArrayType) { return equals((GenericArrayType) type1, type2); } if (type1 instanceof WildcardType) { return equals((WildcardType) type1, type2); } return false; }
Tests whether the given types are equal. @param type1 The first type. @param type2 The second type. @return Whether the given types are equal. @since 3.2
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
486
[ "type1", "type2" ]
true
5
8.24
apache/commons-lang
2,896
javadoc
false
matches
function matches(source) { return baseMatches(baseClone(source, CLONE_DEEP_FLAG)); }
Creates a function that performs a partial deep comparison between a given object and `source`, returning `true` if the given object has equivalent property values, else `false`. **Note:** The created function is equivalent to `_.isMatch` with `source` partially applied. Partial comparisons will match empty array and empty object `source` values against any array or object value, respectively. See `_.isEqual` for a list of supported value comparisons. **Note:** Multiple values can be checked by combining several matchers using `_.overSome` @static @memberOf _ @since 3.0.0 @category Util @param {Object} source The object of property values to match. @returns {Function} Returns the new spec function. @example var objects = [ { 'a': 1, 'b': 2, 'c': 3 }, { 'a': 4, 'b': 5, 'c': 6 } ]; _.filter(objects, _.matches({ 'a': 4, 'c': 6 })); // => [{ 'a': 4, 'b': 5, 'c': 6 }] // Checking for several possible values _.filter(objects, _.overSome([_.matches({ 'a': 1 }), _.matches({ 'a': 4 })])); // => [{ 'a': 1, 'b': 2, 'c': 3 }, { 'a': 4, 'b': 5, 'c': 6 }]
javascript
lodash.js
15,681
[ "source" ]
false
1
6.24
lodash/lodash
61,490
jsdoc
false
partitionsToOffsetAndMetadata
public KafkaFuture<Map<TopicPartition, OffsetAndMetadata>> partitionsToOffsetAndMetadata() { if (futures.size() != 1) { throw new IllegalStateException("Offsets from multiple consumer groups were requested. " + "Use partitionsToOffsetAndMetadata(groupId) instead to get future for a specific group."); } return futures.values().iterator().next(); }
Return a future which yields a map of topic partitions to OffsetAndMetadata objects. If the group does not have a committed offset for this partition, the corresponding value in the returned map will be null.
java
clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.java
49
[]
true
2
6.56
apache/kafka
31,560
javadoc
false
from_coo
def from_coo(cls, A, dense_index: bool = False) -> Series: """ Create a Series with sparse values from a scipy.sparse.coo_matrix. This method takes a ``scipy.sparse.coo_matrix`` (coordinate format) as input and returns a pandas ``Series`` where the non-zero elements are represented as sparse values. The index of the Series can either include only the coordinates of non-zero elements (default behavior) or the full sorted set of coordinates from the matrix if ``dense_index`` is set to `True`. Parameters ---------- A : scipy.sparse.coo_matrix The sparse matrix in coordinate format from which the sparse Series will be created. dense_index : bool, default False If False (default), the index consists of only the coords of the non-null entries of the original coo_matrix. If True, the index consists of the full sorted (row, col) coordinates of the coo_matrix. Returns ------- s : Series A Series with sparse values. See Also -------- DataFrame.sparse.from_spmatrix : Create a new DataFrame from a scipy sparse matrix. scipy.sparse.coo_matrix : A sparse matrix in COOrdinate format. Examples -------- >>> from scipy import sparse >>> A = sparse.coo_matrix( ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4) ... ) >>> A <COOrdinate sparse matrix of dtype 'float64' with 3 stored elements and shape (3, 4)> >>> A.todense() matrix([[0., 0., 1., 2.], [3., 0., 0., 0.], [0., 0., 0., 0.]]) >>> ss = pd.Series.sparse.from_coo(A) >>> ss 0 2 1.0 3 2.0 1 0 3.0 dtype: Sparse[float64, nan] """ from pandas import Series from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series result = coo_to_sparse_series(A, dense_index=dense_index) result = Series(result.array, index=result.index, copy=False) return result
Create a Series with sparse values from a scipy.sparse.coo_matrix. This method takes a ``scipy.sparse.coo_matrix`` (coordinate format) as input and returns a pandas ``Series`` where the non-zero elements are represented as sparse values. The index of the Series can either include only the coordinates of non-zero elements (default behavior) or the full sorted set of coordinates from the matrix if ``dense_index`` is set to `True`. Parameters ---------- A : scipy.sparse.coo_matrix The sparse matrix in coordinate format from which the sparse Series will be created. dense_index : bool, default False If False (default), the index consists of only the coords of the non-null entries of the original coo_matrix. If True, the index consists of the full sorted (row, col) coordinates of the coo_matrix. Returns ------- s : Series A Series with sparse values. See Also -------- DataFrame.sparse.from_spmatrix : Create a new DataFrame from a scipy sparse matrix. scipy.sparse.coo_matrix : A sparse matrix in COOrdinate format. Examples -------- >>> from scipy import sparse >>> A = sparse.coo_matrix( ... ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4) ... ) >>> A <COOrdinate sparse matrix of dtype 'float64' with 3 stored elements and shape (3, 4)> >>> A.todense() matrix([[0., 0., 1., 2.], [3., 0., 0., 0.], [0., 0., 0., 0.]]) >>> ss = pd.Series.sparse.from_coo(A) >>> ss 0 2 1.0 3 2.0 1 0 3.0 dtype: Sparse[float64, nan]
python
pandas/core/arrays/sparse/accessor.py
87
[ "cls", "A", "dense_index" ]
Series
true
1
7.04
pandas-dev/pandas
47,362
numpy
false
delete
def delete(self, loc) -> Self: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). Examples -------- >>> idx = pd.Index(["a", "b", "c"]) >>> idx.delete(1) Index(['a', 'c'], dtype='str') >>> idx = pd.Index(["a", "b", "c"]) >>> idx.delete([0, 2]) Index(['b'], dtype='str') """ result = super().delete(loc) result._data._freq = self._get_delete_freq(loc) return result
Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). Examples -------- >>> idx = pd.Index(["a", "b", "c"]) >>> idx.delete(1) Index(['a', 'c'], dtype='str') >>> idx = pd.Index(["a", "b", "c"]) >>> idx.delete([0, 2]) Index(['b'], dtype='str')
python
pandas/core/indexes/datetimelike.py
1,007
[ "self", "loc" ]
Self
true
1
7.28
pandas-dev/pandas
47,362
numpy
false
normalizer
public abstract double normalizer(double compression, double n);
Computes the normalizer given compression and number of points. @param compression The compression parameter for the digest @param n The number of samples seen so far @return The normalizing factor for the scale function
java
libs/tdigest/src/main/java/org/elasticsearch/tdigest/ScaleFunction.java
566
[ "compression", "n" ]
true
1
6.64
elastic/elasticsearch
75,680
javadoc
false
infer_objects
def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. Returns ------- Index An Index with a new dtype if the dtype was inferred or a shallow copy if the dtype could not be inferred. See Also -------- Index.inferred_type: Return a string of the type inferred from the values. Examples -------- >>> pd.Index(["a", 1]).infer_objects() Index(['a', '1'], dtype='object') >>> pd.Index([1, 2], dtype="object").infer_objects() Index([1, 2], dtype='int64') """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." ) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_non_numeric=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result
If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. Returns ------- Index An Index with a new dtype if the dtype was inferred or a shallow copy if the dtype could not be inferred. See Also -------- Index.inferred_type: Return a string of the type inferred from the values. Examples -------- >>> pd.Index(["a", 1]).infer_objects() Index(['a', '1'], dtype='object') >>> pd.Index([1, 2], dtype="object").infer_objects() Index([1, 2], dtype='int64')
python
pandas/core/indexes/base.py
7,181
[ "self", "copy" ]
Index
true
9
8.32
pandas-dev/pandas
47,362
numpy
false
_ixs
def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes) result._name = self.index[i] return result.__finalize__(self) # icol else: col_mgr = self._mgr.iget(i) return self._box_col_values(col_mgr, i)
Parameters ---------- i : int axis : int Returns ------- Series
python
pandas/core/frame.py
4,100
[ "self", "i", "axis" ]
Series
true
3
6.56
pandas-dev/pandas
47,362
numpy
false
load
<R extends ConfigDataResource> @Nullable ConfigData load(ConfigDataLoaderContext context, R resource) throws IOException { ConfigDataLoader<R> loader = getLoader(context, resource); this.logger.trace(LogMessage.of(() -> "Loading " + resource + " using loader " + loader.getClass().getName())); return loader.load(context, resource); }
Load {@link ConfigData} using the first appropriate {@link ConfigDataLoader}. @param <R> the resource type @param context the loader context @param resource the resource to load @return the loaded {@link ConfigData} @throws IOException on IO error
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataLoaders.java
96
[ "context", "resource" ]
ConfigData
true
1
6.24
spring-projects/spring-boot
79,428
javadoc
false
appendExportsOfBindingElement
function appendExportsOfBindingElement(statements: Statement[] | undefined, decl: VariableDeclaration | BindingElement, isForInOrOfInitializer: boolean): Statement[] | undefined { if (currentModuleInfo.exportEquals) { return statements; } if (isBindingPattern(decl.name)) { for (const element of decl.name.elements) { if (!isOmittedExpression(element)) { statements = appendExportsOfBindingElement(statements, element, isForInOrOfInitializer); } } } else if (!isGeneratedIdentifier(decl.name) && (!isVariableDeclaration(decl) || decl.initializer || isForInOrOfInitializer)) { statements = appendExportsOfDeclaration(statements, new IdentifierNameMap(), decl); } return statements; }
Appends the exports of a VariableDeclaration or BindingElement to a statement list, returning the statement list. @param statements A statement list to which the down-level export statements are to be appended. If `statements` is `undefined`, a new array is allocated if statements are appended. @param decl The declaration whose exports are to be recorded.
typescript
src/compiler/transformers/module/module.ts
2,054
[ "statements", "decl", "isForInOrOfInitializer" ]
true
9
6.72
microsoft/TypeScript
107,154
jsdoc
false
refresh
@Override public void refresh() throws BeansException, IllegalStateException { this.startupShutdownLock.lock(); try { this.startupShutdownThread = Thread.currentThread(); StartupStep contextRefresh = this.applicationStartup.start("spring.context.refresh"); // Prepare this context for refreshing. prepareRefresh(); // Tell the subclass to refresh the internal bean factory. ConfigurableListableBeanFactory beanFactory = obtainFreshBeanFactory(); // Prepare the bean factory for use in this context. prepareBeanFactory(beanFactory); try { // Allows post-processing of the bean factory in context subclasses. postProcessBeanFactory(beanFactory); StartupStep beanPostProcess = this.applicationStartup.start("spring.context.beans.post-process"); // Invoke factory processors registered as beans in the context. invokeBeanFactoryPostProcessors(beanFactory); // Register bean processors that intercept bean creation. registerBeanPostProcessors(beanFactory); beanPostProcess.end(); // Initialize message source for this context. initMessageSource(); // Initialize event multicaster for this context. initApplicationEventMulticaster(); // Initialize other special beans in specific context subclasses. onRefresh(); // Check for listener beans and register them. registerListeners(); // Instantiate all remaining (non-lazy-init) singletons. finishBeanFactoryInitialization(beanFactory); // Last step: publish corresponding event. finishRefresh(); } catch (RuntimeException | Error ex) { if (logger.isWarnEnabled()) { logger.warn("Exception encountered during context initialization - " + "cancelling refresh attempt: " + ex); } // Stop already started Lifecycle beans to avoid dangling resources. if (this.lifecycleProcessor != null && this.lifecycleProcessor.isRunning()) { try { this.lifecycleProcessor.stop(); } catch (Throwable ex2) { logger.warn("Exception thrown from LifecycleProcessor on cancelled refresh", ex2); } } // Destroy already created singletons to avoid dangling resources. 
destroyBeans(); // Reset 'active' flag. cancelRefresh(ex); // Propagate exception to caller. throw ex; } finally { contextRefresh.end(); } } finally { this.startupShutdownThread = null; this.startupShutdownLock.unlock(); } }
Return the list of statically specified ApplicationListeners.
java
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
579
[]
void
true
6
6
spring-projects/spring-framework
59,386
javadoc
false
writeTo
void writeTo(JSONStringer stringer) throws JSONException { stringer.object(); for (Map.Entry<String, Object> entry : this.nameValuePairs.entrySet()) { stringer.key(entry.getKey()).value(entry.getValue()); } stringer.endObject(); }
Encodes this object as a human-readable JSON string for debugging, such as: <pre> { "query": "Pizza", "locations": [ 94043, 90210 ] }</pre> @param indentSpaces the number of spaces to indent for each level of nesting. @return a string representation of the object. @throws JSONException if an error occurs
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
728
[ "stringer" ]
void
true
1
6.56
spring-projects/spring-boot
79,428
javadoc
false
dtypes
def dtypes(self, *, device=None, kind=None): """ The array API data types supported by PyTorch. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : Device, optional The device to get the data types for. Unused for PyTorch, as all devices use the same dtypes. kind : str or tuple of str, optional The kind of data types to return. If ``None``, all data types are returned. If a string, only data types of that kind are returned. If a tuple, a dictionary containing the union of the given kinds is returned. The following kinds are supported: - ``'bool'``: boolean data types (i.e., ``bool``). - ``'signed integer'``: signed integer data types (i.e., ``int8``, ``int16``, ``int32``, ``int64``). - ``'unsigned integer'``: unsigned integer data types (i.e., ``uint8``, ``uint16``, ``uint32``, ``uint64``). - ``'integral'``: integer data types. Shorthand for ``('signed integer', 'unsigned integer')``. - ``'real floating'``: real-valued floating-point data types (i.e., ``float32``, ``float64``). - ``'complex floating'``: complex floating-point data types (i.e., ``complex64``, ``complex128``). - ``'numeric'``: numeric data types. Shorthand for ``('integral', 'real floating', 'complex floating')``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding PyTorch data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64} """ res = self._dtypes(kind) for k, v in res.copy().items(): try: torch.empty((0,), dtype=v, device=device) except: del res[k] return res
The array API data types supported by PyTorch. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : Device, optional The device to get the data types for. Unused for PyTorch, as all devices use the same dtypes. kind : str or tuple of str, optional The kind of data types to return. If ``None``, all data types are returned. If a string, only data types of that kind are returned. If a tuple, a dictionary containing the union of the given kinds is returned. The following kinds are supported: - ``'bool'``: boolean data types (i.e., ``bool``). - ``'signed integer'``: signed integer data types (i.e., ``int8``, ``int16``, ``int32``, ``int64``). - ``'unsigned integer'``: unsigned integer data types (i.e., ``uint8``, ``uint16``, ``uint32``, ``uint64``). - ``'integral'``: integer data types. Shorthand for ``('signed integer', 'unsigned integer')``. - ``'real floating'``: real-valued floating-point data types (i.e., ``float32``, ``float64``). - ``'complex floating'``: complex floating-point data types (i.e., ``complex64``, ``complex128``). - ``'numeric'``: numeric data types. Shorthand for ``('integral', 'real floating', 'complex floating')``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding PyTorch data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64}
python
sklearn/externals/array_api_compat/torch/_info.py
253
[ "self", "device", "kind" ]
false
2
7.12
scikit-learn/scikit-learn
64,340
numpy
false
resolveScopeMetadata
@Override public ScopeMetadata resolveScopeMetadata(BeanDefinition definition) { ScopeMetadata metadata = new ScopeMetadata(); metadata.setScopeName(BeanDefinition.SCOPE_PROTOTYPE); if (definition instanceof AnnotatedBeanDefinition annDef) { Set<String> annTypes = annDef.getMetadata().getAnnotationTypes(); String found = null; for (String annType : annTypes) { Set<String> metaAnns = annDef.getMetadata().getMetaAnnotationTypes(annType); if (metaAnns.contains("jakarta.inject.Scope")) { if (found != null) { throw new IllegalStateException("Found ambiguous scope annotations on bean class [" + definition.getBeanClassName() + "]: " + found + ", " + annType); } found = annType; String scopeName = resolveScopeName(annType); if (scopeName == null) { throw new IllegalStateException( "Unsupported scope annotation - not mapped onto Spring scope name: " + annType); } metadata.setScopeName(scopeName); } } } return metadata; }
Resolve the given annotation type into a named Spring scope. <p>The default implementation simply checks against registered scopes. Can be overridden for custom mapping rules, for example, naming conventions. @param annotationType the JSR-330 annotation type @return the Spring scope name
java
spring-context/src/main/java/org/springframework/context/annotation/Jsr330ScopeMetadataResolver.java
86
[ "definition" ]
ScopeMetadata
true
5
7.6
spring-projects/spring-framework
59,386
javadoc
false
create_transform_job
def create_transform_job( self, config: dict, wait_for_completion: bool = True, check_interval: int = 30, max_ingestion_time: int | None = None, ): """ Start a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify. .. seealso:: - :external+boto3:py:meth:`SageMaker.Client.create_transform_job` :param config: the config for transform job :param wait_for_completion: if the program should keep running until job finishes :param check_interval: the time interval in seconds which the operator will check the status of any SageMaker job :param max_ingestion_time: the maximum ingestion time in seconds. Any SageMaker jobs that run longer than this will fail. Setting this to None implies no timeout for any SageMaker job. :return: A response to transform job creation """ if "S3DataSource" in config["TransformInput"]["DataSource"]: self.check_s3_url(config["TransformInput"]["DataSource"]["S3DataSource"]["S3Uri"]) response = self.get_conn().create_transform_job(**config) if wait_for_completion: self.check_status( config["TransformJobName"], "TransformJobStatus", self.describe_transform_job, check_interval, max_ingestion_time, ) return response
Start a transform job. A transform job uses a trained model to get inferences on a dataset and saves these results to an Amazon S3 location that you specify. .. seealso:: - :external+boto3:py:meth:`SageMaker.Client.create_transform_job` :param config: the config for transform job :param wait_for_completion: if the program should keep running until job finishes :param check_interval: the time interval in seconds which the operator will check the status of any SageMaker job :param max_ingestion_time: the maximum ingestion time in seconds. Any SageMaker jobs that run longer than this will fail. Setting this to None implies no timeout for any SageMaker job. :return: A response to transform job creation
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
380
[ "self", "config", "wait_for_completion", "check_interval", "max_ingestion_time" ]
true
3
7.92
apache/airflow
43,597
sphinx
false
countOrNull
Integer countOrNull();
Get the count if it is efficiently supported by the record format (which is only the case for magic 2 and higher). @return The number of records in the batch or null for magic versions 0 and 1.
java
clients/src/main/java/org/apache/kafka/common/record/RecordBatch.java
196
[]
Integer
true
1
6.8
apache/kafka
31,560
javadoc
false
to
public <R> R to(R instance, BiFunction<R, ? super T, R> mapper) { Assert.notNull(instance, "'instance' must not be null"); Assert.notNull(mapper, "'mapper' must not be null"); T value = getValue(); if (value != null && test(value)) { return mapper.apply(instance, value); } return instance; }
Complete the mapping for any non-filtered value by applying the given function to an existing instance and returning a new one. For filtered values, the {@code instance} parameter is returned unchanged. The method is designed to be used with immutable objects. @param <R> the result type @param instance the current instance @param mapper the mapping function @return a new mapped instance or the original instance @since 3.0.0
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
307
[ "instance", "mapper" ]
R
true
3
8.24
spring-projects/spring-boot
79,428
javadoc
false
check_clusterings
def check_clusterings(labels_true, labels_pred): """Check that the labels arrays are 1D and of same dimension. Parameters ---------- labels_true : array-like of shape (n_samples,) The true labels. labels_pred : array-like of shape (n_samples,) The predicted labels. """ labels_true = check_array( labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None, ) labels_pred = check_array( labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None, ) type_label = type_of_target(labels_true) type_pred = type_of_target(labels_pred) if "continuous" in (type_pred, type_label): msg = ( "Clustering metrics expects discrete values but received" f" {type_label} values for label, and {type_pred} values " "for target" ) warnings.warn(msg, UserWarning) # input checks if labels_true.ndim != 1: raise ValueError("labels_true must be 1D: shape is %r" % (labels_true.shape,)) if labels_pred.ndim != 1: raise ValueError("labels_pred must be 1D: shape is %r" % (labels_pred.shape,)) check_consistent_length(labels_true, labels_pred) return labels_true, labels_pred
Check that the labels arrays are 1D and of same dimension. Parameters ---------- labels_true : array-like of shape (n_samples,) The true labels. labels_pred : array-like of shape (n_samples,) The predicted labels.
python
sklearn/metrics/cluster/_supervised.py
35
[ "labels_true", "labels_pred" ]
false
4
6.08
scikit-learn/scikit-learn
64,340
numpy
false
locateParent
private static void locateParent(@Nullable BeanFactory beanFactory, ConditionEvaluationReport report) { if (beanFactory != null && report.parent == null && beanFactory.containsBean(BEAN_NAME)) { report.parent = beanFactory.getBean(BEAN_NAME, ConditionEvaluationReport.class); } }
Obtain a {@link ConditionEvaluationReport} for the specified bean factory. @param beanFactory the bean factory @return an existing or new {@link ConditionEvaluationReport}
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionEvaluationReport.java
196
[ "beanFactory", "report" ]
void
true
4
7.28
spring-projects/spring-boot
79,428
javadoc
false
getNestedPropertySeparatorIndex
private static int getNestedPropertySeparatorIndex(String propertyPath, boolean last) { boolean inKey = false; int length = propertyPath.length(); int i = (last ? length - 1 : 0); while (last ? i >= 0 : i < length) { switch (propertyPath.charAt(i)) { case PropertyAccessor.PROPERTY_KEY_PREFIX_CHAR, PropertyAccessor.PROPERTY_KEY_SUFFIX_CHAR -> { inKey = !inKey; } case PropertyAccessor.NESTED_PROPERTY_SEPARATOR_CHAR -> { if (!inKey) { return i; } } } if (last) { i--; } else { i++; } } return -1; }
Determine the first (or last) nested property separator in the given property path, ignoring dots in keys (like "map[my.key]"). @param propertyPath the property path to check @param last whether to return the last separator rather than the first @return the index of the nested property separator, or -1 if none
java
spring-beans/src/main/java/org/springframework/beans/PropertyAccessorUtils.java
88
[ "propertyPath", "last" ]
true
6
7.76
spring-projects/spring-framework
59,386
javadoc
false
instantiateUsingFactoryMethod
protected BeanWrapper instantiateUsingFactoryMethod( String beanName, RootBeanDefinition mbd, @Nullable Object @Nullable [] explicitArgs) { return new ConstructorResolver(this).instantiateUsingFactoryMethod(beanName, mbd, explicitArgs); }
Instantiate the bean using a named factory method. The method may be static, if the mbd parameter specifies a class, rather than a factoryBean, or an instance variable on a factory object itself configured using Dependency Injection. @param beanName the name of the bean @param mbd the bean definition for the bean @param explicitArgs argument values passed in programmatically via the getBean method, or {@code null} if none (implying the use of constructor argument values from bean definition) @return a BeanWrapper for the new instance @see #getBean(String, Object[])
java
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractAutowireCapableBeanFactory.java
1,359
[ "beanName", "mbd", "explicitArgs" ]
BeanWrapper
true
1
6.32
spring-projects/spring-framework
59,386
javadoc
false
check_ndim
def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: """ ndim inference and validation. Validates that values.ndim and ndim are consistent. Validates that len(values) and len(placement) are consistent. Parameters ---------- values : array-like placement : BlockPlacement ndim : int Raises ------ ValueError : the number of dimensions do not match """ if values.ndim > ndim: # Check for both np.ndarray and ExtensionArray raise ValueError( f"Wrong number of dimensions. values.ndim > ndim [{values.ndim} > {ndim}]" ) if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): special case not needed with 2D EAs if values.ndim != ndim: raise ValueError( "Wrong number of dimensions. " f"values.ndim != ndim [{values.ndim} != {ndim}]" ) if len(placement) != len(values): raise ValueError( f"Wrong number of items passed {len(values)}, " f"placement implies {len(placement)}" ) elif ndim == 2 and len(placement) != 1: # TODO(EA2D): special case unnecessary with 2D EAs raise ValueError("need to split")
ndim inference and validation. Validates that values.ndim and ndim are consistent. Validates that len(values) and len(placement) are consistent. Parameters ---------- values : array-like placement : BlockPlacement ndim : int Raises ------ ValueError : the number of dimensions do not match
python
pandas/core/internals/blocks.py
2,273
[ "values", "placement", "ndim" ]
None
true
7
6.4
pandas-dev/pandas
47,362
numpy
false
dtypes
def dtypes(self, *, device=None, kind=None): """ The array API data types supported by NumPy. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : str, optional The device to get the data types for. For NumPy, only ``'cpu'`` is allowed. kind : str or tuple of str, optional The kind of data types to return. If ``None``, all data types are returned. If a string, only data types of that kind are returned. If a tuple, a dictionary containing the union of the given kinds is returned. The following kinds are supported: - ``'bool'``: boolean data types (i.e., ``bool``). - ``'signed integer'``: signed integer data types (i.e., ``int8``, ``int16``, ``int32``, ``int64``). - ``'unsigned integer'``: unsigned integer data types (i.e., ``uint8``, ``uint16``, ``uint32``, ``uint64``). - ``'integral'``: integer data types. Shorthand for ``('signed integer', 'unsigned integer')``. - ``'real floating'``: real-valued floating-point data types (i.e., ``float32``, ``float64``). - ``'complex floating'``: complex floating-point data types (i.e., ``complex64``, ``complex128``). - ``'numeric'``: numeric data types. Shorthand for ``('integral', 'real floating', 'complex floating')``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding NumPy data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = np.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64} """ if device not in ["cpu", None]: raise ValueError( 'Device not understood. 
Only "cpu" is allowed, but received:' f' {device}' ) if kind is None: return { "bool": dtype(bool), "int8": dtype(int8), "int16": dtype(int16), "int32": dtype(int32), "int64": dtype(int64), "uint8": dtype(uint8), "uint16": dtype(uint16), "uint32": dtype(uint32), "uint64": dtype(uint64), "float32": dtype(float32), "float64": dtype(float64), "complex64": dtype(complex64), "complex128": dtype(complex128), } if kind == "bool": return {"bool": bool} if kind == "signed integer": return { "int8": dtype(int8), "int16": dtype(int16), "int32": dtype(int32), "int64": dtype(int64), } if kind == "unsigned integer": return { "uint8": dtype(uint8), "uint16": dtype(uint16), "uint32": dtype(uint32), "uint64": dtype(uint64), } if kind == "integral": return { "int8": dtype(int8), "int16": dtype(int16), "int32": dtype(int32), "int64": dtype(int64), "uint8": dtype(uint8), "uint16": dtype(uint16), "uint32": dtype(uint32), "uint64": dtype(uint64), } if kind == "real floating": return { "float32": dtype(float32), "float64": dtype(float64), } if kind == "complex floating": return { "complex64": dtype(complex64), "complex128": dtype(complex128), } if kind == "numeric": return { "int8": dtype(int8), "int16": dtype(int16), "int32": dtype(int32), "int64": dtype(int64), "uint8": dtype(uint8), "uint16": dtype(uint16), "uint32": dtype(uint32), "uint64": dtype(uint64), "float32": dtype(float32), "float64": dtype(float64), "complex64": dtype(complex64), "complex128": dtype(complex128), } if isinstance(kind, tuple): res = {} for k in kind: res.update(self.dtypes(kind=k)) return res raise ValueError(f"unsupported kind: {kind!r}")
The array API data types supported by NumPy. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : str, optional The device to get the data types for. For NumPy, only ``'cpu'`` is allowed. kind : str or tuple of str, optional The kind of data types to return. If ``None``, all data types are returned. If a string, only data types of that kind are returned. If a tuple, a dictionary containing the union of the given kinds is returned. The following kinds are supported: - ``'bool'``: boolean data types (i.e., ``bool``). - ``'signed integer'``: signed integer data types (i.e., ``int8``, ``int16``, ``int32``, ``int64``). - ``'unsigned integer'``: unsigned integer data types (i.e., ``uint8``, ``uint16``, ``uint32``, ``uint64``). - ``'integral'``: integer data types. Shorthand for ``('signed integer', 'unsigned integer')``. - ``'real floating'``: real-valued floating-point data types (i.e., ``float32``, ``float64``). - ``'complex floating'``: complex floating-point data types (i.e., ``complex64``, ``complex128``). - ``'numeric'``: numeric data types. Shorthand for ``('integral', 'real floating', 'complex floating')``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding NumPy data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = np.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64}
python
numpy/_array_api_info.py
186
[ "self", "device", "kind" ]
false
12
6.96
numpy/numpy
31,054
numpy
false
describe_fargate_profile
def describe_fargate_profile( self, clusterName: str, fargateProfileName: str, verbose: bool = False ) -> dict: """ Return descriptive information about an AWS Fargate profile. .. seealso:: - :external+boto3:py:meth:`EKS.Client.describe_fargate_profile` :param clusterName: The name of the Amazon EKS Cluster associated with the Fargate profile. :param fargateProfileName: The name of the Fargate profile to describe. :param verbose: Provides additional logging if set to True. Defaults to False. :return: Returns descriptive information about an AWS Fargate profile. """ eks_client = self.conn response = eks_client.describe_fargate_profile( clusterName=clusterName, fargateProfileName=fargateProfileName ) self.log.info( "Retrieved details for AWS Fargate profile named %s in Amazon EKS cluster %s.", response.get("fargateProfile").get("fargateProfileName"), response.get("fargateProfile").get("clusterName"), ) if verbose: fargate_profile_data = response.get("fargateProfile") self.log.info("AWS Fargate profile details: %s", json.dumps(fargate_profile_data, default=repr)) return response
Return descriptive information about an AWS Fargate profile. .. seealso:: - :external+boto3:py:meth:`EKS.Client.describe_fargate_profile` :param clusterName: The name of the Amazon EKS Cluster associated with the Fargate profile. :param fargateProfileName: The name of the Fargate profile to describe. :param verbose: Provides additional logging if set to True. Defaults to False. :return: Returns descriptive information about an AWS Fargate profile.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/eks.py
363
[ "self", "clusterName", "fargateProfileName", "verbose" ]
dict
true
2
7.44
apache/airflow
43,597
sphinx
false
entrySet
@Override public Set<Entry<K, V>> entrySet() { Set<Entry<K, V>> result = entrySet; return (result == null) ? entrySet = new EntrySet() : result; }
Specifies the delegate maps going in each direction. Called by subclasses during deserialization.
java
android/guava/src/com/google/common/collect/AbstractBiMap.java
296
[]
true
2
6.4
google/guava
51,352
javadoc
false
isLiteralZero
bool isLiteralZero(const MatchFinder::MatchResult &Result, const Expr &Node) { auto ZeroMatcher = anyOf(integerLiteral(equals(0)), floatLiteral(equals(0.0))); // Check to see if we're using a zero directly. if (selectFirst<const clang::Expr>( "val", match(expr(ignoringImpCasts(ZeroMatcher)).bind("val"), Node, *Result.Context)) != nullptr) return true; // Now check to see if we're using a functional cast with a scalar // initializer expression, e.g. `int{0}`. if (selectFirst<const clang::Expr>( "val", match(cxxFunctionalCastExpr( hasDestinationType( anyOf(isInteger(), realFloatingPointType())), hasSourceExpression(initListExpr( hasInit(0, ignoringParenImpCasts(ZeroMatcher))))) .bind("val"), Node, *Result.Context)) != nullptr) return true; return false; }
Returns `true` if `Node` is a value which evaluates to a literal `0`.
cpp
clang-tools-extra/clang-tidy/abseil/DurationRewriter.cpp
113
[]
true
3
7.2
llvm/llvm-project
36,021
doxygen
false
atleast_1d
def atleast_1d(*arys): """ Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or tuple of arrays, each with ``a.ndim >= 1``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) (array([1]), array([3, 4])) """ if len(arys) == 1: result = asanyarray(arys[0]) if result.ndim == 0: result = result.reshape(1) return result res = [] for ary in arys: result = asanyarray(ary) if result.ndim == 0: result = result.reshape(1) res.append(result) return tuple(res)
Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or tuple of arrays, each with ``a.ndim >= 1``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) (array([1]), array([3, 4]))
python
numpy/_core/shape_base.py
20
[]
false
5
7.68
numpy/numpy
31,054
numpy
false
left
public static <L, R> Pair<L, R> left(final L left) { return of(left, null); }
Creates an immutable pair of two objects inferring the generic types. @param <L> the left element type. @param <R> the right element type. @param left the left element, may be null. @return an immutable formed from the two parameters, not null. @since 3.11
java
src/main/java/org/apache/commons/lang3/tuple/ImmutablePair.java
79
[ "left" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
parsePKCS8
private static PrivateKey parsePKCS8(BufferedReader bReader) throws IOException, GeneralSecurityException { StringBuilder sb = new StringBuilder(); String line = bReader.readLine(); while (line != null) { if (PKCS8_FOOTER.equals(line.trim())) { break; } sb.append(line.trim()); line = bReader.readLine(); } if (null == line || PKCS8_FOOTER.equals(line.trim()) == false) { throw new IOException("Malformed PEM file, PEM footer is invalid or missing"); } return parsePKCS8PemString(sb.toString()); }
Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an plaintext private key encoded in PKCS#8 @param bReader the {@link BufferedReader} containing the key file contents @return {@link PrivateKey} @throws IOException if the file can't be read @throws GeneralSecurityException if the private key can't be generated from the {@link PKCS8EncodedKeySpec}
java
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
223
[ "bReader" ]
PrivateKey
true
5
6.8
elastic/elasticsearch
75,680
javadoc
false
get_related_providers
def get_related_providers( provider_to_check: str, upstream_dependencies: bool, downstream_dependencies: bool, ) -> set[str]: """ Gets cross dependencies of a provider. :param provider_to_check: id of the provider to check :param upstream_dependencies: whether to include providers that depend on it :param downstream_dependencies: whether to include providers it depends on :return: set of dependent provider ids """ if not upstream_dependencies and not downstream_dependencies: raise ValueError("At least one of upstream_dependencies or downstream_dependencies must be True") related_providers = set() if upstream_dependencies: # Providers that use this provider for provider, provider_info in get_provider_dependencies().items(): if provider_to_check in provider_info["cross-providers-deps"]: related_providers.add(provider) # and providers we use directly if downstream_dependencies: for dep_name in get_provider_dependencies()[provider_to_check]["cross-providers-deps"]: related_providers.add(dep_name) return related_providers
Gets cross dependencies of a provider. :param provider_to_check: id of the provider to check :param upstream_dependencies: whether to include providers that depend on it :param downstream_dependencies: whether to include providers it depends on :return: set of dependent provider ids
python
dev/breeze/src/airflow_breeze/utils/provider_dependencies.py
124
[ "provider_to_check", "upstream_dependencies", "downstream_dependencies" ]
set[str]
true
8
7.76
apache/airflow
43,597
sphinx
false
_kl_divergence
def _kl_divergence( params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0, compute_error=True, ): """t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : ndarray of shape (n_params,) Unraveled embedding. P : ndarray of shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : int Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int, default=0 This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. compute_error: bool, default=True If False, the kl_divergence is not computed and returns NaN. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : ndarray of shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution dist = pdist(X_embedded, "sqeuclidean") dist /= degrees_of_freedom dist += 1.0 dist **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) if compute_error: kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q)) else: kl_divergence = np.nan # Gradient: dC/dY # pdist always returns double precision distances. 
Thus we need to take grad = np.ndarray((n_samples, n_components), dtype=params.dtype) PQd = squareform((P - Q) * dist) for i in range(skip_num_points, n_samples): grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded) grad = grad.ravel() c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad *= c return kl_divergence, grad
t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : ndarray of shape (n_params,) Unraveled embedding. P : ndarray of shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : int Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int, default=0 This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. compute_error: bool, default=True If False, the kl_divergence is not computed and returns NaN. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : ndarray of shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding.
python
sklearn/manifold/_t_sne.py
128
[ "params", "P", "degrees_of_freedom", "n_samples", "n_components", "skip_num_points", "compute_error" ]
false
4
6
scikit-learn/scikit-learn
64,340
numpy
false
value
public XContentBuilder value(Short value) throws IOException { return (value == null) ? nullValue() : value(value.shortValue()); }
@return the value of the "human readable" flag. When the value is equal to true, some types of values are written in a format easier to read for a human.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
648
[ "value" ]
XContentBuilder
true
2
6.96
elastic/elasticsearch
75,680
javadoc
false
CONST
public static byte CONST(final byte v) { return v; }
Returns the provided value unchanged. This can prevent javac from inlining a constant field, e.g., <pre> public final static byte MAGIC_BYTE = ObjectUtils.CONST((byte) 127); </pre> This way any jars that refer to this field do not have to recompile themselves if the field's value changes at some future date. @param v the byte value to return. @return the byte v, unchanged. @since 3.2
java
src/main/java/org/apache/commons/lang3/ObjectUtils.java
352
[ "v" ]
true
1
6.8
apache/commons-lang
2,896
javadoc
false
needsMetadata
synchronized boolean needsMetadata(String topic) { return subscription.contains(topic) || groupSubscription.contains(topic); }
Get the subscription topics for which metadata is required. For the leader, this will include the union of the subscriptions of all group members. For followers, it is just that member's subscription. This is used when querying topic metadata to detect the metadata changes which would require rebalancing. The leader fetches metadata for all topics in the group so that it can do the partition assignment (which requires at least partition counts for all topics to be assigned). @return The union of all subscribed topics in the group if this member is the leader of the current generation; otherwise it returns the same set as {@link #subscription()}
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
422
[ "topic" ]
true
2
6.64
apache/kafka
31,560
javadoc
false
toList
public List<String> toList() { final List<String> list = new ArrayList<>(); forEach(list::add); return list; }
Returns a new {@link List} containing the tokenizer elements. @return a new {@link List}.
java
src/main/java/org/apache/commons/lang3/util/IterableStringTokenizer.java
100
[]
true
1
7.04
apache/commons-lang
2,896
javadoc
false
resetCurrentTokenState
private void resetCurrentTokenState() { if (_currToken == JsonToken.VALUE_STRING && _tokenIncomplete && stringEnd > 0) { _inputPtr = stringEnd; _tokenIncomplete = false; } lastOptimisedValue = null; stringEnd = -1; }
Resets the current token state before moving to the next. It resets the _inputPtr and the _tokenIncomplete only if {@link UTF8StreamJsonParser#getText()} or {@link UTF8StreamJsonParser#getValueAsString()} hasn't run yet.
java
libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/ESUTF8StreamJsonParser.java
179
[]
void
true
4
6.08
elastic/elasticsearch
75,680
javadoc
false
stripFloatCast
std::optional<std::string> stripFloatCast(const ast_matchers::MatchFinder::MatchResult &Result, const Expr &Node) { if (const Expr *MaybeCastArg = selectFirst<const Expr>( "cast_arg", match(expr(anyOf(cxxStaticCastExpr( hasDestinationType(realFloatingPointType()), hasSourceExpression(expr().bind("cast_arg"))), cStyleCastExpr( hasDestinationType(realFloatingPointType()), hasSourceExpression(expr().bind("cast_arg"))), cxxFunctionalCastExpr( hasDestinationType(realFloatingPointType()), hasSourceExpression(expr().bind("cast_arg"))))), Node, *Result.Context))) return tooling::fixit::getText(*MaybeCastArg, *Result.Context).str(); return std::nullopt; }
Returns `true` if `Node` is a value which evaluates to a literal `0`.
cpp
clang-tools-extra/clang-tidy/abseil/DurationRewriter.cpp
138
[]
true
2
7.04
llvm/llvm-project
36,021
doxygen
false
topicIds
public List<Uuid> topicIds() { return data.topics() .stream() .map(MetadataRequestTopic::topicId) .collect(Collectors.toList()); }
@return Builder for metadata request using topic IDs.
java
clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
111
[]
true
1
6.88
apache/kafka
31,560
javadoc
false
initializeFinalLoggingLevels
private void initializeFinalLoggingLevels(ConfigurableEnvironment environment, LoggingSystem system) { bindLoggerGroups(environment); if (this.springBootLogging != null) { initializeSpringBootLogging(system, this.springBootLogging); } setLogLevels(system, environment); }
Initialize the logging system according to preferences expressed through the {@link Environment} and the classpath. @param environment the environment @param classLoader the classloader
java
core/spring-boot/src/main/java/org/springframework/boot/context/logging/LoggingApplicationListener.java
360
[ "environment", "system" ]
void
true
2
6.08
spring-projects/spring-boot
79,428
javadoc
false
describe_replication_configs
def describe_replication_configs(self, filters: list[dict] | None = None, **kwargs) -> list[dict]: """ Return list of serverless replication configs. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_configs` :param filters: List of filter objects :return: List of replication tasks """ filters = filters if filters is not None else [] try: resp = self.conn.describe_replication_configs(Filters=filters, **kwargs) return resp.get("ReplicationConfigs", []) except Exception as ex: self.log.error("Error while describing replication configs: %s", str(ex)) raise ex
Return list of serverless replication configs. .. seealso:: - :external+boto3:py:meth:`DatabaseMigrationService.Client.describe_replication_configs` :param filters: List of filter objects :return: List of replication tasks
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/dms.py
227
[ "self", "filters" ]
list[dict]
true
2
7.28
apache/airflow
43,597
sphinx
false
usesAccessQueue
boolean usesAccessQueue() { return expiresAfterAccess() || evictsBySize(); }
Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
java
android/guava/src/com/google/common/cache/LocalCache.java
344
[]
true
2
6.64
google/guava
51,352
javadoc
false