language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_R.py | {
"start": 10199,
"end": 11322
} | class ____(Benchmark):
r"""
Rotated Ellipse 1 objective function.
This class defines the Rotated Ellipse 1 [1]_ global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\text{RotatedEllipse01}}(x) = 7x_1^2 - 6 \sqrt{3} x_1x_2 + 13x_2^2
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.0] * self.N,
[500.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (7.0 * x[0] ** 2.0 - 6.0 * sqrt(3) * x[0] * x[1]
+ 13 * x[1] ** 2.0)
| RotatedEllipse01 |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/base.py | {
"start": 874,
"end": 33042
} | class ____(BasePydanticVectorStore):
"""
A LlamaIndex vector store implementation for Apache Solr.
This vector store provides integration with Apache Solr, supporting
both dense vector similarity search (KNN) and sparse text search (BM25).
Key Features:
* Dense vector embeddings with KNN similarity search
* Sparse text search with BM25 scoring and field boosting
* Metadata filtering with various operators
* Async/sync operations
* Automatic query escaping and field preprocessing
Field Mapping: the vector store maps LlamaIndex node attributes
to Solr fields:
* ``nodeid_field``: Maps to ``node.id_`` (required)
* ``content_field``: Maps to ``node.get_content()`` (optional)
* ``embedding_field``: Maps to ``node.get_embedding()`` (optional)
* ``docid_field``: Maps to ``node.ref_doc_id`` (optional)
* ``metadata fields``: Mapped via ``metadata_to_solr_field_mapping``
Query Modes:
* ``DEFAULT``: Dense vector KNN search using embeddings
* ``TEXT_SEARCH``: Sparse BM25 text search with field boosting
"""
# Core client properties
sync_client: SkipValidation[Any] = Field(
...,
exclude=True,
description="Synchronous Solr client instance for blocking operations.",
)
async_client: SkipValidation[Any] = Field(
...,
exclude=True,
description="Asynchronous Solr client instance for non-blocking operations.",
)
# Essential field mappings
nodeid_field: str = Field(
...,
description=(
"Solr field name that uniquely identifies a node (required). Must be unique across all documents and maps to the LlamaIndex `node.id_`."
),
)
docid_field: Optional[str] = Field(
default=None,
description=(
"Solr field name for the document ID (optional). Maps to `node.ref_doc_id` and is required for document-level operations like deletion."
),
)
content_field: Optional[str] = Field(
default=None,
description=(
"Solr field name for storing the node's text content (optional). Maps to `node.get_content()`; required for BM25 / text search."
),
)
embedding_field: Optional[str] = Field(
default=None,
description=(
"Solr field name for storing embedding vectors (optional). Maps to `node.get_embedding()`; required for vector similarity (KNN) search."
),
)
metadata_to_solr_field_mapping: Optional[list[tuple[str, str]]] = Field(
default=None,
description=(
"Mapping from node metadata keys to Solr field names (optional). Each tuple is (metadata_key, solr_field). Enables structured metadata filtering."
),
)
# Configuration options
text_search_fields: Optional[Annotated[Sequence[BoostedTextField], MinLen(1)]] = (
Field(
default=None,
description=(
"Fields used for BM25 text search with optional boosting. Sequence of BoostedTextField; required for TEXT_SEARCH mode."
),
)
)
output_fields: Annotated[Sequence[str], MinLen(1)] = Field(
default=["*", "score"],
description=(
"Default fields to return in query results. Include 'score' automatically for relevance; use '*' for all stored fields or list specific ones."
),
)
# Serialization configuration
model_config: ClassVar[ConfigDict] = ConfigDict(
arbitrary_types_allowed=True, frozen=True
)
# Required for LlamaIndex API compatibility
stores_text: bool = True
stores_node: bool = True
flat_metadata: bool = False
@field_validator("output_fields")
@classmethod
def _validate_output_fields(cls, value: Sequence[str]) -> list[str]:
"""
Ensure 'score' field is always included in output_fields during initialization.
Args:
value (Sequence[str]): The original output fields
Returns:
Modified output fields with 'score' always included
"""
result = list(value)
if "score" not in result:
result.append("score")
return result
@field_validator("text_search_fields", mode="before")
def _validate_text_search_fields(
cls, v: Optional[list[Union[str, BoostedTextField]]]
) -> Optional[list[BoostedTextField]]:
"""Validate and convert text search fields to BoostedTextField instances."""
if v is None:
return None
def to_boosted(item: Union[str, BoostedTextField]) -> BoostedTextField:
if isinstance(item, str):
return BoostedTextField(field=item)
return item
return [to_boosted(item) for item in v]
@property
def client(self) -> Any:
"""Return synchronous Solr client."""
return self.sync_client
@property
def aclient(self) -> Any:
"""Return asynchronous Solr client."""
return self.async_client
def _build_dense_query(
self, query: VectorStoreQuery, solr_query: SolrQueryDict
) -> SolrQueryDict:
"""
Build a dense vector KNN query for Solr.
Args:
query: The vector store query containing embedding and parameters
solr_query: The base Solr query dictionary to build upon
Returns:
Updated Solr query dictionary with dense vector search parameters
Raises:
ValueError: If no embedding field is specified in either query or vector store
"""
if query.embedding_field is not None:
embedding_field = query.embedding_field
logger.debug("Using embedding field from query: %s", embedding_field)
elif self.embedding_field is not None:
embedding_field = self.embedding_field
logger.debug("Using embedding field from vector store: %s", embedding_field)
else:
raise ValueError(
"No embedding field name specified in query or vector store. "
"Either set 'embedding_field' on the VectorStoreQuery or configure "
"'embedding_field' when initializing ApacheSolrVectorStore"
)
if query.query_embedding is None:
logger.warning(
"`query.query_embedding` is None, retrieval results will not be meaningful."
)
solr_query["q"] = (
f"{{!knn f={embedding_field} topK={query.similarity_top_k}}}{query.query_embedding}"
)
rows_value = None or query.similarity_top_k
solr_query["rows"] = str(rows_value)
return solr_query
def _build_bm25_query(
self, query: VectorStoreQuery, solr_query: SolrQueryDict
) -> SolrQueryDict:
"""
Build a BM25 text search query for Solr.
Args:
query: The vector store query containing the query string and parameters
solr_query: The base Solr query dictionary to build upon
Returns:
Updated Solr query dictionary with BM25 search parameters
Raises:
ValueError: If no text search fields are available or query string is None
"""
if query.query_str is None:
raise ValueError("Query string cannot be None for BM25 search")
# Use text_search_fields from the vector store
if self.text_search_fields is None:
raise ValueError(
"text_search_fields must be specified in the vector store config for BM25 search"
)
user_query = escape_query_characters(
query.query_str, translation_table=ESCAPE_RULES_NESTED_LUCENE_DISMAX
)
# Join the search fields with spaces for the Solr qf parameter
search_fields_str = " ".join(
[
text_search_field.get_query_str()
for text_search_field in self.text_search_fields
]
)
solr_query["q"] = (
f"{{!dismax deftype=lucene, qf='{search_fields_str}' v='{user_query}'}}"
)
# Use rows from query if provided, otherwise fall back to similarity_top_k
rows_value = None or query.sparse_top_k
solr_query["rows"] = str(rows_value)
return solr_query
def _to_solr_query(self, query: VectorStoreQuery) -> SolrQueryDict:
"""Generate a KNN Solr query."""
solr_query: SolrQueryDict = {"q": "*:*", "fq": []}
if (
query.mode == VectorStoreQueryMode.DEFAULT
and query.query_embedding is not None
):
solr_query = self._build_dense_query(query, solr_query)
elif query.mode == VectorStoreQueryMode.TEXT_SEARCH:
solr_query = self._build_bm25_query(query, solr_query)
if query.doc_ids is not None:
if self.docid_field is None:
raise ValueError(
"`docid_field` must be passed during initialization to filter on docid"
)
solr_query["fq"].append(
f"{self.docid_field}:({' OR '.join(query.doc_ids)})"
)
if query.node_ids is not None and len(query.node_ids) > 0:
solr_query["fq"].append(
f"{self.nodeid_field}:({' OR '.join(query.node_ids)})"
)
if query.output_fields is not None:
# Use output fields from query, ensuring score is always included
output_fields = self._validate_output_fields(query.output_fields)
solr_query["fl"] = ",".join(output_fields)
logger.info("Using output fields from query: %s", output_fields)
else:
# Use default output fields from vector store, ensuring score is always included
solr_query["fl"] = ",".join(self.output_fields)
logger.info(
"Using default output fields from vector store: %s", self.output_fields
)
if query.filters:
filter_queries = recursively_unpack_filters(query.filters)
solr_query["fq"].extend(filter_queries)
logger.debug(
"Converted input query into Solr query dictionary, input=%s, output=%s",
query,
solr_query,
)
return solr_query
def _process_query_results(
self, results: list[dict[str, Any]]
) -> VectorStoreQueryResult:
"""
Convert Solr search results to LlamaIndex VectorStoreQueryResult format.
This method transforms raw Solr documents into LlamaIndex TextNode objects
and packages them with similarity scores and metadata into a structured
query result. It handles field mapping, metadata extraction.
Args:
results: List of Solr document dictionaries from search response.
Each dictionary contains field values as returned by Solr.
Returns:
A :py:class:`VectorStoreQueryResult` containing:
* ``nodes``: List of :py:class:`TextNode` objects with content and metadata
* ``ids``: List of node IDs corresponding to each node
* ``similarities``: List of similarity scores (if available)
Raises:
ValueError: If the number of similarity scores doesn't match the
number of nodes (partial scoring is not supported).
Note:
* Metadata fields are automatically identified by excluding known
system fields (``nodeid_field``, ``content_field``, etc.)
* The 'score' field from Solr is extracted as similarity scores
* Missing optional fields (``content``, ``embedding``) are handled gracefully
"""
ids, nodes, similarities = [], [], []
for result in results:
metadata_fields = result.keys() - {
self.nodeid_field,
self.content_field,
self.embedding_field,
self.docid_field,
"score",
}
ids.append(result[self.nodeid_field])
node = TextNode(
id_=result[self.nodeid_field],
# input must be a string, if missing use empty string
text=result[self.content_field] if self.content_field else "",
embedding=(
result[self.embedding_field] if self.embedding_field else None
),
metadata={f: result[f] for f in metadata_fields},
)
nodes.append(node)
if "score" in result:
similarities.append(result["score"])
if len(similarities) == 0:
return VectorStoreQueryResult(nodes=nodes, ids=ids)
elif 0 < len(similarities) < len(nodes):
raise ValueError(
"The number of similarities (scores) does not match the number of nodes"
)
else:
return VectorStoreQueryResult(
nodes=nodes, ids=ids, similarities=similarities
)
def _validate_query_mode(self, query: VectorStoreQuery) -> None:
"""
Validate that the query mode is supported by this vector store.
This method ensures that the requested query mode is compatible with
the current Solr vector store implementation.
Supported Modes:
* ``DEFAULT``: Dense vector similarity search using KNN with embeddings
* ``TEXT_SEARCH``: Sparse text search using BM25 with field boosting
Args:
query:
The vector store query containing the mode to validate. The mode is
checked against supported :py:class:`VectorStoreQueryMode` values.
Raises:
ValueError: If the query mode is not supported. Unsupported modes
include any future modes not yet implemented in the Solr backend.
Note:
This validation occurs before query execution to provide clear
error messages for unsupported operations. Future versions may
support additional query modes like hybrid search.
"""
if (
query.mode == VectorStoreQueryMode.DEFAULT
or query.mode == VectorStoreQueryMode.TEXT_SEARCH
):
return
else:
raise ValueError(
f"ApacheSolrVectorStore does not support {query.mode} yet."
)
def query(
self, query: VectorStoreQuery, **search_kwargs: Any
) -> VectorStoreQueryResult:
"""
Execute a synchronous search query against the Solr vector store.
This method supports both dense vector similarity search (KNN) and sparse
text search (BM25) depending on the query mode and parameters. It handles
query validation, Solr query construction, execution, and result processing.
Query Types:
* Dense Vector Search: Uses ``query_embedding`` for KNN similarity search
* Text Search: Uses ``query_str`` for BM25 text search with field boosting
* Filtered Search: Combines vector/text search with metadata filters
Supported Filter Operations:
* ``EQ``, ``NE``: Equality and inequality comparisons
* ``GT``, ``GTE``, ``LT``, ``LTE``: Numeric range comparisons
* ``IN``, ``NIN``: List membership tests
* ``TEXT_MATCH``: Exact text matching
Unsupported Filter Operations:
* ``ANY``, ``ALL``: Complex logical operations
* ``TEXT_MATCH_INSENSITIVE``: Case-insensitive text matching
* ``CONTAINS``: Substring matching
Args:
query:
The vector store query containing search parameters:
* ``query_embedding``: Dense vector for similarity search (DEFAULT mode)
* ``query_str``: Text string for BM25 search (TEXT_SEARCH mode)
* ``mode``: ``VectorStoreQueryMode`` (DEFAULT or TEXT_SEARCH)
* ``similarity_top_k``: Number of results for vector search
* ``sparse_top_k``: Number of results for text search
* ``filters``: Optional metadata filters for constraining results
* ``doc_ids``: Optional list of document IDs to filter by
* ``node_ids``: Optional list of node IDs to filter by
* ``output_fields``: Optional list of fields to return
**search_kwargs: Extra keyword arguments (ignored for compatibility)
Returns:
VectorStoreQueryResult containing:
* nodes: List of TextNode objects with content and metadata
* ids: List of corresponding node IDs
* similarities: List of similarity scores (when available)
Raises:
ValueError: If the query mode is unsupported, or if required fields
are missing (e.g., ``embedding_field`` for vector search, ``docid_field``
for document filtering)
Note:
This method performs synchronous I/O operations. For better performance
in async contexts, use the :py:meth:`aquery` method instead.
"""
del search_kwargs # unused
self._validate_query_mode(query)
solr_query = self._to_solr_query(query)
results = self.sync_client.search(solr_query)
return self._process_query_results(results.response.docs)
async def aquery(
self, query: VectorStoreQuery, **search_kwargs: Any
) -> VectorStoreQueryResult:
"""
Execute an asynchronous search query against the Solr vector store.
This method supports both dense vector similarity search (KNN) and sparse
text search (BM25) depending on the query mode and parameters. It handles
query validation, Solr query construction, execution, and result processing.
Query Types:
* Dense Vector Search: Uses ``query_embedding`` for KNN similarity search
* Text Search: Uses ``query_str`` for BM25 text search with field boosting
* Filtered Search: Combines vector/text search with metadata filters
Supported Filter Operations:
* ``EQ``, ``NE``: Equality and inequality comparisons
* ``GT``, ``GTE``, ``LT``, ``LTE``: Numeric range comparisons
* ``IN``, ``NIN``: List membership tests
* ``TEXT_MATCH``: Exact text matching
Unsupported Filter Operations:
* ``ANY``, ``ALL``: Complex logical operations
* ``TEXT_MATCH_INSENSITIVE``: Case-insensitive text matching
* ``CONTAINS``: Substring matching
Args:
query:
The vector store query containing search parameters:
* ``query_embedding``: Dense vector for similarity search (DEFAULT mode)
* ``query_str``: Text string for BM25 search (TEXT_SEARCH mode)
* ``mode``: ``VectorStoreQueryMode`` (DEFAULT or TEXT_SEARCH)
* ``similarity_top_k``: Number of results for vector search
* ``sparse_top_k``: Number of results for text search
* ``filters``: Optional metadata filters for constraining results
* ``doc_ids``: Optional list of document IDs to filter by
* ``node_ids``: Optional list of node IDs to filter by
* ``output_fields``: Optional list of fields to return
**search_kwargs: Extra keyword arguments (ignored for compatibility)
Returns:
VectorStoreQueryResult containing:
* nodes: List of TextNode objects with content and metadata
* ids: List of corresponding node IDs
* similarities: List of similarity scores (when available)
Raises:
ValueError: If the query mode is unsupported, or if required fields
are missing (e.g., ``embedding_field`` for vector search, ``docid_field``
for document filtering)
"""
del search_kwargs # unused
self._validate_query_mode(query)
solr_query = self._to_solr_query(query)
results = await self.async_client.search(solr_query)
return self._process_query_results(results.response.docs)
def _get_data_from_node(self, node: BaseNode) -> dict[str, Any]:
"""
Transform a LlamaIndex node into a Solr document dictionary.
This method maps LlamaIndex node attributes to Solr fields based on the
vector store configuration. It handles content extraction, embedding
mapping, metadata processing.
Args:
node: LlamaIndex BaseNode containing content, metadata,
to be stored in Solr.
Returns:
Dictionary representing a Solr document with mapped fields:
- id: Always maps to node.node_id (required)
- content_field: Maps to node.get_content() (if configured)
- embedding_field: Maps to node.get_embedding() (if configured)
- docid_field: Maps to node.ref_doc_id (if configured)
- metadata fields: Mapped via metadata_to_solr_field_mapping
Field Mapping Process:
1. Always includes node ID as 'id' field
2. Extracts content if content_field is configured
3. Extracts embedding if embedding_field is configured
4. Includes document ID if docid_field is configured
5. Maps metadata using configured field mappings with preprocessing
Note:
This is an internal method used by add() and async_add() operations.
The returned dictionary must be compatible with the Solr schema.
"""
data: dict[str, Any] = {self.nodeid_field: node.node_id}
if self.content_field is not None:
data[self.content_field] = node.get_content()
if self.embedding_field is not None:
data[self.embedding_field] = node.get_embedding()
if self.docid_field is not None:
data[self.docid_field] = node.ref_doc_id
if self.metadata_to_solr_field_mapping is not None:
for metadata_key, solr_key in self.metadata_to_solr_field_mapping:
if metadata_key in node.metadata:
data[solr_key] = node.metadata[metadata_key]
return data
def _get_data_from_nodes(
self, nodes: Sequence[BaseNode]
) -> tuple[list[str], list[dict[str, Any]]]:
# helper to avoid double iteration, it gets expensive at large batch sizes
logger.debug("Extracting data from %d nodes", len(nodes))
data: list[dict[str, Any]] = []
node_ids: list[str] = []
for node in nodes:
node_ids.append(node.id_)
data.append(self._get_data_from_node(node))
return node_ids, data
def add(self, nodes: Sequence[BaseNode], **add_kwargs: Any) -> list[str]:
"""
Synchronously add nodes (documents) to a Solr collection.
Mapping from Solr fields to :py:class:`llama_index.core.schema.BaseNode` attributes
should be as follows:
* ``nodeid_field`` -> ``node_id``
* ``content_field`` -> ``content``
* ``embedding_field`` -> ``embedding``
* ``docid_field`` -> ``ref_doc_id``
All other fields corresponding to the Solr collection should be packed as a single
``dict`` in the ``metadata`` field.
Args:
nodes: The nodes (documents) to be added to the Solr collection.
**add_kwargs:
Extra keyword arguments.
Returns:
A list of node IDs for each node added to the store.
"""
del add_kwargs # unused
if not nodes:
raise ValueError("Call to 'add' with no contents")
start = time.perf_counter()
node_ids, data = self._get_data_from_nodes(nodes)
self.sync_client.add(data)
logger.info(
"Added %d documents to Solr in %0.2f seconds",
len(data),
time.perf_counter() - start,
)
return node_ids
async def async_add(
self,
nodes: Sequence[BaseNode],
**add_kwargs: Any,
) -> list[str]:
"""
Asynchronously add nodes (documents) to a Solr collection.
Mapping from Solr fields to :py:class:`llama_index.core.schema.BaseNode` attributes
should be as follows:
* ``nodeid_field`` -> ``node_id``
* ``content_field`` -> ``content``
* ``embedding_field`` -> ``embedding``
* ``docid_field`` -> ``ref_doc_id``
All other fields corresponding to the Solr collection should be packed as a single
``dict`` in the ``metadata`` field.
Args:
nodes: The nodes (documents) to be added to the Solr collection.
**add_kwargs:
Extra keyword arguments.
Returns:
A list of node IDs for each node added to the store.
Raises:
ValueError: If called with an empty list of nodes.
"""
del add_kwargs # unused
if not nodes:
raise ValueError("Call to 'async_add' with no contents")
start = time.perf_counter()
node_ids, data = self._get_data_from_nodes(nodes)
await self.async_client.add(data)
logger.info(
"Added %d documents to Solr in %0.2f seconds",
len(data),
time.perf_counter() - start,
)
return node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Synchronously delete a node from the collection using its reference document ID.
Args:
ref_doc_id: The reference document ID of the node to be deleted.
**delete_kwargs:
Extra keyword arguments, ignored by this implementation. These are added
solely for interface compatibility.
Raises:
ValueError:
If a ``docid_field`` was not passed to this vector store at
initialization.
"""
del delete_kwargs # unused
logger.debug("Deleting documents from Solr using query: %s", ref_doc_id)
self.sync_client.delete_by_id([ref_doc_id])
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Asynchronously delete a node from the collection using its reference document ID.
Args:
ref_doc_id: The reference document ID of the node to be deleted.
**delete_kwargs:
Extra keyword arguments, ignored by this implementation. These are added
solely for interface compatibility.
Raises:
ValueError:
If a ``docid_field`` was not passed to this vector store at
initialization.
"""
del delete_kwargs # unused
logger.debug("Deleting documents from Solr using query: %s", ref_doc_id)
await self.async_client.delete_by_id([ref_doc_id])
def _build_delete_nodes_query(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> str:
if not node_ids and not filters:
raise ValueError(
"At least one of `node_ids` or `filters` must be passed to `delete_nodes`"
)
queries: list[str] = []
if node_ids:
queries.append(f"{self.nodeid_field}:({' OR '.join(node_ids)})")
if filters is not None:
queries.extend(recursively_unpack_filters(filters))
if not queries:
raise ValueError(
"Neither `node_ids` nor non-empty `filters` were passed to `delete_nodes`"
)
elif len(queries) == 1:
return queries[0]
return f"({' AND '.join(q for q in queries if q)})"
def delete_nodes(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Synchronously delete nodes from vector store based on node ids.
Args:
node_ids: The node IDs to delete.
filters: The filters to be applied to the node when deleting.
**delete_kwargs:
Extra keyword arguments, ignored by this implementation. These are added
solely for interface compatibility.
"""
del delete_kwargs # unused
has_filters = filters is not None and len(filters.filters) > 0
# we can efficiently delete by ID if no filters are specified
if node_ids and not has_filters:
logger.debug("Deleting %d nodes from Solr by ID", len(node_ids))
self.sync_client.delete_by_id(node_ids)
# otherwise, build a query to delete by IDs+filters
else:
query_string = self._build_delete_nodes_query(node_ids, filters)
logger.debug(
"Deleting nodes from Solr using query: %s", query_string
) # pragma: no cover
self.sync_client.delete_by_query(query_string)
async def adelete_nodes(
self,
node_ids: Optional[list[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Asynchronously delete nodes from vector store based on node ids.
Args:
node_ids: The node IDs to delete.
filters: The filters to be applied to the node when deleting.
**delete_kwargs:
Extra keyword arguments, ignored by this implementation. These are added
solely for interface compatibility.
"""
del delete_kwargs # unused
has_filters = filters is not None and len(filters.filters) > 0
# we can efficiently delete by ID if no filters are specified
if node_ids and not has_filters:
logger.debug("Deleting %d nodes from Solr by ID", len(node_ids))
await self.async_client.delete_by_id(node_ids)
# otherwise, build a query to delete by IDs+filters
else:
query_string = self._build_delete_nodes_query(node_ids, filters)
logger.debug("Deleting nodes from Solr using query: %s", query_string)
await self.async_client.delete_by_query(query_string)
def clear(self) -> None:
"""
Delete all documents from the Solr collection synchronously.
This action is not reversible!
"""
self.sync_client.clear_collection()
async def aclear(self) -> None:
"""
Delete all documents from the Solr collection asynchronously.
This action is not reversible!
"""
await self.async_client.clear_collection()
def close(self) -> None:
"""Close the Solr client synchronously."""
self.sync_client.close()
try:
loop = asyncio.get_running_loop()
except RuntimeError:
# No running loop: create a temporary loop and close cleanly
asyncio.run(self.async_client.close())
else:
# Running loop: schedule async close (not awaited)
loop.create_task(self.async_client.close()) # noqa: RUF006
async def aclose(self) -> None:
"""Explicit aclose for callers running inside an event loop."""
self.sync_client.close()
await self.async_client.close()
def __del__(self) -> None:
"""
Clean up the client for shutdown.
This action is not reversible, and should only be called one time.
"""
try:
self.close()
except RuntimeError as exc:
logger.debug(
"No running event loop, nothing to close, type=%s err='%s'",
type(exc),
exc,
)
except Exception as exc:
logger.warning(
"Failed to close the async Solr client, type=%s err='%s'",
type(exc),
exc,
)
| ApacheSolrVectorStore |
python | Textualize__textual | docs/examples/guide/reactivity/world_clock03.py | {
"start": 711,
"end": 1377
} | class ____(App):
CSS_PATH = "world_clock01.tcss"
time: reactive[datetime] = reactive(datetime.now)
def compose(self) -> ComposeResult:
yield WorldClock("Europe/London").data_bind(
clock_time=WorldClockApp.time # (1)!
)
yield WorldClock("Europe/Paris").data_bind(clock_time=WorldClockApp.time)
yield WorldClock("Asia/Tokyo").data_bind(clock_time=WorldClockApp.time)
def update_time(self) -> None:
self.time = datetime.now()
def on_mount(self) -> None:
self.update_time()
self.set_interval(1, self.update_time)
if __name__ == "__main__":
WorldClockApp().run()
| WorldClockApp |
python | pytorch__pytorch | test/distributed/test_c10d_common.py | {
"start": 10247,
"end": 10782
} | class ____(nn.Module):
def __init__(self, hin, win, n_features):
super().__init__()
self.hin = hin
self.win = win
self.weight = nn.Parameter(
torch.ones((n_features, n_features, hin, win // 2 + 1), dtype=torch.cfloat)
)
def forward(self, x):
xc = torch.fft.rfft2(x, s=(self.hin, self.win), dim=(-2, -1), norm="ortho")
xcw = torch.einsum("nchw,cohw->nohw", xc, self.weight)
x = torch.fft.irfft2(xcw, dim=(-2, -1), norm="ortho")
return x
| FFTModel |
python | spack__spack | lib/spack/spack/variant.py | {
"start": 17705,
"end": 24359
} | class ____(collections.abc.Sequence):
"""Allows combinations from one of many mutually exclusive sets.
The value ``('none',)`` is reserved to denote the empty set
and therefore no other set can contain the item ``'none'``.
Args:
*sets (list): mutually exclusive sets of values
"""
_empty_set = {"none"}
def __init__(self, *sets: Tuple[str, ...]) -> None:
self.sets = [set(_flatten(x)) for x in sets]
# 'none' is a special value and can appear only in a set of
# a single element
if any("none" in s and s != {"none"} for s in self.sets):
raise spack.error.SpecError(
"The value 'none' represents the empty set,"
" and must appear alone in a set. Use the "
"method 'allow_empty_set' to add it."
)
# Sets should not intersect with each other
if any(s1 & s2 for s1, s2 in itertools.combinations(self.sets, 2)):
raise spack.error.SpecError("sets in input must be disjoint")
#: Attribute used to track values which correspond to
#: features which can be enabled or disabled as understood by the
#: package's build system.
self.feature_values = tuple(itertools.chain.from_iterable(self.sets))
self.default = None
self.multi = True
self.error_fmt = (
"this variant accepts combinations of values from "
"exactly one of the following sets '{values}' "
"@*r{{[{package}, variant '{variant}']}}"
)
def with_default(self, default):
"""Sets the default value and returns self."""
self.default = default
return self
def with_error(self, error_fmt):
"""Sets the error message format and returns self."""
self.error_fmt = error_fmt
return self
def with_non_feature_values(self, *values):
"""Marks a few values as not being tied to a feature."""
self.feature_values = tuple(x for x in self.feature_values if x not in values)
return self
def allow_empty_set(self):
"""Adds the empty set to the current list of disjoint sets."""
if self._empty_set in self.sets:
return self
# Create a new object to be returned
object_with_empty_set = type(self)(("none",), *self.sets)
object_with_empty_set.error_fmt = self.error_fmt
object_with_empty_set.feature_values = self.feature_values + ("none",)
return object_with_empty_set
def prohibit_empty_set(self):
"""Removes the empty set from the current list of disjoint sets."""
if self._empty_set not in self.sets:
return self
# Create a new object to be returned
sets = [s for s in self.sets if s != self._empty_set]
object_without_empty_set = type(self)(*sets)
object_without_empty_set.error_fmt = self.error_fmt
object_without_empty_set.feature_values = tuple(
x for x in self.feature_values if x != "none"
)
return object_without_empty_set
def __getitem__(self, idx):
return tuple(itertools.chain.from_iterable(self.sets))[idx]
def __len__(self):
return len(itertools.chain.from_iterable(self.sets))
@property
def validator(self):
def _disjoint_set_validator(pkg_name, variant_name, values):
# If for any of the sets, all the values are in it return True
if any(all(x in s for x in values) for s in self.sets):
return
format_args = {"variant": variant_name, "package": pkg_name, "values": values}
msg = self.error_fmt + " @*r{{[{package}, variant '{variant}']}}"
msg = spack.llnl.util.tty.color.colorize(msg.format(**format_args))
raise spack.error.SpecError(msg)
return _disjoint_set_validator
def _a_single_value_or_a_combination(single_value: str, *values: str) -> DisjointSetsOfValues:
error = f"the value '{single_value}' is mutually exclusive with any of the other values"
return (
DisjointSetsOfValues((single_value,), values)
.with_default(single_value)
.with_error(error)
.with_non_feature_values(single_value)
)
# TODO: The factories below are used by package writers to set values of
# TODO: multi-valued variants. It could be worthwhile to gather them in
# TODO: a common namespace (like 'multi') in the future.
def any_combination_of(*values: str) -> DisjointSetsOfValues:
"""Multi-valued variant that allows either any combination of the specified values, or none
at all (using ``variant=none``). The literal value ``none`` is used as sentinel for the empty
set, since in the spec DSL we have to always specify a value for a variant.
It is up to the package implementation to handle the value ``none`` specially, if at all.
See also :func:`auto_or_any_combination_of` and :func:`disjoint_sets`.
Args:
*values: allowed variant values
Example::
variant("cuda_arch", values=any_combination_of("10", "11"))
Returns:
a properly initialized instance of :class:`~spack.variant.DisjointSetsOfValues`
"""
return _a_single_value_or_a_combination("none", *values)
def auto_or_any_combination_of(*values: str) -> DisjointSetsOfValues:
"""Multi-valued variant that allows any combination of a set of values (but not the empty set)
or ``auto``.
See also :func:`any_combination_of` and :func:`disjoint_sets`.
Args:
*values: allowed variant values
Example::
variant(
"file_systems",
values=auto_or_any_combination_of("lustre", "gpfs", "nfs", "ufs"),
)
Returns:
a properly initialized instance of :class:`~spack.variant.DisjointSetsOfValues`
"""
return _a_single_value_or_a_combination("auto", *values)
def disjoint_sets(*sets: Tuple[str, ...]) -> DisjointSetsOfValues:
"""Multi-valued variant that allows any combination picking from one of multiple disjoint sets
of values, and also allows the user to specify ``none`` to choose none of them.
It is up to the package implementation to handle the value ``none`` specially, if at all.
See also :func:`any_combination_of` and :func:`auto_or_any_combination_of`.
Args:
*sets: sets of allowed values, each set is a tuple of strings
Returns:
a properly initialized instance of :class:`~spack.variant.DisjointSetsOfValues`
"""
return DisjointSetsOfValues(*sets).allow_empty_set().with_default("none")
@functools.total_ordering
| DisjointSetsOfValues |
python | getsentry__sentry | src/sentry/digests/utils.py | {
"start": 1982,
"end": 6466
} | class ____(TypedDict):
counts: Counter[Group]
digest: Digest
group: Group
end: datetime | None
start: datetime | None
def get_digest_as_context(digest: Digest) -> _DigestContext:
start, end, counts = get_digest_metadata(digest)
group = next(iter(counts))
return {
"counts": counts,
"digest": digest,
"group": group,
"end": end,
"start": start,
}
def get_events_by_participant(
participants_by_provider_by_event: Mapping[
Event | GroupEvent, Mapping[ExternalProviders, set[Actor]]
],
) -> Mapping[Actor, set[Event | GroupEvent]]:
"""Invert a mapping of events to participants to a mapping of participants to events."""
output = defaultdict(set)
for event, participants_by_provider in participants_by_provider_by_event.items():
participants: set[Actor]
for participants in participants_by_provider.values():
for participant in participants:
output[participant].add(event)
return output
def get_personalized_digests(
digest: Digest,
participants_by_provider_by_event: Mapping[
Event | GroupEvent, Mapping[ExternalProviders, set[Actor]]
],
) -> Mapping[Actor, Digest]:
events_by_participant = get_events_by_participant(participants_by_provider_by_event)
actor_to_digest = {}
for participant, events in events_by_participant.items():
if participant is not None:
custom_digest = build_custom_digest(digest, events, participant)
if custom_digest:
actor_to_digest[participant] = custom_digest
return actor_to_digest
def get_event_from_groups_in_digest(digest: Digest) -> Iterable[Event | GroupEvent]:
"""Gets a random event from each group in the digest."""
return {
group_records[0].value.event
for rule_groups in digest.values()
for group_records in rule_groups.values()
}
def build_custom_digest(
original_digest: Digest, events: Iterable[Event | GroupEvent], participant: Actor
) -> Digest:
"""Given a digest and a set of events, filter the digest to only records that include the events."""
user_digest: Digest = {}
rule_snoozes = RuleSnooze.objects.filter(
Q(user_id=participant.id) | Q(user_id__isnull=True), rule__in=original_digest.keys()
).values_list("rule", flat=True)
snoozed_rule_ids = {rule for rule in rule_snoozes}
for rule, rule_groups in original_digest.items():
if rule.id in snoozed_rule_ids:
continue
user_rule_groups = {}
for group, group_records in rule_groups.items():
user_group_records = [
record for record in group_records if record.value.event in events
]
if user_group_records:
user_rule_groups[group] = user_group_records
if user_rule_groups:
user_digest[rule] = user_rule_groups
return user_digest
def get_participants_by_event(
digest: Digest,
project: Project,
target_type: ActionTargetType = ActionTargetType.ISSUE_OWNERS,
target_identifier: int | None = None,
fallthrough_choice: FallthroughChoiceType | None = None,
) -> Mapping[Event | GroupEvent, Mapping[ExternalProviders, set[Actor]]]:
"""
This is probably the slowest part in sending digests because we do a lot of
DB calls while we iterate over every event. It would be great if we could
combine some queries.
"""
return {
event: get_send_to(
project=project,
target_type=target_type,
target_identifier=target_identifier,
event=event,
fallthrough_choice=fallthrough_choice,
)
for event in get_event_from_groups_in_digest(digest)
}
def sort_records(records: Sequence[Record]) -> Sequence[Record]:
"""Sorts records ordered from newest to oldest."""
def sort_func(record: Record) -> datetime:
return record.value.event.datetime
return sorted(records, key=sort_func, reverse=True)
def get_groups(digest: Digest) -> Sequence[tuple[Rule, Group, Event | GroupEvent]]:
"""
Split a digest into groups and return it as a tuple of: the applicable
rule, the group, and the group's first event.
"""
return [
(rule, group, group_records[0].value.event)
for rule, rule_groups in digest.items()
for group, group_records in rule_groups.items()
]
| _DigestContext |
python | kamyu104__LeetCode-Solutions | Python/course-schedule.py | {
"start": 94,
"end": 950
} | class ____(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
adj = collections.defaultdict(list)
in_degree = collections.Counter()
for u, v in prerequisites:
in_degree[u] += 1
adj[v].append(u)
result = []
q = [u for u in xrange(numCourses) if u not in in_degree]
while q:
new_q = []
for u in q:
result.append(u)
for v in adj[u]:
in_degree[v] -= 1
if in_degree[v] == 0:
new_q.append(v)
q = new_q
return len(result) == numCourses
# Time: O(|V| + |E|)
# Space: O(|E|)
import collections
# dfs solution
| Solution |
python | pypa__setuptools | setuptools/command/build_clib.py | {
"start": 198,
"end": 4528
} | class ____(orig.build_clib):
"""
Override the default build_clib behaviour to do the following:
1. Implement a rudimentary timestamp-based dependency system
so 'compile()' doesn't run every time.
2. Add more keys to the 'build_info' dictionary:
* obj_deps - specify dependencies for each object compiled.
this should be a dictionary mapping a key
with the source filename to a list of
dependencies. Use an empty string for global
dependencies.
* cflags - specify a list of additional flags to pass to
the compiler.
"""
distribution: Distribution # override distutils.dist.Distribution with setuptools.dist.Distribution
def build_libraries(self, libraries) -> None:
for lib_name, build_info in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
f"in 'libraries' option (library '{lib_name}'), "
"'sources' must be present and must be "
"a list of source filenames"
)
sources = sorted(list(sources))
log.info("building '%s' library", lib_name)
# Make sure everything is the correct type.
# obj_deps should be a dictionary of keys as sources
# and a list/tuple of files that are its dependencies.
obj_deps = build_info.get('obj_deps', dict())
if not isinstance(obj_deps, dict):
raise DistutilsSetupError(
f"in 'libraries' option (library '{lib_name}'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'"
)
dependencies = []
# Get the global dependencies that are specified by the '' key.
# These will go into every source's dependency list.
global_deps = obj_deps.get('', list())
if not isinstance(global_deps, (list, tuple)):
raise DistutilsSetupError(
f"in 'libraries' option (library '{lib_name}'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'"
)
# Build the list to be used by newer_pairwise_group
# each source will be auto-added to its dependencies.
for source in sources:
src_deps = [source]
src_deps.extend(global_deps)
extra_deps = obj_deps.get(source, list())
if not isinstance(extra_deps, (list, tuple)):
raise DistutilsSetupError(
f"in 'libraries' option (library '{lib_name}'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'"
)
src_deps.extend(extra_deps)
dependencies.append(src_deps)
expected_objects = self.compiler.object_filenames(
sources,
output_dir=self.build_temp,
)
if newer_pairwise_group(dependencies, expected_objects) != ([], []):
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
cflags = build_info.get('cflags')
self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
extra_postargs=cflags,
debug=self.debug,
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
expected_objects, lib_name, output_dir=self.build_clib, debug=self.debug
)
| build_clib |
python | geekcomputers__Python | area_of_square_app.py | {
"start": 693,
"end": 4189
} | class ____:
def __init__(self, side=None):
if side is None:
self.ask_side()
# else:
# self.side = float(side)
else:
if not isinstance(side, (int, float)):
try:
side = float(side)
except ValueError:
# return "Invalid input for side."
raise ValueError("Invalid input for side.")
else:
self.side = float(side)
# Check if the result is a float and remove unnecessary zeros
self.calculate_square()
self.truncate_decimals()
# If ask side or input directly into the square.
# That can be done?
def calculate_square(self):
self.area = self.side * self.side
return self.area
# Want to add a while loop asking for the input.
# Also have an option to ask the user in true mode or in repeat mode.
def ask_side(self):
# if true bool then while if int or float then for loop.
# I will have to learn inheritance and polymorphism.
condition = 3
# condition = True
if condition == True and isinstance(condition, bool):
while condition:
n = input("Enter the side of the square: ")
self.side = float(n)
elif isinstance(condition, (int, float)):
for i in range(_=condition):
n = input("Enter the side of the square: ")
self.side = float(n)
# n = input("Enter the side of the square: ")
# self.side = float(n)
# return
def truncate_decimals(self):
return (
f"{self.area:.10f}".rstrip("0").rstrip(".")
if "." in str(self.area)
else self.area
)
# Prettifying the output.
def calculate_perimeter(self):
return 4 * self.side
def calculate_perimeter_prettify(self):
return f"The perimeter of the square is {self.calculate_perimeter()}."
def calculate_area_prettify(self):
return f"The area of the square is {self.area}."
def truncate_decimals_prettify(self):
return f"The area of the square is {self.truncate_decimals()}."
if __name__ == "__main__":
output_one = Square()
truncated_area = output_one.truncate_decimals()
# print(output_one.truncate_decimals())
print(truncated_area)
# add a while loop to keep asking for the user input.
# also make sure to add a about menu to input a while loop in tkinter app.
# It can use a beautiful GUI also.
# Even validation is left.
# What if string is provided in number? Then?
# What if chars are provided. Then?
# What if a negative number is provided? Then?
# What if a number is provided in alphabets characters? Then?
# Can it a single method have more object in it?
# Also need to perform testing on it.
# EXTREME FORM OF TESTING NEED TO BE PERFORMED ON IT.
# Documentation is also needed.
# Comments are also needed.
# TYPE hints are also needed.
# README.md file is also needed.
## Which will explain the whole project.
### Like how to use the application.
### List down the features in explicit detail.
### How to use different methods and classes.
### It will also a image of the project in working state.
### It will also have a video to the project in working state.
# It should also have .exe and linux executable file.
# It should also be installable into Windows(x86) system and if possible into Linux system also.
| Square |
python | weaviate__weaviate-python-client | weaviate/proto/v1/v6300/v1/weaviate_pb2_grpc.py | {
"start": 3757,
"end": 8448
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def Search(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchObjects(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchReferences(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchDelete(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TenantsGet(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Aggregate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchStream(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WeaviateServicer_to_server(servicer, server):
rpc_method_handlers = {
'Search': grpc.unary_unary_rpc_method_handler(
servicer.Search,
request_deserializer=v1_dot_search__get__pb2.SearchRequest.FromString,
response_serializer=v1_dot_search__get__pb2.SearchReply.SerializeToString,
),
'BatchObjects': grpc.unary_unary_rpc_method_handler(
servicer.BatchObjects,
request_deserializer=v1_dot_batch__pb2.BatchObjectsRequest.FromString,
response_serializer=v1_dot_batch__pb2.BatchObjectsReply.SerializeToString,
),
'BatchReferences': grpc.unary_unary_rpc_method_handler(
servicer.BatchReferences,
request_deserializer=v1_dot_batch__pb2.BatchReferencesRequest.FromString,
response_serializer=v1_dot_batch__pb2.BatchReferencesReply.SerializeToString,
),
'BatchDelete': grpc.unary_unary_rpc_method_handler(
servicer.BatchDelete,
request_deserializer=v1_dot_batch__delete__pb2.BatchDeleteRequest.FromString,
response_serializer=v1_dot_batch__delete__pb2.BatchDeleteReply.SerializeToString,
),
'TenantsGet': grpc.unary_unary_rpc_method_handler(
servicer.TenantsGet,
request_deserializer=v1_dot_tenants__pb2.TenantsGetRequest.FromString,
response_serializer=v1_dot_tenants__pb2.TenantsGetReply.SerializeToString,
),
'Aggregate': grpc.unary_unary_rpc_method_handler(
servicer.Aggregate,
request_deserializer=v1_dot_aggregate__pb2.AggregateRequest.FromString,
response_serializer=v1_dot_aggregate__pb2.AggregateReply.SerializeToString,
),
'BatchStream': grpc.stream_stream_rpc_method_handler(
servicer.BatchStream,
request_deserializer=v1_dot_batch__pb2.BatchStreamRequest.FromString,
response_serializer=v1_dot_batch__pb2.BatchStreamReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'weaviate.v1.Weaviate', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
server.add_registered_method_handlers('weaviate.v1.Weaviate', rpc_method_handlers)
# This class is part of an EXPERIMENTAL API.
| WeaviateServicer |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/transform_observation.py | {
"start": 3880,
"end": 8991
} | class ____(
TransformObservation[WrapperObsType, ActType, ObsType],
gym.utils.RecordConstructorArgs,
):
"""Filters a Dict or Tuple observation spaces by a set of keys or indexes.
A vector version of the wrapper exists :class:`gymnasium.wrappers.vector.FilterObservation`.
Example:
>>> import gymnasium as gym
>>> from gymnasium.wrappers import FilterObservation
>>> env = gym.make("CartPole-v1")
>>> env = gym.wrappers.TimeAwareObservation(env, flatten=False)
>>> env.observation_space
Dict('obs': Box([-4.8 -inf -0.41887903 -inf], [4.8 inf 0.41887903 inf], (4,), float32), 'time': Box(0, 500, (1,), int32))
>>> env.reset(seed=42)
({'obs': array([ 0.0273956 , -0.00611216, 0.03585979, 0.0197368 ], dtype=float32), 'time': array([0], dtype=int32)}, {})
>>> env = FilterObservation(env, filter_keys=['time'])
>>> env.reset(seed=42)
({'time': array([0], dtype=int32)}, {})
>>> env.step(0)
({'time': array([1], dtype=int32)}, 1.0, False, False, {})
Change logs:
* v0.12.3 - Initially added, originally called `FilterObservationWrapper`
* v1.0.0 - Rename to `FilterObservation` and add support for tuple observation spaces with integer ``filter_keys``
"""
def __init__(
self, env: gym.Env[ObsType, ActType], filter_keys: Sequence[str | int]
):
"""Constructor for the filter observation wrapper.
Args:
env: The environment to wrap
filter_keys: The set of subspaces to be *included*, use a list of strings for ``Dict`` and integers for ``Tuple`` spaces
"""
if not isinstance(filter_keys, Sequence):
raise TypeError(
f"Expects `filter_keys` to be a Sequence, actual type: {type(filter_keys)}"
)
gym.utils.RecordConstructorArgs.__init__(self, filter_keys=filter_keys)
# Filters for dictionary space
if isinstance(env.observation_space, spaces.Dict):
assert all(isinstance(key, str) for key in filter_keys)
if any(
key not in env.observation_space.spaces.keys() for key in filter_keys
):
missing_keys = [
key
for key in filter_keys
if key not in env.observation_space.spaces.keys()
]
raise ValueError(
"All the `filter_keys` must be included in the observation space.\n"
f"Filter keys: {filter_keys}\n"
f"Observation keys: {list(env.observation_space.spaces.keys())}\n"
f"Missing keys: {missing_keys}"
)
new_observation_space = spaces.Dict(
{key: env.observation_space[key] for key in filter_keys}
)
if len(new_observation_space) == 0:
raise ValueError(
"The observation space is empty due to filtering all of the keys."
)
TransformObservation.__init__(
self,
env=env,
func=lambda obs: {key: obs[key] for key in filter_keys},
observation_space=new_observation_space,
)
# Filter for tuple observation
elif isinstance(env.observation_space, spaces.Tuple):
assert all(isinstance(key, int) for key in filter_keys)
assert len(set(filter_keys)) == len(
filter_keys
), f"Duplicate keys exist, filter_keys: {filter_keys}"
if any(
0 < key and key >= len(env.observation_space) for key in filter_keys
):
missing_index = [
key
for key in filter_keys
if 0 < key and key >= len(env.observation_space)
]
raise ValueError(
"All the `filter_keys` must be included in the length of the observation space.\n"
f"Filter keys: {filter_keys}, length of observation: {len(env.observation_space)}, "
f"missing indexes: {missing_index}"
)
new_observation_spaces = spaces.Tuple(
env.observation_space[key] for key in filter_keys
)
if len(new_observation_spaces) == 0:
raise ValueError(
"The observation space is empty due to filtering all keys."
)
TransformObservation.__init__(
self,
env=env,
func=lambda obs: tuple(obs[key] for key in filter_keys),
observation_space=new_observation_spaces,
)
else:
raise ValueError(
f"FilterObservation wrapper is only usable with `Dict` and `Tuple` observations, actual type: {type(env.observation_space)}"
)
self.filter_keys: Final[Sequence[str | int]] = filter_keys
| FilterObservation |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 786,
"end": 927
} | class ____(GroupObjectPermissionBase):
content_object = models.ForeignKey("Project", on_delete=models.CASCADE)
| ProjectGroupObjectPermission |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_tableau_utils.py | {
"start": 282,
"end": 2743
} | class ____(TableauCloudWorkspace):
def fetch_tableau_workspace_data(self) -> TableauWorkspaceData:
"""Returns mock Tableau workspace data."""
# Create mock workbook
workbook = TableauContentData(
content_type=TableauContentType.WORKBOOK,
properties={
"luid": "test_workbook_id",
"name": "test_workbook",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z",
"uri": "sites/test/workbooks/test_workbook_id",
"projectName": "test_project",
"projectLuid": "test_project_id",
"sheets": [],
"dashboards": [],
},
)
# Create mock sheet
sheet = TableauContentData(
content_type=TableauContentType.SHEET,
properties={
"luid": "test_sheet_id",
"name": "sales",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z",
"path": "test_workbook/sales",
"workbook": {"luid": "test_workbook_id"},
"parentEmbeddedDatasources": [],
},
)
# Create mock dashboard
dashboard = TableauContentData(
content_type=TableauContentType.DASHBOARD,
properties={
"luid": "test_dashboard_id",
"name": "dashboard_sales",
"createdAt": "2024-01-01T00:00:00Z",
"updatedAt": "2024-01-01T00:00:00Z",
"path": "test_workbook/dashboard_sales",
"workbook": {"luid": "test_workbook_id"},
"sheets": [],
},
)
# Create mock data source
data_source = TableauContentData(
content_type=TableauContentType.DATA_SOURCE,
properties={
"luid": "test_datasource_id",
"name": "superstore_datasource",
"hasExtracts": False,
"isPublished": True,
},
)
return TableauWorkspaceData(
site_name=self.site_name,
workbooks_by_id={"test_workbook_id": workbook},
sheets_by_id={"test_sheet_id": sheet},
dashboards_by_id={"test_dashboard_id": dashboard},
data_sources_by_id={"test_datasource_id": data_source},
)
| MockTableauWorkspace |
python | openai__openai-python | src/openai/resources/containers/containers.py | {
"start": 18551,
"end": 19240
} | class ____:
def __init__(self, containers: AsyncContainers) -> None:
self._containers = containers
self.create = async_to_streamed_response_wrapper(
containers.create,
)
self.retrieve = async_to_streamed_response_wrapper(
containers.retrieve,
)
self.list = async_to_streamed_response_wrapper(
containers.list,
)
self.delete = async_to_streamed_response_wrapper(
containers.delete,
)
@cached_property
def files(self) -> AsyncFilesWithStreamingResponse:
return AsyncFilesWithStreamingResponse(self._containers.files)
| AsyncContainersWithStreamingResponse |
python | pytorch__pytorch | torch/fx/experimental/partitioner_utils.py | {
"start": 2255,
"end": 2426
} | class ____(NamedTuple):
# Latency due to the memory bandwidth
mem_latency_sec: float
# Latency due to the computation
computer_latency_sec: float
| NodeLatency |
python | python__mypy | mypyc/irbuild/nonlocalcontrol.py | {
"start": 661,
"end": 1525
} | class ____:
"""ABC representing a stack frame of constructs that modify nonlocal control flow.
The nonlocal control flow constructs are break, continue, and
return, and their behavior is modified by a number of other
constructs. The most obvious is loop, which override where break
and continue jump to, but also `except` (which needs to clear
exc_info when left) and (eventually) finally blocks (which need to
ensure that the finally block is always executed when leaving the
try/except blocks).
"""
@abstractmethod
def gen_break(self, builder: IRBuilder, line: int) -> None:
pass
@abstractmethod
def gen_continue(self, builder: IRBuilder, line: int) -> None:
pass
@abstractmethod
def gen_return(self, builder: IRBuilder, value: Value, line: int) -> None:
pass
| NonlocalControl |
python | encode__django-rest-framework | rest_framework/routers.py | {
"start": 1503,
"end": 2926
} | class ____:
def __init__(self):
self.registry = []
def register(self, prefix, viewset, basename=None):
if basename is None:
basename = self.get_default_basename(viewset)
if self.is_already_registered(basename):
msg = (f'Router with basename "{basename}" is already registered. '
f'Please provide a unique basename for viewset "{viewset}"')
raise ImproperlyConfigured(msg)
self.registry.append((prefix, viewset, basename))
# invalidate the urls cache
if hasattr(self, '_urls'):
del self._urls
def is_already_registered(self, new_basename):
"""
Check if `basename` is already registered
"""
return any(basename == new_basename for _prefix, _viewset, basename in self.registry)
def get_default_basename(self, viewset):
"""
If `basename` is not specified, attempt to automatically determine
it from the viewset.
"""
raise NotImplementedError('get_default_basename must be overridden')
def get_urls(self):
"""
Return a list of URL patterns, given the registered viewsets.
"""
raise NotImplementedError('get_urls must be overridden')
@property
def urls(self):
if not hasattr(self, '_urls'):
self._urls = self.get_urls()
return self._urls
| BaseRouter |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_micro_gemm.py | {
"start": 7187,
"end": 8548
} | class ____:
input_dtype: torch.dtype
input2_dtype: torch.dtype
output_dtype: torch.dtype
compute_dtype: torch.dtype
vec_isa_cls: type[VecISA]
register_blocking: GemmBlocking
extra_check: Optional[Callable[..., bool]] = None
micro_gemm_configs: dict[type[CppMicroGemm], list[CppMicroGemmConfig]] = {}
def register_micro_gemm(*configs):
def inner(cls):
assert cls not in micro_gemm_configs, (
f"Duplicate micro_gemm registration for {cls}"
)
assert len(configs) > 0, f"No micro_gemm configs provided for {cls}"
micro_gemm_configs[cls] = list(configs)
return cls
return inner
def generate_gemm_config(
vec_isa_cls,
register_blockings,
input_dtype=torch.float,
input2_dtype=None,
output_dtype=None,
compute_dtype=None,
extra_check=None,
):
if output_dtype is None:
output_dtype = input_dtype
if compute_dtype is None:
compute_dtype = output_dtype
if input2_dtype is None:
input2_dtype = input_dtype
return [
CppMicroGemmConfig(
input_dtype,
input2_dtype,
output_dtype,
compute_dtype,
vec_isa_cls,
GemmBlocking(*blocking),
extra_check,
)
for blocking in register_blockings
]
| CppMicroGemmConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 701495,
"end": 821480
} | class ____(sgqlc.types.Type):
"""The root query for implementing GraphQL mutations."""
__schema__ = github_schema
__field_names__ = (
"abort_queued_migrations",
"accept_enterprise_administrator_invitation",
"accept_topic_suggestion",
"add_assignees_to_assignable",
"add_comment",
"add_discussion_comment",
"add_discussion_poll_vote",
"add_enterprise_organization_member",
"add_enterprise_support_entitlement",
"add_labels_to_labelable",
"add_project_card",
"add_project_column",
"add_project_v2_draft_issue",
"add_project_v2_item_by_id",
"add_pull_request_review",
"add_pull_request_review_comment",
"add_pull_request_review_thread",
"add_reaction",
"add_star",
"add_upvote",
"add_verifiable_domain",
"approve_deployments",
"approve_verifiable_domain",
"archive_project_v2_item",
"archive_repository",
"cancel_enterprise_admin_invitation",
"cancel_sponsorship",
"change_user_status",
"clear_labels_from_labelable",
"clear_project_v2_item_field_value",
"clone_project",
"clone_template_repository",
"close_discussion",
"close_issue",
"close_pull_request",
"convert_project_card_note_to_issue",
"convert_pull_request_to_draft",
"copy_project_v2",
"create_attribution_invitation",
"create_branch_protection_rule",
"create_check_run",
"create_check_suite",
"create_commit_on_branch",
"create_discussion",
"create_enterprise_organization",
"create_environment",
"create_ip_allow_list_entry",
"create_issue",
"create_linked_branch",
"create_migration_source",
"create_project",
"create_project_v2",
"create_project_v2_field",
"create_pull_request",
"create_ref",
"create_repository",
"create_repository_ruleset",
"create_sponsors_listing",
"create_sponsors_tier",
"create_sponsorship",
"create_sponsorships",
"create_team_discussion",
"create_team_discussion_comment",
"decline_topic_suggestion",
"delete_branch_protection_rule",
"delete_deployment",
"delete_discussion",
"delete_discussion_comment",
"delete_environment",
"delete_ip_allow_list_entry",
"delete_issue",
"delete_issue_comment",
"delete_linked_branch",
"delete_project",
"delete_project_card",
"delete_project_column",
"delete_project_v2",
"delete_project_v2_field",
"delete_project_v2_item",
"delete_project_v2_workflow",
"delete_pull_request_review",
"delete_pull_request_review_comment",
"delete_ref",
"delete_repository_ruleset",
"delete_team_discussion",
"delete_team_discussion_comment",
"delete_verifiable_domain",
"dequeue_pull_request",
"disable_pull_request_auto_merge",
"dismiss_pull_request_review",
"dismiss_repository_vulnerability_alert",
"enable_pull_request_auto_merge",
"enqueue_pull_request",
"follow_organization",
"follow_user",
"grant_enterprise_organizations_migrator_role",
"grant_migrator_role",
"invite_enterprise_admin",
"link_project_v2_to_repository",
"link_project_v2_to_team",
"link_repository_to_project",
"lock_lockable",
"mark_discussion_comment_as_answer",
"mark_file_as_viewed",
"mark_project_v2_as_template",
"mark_pull_request_ready_for_review",
"merge_branch",
"merge_pull_request",
"minimize_comment",
"move_project_card",
"move_project_column",
"pin_issue",
"publish_sponsors_tier",
"regenerate_enterprise_identity_provider_recovery_codes",
"regenerate_verifiable_domain_token",
"reject_deployments",
"remove_assignees_from_assignable",
"remove_enterprise_admin",
"remove_enterprise_identity_provider",
"remove_enterprise_member",
"remove_enterprise_organization",
"remove_enterprise_support_entitlement",
"remove_labels_from_labelable",
"remove_outside_collaborator",
"remove_reaction",
"remove_star",
"remove_upvote",
"reopen_discussion",
"reopen_issue",
"reopen_pull_request",
"request_reviews",
"rerequest_check_suite",
"resolve_review_thread",
"retire_sponsors_tier",
"revert_pull_request",
"revoke_enterprise_organizations_migrator_role",
"revoke_migrator_role",
"set_enterprise_identity_provider",
"set_organization_interaction_limit",
"set_repository_interaction_limit",
"set_user_interaction_limit",
"start_organization_migration",
"start_repository_migration",
"submit_pull_request_review",
"transfer_enterprise_organization",
"transfer_issue",
"unarchive_project_v2_item",
"unarchive_repository",
"unfollow_organization",
"unfollow_user",
"unlink_project_v2_from_repository",
"unlink_project_v2_from_team",
"unlink_repository_from_project",
"unlock_lockable",
"unmark_discussion_comment_as_answer",
"unmark_file_as_viewed",
"unmark_issue_as_duplicate",
"unmark_project_v2_as_template",
"unminimize_comment",
"unpin_issue",
"unresolve_review_thread",
"update_branch_protection_rule",
"update_check_run",
"update_check_suite_preferences",
"update_discussion",
"update_discussion_comment",
"update_enterprise_administrator_role",
"update_enterprise_allow_private_repository_forking_setting",
"update_enterprise_default_repository_permission_setting",
"update_enterprise_members_can_change_repository_visibility_setting",
"update_enterprise_members_can_create_repositories_setting",
"update_enterprise_members_can_delete_issues_setting",
"update_enterprise_members_can_delete_repositories_setting",
"update_enterprise_members_can_invite_collaborators_setting",
"update_enterprise_members_can_make_purchases_setting",
"update_enterprise_members_can_update_protected_branches_setting",
"update_enterprise_members_can_view_dependency_insights_setting",
"update_enterprise_organization_projects_setting",
"update_enterprise_owner_organization_role",
"update_enterprise_profile",
"update_enterprise_repository_projects_setting",
"update_enterprise_team_discussions_setting",
"update_enterprise_two_factor_authentication_required_setting",
"update_environment",
"update_ip_allow_list_enabled_setting",
"update_ip_allow_list_entry",
"update_ip_allow_list_for_installed_apps_enabled_setting",
"update_issue",
"update_issue_comment",
"update_notification_restriction_setting",
"update_organization_allow_private_repository_forking_setting",
"update_organization_web_commit_signoff_setting",
"update_project",
"update_project_card",
"update_project_column",
"update_project_v2",
"update_project_v2_collaborators",
"update_project_v2_draft_issue",
"update_project_v2_item_field_value",
"update_project_v2_item_position",
"update_pull_request",
"update_pull_request_branch",
"update_pull_request_review",
"update_pull_request_review_comment",
"update_ref",
"update_repository",
"update_repository_ruleset",
"update_repository_web_commit_signoff_setting",
"update_sponsorship_preferences",
"update_subscription",
"update_team_discussion",
"update_team_discussion_comment",
"update_teams_repository",
"update_topics",
"verify_verifiable_domain",
)
abort_queued_migrations = sgqlc.types.Field(
AbortQueuedMigrationsPayload,
graphql_name="abortQueuedMigrations",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AbortQueuedMigrationsInput), graphql_name="input", default=None)),)
),
)
"""Clear all of a customer's queued migrations
Arguments:
* `input` (`AbortQueuedMigrationsInput!`): Parameters for
AbortQueuedMigrations
"""
accept_enterprise_administrator_invitation = sgqlc.types.Field(
AcceptEnterpriseAdministratorInvitationPayload,
graphql_name="acceptEnterpriseAdministratorInvitation",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(sgqlc.types.non_null(AcceptEnterpriseAdministratorInvitationInput), graphql_name="input", default=None),
),
)
),
)
"""Accepts a pending invitation for a user to become an administrator
of an enterprise.
Arguments:
* `input` (`AcceptEnterpriseAdministratorInvitationInput!`):
Parameters for AcceptEnterpriseAdministratorInvitation
"""
accept_topic_suggestion = sgqlc.types.Field(
AcceptTopicSuggestionPayload,
graphql_name="acceptTopicSuggestion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AcceptTopicSuggestionInput), graphql_name="input", default=None)),)
),
)
"""Applies a suggested topic to the repository.
Arguments:
* `input` (`AcceptTopicSuggestionInput!`): Parameters for
AcceptTopicSuggestion
"""
add_assignees_to_assignable = sgqlc.types.Field(
AddAssigneesToAssignablePayload,
graphql_name="addAssigneesToAssignable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddAssigneesToAssignableInput), graphql_name="input", default=None)),)
),
)
"""Adds assignees to an assignable object.
Arguments:
* `input` (`AddAssigneesToAssignableInput!`): Parameters for
AddAssigneesToAssignable
"""
add_comment = sgqlc.types.Field(
AddCommentPayload,
graphql_name="addComment",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(AddCommentInput), graphql_name="input", default=None)),)),
)
"""Adds a comment to an Issue or Pull Request.
Arguments:
* `input` (`AddCommentInput!`): Parameters for AddComment
"""
add_discussion_comment = sgqlc.types.Field(
AddDiscussionCommentPayload,
graphql_name="addDiscussionComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddDiscussionCommentInput), graphql_name="input", default=None)),)
),
)
"""Adds a comment to a Discussion, possibly as a reply to another
comment.
Arguments:
* `input` (`AddDiscussionCommentInput!`): Parameters for
AddDiscussionComment
"""
add_discussion_poll_vote = sgqlc.types.Field(
AddDiscussionPollVotePayload,
graphql_name="addDiscussionPollVote",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddDiscussionPollVoteInput), graphql_name="input", default=None)),)
),
)
"""Vote for an option in a discussion poll.
Arguments:
* `input` (`AddDiscussionPollVoteInput!`): Parameters for
AddDiscussionPollVote
"""
add_enterprise_organization_member = sgqlc.types.Field(
AddEnterpriseOrganizationMemberPayload,
graphql_name="addEnterpriseOrganizationMember",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddEnterpriseOrganizationMemberInput), graphql_name="input", default=None)),)
),
)
"""Adds enterprise members to an organization within the enterprise.
Arguments:
* `input` (`AddEnterpriseOrganizationMemberInput!`): Parameters
for AddEnterpriseOrganizationMember
"""
add_enterprise_support_entitlement = sgqlc.types.Field(
AddEnterpriseSupportEntitlementPayload,
graphql_name="addEnterpriseSupportEntitlement",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddEnterpriseSupportEntitlementInput), graphql_name="input", default=None)),)
),
)
"""Adds a support entitlement to an enterprise member.
Arguments:
* `input` (`AddEnterpriseSupportEntitlementInput!`): Parameters
for AddEnterpriseSupportEntitlement
"""
add_labels_to_labelable = sgqlc.types.Field(
AddLabelsToLabelablePayload,
graphql_name="addLabelsToLabelable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddLabelsToLabelableInput), graphql_name="input", default=None)),)
),
)
"""Adds labels to a labelable object.
Arguments:
* `input` (`AddLabelsToLabelableInput!`): Parameters for
AddLabelsToLabelable
"""
add_project_card = sgqlc.types.Field(
AddProjectCardPayload,
graphql_name="addProjectCard",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddProjectCardInput), graphql_name="input", default=None)),)
),
)
"""Adds a card to a ProjectColumn. Either `contentId` or `note` must
be provided but **not** both.
Arguments:
* `input` (`AddProjectCardInput!`): Parameters for AddProjectCard
"""
add_project_column = sgqlc.types.Field(
AddProjectColumnPayload,
graphql_name="addProjectColumn",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddProjectColumnInput), graphql_name="input", default=None)),)
),
)
"""Adds a column to a Project.
Arguments:
* `input` (`AddProjectColumnInput!`): Parameters for
AddProjectColumn
"""
add_project_v2_draft_issue = sgqlc.types.Field(
AddProjectV2DraftIssuePayload,
graphql_name="addProjectV2DraftIssue",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddProjectV2DraftIssueInput), graphql_name="input", default=None)),)
),
)
"""Creates a new draft issue and add it to a Project.
Arguments:
* `input` (`AddProjectV2DraftIssueInput!`): Parameters for
AddProjectV2DraftIssue
"""
add_project_v2_item_by_id = sgqlc.types.Field(
AddProjectV2ItemByIdPayload,
graphql_name="addProjectV2ItemById",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddProjectV2ItemByIdInput), graphql_name="input", default=None)),)
),
)
"""Links an existing content instance to a Project.
Arguments:
* `input` (`AddProjectV2ItemByIdInput!`): Parameters for
AddProjectV2ItemById
"""
add_pull_request_review = sgqlc.types.Field(
AddPullRequestReviewPayload,
graphql_name="addPullRequestReview",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddPullRequestReviewInput), graphql_name="input", default=None)),)
),
)
"""Adds a review to a Pull Request.
Arguments:
* `input` (`AddPullRequestReviewInput!`): Parameters for
AddPullRequestReview
"""
add_pull_request_review_comment = sgqlc.types.Field(
AddPullRequestReviewCommentPayload,
graphql_name="addPullRequestReviewComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddPullRequestReviewCommentInput), graphql_name="input", default=None)),)
),
)
"""Adds a comment to a review.
Arguments:
* `input` (`AddPullRequestReviewCommentInput!`): Parameters for
AddPullRequestReviewComment
"""
add_pull_request_review_thread = sgqlc.types.Field(
AddPullRequestReviewThreadPayload,
graphql_name="addPullRequestReviewThread",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddPullRequestReviewThreadInput), graphql_name="input", default=None)),)
),
)
"""Adds a new thread to a pending Pull Request Review.
Arguments:
* `input` (`AddPullRequestReviewThreadInput!`): Parameters for
AddPullRequestReviewThread
"""
add_reaction = sgqlc.types.Field(
AddReactionPayload,
graphql_name="addReaction",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(AddReactionInput), graphql_name="input", default=None)),)),
)
"""Adds a reaction to a subject.
Arguments:
* `input` (`AddReactionInput!`): Parameters for AddReaction
"""
add_star = sgqlc.types.Field(
AddStarPayload,
graphql_name="addStar",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(AddStarInput), graphql_name="input", default=None)),)),
)
"""Adds a star to a Starrable.
Arguments:
* `input` (`AddStarInput!`): Parameters for AddStar
"""
add_upvote = sgqlc.types.Field(
AddUpvotePayload,
graphql_name="addUpvote",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(AddUpvoteInput), graphql_name="input", default=None)),)),
)
"""Add an upvote to a discussion or discussion comment.
Arguments:
* `input` (`AddUpvoteInput!`): Parameters for AddUpvote
"""
add_verifiable_domain = sgqlc.types.Field(
AddVerifiableDomainPayload,
graphql_name="addVerifiableDomain",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(AddVerifiableDomainInput), graphql_name="input", default=None)),)
),
)
"""Adds a verifiable domain to an owning account.
Arguments:
* `input` (`AddVerifiableDomainInput!`): Parameters for
AddVerifiableDomain
"""
approve_deployments = sgqlc.types.Field(
ApproveDeploymentsPayload,
graphql_name="approveDeployments",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ApproveDeploymentsInput), graphql_name="input", default=None)),)
),
)
"""Approve all pending deployments under one or more environments
Arguments:
* `input` (`ApproveDeploymentsInput!`): Parameters for
ApproveDeployments
"""
approve_verifiable_domain = sgqlc.types.Field(
ApproveVerifiableDomainPayload,
graphql_name="approveVerifiableDomain",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ApproveVerifiableDomainInput), graphql_name="input", default=None)),)
),
)
"""Approve a verifiable domain for notification delivery.
Arguments:
* `input` (`ApproveVerifiableDomainInput!`): Parameters for
ApproveVerifiableDomain
"""
archive_project_v2_item = sgqlc.types.Field(
ArchiveProjectV2ItemPayload,
graphql_name="archiveProjectV2Item",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ArchiveProjectV2ItemInput), graphql_name="input", default=None)),)
),
)
"""Archives a ProjectV2Item
Arguments:
* `input` (`ArchiveProjectV2ItemInput!`): Parameters for
ArchiveProjectV2Item
"""
archive_repository = sgqlc.types.Field(
ArchiveRepositoryPayload,
graphql_name="archiveRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ArchiveRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Marks a repository as archived.
Arguments:
* `input` (`ArchiveRepositoryInput!`): Parameters for
ArchiveRepository
"""
cancel_enterprise_admin_invitation = sgqlc.types.Field(
CancelEnterpriseAdminInvitationPayload,
graphql_name="cancelEnterpriseAdminInvitation",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CancelEnterpriseAdminInvitationInput), graphql_name="input", default=None)),)
),
)
"""Cancels a pending invitation for an administrator to join an
enterprise.
Arguments:
* `input` (`CancelEnterpriseAdminInvitationInput!`): Parameters
for CancelEnterpriseAdminInvitation
"""
cancel_sponsorship = sgqlc.types.Field(
CancelSponsorshipPayload,
graphql_name="cancelSponsorship",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CancelSponsorshipInput), graphql_name="input", default=None)),)
),
)
"""Cancel an active sponsorship.
Arguments:
* `input` (`CancelSponsorshipInput!`): Parameters for
CancelSponsorship
"""
change_user_status = sgqlc.types.Field(
ChangeUserStatusPayload,
graphql_name="changeUserStatus",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ChangeUserStatusInput), graphql_name="input", default=None)),)
),
)
"""Update your status on GitHub.
Arguments:
* `input` (`ChangeUserStatusInput!`): Parameters for
ChangeUserStatus
"""
clear_labels_from_labelable = sgqlc.types.Field(
ClearLabelsFromLabelablePayload,
graphql_name="clearLabelsFromLabelable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ClearLabelsFromLabelableInput), graphql_name="input", default=None)),)
),
)
"""Clears all labels from a labelable object.
Arguments:
* `input` (`ClearLabelsFromLabelableInput!`): Parameters for
ClearLabelsFromLabelable
"""
clear_project_v2_item_field_value = sgqlc.types.Field(
ClearProjectV2ItemFieldValuePayload,
graphql_name="clearProjectV2ItemFieldValue",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ClearProjectV2ItemFieldValueInput), graphql_name="input", default=None)),)
),
)
"""This mutation clears the value of a field for an item in a
Project. Currently only text, number, date, assignees, labels,
single-select, iteration and milestone fields are supported.
Arguments:
* `input` (`ClearProjectV2ItemFieldValueInput!`): Parameters for
ClearProjectV2ItemFieldValue
"""
clone_project = sgqlc.types.Field(
CloneProjectPayload,
graphql_name="cloneProject",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CloneProjectInput), graphql_name="input", default=None)),)
),
)
"""Creates a new project by cloning configuration from an existing
project.
Arguments:
* `input` (`CloneProjectInput!`): Parameters for CloneProject
"""
clone_template_repository = sgqlc.types.Field(
CloneTemplateRepositoryPayload,
graphql_name="cloneTemplateRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CloneTemplateRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Create a new repository with the same files and directory
structure as a template repository.
Arguments:
* `input` (`CloneTemplateRepositoryInput!`): Parameters for
CloneTemplateRepository
"""
close_discussion = sgqlc.types.Field(
CloseDiscussionPayload,
graphql_name="closeDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CloseDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Close a discussion.
Arguments:
* `input` (`CloseDiscussionInput!`): Parameters for
CloseDiscussion
"""
close_issue = sgqlc.types.Field(
CloseIssuePayload,
graphql_name="closeIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(CloseIssueInput), graphql_name="input", default=None)),)),
)
"""Close an issue.
Arguments:
* `input` (`CloseIssueInput!`): Parameters for CloseIssue
"""
close_pull_request = sgqlc.types.Field(
ClosePullRequestPayload,
graphql_name="closePullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ClosePullRequestInput), graphql_name="input", default=None)),)
),
)
"""Close a pull request.
Arguments:
* `input` (`ClosePullRequestInput!`): Parameters for
ClosePullRequest
"""
convert_project_card_note_to_issue = sgqlc.types.Field(
ConvertProjectCardNoteToIssuePayload,
graphql_name="convertProjectCardNoteToIssue",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ConvertProjectCardNoteToIssueInput), graphql_name="input", default=None)),)
),
)
"""Convert a project note card to one associated with a newly created
issue.
Arguments:
* `input` (`ConvertProjectCardNoteToIssueInput!`): Parameters for
ConvertProjectCardNoteToIssue
"""
convert_pull_request_to_draft = sgqlc.types.Field(
ConvertPullRequestToDraftPayload,
graphql_name="convertPullRequestToDraft",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ConvertPullRequestToDraftInput), graphql_name="input", default=None)),)
),
)
"""Converts a pull request to draft
Arguments:
* `input` (`ConvertPullRequestToDraftInput!`): Parameters for
ConvertPullRequestToDraft
"""
copy_project_v2 = sgqlc.types.Field(
CopyProjectV2Payload,
graphql_name="copyProjectV2",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CopyProjectV2Input), graphql_name="input", default=None)),)
),
)
"""Copy a project.
Arguments:
* `input` (`CopyProjectV2Input!`): Parameters for CopyProjectV2
"""
create_attribution_invitation = sgqlc.types.Field(
CreateAttributionInvitationPayload,
graphql_name="createAttributionInvitation",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateAttributionInvitationInput), graphql_name="input", default=None)),)
),
)
"""Invites a user to claim reattributable data
Arguments:
* `input` (`CreateAttributionInvitationInput!`): Parameters for
CreateAttributionInvitation
"""
create_branch_protection_rule = sgqlc.types.Field(
CreateBranchProtectionRulePayload,
graphql_name="createBranchProtectionRule",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateBranchProtectionRuleInput), graphql_name="input", default=None)),)
),
)
"""Create a new branch protection rule
Arguments:
* `input` (`CreateBranchProtectionRuleInput!`): Parameters for
CreateBranchProtectionRule
"""
create_check_run = sgqlc.types.Field(
CreateCheckRunPayload,
graphql_name="createCheckRun",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateCheckRunInput), graphql_name="input", default=None)),)
),
)
"""Create a check run.
Arguments:
* `input` (`CreateCheckRunInput!`): Parameters for CreateCheckRun
"""
create_check_suite = sgqlc.types.Field(
CreateCheckSuitePayload,
graphql_name="createCheckSuite",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateCheckSuiteInput), graphql_name="input", default=None)),)
),
)
"""Create a check suite
Arguments:
* `input` (`CreateCheckSuiteInput!`): Parameters for
CreateCheckSuite
"""
create_commit_on_branch = sgqlc.types.Field(
CreateCommitOnBranchPayload,
graphql_name="createCommitOnBranch",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateCommitOnBranchInput), graphql_name="input", default=None)),)
),
)
"""Appends a commit to the given branch as the authenticated user.
This mutation creates a commit whose parent is the HEAD of the
provided branch and also updates that branch to point to the new
commit. It can be thought of as similar to `git commit`. ###
Locating a Branch Commits are appended to a `branch` of type
`Ref`. This must refer to a git branch (i.e. the fully qualified
path must begin with `refs/heads/`, although including this prefix
is optional. Callers may specify the `branch` to commit to either
by its global node ID or by passing both of
`repositoryNameWithOwner` and `refName`. For more details see the
documentation for `CommittableBranch`. ### Describing Changes
`fileChanges` are specified as a `FilesChanges` object describing
`FileAdditions` and `FileDeletions`. Please see the documentation
for `FileChanges` for more information on how to use this argument
to describe any set of file changes. ### Authorship Similar to
the web commit interface, this mutation does not support
specifying the author or committer of the commit and will not add
support for this in the future. A commit created by a successful
execution of this mutation will be authored by the owner of the
credential which authenticates the API request. The committer
will be identical to that of commits authored using the web
interface. If you need full control over author and committer
information, please use the Git Database REST API instead. ###
Commit Signing Commits made using this mutation are automatically
signed by GitHub if supported and will be marked as verified in
the user interface.
Arguments:
* `input` (`CreateCommitOnBranchInput!`): Parameters for
CreateCommitOnBranch
"""
create_discussion = sgqlc.types.Field(
CreateDiscussionPayload,
graphql_name="createDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Create a discussion.
Arguments:
* `input` (`CreateDiscussionInput!`): Parameters for
CreateDiscussion
"""
create_enterprise_organization = sgqlc.types.Field(
CreateEnterpriseOrganizationPayload,
graphql_name="createEnterpriseOrganization",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateEnterpriseOrganizationInput), graphql_name="input", default=None)),)
),
)
"""Creates an organization as part of an enterprise account.
Arguments:
* `input` (`CreateEnterpriseOrganizationInput!`): Parameters for
CreateEnterpriseOrganization
"""
create_environment = sgqlc.types.Field(
CreateEnvironmentPayload,
graphql_name="createEnvironment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateEnvironmentInput), graphql_name="input", default=None)),)
),
)
"""Creates an environment or simply returns it if already exists.
Arguments:
* `input` (`CreateEnvironmentInput!`): Parameters for
CreateEnvironment
"""
create_ip_allow_list_entry = sgqlc.types.Field(
CreateIpAllowListEntryPayload,
graphql_name="createIpAllowListEntry",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateIpAllowListEntryInput), graphql_name="input", default=None)),)
),
)
"""Creates a new IP allow list entry.
Arguments:
* `input` (`CreateIpAllowListEntryInput!`): Parameters for
CreateIpAllowListEntry
"""
create_issue = sgqlc.types.Field(
CreateIssuePayload,
graphql_name="createIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateIssueInput), graphql_name="input", default=None)),)),
)
"""Creates a new issue.
Arguments:
* `input` (`CreateIssueInput!`): Parameters for CreateIssue
"""
create_linked_branch = sgqlc.types.Field(
CreateLinkedBranchPayload,
graphql_name="createLinkedBranch",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateLinkedBranchInput), graphql_name="input", default=None)),)
),
)
"""Create a branch linked to an issue.
Arguments:
* `input` (`CreateLinkedBranchInput!`): Parameters for
CreateLinkedBranch
"""
create_migration_source = sgqlc.types.Field(
CreateMigrationSourcePayload,
graphql_name="createMigrationSource",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateMigrationSourceInput), graphql_name="input", default=None)),)
),
)
"""Creates a GitHub Enterprise Importer (GEI) migration source.
Arguments:
* `input` (`CreateMigrationSourceInput!`): Parameters for
CreateMigrationSource
"""
create_project = sgqlc.types.Field(
CreateProjectPayload,
graphql_name="createProject",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateProjectInput), graphql_name="input", default=None)),)
),
)
"""Creates a new project.
Arguments:
* `input` (`CreateProjectInput!`): Parameters for CreateProject
"""
create_project_v2 = sgqlc.types.Field(
CreateProjectV2Payload,
graphql_name="createProjectV2",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateProjectV2Input), graphql_name="input", default=None)),)
),
)
"""Creates a new project.
Arguments:
* `input` (`CreateProjectV2Input!`): Parameters for
CreateProjectV2
"""
create_project_v2_field = sgqlc.types.Field(
CreateProjectV2FieldPayload,
graphql_name="createProjectV2Field",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateProjectV2FieldInput), graphql_name="input", default=None)),)
),
)
"""Create a new project field.
Arguments:
* `input` (`CreateProjectV2FieldInput!`): Parameters for
CreateProjectV2Field
"""
create_pull_request = sgqlc.types.Field(
CreatePullRequestPayload,
graphql_name="createPullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreatePullRequestInput), graphql_name="input", default=None)),)
),
)
"""Create a new pull request
Arguments:
* `input` (`CreatePullRequestInput!`): Parameters for
CreatePullRequest
"""
create_ref = sgqlc.types.Field(
CreateRefPayload,
graphql_name="createRef",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateRefInput), graphql_name="input", default=None)),)),
)
"""Create a new Git Ref.
Arguments:
* `input` (`CreateRefInput!`): Parameters for CreateRef
"""
create_repository = sgqlc.types.Field(
CreateRepositoryPayload,
graphql_name="createRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Create a new repository.
Arguments:
* `input` (`CreateRepositoryInput!`): Parameters for
CreateRepository
"""
create_repository_ruleset = sgqlc.types.Field(
CreateRepositoryRulesetPayload,
graphql_name="createRepositoryRuleset",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateRepositoryRulesetInput), graphql_name="input", default=None)),)
),
)
"""Create a repository ruleset
Arguments:
* `input` (`CreateRepositoryRulesetInput!`): Parameters for
CreateRepositoryRuleset
"""
create_sponsors_listing = sgqlc.types.Field(
CreateSponsorsListingPayload,
graphql_name="createSponsorsListing",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateSponsorsListingInput), graphql_name="input", default=None)),)
),
)
"""Create a GitHub Sponsors profile to allow others to sponsor you or
your organization.
Arguments:
* `input` (`CreateSponsorsListingInput!`): Parameters for
CreateSponsorsListing
"""
create_sponsors_tier = sgqlc.types.Field(
CreateSponsorsTierPayload,
graphql_name="createSponsorsTier",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateSponsorsTierInput), graphql_name="input", default=None)),)
),
)
"""Create a new payment tier for your GitHub Sponsors profile.
Arguments:
* `input` (`CreateSponsorsTierInput!`): Parameters for
CreateSponsorsTier
"""
create_sponsorship = sgqlc.types.Field(
CreateSponsorshipPayload,
graphql_name="createSponsorship",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateSponsorshipInput), graphql_name="input", default=None)),)
),
)
"""Start a new sponsorship of a maintainer in GitHub Sponsors, or
reactivate a past sponsorship.
Arguments:
* `input` (`CreateSponsorshipInput!`): Parameters for
CreateSponsorship
"""
create_sponsorships = sgqlc.types.Field(
CreateSponsorshipsPayload,
graphql_name="createSponsorships",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateSponsorshipsInput), graphql_name="input", default=None)),)
),
)
"""Make many one-time sponsorships for different sponsorable users or
organizations at once. Can only sponsor those who have a public
GitHub Sponsors profile.
Arguments:
* `input` (`CreateSponsorshipsInput!`): Parameters for
CreateSponsorships
"""
create_team_discussion = sgqlc.types.Field(
CreateTeamDiscussionPayload,
graphql_name="createTeamDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateTeamDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Creates a new team discussion.
Arguments:
* `input` (`CreateTeamDiscussionInput!`): Parameters for
CreateTeamDiscussion
"""
create_team_discussion_comment = sgqlc.types.Field(
CreateTeamDiscussionCommentPayload,
graphql_name="createTeamDiscussionComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(CreateTeamDiscussionCommentInput), graphql_name="input", default=None)),)
),
)
"""Creates a new team discussion comment.
Arguments:
* `input` (`CreateTeamDiscussionCommentInput!`): Parameters for
CreateTeamDiscussionComment
"""
decline_topic_suggestion = sgqlc.types.Field(
DeclineTopicSuggestionPayload,
graphql_name="declineTopicSuggestion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeclineTopicSuggestionInput), graphql_name="input", default=None)),)
),
)
"""Rejects a suggested topic for the repository.
Arguments:
* `input` (`DeclineTopicSuggestionInput!`): Parameters for
DeclineTopicSuggestion
"""
delete_branch_protection_rule = sgqlc.types.Field(
DeleteBranchProtectionRulePayload,
graphql_name="deleteBranchProtectionRule",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteBranchProtectionRuleInput), graphql_name="input", default=None)),)
),
)
"""Delete a branch protection rule
Arguments:
* `input` (`DeleteBranchProtectionRuleInput!`): Parameters for
DeleteBranchProtectionRule
"""
delete_deployment = sgqlc.types.Field(
DeleteDeploymentPayload,
graphql_name="deleteDeployment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteDeploymentInput), graphql_name="input", default=None)),)
),
)
"""Deletes a deployment.
Arguments:
* `input` (`DeleteDeploymentInput!`): Parameters for
DeleteDeployment
"""
delete_discussion = sgqlc.types.Field(
DeleteDiscussionPayload,
graphql_name="deleteDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Delete a discussion and all of its replies.
Arguments:
* `input` (`DeleteDiscussionInput!`): Parameters for
DeleteDiscussion
"""
delete_discussion_comment = sgqlc.types.Field(
DeleteDiscussionCommentPayload,
graphql_name="deleteDiscussionComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteDiscussionCommentInput), graphql_name="input", default=None)),)
),
)
"""Delete a discussion comment. If it has replies, wipe it instead.
Arguments:
* `input` (`DeleteDiscussionCommentInput!`): Parameters for
DeleteDiscussionComment
"""
delete_environment = sgqlc.types.Field(
DeleteEnvironmentPayload,
graphql_name="deleteEnvironment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteEnvironmentInput), graphql_name="input", default=None)),)
),
)
"""Deletes an environment
Arguments:
* `input` (`DeleteEnvironmentInput!`): Parameters for
DeleteEnvironment
"""
delete_ip_allow_list_entry = sgqlc.types.Field(
DeleteIpAllowListEntryPayload,
graphql_name="deleteIpAllowListEntry",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteIpAllowListEntryInput), graphql_name="input", default=None)),)
),
)
"""Deletes an IP allow list entry.
Arguments:
* `input` (`DeleteIpAllowListEntryInput!`): Parameters for
DeleteIpAllowListEntry
"""
delete_issue = sgqlc.types.Field(
DeleteIssuePayload,
graphql_name="deleteIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteIssueInput), graphql_name="input", default=None)),)),
)
"""Deletes an Issue object.
Arguments:
* `input` (`DeleteIssueInput!`): Parameters for DeleteIssue
"""
delete_issue_comment = sgqlc.types.Field(
DeleteIssueCommentPayload,
graphql_name="deleteIssueComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteIssueCommentInput), graphql_name="input", default=None)),)
),
)
"""Deletes an IssueComment object.
Arguments:
* `input` (`DeleteIssueCommentInput!`): Parameters for
DeleteIssueComment
"""
delete_linked_branch = sgqlc.types.Field(
DeleteLinkedBranchPayload,
graphql_name="deleteLinkedBranch",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteLinkedBranchInput), graphql_name="input", default=None)),)
),
)
"""Unlink a branch from an issue.
Arguments:
* `input` (`DeleteLinkedBranchInput!`): Parameters for
DeleteLinkedBranch
"""
delete_project = sgqlc.types.Field(
DeleteProjectPayload,
graphql_name="deleteProject",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectInput), graphql_name="input", default=None)),)
),
)
"""Deletes a project.
Arguments:
* `input` (`DeleteProjectInput!`): Parameters for DeleteProject
"""
delete_project_card = sgqlc.types.Field(
DeleteProjectCardPayload,
graphql_name="deleteProjectCard",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectCardInput), graphql_name="input", default=None)),)
),
)
"""Deletes a project card.
Arguments:
* `input` (`DeleteProjectCardInput!`): Parameters for
DeleteProjectCard
"""
delete_project_column = sgqlc.types.Field(
DeleteProjectColumnPayload,
graphql_name="deleteProjectColumn",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectColumnInput), graphql_name="input", default=None)),)
),
)
"""Deletes a project column.
Arguments:
* `input` (`DeleteProjectColumnInput!`): Parameters for
DeleteProjectColumn
"""
delete_project_v2 = sgqlc.types.Field(
DeleteProjectV2Payload,
graphql_name="deleteProjectV2",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectV2Input), graphql_name="input", default=None)),)
),
)
"""Delete a project.
Arguments:
* `input` (`DeleteProjectV2Input!`): Parameters for
DeleteProjectV2
"""
delete_project_v2_field = sgqlc.types.Field(
DeleteProjectV2FieldPayload,
graphql_name="deleteProjectV2Field",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectV2FieldInput), graphql_name="input", default=None)),)
),
)
"""Delete a project field.
Arguments:
* `input` (`DeleteProjectV2FieldInput!`): Parameters for
DeleteProjectV2Field
"""
delete_project_v2_item = sgqlc.types.Field(
DeleteProjectV2ItemPayload,
graphql_name="deleteProjectV2Item",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectV2ItemInput), graphql_name="input", default=None)),)
),
)
"""Deletes an item from a Project.
Arguments:
* `input` (`DeleteProjectV2ItemInput!`): Parameters for
DeleteProjectV2Item
"""
delete_project_v2_workflow = sgqlc.types.Field(
DeleteProjectV2WorkflowPayload,
graphql_name="deleteProjectV2Workflow",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteProjectV2WorkflowInput), graphql_name="input", default=None)),)
),
)
"""Deletes a project workflow.
Arguments:
* `input` (`DeleteProjectV2WorkflowInput!`): Parameters for
DeleteProjectV2Workflow
"""
delete_pull_request_review = sgqlc.types.Field(
DeletePullRequestReviewPayload,
graphql_name="deletePullRequestReview",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeletePullRequestReviewInput), graphql_name="input", default=None)),)
),
)
"""Deletes a pull request review.
Arguments:
* `input` (`DeletePullRequestReviewInput!`): Parameters for
DeletePullRequestReview
"""
delete_pull_request_review_comment = sgqlc.types.Field(
DeletePullRequestReviewCommentPayload,
graphql_name="deletePullRequestReviewComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeletePullRequestReviewCommentInput), graphql_name="input", default=None)),)
),
)
"""Deletes a pull request review comment.
Arguments:
* `input` (`DeletePullRequestReviewCommentInput!`): Parameters for
DeletePullRequestReviewComment
"""
delete_ref = sgqlc.types.Field(
DeleteRefPayload,
graphql_name="deleteRef",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteRefInput), graphql_name="input", default=None)),)),
)
"""Delete a Git Ref.
Arguments:
* `input` (`DeleteRefInput!`): Parameters for DeleteRef
"""
delete_repository_ruleset = sgqlc.types.Field(
DeleteRepositoryRulesetPayload,
graphql_name="deleteRepositoryRuleset",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteRepositoryRulesetInput), graphql_name="input", default=None)),)
),
)
"""Delete a repository ruleset
Arguments:
* `input` (`DeleteRepositoryRulesetInput!`): Parameters for
DeleteRepositoryRuleset
"""
delete_team_discussion = sgqlc.types.Field(
DeleteTeamDiscussionPayload,
graphql_name="deleteTeamDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteTeamDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Deletes a team discussion.
Arguments:
* `input` (`DeleteTeamDiscussionInput!`): Parameters for
DeleteTeamDiscussion
"""
delete_team_discussion_comment = sgqlc.types.Field(
DeleteTeamDiscussionCommentPayload,
graphql_name="deleteTeamDiscussionComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteTeamDiscussionCommentInput), graphql_name="input", default=None)),)
),
)
"""Deletes a team discussion comment.
Arguments:
* `input` (`DeleteTeamDiscussionCommentInput!`): Parameters for
DeleteTeamDiscussionComment
"""
delete_verifiable_domain = sgqlc.types.Field(
DeleteVerifiableDomainPayload,
graphql_name="deleteVerifiableDomain",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DeleteVerifiableDomainInput), graphql_name="input", default=None)),)
),
)
"""Deletes a verifiable domain.
Arguments:
* `input` (`DeleteVerifiableDomainInput!`): Parameters for
DeleteVerifiableDomain
"""
dequeue_pull_request = sgqlc.types.Field(
DequeuePullRequestPayload,
graphql_name="dequeuePullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DequeuePullRequestInput), graphql_name="input", default=None)),)
),
)
"""Remove a pull request from the merge queue.
Arguments:
* `input` (`DequeuePullRequestInput!`): Parameters for
DequeuePullRequest
"""
disable_pull_request_auto_merge = sgqlc.types.Field(
DisablePullRequestAutoMergePayload,
graphql_name="disablePullRequestAutoMerge",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DisablePullRequestAutoMergeInput), graphql_name="input", default=None)),)
),
)
"""Disable auto merge on the given pull request
Arguments:
* `input` (`DisablePullRequestAutoMergeInput!`): Parameters for
DisablePullRequestAutoMerge
"""
dismiss_pull_request_review = sgqlc.types.Field(
DismissPullRequestReviewPayload,
graphql_name="dismissPullRequestReview",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(DismissPullRequestReviewInput), graphql_name="input", default=None)),)
),
)
"""Dismisses an approved or rejected pull request review.
Arguments:
* `input` (`DismissPullRequestReviewInput!`): Parameters for
DismissPullRequestReview
"""
dismiss_repository_vulnerability_alert = sgqlc.types.Field(
DismissRepositoryVulnerabilityAlertPayload,
graphql_name="dismissRepositoryVulnerabilityAlert",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(sgqlc.types.non_null(DismissRepositoryVulnerabilityAlertInput), graphql_name="input", default=None),
),
)
),
)
"""Dismisses the Dependabot alert.
Arguments:
* `input` (`DismissRepositoryVulnerabilityAlertInput!`):
Parameters for DismissRepositoryVulnerabilityAlert
"""
enable_pull_request_auto_merge = sgqlc.types.Field(
EnablePullRequestAutoMergePayload,
graphql_name="enablePullRequestAutoMerge",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(EnablePullRequestAutoMergeInput), graphql_name="input", default=None)),)
),
)
"""Enable the default auto-merge on a pull request.
Arguments:
* `input` (`EnablePullRequestAutoMergeInput!`): Parameters for
EnablePullRequestAutoMerge
"""
enqueue_pull_request = sgqlc.types.Field(
EnqueuePullRequestPayload,
graphql_name="enqueuePullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(EnqueuePullRequestInput), graphql_name="input", default=None)),)
),
)
"""Add a pull request to the merge queue.
Arguments:
* `input` (`EnqueuePullRequestInput!`): Parameters for
EnqueuePullRequest
"""
follow_organization = sgqlc.types.Field(
FollowOrganizationPayload,
graphql_name="followOrganization",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(FollowOrganizationInput), graphql_name="input", default=None)),)
),
)
"""Follow an organization.
Arguments:
* `input` (`FollowOrganizationInput!`): Parameters for
FollowOrganization
"""
follow_user = sgqlc.types.Field(
FollowUserPayload,
graphql_name="followUser",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(FollowUserInput), graphql_name="input", default=None)),)),
)
"""Follow a user.
Arguments:
* `input` (`FollowUserInput!`): Parameters for FollowUser
"""
grant_enterprise_organizations_migrator_role = sgqlc.types.Field(
GrantEnterpriseOrganizationsMigratorRolePayload,
graphql_name="grantEnterpriseOrganizationsMigratorRole",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(GrantEnterpriseOrganizationsMigratorRoleInput), graphql_name="input", default=None
),
),
)
),
)
"""Grant the migrator role to a user for all organizations under an
enterprise account.
Arguments:
* `input` (`GrantEnterpriseOrganizationsMigratorRoleInput!`):
Parameters for GrantEnterpriseOrganizationsMigratorRole
"""
grant_migrator_role = sgqlc.types.Field(
GrantMigratorRolePayload,
graphql_name="grantMigratorRole",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(GrantMigratorRoleInput), graphql_name="input", default=None)),)
),
)
"""Grant the migrator role to a user or a team.
Arguments:
* `input` (`GrantMigratorRoleInput!`): Parameters for
GrantMigratorRole
"""
invite_enterprise_admin = sgqlc.types.Field(
InviteEnterpriseAdminPayload,
graphql_name="inviteEnterpriseAdmin",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(InviteEnterpriseAdminInput), graphql_name="input", default=None)),)
),
)
"""Invite someone to become an administrator of the enterprise.
Arguments:
* `input` (`InviteEnterpriseAdminInput!`): Parameters for
InviteEnterpriseAdmin
"""
link_project_v2_to_repository = sgqlc.types.Field(
LinkProjectV2ToRepositoryPayload,
graphql_name="linkProjectV2ToRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(LinkProjectV2ToRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Links a project to a repository.
Arguments:
* `input` (`LinkProjectV2ToRepositoryInput!`): Parameters for
LinkProjectV2ToRepository
"""
link_project_v2_to_team = sgqlc.types.Field(
LinkProjectV2ToTeamPayload,
graphql_name="linkProjectV2ToTeam",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(LinkProjectV2ToTeamInput), graphql_name="input", default=None)),)
),
)
"""Links a project to a team.
Arguments:
* `input` (`LinkProjectV2ToTeamInput!`): Parameters for
LinkProjectV2ToTeam
"""
link_repository_to_project = sgqlc.types.Field(
LinkRepositoryToProjectPayload,
graphql_name="linkRepositoryToProject",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(LinkRepositoryToProjectInput), graphql_name="input", default=None)),)
),
)
"""Creates a repository link for a project.
Arguments:
* `input` (`LinkRepositoryToProjectInput!`): Parameters for
LinkRepositoryToProject
"""
lock_lockable = sgqlc.types.Field(
LockLockablePayload,
graphql_name="lockLockable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(LockLockableInput), graphql_name="input", default=None)),)
),
)
"""Lock a lockable object
Arguments:
* `input` (`LockLockableInput!`): Parameters for LockLockable
"""
mark_discussion_comment_as_answer = sgqlc.types.Field(
MarkDiscussionCommentAsAnswerPayload,
graphql_name="markDiscussionCommentAsAnswer",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MarkDiscussionCommentAsAnswerInput), graphql_name="input", default=None)),)
),
)
"""Mark a discussion comment as the chosen answer for discussions in
an answerable category.
Arguments:
* `input` (`MarkDiscussionCommentAsAnswerInput!`): Parameters for
MarkDiscussionCommentAsAnswer
"""
mark_file_as_viewed = sgqlc.types.Field(
MarkFileAsViewedPayload,
graphql_name="markFileAsViewed",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MarkFileAsViewedInput), graphql_name="input", default=None)),)
),
)
"""Mark a pull request file as viewed
Arguments:
* `input` (`MarkFileAsViewedInput!`): Parameters for
MarkFileAsViewed
"""
mark_project_v2_as_template = sgqlc.types.Field(
MarkProjectV2AsTemplatePayload,
graphql_name="markProjectV2AsTemplate",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MarkProjectV2AsTemplateInput), graphql_name="input", default=None)),)
),
)
"""Mark a project as a template. Note that only projects which are
owned by an Organization can be marked as a template.
Arguments:
* `input` (`MarkProjectV2AsTemplateInput!`): Parameters for
MarkProjectV2AsTemplate
"""
mark_pull_request_ready_for_review = sgqlc.types.Field(
MarkPullRequestReadyForReviewPayload,
graphql_name="markPullRequestReadyForReview",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MarkPullRequestReadyForReviewInput), graphql_name="input", default=None)),)
),
)
"""Marks a pull request ready for review.
Arguments:
* `input` (`MarkPullRequestReadyForReviewInput!`): Parameters for
MarkPullRequestReadyForReview
"""
merge_branch = sgqlc.types.Field(
MergeBranchPayload,
graphql_name="mergeBranch",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(MergeBranchInput), graphql_name="input", default=None)),)),
)
"""Merge a head into a branch.
Arguments:
* `input` (`MergeBranchInput!`): Parameters for MergeBranch
"""
merge_pull_request = sgqlc.types.Field(
MergePullRequestPayload,
graphql_name="mergePullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MergePullRequestInput), graphql_name="input", default=None)),)
),
)
"""Merge a pull request.
Arguments:
* `input` (`MergePullRequestInput!`): Parameters for
MergePullRequest
"""
minimize_comment = sgqlc.types.Field(
MinimizeCommentPayload,
graphql_name="minimizeComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MinimizeCommentInput), graphql_name="input", default=None)),)
),
)
"""Minimizes a comment on an Issue, Commit, Pull Request, or Gist
Arguments:
* `input` (`MinimizeCommentInput!`): Parameters for
MinimizeComment
"""
move_project_card = sgqlc.types.Field(
MoveProjectCardPayload,
graphql_name="moveProjectCard",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MoveProjectCardInput), graphql_name="input", default=None)),)
),
)
"""Moves a project card to another place.
Arguments:
* `input` (`MoveProjectCardInput!`): Parameters for
MoveProjectCard
"""
move_project_column = sgqlc.types.Field(
MoveProjectColumnPayload,
graphql_name="moveProjectColumn",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(MoveProjectColumnInput), graphql_name="input", default=None)),)
),
)
"""Moves a project column to another place.
Arguments:
* `input` (`MoveProjectColumnInput!`): Parameters for
MoveProjectColumn
"""
pin_issue = sgqlc.types.Field(
"PinIssuePayload",
graphql_name="pinIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(PinIssueInput), graphql_name="input", default=None)),)),
)
"""Pin an issue to a repository
Arguments:
* `input` (`PinIssueInput!`): Parameters for PinIssue
"""
publish_sponsors_tier = sgqlc.types.Field(
"PublishSponsorsTierPayload",
graphql_name="publishSponsorsTier",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(PublishSponsorsTierInput), graphql_name="input", default=None)),)
),
)
"""Publish an existing sponsorship tier that is currently still a
draft to a GitHub Sponsors profile.
Arguments:
* `input` (`PublishSponsorsTierInput!`): Parameters for
PublishSponsorsTier
"""
regenerate_enterprise_identity_provider_recovery_codes = sgqlc.types.Field(
"RegenerateEnterpriseIdentityProviderRecoveryCodesPayload",
graphql_name="regenerateEnterpriseIdentityProviderRecoveryCodes",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(RegenerateEnterpriseIdentityProviderRecoveryCodesInput), graphql_name="input", default=None
),
),
)
),
)
"""Regenerates the identity provider recovery codes for an enterprise
Arguments:
* `input`
(`RegenerateEnterpriseIdentityProviderRecoveryCodesInput!`):
Parameters for RegenerateEnterpriseIdentityProviderRecoveryCodes
"""
regenerate_verifiable_domain_token = sgqlc.types.Field(
"RegenerateVerifiableDomainTokenPayload",
graphql_name="regenerateVerifiableDomainToken",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RegenerateVerifiableDomainTokenInput), graphql_name="input", default=None)),)
),
)
"""Regenerates a verifiable domain's verification token.
Arguments:
* `input` (`RegenerateVerifiableDomainTokenInput!`): Parameters
for RegenerateVerifiableDomainToken
"""
reject_deployments = sgqlc.types.Field(
"RejectDeploymentsPayload",
graphql_name="rejectDeployments",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RejectDeploymentsInput), graphql_name="input", default=None)),)
),
)
"""Reject all pending deployments under one or more environments
Arguments:
* `input` (`RejectDeploymentsInput!`): Parameters for
RejectDeployments
"""
remove_assignees_from_assignable = sgqlc.types.Field(
"RemoveAssigneesFromAssignablePayload",
graphql_name="removeAssigneesFromAssignable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveAssigneesFromAssignableInput), graphql_name="input", default=None)),)
),
)
"""Removes assignees from an assignable object.
Arguments:
* `input` (`RemoveAssigneesFromAssignableInput!`): Parameters for
RemoveAssigneesFromAssignable
"""
remove_enterprise_admin = sgqlc.types.Field(
"RemoveEnterpriseAdminPayload",
graphql_name="removeEnterpriseAdmin",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveEnterpriseAdminInput), graphql_name="input", default=None)),)
),
)
"""Removes an administrator from the enterprise.
Arguments:
* `input` (`RemoveEnterpriseAdminInput!`): Parameters for
RemoveEnterpriseAdmin
"""
remove_enterprise_identity_provider = sgqlc.types.Field(
"RemoveEnterpriseIdentityProviderPayload",
graphql_name="removeEnterpriseIdentityProvider",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveEnterpriseIdentityProviderInput), graphql_name="input", default=None)),)
),
)
"""Removes the identity provider from an enterprise
Arguments:
* `input` (`RemoveEnterpriseIdentityProviderInput!`): Parameters
for RemoveEnterpriseIdentityProvider
"""
remove_enterprise_member = sgqlc.types.Field(
"RemoveEnterpriseMemberPayload",
graphql_name="removeEnterpriseMember",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveEnterpriseMemberInput), graphql_name="input", default=None)),)
),
)
"""Removes a user from all organizations within the enterprise
Arguments:
* `input` (`RemoveEnterpriseMemberInput!`): Parameters for
RemoveEnterpriseMember
"""
remove_enterprise_organization = sgqlc.types.Field(
"RemoveEnterpriseOrganizationPayload",
graphql_name="removeEnterpriseOrganization",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveEnterpriseOrganizationInput), graphql_name="input", default=None)),)
),
)
"""Removes an organization from the enterprise
Arguments:
* `input` (`RemoveEnterpriseOrganizationInput!`): Parameters for
RemoveEnterpriseOrganization
"""
remove_enterprise_support_entitlement = sgqlc.types.Field(
"RemoveEnterpriseSupportEntitlementPayload",
graphql_name="removeEnterpriseSupportEntitlement",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveEnterpriseSupportEntitlementInput), graphql_name="input", default=None)),)
),
)
"""Removes a support entitlement from an enterprise member.
Arguments:
* `input` (`RemoveEnterpriseSupportEntitlementInput!`): Parameters
for RemoveEnterpriseSupportEntitlement
"""
remove_labels_from_labelable = sgqlc.types.Field(
"RemoveLabelsFromLabelablePayload",
graphql_name="removeLabelsFromLabelable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveLabelsFromLabelableInput), graphql_name="input", default=None)),)
),
)
"""Removes labels from a Labelable object.
Arguments:
* `input` (`RemoveLabelsFromLabelableInput!`): Parameters for
RemoveLabelsFromLabelable
"""
remove_outside_collaborator = sgqlc.types.Field(
"RemoveOutsideCollaboratorPayload",
graphql_name="removeOutsideCollaborator",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveOutsideCollaboratorInput), graphql_name="input", default=None)),)
),
)
"""Removes outside collaborator from all repositories in an
organization.
Arguments:
* `input` (`RemoveOutsideCollaboratorInput!`): Parameters for
RemoveOutsideCollaborator
"""
remove_reaction = sgqlc.types.Field(
"RemoveReactionPayload",
graphql_name="removeReaction",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveReactionInput), graphql_name="input", default=None)),)
),
)
"""Removes a reaction from a subject.
Arguments:
* `input` (`RemoveReactionInput!`): Parameters for RemoveReaction
"""
remove_star = sgqlc.types.Field(
"RemoveStarPayload",
graphql_name="removeStar",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveStarInput), graphql_name="input", default=None)),)),
)
"""Removes a star from a Starrable.
Arguments:
* `input` (`RemoveStarInput!`): Parameters for RemoveStar
"""
remove_upvote = sgqlc.types.Field(
"RemoveUpvotePayload",
graphql_name="removeUpvote",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RemoveUpvoteInput), graphql_name="input", default=None)),)
),
)
"""Remove an upvote to a discussion or discussion comment.
Arguments:
* `input` (`RemoveUpvoteInput!`): Parameters for RemoveUpvote
"""
reopen_discussion = sgqlc.types.Field(
"ReopenDiscussionPayload",
graphql_name="reopenDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ReopenDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Reopen a discussion.
Arguments:
* `input` (`ReopenDiscussionInput!`): Parameters for
ReopenDiscussion
"""
reopen_issue = sgqlc.types.Field(
"ReopenIssuePayload",
graphql_name="reopenIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(ReopenIssueInput), graphql_name="input", default=None)),)),
)
"""Reopen a issue.
Arguments:
* `input` (`ReopenIssueInput!`): Parameters for ReopenIssue
"""
reopen_pull_request = sgqlc.types.Field(
"ReopenPullRequestPayload",
graphql_name="reopenPullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ReopenPullRequestInput), graphql_name="input", default=None)),)
),
)
"""Reopen a pull request.
Arguments:
* `input` (`ReopenPullRequestInput!`): Parameters for
ReopenPullRequest
"""
request_reviews = sgqlc.types.Field(
"RequestReviewsPayload",
graphql_name="requestReviews",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RequestReviewsInput), graphql_name="input", default=None)),)
),
)
"""Set review requests on a pull request.
Arguments:
* `input` (`RequestReviewsInput!`): Parameters for RequestReviews
"""
rerequest_check_suite = sgqlc.types.Field(
"RerequestCheckSuitePayload",
graphql_name="rerequestCheckSuite",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RerequestCheckSuiteInput), graphql_name="input", default=None)),)
),
)
"""Rerequests an existing check suite.
Arguments:
* `input` (`RerequestCheckSuiteInput!`): Parameters for
RerequestCheckSuite
"""
resolve_review_thread = sgqlc.types.Field(
"ResolveReviewThreadPayload",
graphql_name="resolveReviewThread",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(ResolveReviewThreadInput), graphql_name="input", default=None)),)
),
)
"""Marks a review thread as resolved.
Arguments:
* `input` (`ResolveReviewThreadInput!`): Parameters for
ResolveReviewThread
"""
retire_sponsors_tier = sgqlc.types.Field(
"RetireSponsorsTierPayload",
graphql_name="retireSponsorsTier",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RetireSponsorsTierInput), graphql_name="input", default=None)),)
),
)
"""Retire a published payment tier from your GitHub Sponsors profile
so it cannot be used to start new sponsorships.
Arguments:
* `input` (`RetireSponsorsTierInput!`): Parameters for
RetireSponsorsTier
"""
revert_pull_request = sgqlc.types.Field(
"RevertPullRequestPayload",
graphql_name="revertPullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RevertPullRequestInput), graphql_name="input", default=None)),)
),
)
"""Create a pull request that reverts the changes from a merged pull
request.
Arguments:
* `input` (`RevertPullRequestInput!`): Parameters for
RevertPullRequest
"""
revoke_enterprise_organizations_migrator_role = sgqlc.types.Field(
"RevokeEnterpriseOrganizationsMigratorRolePayload",
graphql_name="revokeEnterpriseOrganizationsMigratorRole",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(RevokeEnterpriseOrganizationsMigratorRoleInput), graphql_name="input", default=None
),
),
)
),
)
"""Revoke the migrator role to a user for all organizations under an
enterprise account.
Arguments:
* `input` (`RevokeEnterpriseOrganizationsMigratorRoleInput!`):
Parameters for RevokeEnterpriseOrganizationsMigratorRole
"""
revoke_migrator_role = sgqlc.types.Field(
"RevokeMigratorRolePayload",
graphql_name="revokeMigratorRole",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(RevokeMigratorRoleInput), graphql_name="input", default=None)),)
),
)
"""Revoke the migrator role from a user or a team.
Arguments:
* `input` (`RevokeMigratorRoleInput!`): Parameters for
RevokeMigratorRole
"""
set_enterprise_identity_provider = sgqlc.types.Field(
"SetEnterpriseIdentityProviderPayload",
graphql_name="setEnterpriseIdentityProvider",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(SetEnterpriseIdentityProviderInput), graphql_name="input", default=None)),)
),
)
"""Creates or updates the identity provider for an enterprise.
Arguments:
* `input` (`SetEnterpriseIdentityProviderInput!`): Parameters for
SetEnterpriseIdentityProvider
"""
set_organization_interaction_limit = sgqlc.types.Field(
"SetOrganizationInteractionLimitPayload",
graphql_name="setOrganizationInteractionLimit",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(SetOrganizationInteractionLimitInput), graphql_name="input", default=None)),)
),
)
"""Set an organization level interaction limit for an organization's
public repositories.
Arguments:
* `input` (`SetOrganizationInteractionLimitInput!`): Parameters
for SetOrganizationInteractionLimit
"""
set_repository_interaction_limit = sgqlc.types.Field(
"SetRepositoryInteractionLimitPayload",
graphql_name="setRepositoryInteractionLimit",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(SetRepositoryInteractionLimitInput), graphql_name="input", default=None)),)
),
)
"""Sets an interaction limit setting for a repository.
Arguments:
* `input` (`SetRepositoryInteractionLimitInput!`): Parameters for
SetRepositoryInteractionLimit
"""
set_user_interaction_limit = sgqlc.types.Field(
"SetUserInteractionLimitPayload",
graphql_name="setUserInteractionLimit",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(SetUserInteractionLimitInput), graphql_name="input", default=None)),)
),
)
"""Set a user level interaction limit for an user's public
repositories.
Arguments:
* `input` (`SetUserInteractionLimitInput!`): Parameters for
SetUserInteractionLimit
"""
start_organization_migration = sgqlc.types.Field(
"StartOrganizationMigrationPayload",
graphql_name="startOrganizationMigration",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(StartOrganizationMigrationInput), graphql_name="input", default=None)),)
),
)
"""Starts a GitHub Enterprise Importer organization migration.
Arguments:
* `input` (`StartOrganizationMigrationInput!`): Parameters for
StartOrganizationMigration
"""
start_repository_migration = sgqlc.types.Field(
"StartRepositoryMigrationPayload",
graphql_name="startRepositoryMigration",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(StartRepositoryMigrationInput), graphql_name="input", default=None)),)
),
)
"""Starts a GitHub Enterprise Importer (GEI) repository migration.
Arguments:
* `input` (`StartRepositoryMigrationInput!`): Parameters for
StartRepositoryMigration
"""
submit_pull_request_review = sgqlc.types.Field(
"SubmitPullRequestReviewPayload",
graphql_name="submitPullRequestReview",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(SubmitPullRequestReviewInput), graphql_name="input", default=None)),)
),
)
"""Submits a pending pull request review.
Arguments:
* `input` (`SubmitPullRequestReviewInput!`): Parameters for
SubmitPullRequestReview
"""
transfer_enterprise_organization = sgqlc.types.Field(
"TransferEnterpriseOrganizationPayload",
graphql_name="transferEnterpriseOrganization",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(TransferEnterpriseOrganizationInput), graphql_name="input", default=None)),)
),
)
"""Transfer an organization from one enterprise to another
enterprise.
Arguments:
* `input` (`TransferEnterpriseOrganizationInput!`): Parameters for
TransferEnterpriseOrganization
"""
transfer_issue = sgqlc.types.Field(
"TransferIssuePayload",
graphql_name="transferIssue",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(TransferIssueInput), graphql_name="input", default=None)),)
),
)
"""Transfer an issue to a different repository
Arguments:
* `input` (`TransferIssueInput!`): Parameters for TransferIssue
"""
unarchive_project_v2_item = sgqlc.types.Field(
"UnarchiveProjectV2ItemPayload",
graphql_name="unarchiveProjectV2Item",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnarchiveProjectV2ItemInput), graphql_name="input", default=None)),)
),
)
"""Unarchives a ProjectV2Item
Arguments:
* `input` (`UnarchiveProjectV2ItemInput!`): Parameters for
UnarchiveProjectV2Item
"""
unarchive_repository = sgqlc.types.Field(
"UnarchiveRepositoryPayload",
graphql_name="unarchiveRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnarchiveRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Unarchives a repository.
Arguments:
* `input` (`UnarchiveRepositoryInput!`): Parameters for
UnarchiveRepository
"""
unfollow_organization = sgqlc.types.Field(
"UnfollowOrganizationPayload",
graphql_name="unfollowOrganization",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnfollowOrganizationInput), graphql_name="input", default=None)),)
),
)
"""Unfollow an organization.
Arguments:
* `input` (`UnfollowOrganizationInput!`): Parameters for
UnfollowOrganization
"""
unfollow_user = sgqlc.types.Field(
"UnfollowUserPayload",
graphql_name="unfollowUser",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnfollowUserInput), graphql_name="input", default=None)),)
),
)
"""Unfollow a user.
Arguments:
* `input` (`UnfollowUserInput!`): Parameters for UnfollowUser
"""
unlink_project_v2_from_repository = sgqlc.types.Field(
"UnlinkProjectV2FromRepositoryPayload",
graphql_name="unlinkProjectV2FromRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnlinkProjectV2FromRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Unlinks a project from a repository.
Arguments:
* `input` (`UnlinkProjectV2FromRepositoryInput!`): Parameters for
UnlinkProjectV2FromRepository
"""
unlink_project_v2_from_team = sgqlc.types.Field(
"UnlinkProjectV2FromTeamPayload",
graphql_name="unlinkProjectV2FromTeam",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnlinkProjectV2FromTeamInput), graphql_name="input", default=None)),)
),
)
"""Unlinks a project to a team.
Arguments:
* `input` (`UnlinkProjectV2FromTeamInput!`): Parameters for
UnlinkProjectV2FromTeam
"""
unlink_repository_from_project = sgqlc.types.Field(
"UnlinkRepositoryFromProjectPayload",
graphql_name="unlinkRepositoryFromProject",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnlinkRepositoryFromProjectInput), graphql_name="input", default=None)),)
),
)
"""Deletes a repository link from a project.
Arguments:
* `input` (`UnlinkRepositoryFromProjectInput!`): Parameters for
UnlinkRepositoryFromProject
"""
unlock_lockable = sgqlc.types.Field(
"UnlockLockablePayload",
graphql_name="unlockLockable",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnlockLockableInput), graphql_name="input", default=None)),)
),
)
"""Unlock a lockable object
Arguments:
* `input` (`UnlockLockableInput!`): Parameters for UnlockLockable
"""
unmark_discussion_comment_as_answer = sgqlc.types.Field(
"UnmarkDiscussionCommentAsAnswerPayload",
graphql_name="unmarkDiscussionCommentAsAnswer",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnmarkDiscussionCommentAsAnswerInput), graphql_name="input", default=None)),)
),
)
"""Unmark a discussion comment as the chosen answer for discussions
in an answerable category.
Arguments:
* `input` (`UnmarkDiscussionCommentAsAnswerInput!`): Parameters
for UnmarkDiscussionCommentAsAnswer
"""
unmark_file_as_viewed = sgqlc.types.Field(
"UnmarkFileAsViewedPayload",
graphql_name="unmarkFileAsViewed",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnmarkFileAsViewedInput), graphql_name="input", default=None)),)
),
)
"""Unmark a pull request file as viewed
Arguments:
* `input` (`UnmarkFileAsViewedInput!`): Parameters for
UnmarkFileAsViewed
"""
unmark_issue_as_duplicate = sgqlc.types.Field(
"UnmarkIssueAsDuplicatePayload",
graphql_name="unmarkIssueAsDuplicate",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnmarkIssueAsDuplicateInput), graphql_name="input", default=None)),)
),
)
"""Unmark an issue as a duplicate of another issue.
Arguments:
* `input` (`UnmarkIssueAsDuplicateInput!`): Parameters for
UnmarkIssueAsDuplicate
"""
unmark_project_v2_as_template = sgqlc.types.Field(
"UnmarkProjectV2AsTemplatePayload",
graphql_name="unmarkProjectV2AsTemplate",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnmarkProjectV2AsTemplateInput), graphql_name="input", default=None)),)
),
)
"""Unmark a project as a template.
Arguments:
* `input` (`UnmarkProjectV2AsTemplateInput!`): Parameters for
UnmarkProjectV2AsTemplate
"""
unminimize_comment = sgqlc.types.Field(
"UnminimizeCommentPayload",
graphql_name="unminimizeComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnminimizeCommentInput), graphql_name="input", default=None)),)
),
)
"""Unminimizes a comment on an Issue, Commit, Pull Request, or Gist
Arguments:
* `input` (`UnminimizeCommentInput!`): Parameters for
UnminimizeComment
"""
unpin_issue = sgqlc.types.Field(
"UnpinIssuePayload",
graphql_name="unpinIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(UnpinIssueInput), graphql_name="input", default=None)),)),
)
"""Unpin a pinned issue from a repository
Arguments:
* `input` (`UnpinIssueInput!`): Parameters for UnpinIssue
"""
unresolve_review_thread = sgqlc.types.Field(
"UnresolveReviewThreadPayload",
graphql_name="unresolveReviewThread",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UnresolveReviewThreadInput), graphql_name="input", default=None)),)
),
)
"""Marks a review thread as unresolved.
Arguments:
* `input` (`UnresolveReviewThreadInput!`): Parameters for
UnresolveReviewThread
"""
update_branch_protection_rule = sgqlc.types.Field(
"UpdateBranchProtectionRulePayload",
graphql_name="updateBranchProtectionRule",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateBranchProtectionRuleInput), graphql_name="input", default=None)),)
),
)
"""Update a branch protection rule
Arguments:
* `input` (`UpdateBranchProtectionRuleInput!`): Parameters for
UpdateBranchProtectionRule
"""
update_check_run = sgqlc.types.Field(
"UpdateCheckRunPayload",
graphql_name="updateCheckRun",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateCheckRunInput), graphql_name="input", default=None)),)
),
)
"""Update a check run
Arguments:
* `input` (`UpdateCheckRunInput!`): Parameters for UpdateCheckRun
"""
update_check_suite_preferences = sgqlc.types.Field(
"UpdateCheckSuitePreferencesPayload",
graphql_name="updateCheckSuitePreferences",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateCheckSuitePreferencesInput), graphql_name="input", default=None)),)
),
)
"""Modifies the settings of an existing check suite
Arguments:
* `input` (`UpdateCheckSuitePreferencesInput!`): Parameters for
UpdateCheckSuitePreferences
"""
update_discussion = sgqlc.types.Field(
"UpdateDiscussionPayload",
graphql_name="updateDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Update a discussion
Arguments:
* `input` (`UpdateDiscussionInput!`): Parameters for
UpdateDiscussion
"""
update_discussion_comment = sgqlc.types.Field(
"UpdateDiscussionCommentPayload",
graphql_name="updateDiscussionComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateDiscussionCommentInput), graphql_name="input", default=None)),)
),
)
"""Update the contents of a comment on a Discussion
Arguments:
* `input` (`UpdateDiscussionCommentInput!`): Parameters for
UpdateDiscussionComment
"""
update_enterprise_administrator_role = sgqlc.types.Field(
"UpdateEnterpriseAdministratorRolePayload",
graphql_name="updateEnterpriseAdministratorRole",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateEnterpriseAdministratorRoleInput), graphql_name="input", default=None)),)
),
)
"""Updates the role of an enterprise administrator.
Arguments:
* `input` (`UpdateEnterpriseAdministratorRoleInput!`): Parameters
for UpdateEnterpriseAdministratorRole
"""
update_enterprise_allow_private_repository_forking_setting = sgqlc.types.Field(
"UpdateEnterpriseAllowPrivateRepositoryForkingSettingPayload",
graphql_name="updateEnterpriseAllowPrivateRepositoryForkingSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseAllowPrivateRepositoryForkingSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether private repository forks are enabled for an
enterprise.
Arguments:
* `input`
(`UpdateEnterpriseAllowPrivateRepositoryForkingSettingInput!`):
Parameters for
UpdateEnterpriseAllowPrivateRepositoryForkingSetting
"""
update_enterprise_default_repository_permission_setting = sgqlc.types.Field(
"UpdateEnterpriseDefaultRepositoryPermissionSettingPayload",
graphql_name="updateEnterpriseDefaultRepositoryPermissionSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseDefaultRepositoryPermissionSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets the base repository permission for organizations in an
enterprise.
Arguments:
* `input`
(`UpdateEnterpriseDefaultRepositoryPermissionSettingInput!`):
Parameters for
UpdateEnterpriseDefaultRepositoryPermissionSetting
"""
update_enterprise_members_can_change_repository_visibility_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanChangeRepositoryVisibilitySettingPayload",
graphql_name="updateEnterpriseMembersCanChangeRepositoryVisibilitySetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanChangeRepositoryVisibilitySettingInput),
graphql_name="input",
default=None,
),
),
)
),
)
"""Sets whether organization members with admin permissions on a
repository can change repository visibility.
Arguments:
* `input` (`UpdateEnterpriseMembersCanChangeRepositoryVisibilitySe
ttingInput!`): Parameters for
UpdateEnterpriseMembersCanChangeRepositoryVisibilitySetting
"""
update_enterprise_members_can_create_repositories_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanCreateRepositoriesSettingPayload",
graphql_name="updateEnterpriseMembersCanCreateRepositoriesSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanCreateRepositoriesSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets the members can create repositories setting for an
enterprise.
Arguments:
* `input`
(`UpdateEnterpriseMembersCanCreateRepositoriesSettingInput!`):
Parameters for
UpdateEnterpriseMembersCanCreateRepositoriesSetting
"""
update_enterprise_members_can_delete_issues_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanDeleteIssuesSettingPayload",
graphql_name="updateEnterpriseMembersCanDeleteIssuesSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanDeleteIssuesSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets the members can delete issues setting for an enterprise.
Arguments:
* `input` (`UpdateEnterpriseMembersCanDeleteIssuesSettingInput!`):
Parameters for UpdateEnterpriseMembersCanDeleteIssuesSetting
"""
update_enterprise_members_can_delete_repositories_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanDeleteRepositoriesSettingPayload",
graphql_name="updateEnterpriseMembersCanDeleteRepositoriesSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanDeleteRepositoriesSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets the members can delete repositories setting for an
enterprise.
Arguments:
* `input`
(`UpdateEnterpriseMembersCanDeleteRepositoriesSettingInput!`):
Parameters for
UpdateEnterpriseMembersCanDeleteRepositoriesSetting
"""
update_enterprise_members_can_invite_collaborators_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanInviteCollaboratorsSettingPayload",
graphql_name="updateEnterpriseMembersCanInviteCollaboratorsSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanInviteCollaboratorsSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether members can invite collaborators are enabled for an
enterprise.
Arguments:
* `input`
(`UpdateEnterpriseMembersCanInviteCollaboratorsSettingInput!`):
Parameters for
UpdateEnterpriseMembersCanInviteCollaboratorsSetting
"""
update_enterprise_members_can_make_purchases_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanMakePurchasesSettingPayload",
graphql_name="updateEnterpriseMembersCanMakePurchasesSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanMakePurchasesSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether or not an organization admin can make purchases.
Arguments:
* `input`
(`UpdateEnterpriseMembersCanMakePurchasesSettingInput!`):
Parameters for UpdateEnterpriseMembersCanMakePurchasesSetting
"""
update_enterprise_members_can_update_protected_branches_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanUpdateProtectedBranchesSettingPayload",
graphql_name="updateEnterpriseMembersCanUpdateProtectedBranchesSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanUpdateProtectedBranchesSettingInput),
graphql_name="input",
default=None,
),
),
)
),
)
"""Sets the members can update protected branches setting for an
enterprise.
Arguments:
* `input` (`UpdateEnterpriseMembersCanUpdateProtectedBranchesSetti
ngInput!`): Parameters for
UpdateEnterpriseMembersCanUpdateProtectedBranchesSetting
"""
update_enterprise_members_can_view_dependency_insights_setting = sgqlc.types.Field(
"UpdateEnterpriseMembersCanViewDependencyInsightsSettingPayload",
graphql_name="updateEnterpriseMembersCanViewDependencyInsightsSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseMembersCanViewDependencyInsightsSettingInput),
graphql_name="input",
default=None,
),
),
)
),
)
"""Sets the members can view dependency insights for an enterprise.
Arguments:
* `input` (`UpdateEnterpriseMembersCanViewDependencyInsightsSettin
gInput!`): Parameters for
UpdateEnterpriseMembersCanViewDependencyInsightsSetting
"""
update_enterprise_organization_projects_setting = sgqlc.types.Field(
"UpdateEnterpriseOrganizationProjectsSettingPayload",
graphql_name="updateEnterpriseOrganizationProjectsSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseOrganizationProjectsSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether organization projects are enabled for an enterprise.
Arguments:
* `input` (`UpdateEnterpriseOrganizationProjectsSettingInput!`):
Parameters for UpdateEnterpriseOrganizationProjectsSetting
"""
update_enterprise_owner_organization_role = sgqlc.types.Field(
"UpdateEnterpriseOwnerOrganizationRolePayload",
graphql_name="updateEnterpriseOwnerOrganizationRole",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(sgqlc.types.non_null(UpdateEnterpriseOwnerOrganizationRoleInput), graphql_name="input", default=None),
),
)
),
)
"""Updates the role of an enterprise owner with an organization.
Arguments:
* `input` (`UpdateEnterpriseOwnerOrganizationRoleInput!`):
Parameters for UpdateEnterpriseOwnerOrganizationRole
"""
update_enterprise_profile = sgqlc.types.Field(
"UpdateEnterpriseProfilePayload",
graphql_name="updateEnterpriseProfile",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateEnterpriseProfileInput), graphql_name="input", default=None)),)
),
)
"""Updates an enterprise's profile.
Arguments:
* `input` (`UpdateEnterpriseProfileInput!`): Parameters for
UpdateEnterpriseProfile
"""
update_enterprise_repository_projects_setting = sgqlc.types.Field(
"UpdateEnterpriseRepositoryProjectsSettingPayload",
graphql_name="updateEnterpriseRepositoryProjectsSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseRepositoryProjectsSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether repository projects are enabled for a enterprise.
Arguments:
* `input` (`UpdateEnterpriseRepositoryProjectsSettingInput!`):
Parameters for UpdateEnterpriseRepositoryProjectsSetting
"""
update_enterprise_team_discussions_setting = sgqlc.types.Field(
"UpdateEnterpriseTeamDiscussionsSettingPayload",
graphql_name="updateEnterpriseTeamDiscussionsSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(sgqlc.types.non_null(UpdateEnterpriseTeamDiscussionsSettingInput), graphql_name="input", default=None),
),
)
),
)
"""Sets whether team discussions are enabled for an enterprise.
Arguments:
* `input` (`UpdateEnterpriseTeamDiscussionsSettingInput!`):
Parameters for UpdateEnterpriseTeamDiscussionsSetting
"""
update_enterprise_two_factor_authentication_required_setting = sgqlc.types.Field(
"UpdateEnterpriseTwoFactorAuthenticationRequiredSettingPayload",
graphql_name="updateEnterpriseTwoFactorAuthenticationRequiredSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateEnterpriseTwoFactorAuthenticationRequiredSettingInput),
graphql_name="input",
default=None,
),
),
)
),
)
"""Sets whether two factor authentication is required for all users
in an enterprise.
Arguments:
* `input` (`UpdateEnterpriseTwoFactorAuthenticationRequiredSetting
Input!`): Parameters for
UpdateEnterpriseTwoFactorAuthenticationRequiredSetting
"""
update_environment = sgqlc.types.Field(
"UpdateEnvironmentPayload",
graphql_name="updateEnvironment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateEnvironmentInput), graphql_name="input", default=None)),)
),
)
"""Updates an environment.
Arguments:
* `input` (`UpdateEnvironmentInput!`): Parameters for
UpdateEnvironment
"""
update_ip_allow_list_enabled_setting = sgqlc.types.Field(
"UpdateIpAllowListEnabledSettingPayload",
graphql_name="updateIpAllowListEnabledSetting",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateIpAllowListEnabledSettingInput), graphql_name="input", default=None)),)
),
)
"""Sets whether an IP allow list is enabled on an owner.
Arguments:
* `input` (`UpdateIpAllowListEnabledSettingInput!`): Parameters
for UpdateIpAllowListEnabledSetting
"""
update_ip_allow_list_entry = sgqlc.types.Field(
"UpdateIpAllowListEntryPayload",
graphql_name="updateIpAllowListEntry",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateIpAllowListEntryInput), graphql_name="input", default=None)),)
),
)
"""Updates an IP allow list entry.
Arguments:
* `input` (`UpdateIpAllowListEntryInput!`): Parameters for
UpdateIpAllowListEntry
"""
update_ip_allow_list_for_installed_apps_enabled_setting = sgqlc.types.Field(
"UpdateIpAllowListForInstalledAppsEnabledSettingPayload",
graphql_name="updateIpAllowListForInstalledAppsEnabledSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateIpAllowListForInstalledAppsEnabledSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether IP allow list configuration for installed GitHub Apps
is enabled on an owner.
Arguments:
* `input`
(`UpdateIpAllowListForInstalledAppsEnabledSettingInput!`):
Parameters for UpdateIpAllowListForInstalledAppsEnabledSetting
"""
update_issue = sgqlc.types.Field(
"UpdateIssuePayload",
graphql_name="updateIssue",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateIssueInput), graphql_name="input", default=None)),)),
)
"""Updates an Issue.
Arguments:
* `input` (`UpdateIssueInput!`): Parameters for UpdateIssue
"""
update_issue_comment = sgqlc.types.Field(
"UpdateIssueCommentPayload",
graphql_name="updateIssueComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateIssueCommentInput), graphql_name="input", default=None)),)
),
)
"""Updates an IssueComment object.
Arguments:
* `input` (`UpdateIssueCommentInput!`): Parameters for
UpdateIssueComment
"""
update_notification_restriction_setting = sgqlc.types.Field(
"UpdateNotificationRestrictionSettingPayload",
graphql_name="updateNotificationRestrictionSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(sgqlc.types.non_null(UpdateNotificationRestrictionSettingInput), graphql_name="input", default=None),
),
)
),
)
"""Update the setting to restrict notifications to only verified or
approved domains available to an owner.
Arguments:
* `input` (`UpdateNotificationRestrictionSettingInput!`):
Parameters for UpdateNotificationRestrictionSetting
"""
update_organization_allow_private_repository_forking_setting = sgqlc.types.Field(
"UpdateOrganizationAllowPrivateRepositoryForkingSettingPayload",
graphql_name="updateOrganizationAllowPrivateRepositoryForkingSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateOrganizationAllowPrivateRepositoryForkingSettingInput),
graphql_name="input",
default=None,
),
),
)
),
)
"""Sets whether private repository forks are enabled for an
organization.
Arguments:
* `input` (`UpdateOrganizationAllowPrivateRepositoryForkingSetting
Input!`): Parameters for
UpdateOrganizationAllowPrivateRepositoryForkingSetting
"""
update_organization_web_commit_signoff_setting = sgqlc.types.Field(
"UpdateOrganizationWebCommitSignoffSettingPayload",
graphql_name="updateOrganizationWebCommitSignoffSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(
sgqlc.types.non_null(UpdateOrganizationWebCommitSignoffSettingInput), graphql_name="input", default=None
),
),
)
),
)
"""Sets whether contributors are required to sign off on web-based
commits for repositories in an organization.
Arguments:
* `input` (`UpdateOrganizationWebCommitSignoffSettingInput!`):
Parameters for UpdateOrganizationWebCommitSignoffSetting
"""
update_project = sgqlc.types.Field(
"UpdateProjectPayload",
graphql_name="updateProject",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectInput), graphql_name="input", default=None)),)
),
)
"""Updates an existing project.
Arguments:
* `input` (`UpdateProjectInput!`): Parameters for UpdateProject
"""
update_project_card = sgqlc.types.Field(
"UpdateProjectCardPayload",
graphql_name="updateProjectCard",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectCardInput), graphql_name="input", default=None)),)
),
)
"""Updates an existing project card.
Arguments:
* `input` (`UpdateProjectCardInput!`): Parameters for
UpdateProjectCard
"""
update_project_column = sgqlc.types.Field(
"UpdateProjectColumnPayload",
graphql_name="updateProjectColumn",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectColumnInput), graphql_name="input", default=None)),)
),
)
"""Updates an existing project column.
Arguments:
* `input` (`UpdateProjectColumnInput!`): Parameters for
UpdateProjectColumn
"""
update_project_v2 = sgqlc.types.Field(
"UpdateProjectV2Payload",
graphql_name="updateProjectV2",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectV2Input), graphql_name="input", default=None)),)
),
)
"""Updates an existing project (beta).
Arguments:
* `input` (`UpdateProjectV2Input!`): Parameters for
UpdateProjectV2
"""
update_project_v2_collaborators = sgqlc.types.Field(
"UpdateProjectV2CollaboratorsPayload",
graphql_name="updateProjectV2Collaborators",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectV2CollaboratorsInput), graphql_name="input", default=None)),)
),
)
"""Update the collaborators on a team or a project
Arguments:
* `input` (`UpdateProjectV2CollaboratorsInput!`): Parameters for
UpdateProjectV2Collaborators
"""
update_project_v2_draft_issue = sgqlc.types.Field(
"UpdateProjectV2DraftIssuePayload",
graphql_name="updateProjectV2DraftIssue",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectV2DraftIssueInput), graphql_name="input", default=None)),)
),
)
"""Updates a draft issue within a Project.
Arguments:
* `input` (`UpdateProjectV2DraftIssueInput!`): Parameters for
UpdateProjectV2DraftIssue
"""
update_project_v2_item_field_value = sgqlc.types.Field(
"UpdateProjectV2ItemFieldValuePayload",
graphql_name="updateProjectV2ItemFieldValue",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectV2ItemFieldValueInput), graphql_name="input", default=None)),)
),
)
"""This mutation updates the value of a field for an item in a
Project. Currently only single-select, text, number, date, and
iteration fields are supported.
Arguments:
* `input` (`UpdateProjectV2ItemFieldValueInput!`): Parameters for
UpdateProjectV2ItemFieldValue
"""
update_project_v2_item_position = sgqlc.types.Field(
"UpdateProjectV2ItemPositionPayload",
graphql_name="updateProjectV2ItemPosition",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateProjectV2ItemPositionInput), graphql_name="input", default=None)),)
),
)
"""This mutation updates the position of the item in the project,
where the position represents the priority of an item.
Arguments:
* `input` (`UpdateProjectV2ItemPositionInput!`): Parameters for
UpdateProjectV2ItemPosition
"""
update_pull_request = sgqlc.types.Field(
"UpdatePullRequestPayload",
graphql_name="updatePullRequest",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdatePullRequestInput), graphql_name="input", default=None)),)
),
)
"""Update a pull request
Arguments:
* `input` (`UpdatePullRequestInput!`): Parameters for
UpdatePullRequest
"""
update_pull_request_branch = sgqlc.types.Field(
"UpdatePullRequestBranchPayload",
graphql_name="updatePullRequestBranch",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdatePullRequestBranchInput), graphql_name="input", default=None)),)
),
)
"""Merge or Rebase HEAD from upstream branch into pull request branch
Arguments:
* `input` (`UpdatePullRequestBranchInput!`): Parameters for
UpdatePullRequestBranch
"""
update_pull_request_review = sgqlc.types.Field(
"UpdatePullRequestReviewPayload",
graphql_name="updatePullRequestReview",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdatePullRequestReviewInput), graphql_name="input", default=None)),)
),
)
"""Updates the body of a pull request review.
Arguments:
* `input` (`UpdatePullRequestReviewInput!`): Parameters for
UpdatePullRequestReview
"""
update_pull_request_review_comment = sgqlc.types.Field(
"UpdatePullRequestReviewCommentPayload",
graphql_name="updatePullRequestReviewComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdatePullRequestReviewCommentInput), graphql_name="input", default=None)),)
),
)
"""Updates a pull request review comment.
Arguments:
* `input` (`UpdatePullRequestReviewCommentInput!`): Parameters for
UpdatePullRequestReviewComment
"""
update_ref = sgqlc.types.Field(
"UpdateRefPayload",
graphql_name="updateRef",
args=sgqlc.types.ArgDict((("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateRefInput), graphql_name="input", default=None)),)),
)
"""Update a Git Ref.
Arguments:
* `input` (`UpdateRefInput!`): Parameters for UpdateRef
"""
update_repository = sgqlc.types.Field(
"UpdateRepositoryPayload",
graphql_name="updateRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Update information about a repository.
Arguments:
* `input` (`UpdateRepositoryInput!`): Parameters for
UpdateRepository
"""
update_repository_ruleset = sgqlc.types.Field(
"UpdateRepositoryRulesetPayload",
graphql_name="updateRepositoryRuleset",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateRepositoryRulesetInput), graphql_name="input", default=None)),)
),
)
"""Update a repository ruleset
Arguments:
* `input` (`UpdateRepositoryRulesetInput!`): Parameters for
UpdateRepositoryRuleset
"""
update_repository_web_commit_signoff_setting = sgqlc.types.Field(
"UpdateRepositoryWebCommitSignoffSettingPayload",
graphql_name="updateRepositoryWebCommitSignoffSetting",
args=sgqlc.types.ArgDict(
(
(
"input",
sgqlc.types.Arg(sgqlc.types.non_null(UpdateRepositoryWebCommitSignoffSettingInput), graphql_name="input", default=None),
),
)
),
)
"""Sets whether contributors are required to sign off on web-based
commits for a repository.
Arguments:
* `input` (`UpdateRepositoryWebCommitSignoffSettingInput!`):
Parameters for UpdateRepositoryWebCommitSignoffSetting
"""
update_sponsorship_preferences = sgqlc.types.Field(
"UpdateSponsorshipPreferencesPayload",
graphql_name="updateSponsorshipPreferences",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateSponsorshipPreferencesInput), graphql_name="input", default=None)),)
),
)
"""Change visibility of your sponsorship and opt in or out of email
updates from the maintainer.
Arguments:
* `input` (`UpdateSponsorshipPreferencesInput!`): Parameters for
UpdateSponsorshipPreferences
"""
update_subscription = sgqlc.types.Field(
"UpdateSubscriptionPayload",
graphql_name="updateSubscription",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateSubscriptionInput), graphql_name="input", default=None)),)
),
)
"""Updates the state for subscribable subjects.
Arguments:
* `input` (`UpdateSubscriptionInput!`): Parameters for
UpdateSubscription
"""
update_team_discussion = sgqlc.types.Field(
"UpdateTeamDiscussionPayload",
graphql_name="updateTeamDiscussion",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateTeamDiscussionInput), graphql_name="input", default=None)),)
),
)
"""Updates a team discussion.
Arguments:
* `input` (`UpdateTeamDiscussionInput!`): Parameters for
UpdateTeamDiscussion
"""
update_team_discussion_comment = sgqlc.types.Field(
"UpdateTeamDiscussionCommentPayload",
graphql_name="updateTeamDiscussionComment",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateTeamDiscussionCommentInput), graphql_name="input", default=None)),)
),
)
"""Updates a discussion comment.
Arguments:
* `input` (`UpdateTeamDiscussionCommentInput!`): Parameters for
UpdateTeamDiscussionComment
"""
update_teams_repository = sgqlc.types.Field(
"UpdateTeamsRepositoryPayload",
graphql_name="updateTeamsRepository",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateTeamsRepositoryInput), graphql_name="input", default=None)),)
),
)
"""Update team repository.
Arguments:
* `input` (`UpdateTeamsRepositoryInput!`): Parameters for
UpdateTeamsRepository
"""
update_topics = sgqlc.types.Field(
"UpdateTopicsPayload",
graphql_name="updateTopics",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(UpdateTopicsInput), graphql_name="input", default=None)),)
),
)
"""Replaces the repository's topics with the given topics.
Arguments:
* `input` (`UpdateTopicsInput!`): Parameters for UpdateTopics
"""
verify_verifiable_domain = sgqlc.types.Field(
"VerifyVerifiableDomainPayload",
graphql_name="verifyVerifiableDomain",
args=sgqlc.types.ArgDict(
(("input", sgqlc.types.Arg(sgqlc.types.non_null(VerifyVerifiableDomainInput), graphql_name="input", default=None)),)
),
)
"""Verify that a verifiable domain has the expected DNS record.
Arguments:
* `input` (`VerifyVerifiableDomainInput!`): Parameters for
VerifyVerifiableDomain
"""
| Mutation |
python | huggingface__transformers | src/transformers/models/dab_detr/modeling_dab_detr.py | {
"start": 33371,
"end": 36538
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: DabDetrConfig, is_first: bool = False):
super().__init__()
self.self_attn = DabDetrDecoderLayerSelfAttention(config)
self.cross_attn = DabDetrDecoderLayerCrossAttention(config, is_first)
self.mlp = DabDetrDecoderLayerFFN(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
object_queries: Optional[torch.Tensor] = None,
query_position_embeddings: Optional[torch.Tensor] = None,
query_sine_embed: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
object_queries that are added to the queries and keys
in the cross-attention layer.
query_position_embeddings (`torch.FloatTensor`, *optional*):
object_queries that are added to the queries and keys
in the self-attention layer.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
query_position_embeddings=query_position_embeddings,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states, cross_attn_weights = self.cross_attn(
hidden_states=hidden_states,
encoder_hidden_states=encoder_hidden_states,
query_position_embeddings=query_position_embeddings,
object_queries=object_queries,
encoder_attention_mask=encoder_attention_mask,
query_sine_embed=query_sine_embed,
output_attentions=output_attentions,
)
hidden_states = self.mlp(hidden_states=hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Modified from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->DabDetrMLP
| DabDetrDecoderLayer |
python | coleifer__peewee | tests/models.py | {
"start": 153213,
"end": 154147
} | class ____(ModelTestCase):
@requires_models(User)
def test_sum_case(self):
for username in ('charlie', 'huey', 'zaizee'):
User.create(username=username)
case = Case(None, [(User.username.endswith('e'), 1)], 0)
e_sum = fn.SUM(case)
query = (User
.select(User.username, e_sum.alias('e_sum'))
.group_by(User.username)
.order_by(User.username))
self.assertSQL(query, (
'SELECT "t1"."username", '
'SUM(CASE WHEN ("t1"."username" ILIKE ?) THEN ? ELSE ? END) '
'AS "e_sum" '
'FROM "users" AS "t1" '
'GROUP BY "t1"."username" '
'ORDER BY "t1"."username"'), ['%e', 1, 0])
data = [(user.username, user.e_sum) for user in query]
self.assertEqual(data, [
('charlie', 1),
('huey', 0),
('zaizee', 1)])
| TestSumCase |
python | pytorch__pytorch | torch/_inductor/scheduler.py | {
"start": 16406,
"end": 16507
} | class ____(SchedulerBuffer):
defining_op: Optional[BaseSchedulerNode] = None
| SchedulerDonatedBuffer |
python | pappasam__jedi-language-server | jedi_language_server/initialization_options.py | {
"start": 1048,
"end": 1219
} | class ____:
all: bool = False
names: Set[str] = field(default_factory=set)
full_names: Set[str] = field(default_factory=set)
@light_dataclass
| HoverDisableOptions |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_collections.py | {
"start": 34375,
"end": 59949
} | class ____(ABCTestCase):
def test_Awaitable(self):
def gen():
yield
@types.coroutine
def coro():
yield
async def new_coro():
pass
class Bar:
def __await__(self):
yield
class MinimalCoro(Coroutine):
def send(self, value):
return value
def throw(self, typ, val=None, tb=None):
super().throw(typ, val, tb)
def __await__(self):
yield
self.validate_abstract_methods(Awaitable, '__await__')
non_samples = [None, int(), gen(), object()]
for x in non_samples:
self.assertNotIsInstance(x, Awaitable)
self.assertFalse(issubclass(type(x), Awaitable), repr(type(x)))
samples = [Bar(), MinimalCoro()]
for x in samples:
self.assertIsInstance(x, Awaitable)
self.assertTrue(issubclass(type(x), Awaitable))
c = coro()
# Iterable coroutines (generators with CO_ITERABLE_COROUTINE
# flag don't have '__await__' method, hence can't be instances
# of Awaitable. Use inspect.isawaitable to detect them.
self.assertNotIsInstance(c, Awaitable)
c = new_coro()
self.assertIsInstance(c, Awaitable)
c.close() # avoid RuntimeWarning that coro() was not awaited
class CoroLike: pass
Coroutine.register(CoroLike)
self.assertTrue(isinstance(CoroLike(), Awaitable))
self.assertTrue(issubclass(CoroLike, Awaitable))
CoroLike = None
support.gc_collect() # Kill CoroLike to clean-up ABCMeta cache
def test_Coroutine(self):
def gen():
yield
@types.coroutine
def coro():
yield
async def new_coro():
pass
class Bar:
def __await__(self):
yield
class MinimalCoro(Coroutine):
def send(self, value):
return value
def throw(self, typ, val=None, tb=None):
super().throw(typ, val, tb)
def __await__(self):
yield
self.validate_abstract_methods(Coroutine, '__await__', 'send', 'throw')
non_samples = [None, int(), gen(), object(), Bar()]
for x in non_samples:
self.assertNotIsInstance(x, Coroutine)
self.assertFalse(issubclass(type(x), Coroutine), repr(type(x)))
samples = [MinimalCoro()]
for x in samples:
self.assertIsInstance(x, Awaitable)
self.assertTrue(issubclass(type(x), Awaitable))
c = coro()
# Iterable coroutines (generators with CO_ITERABLE_COROUTINE
# flag don't have '__await__' method, hence can't be instances
# of Coroutine. Use inspect.isawaitable to detect them.
self.assertNotIsInstance(c, Coroutine)
c = new_coro()
self.assertIsInstance(c, Coroutine)
c.close() # avoid RuntimeWarning that coro() was not awaited
class CoroLike:
def send(self, value):
pass
def throw(self, typ, val=None, tb=None):
pass
def close(self):
pass
def __await__(self):
pass
self.assertTrue(isinstance(CoroLike(), Coroutine))
self.assertTrue(issubclass(CoroLike, Coroutine))
class CoroLike:
def send(self, value):
pass
def close(self):
pass
def __await__(self):
pass
self.assertFalse(isinstance(CoroLike(), Coroutine))
self.assertFalse(issubclass(CoroLike, Coroutine))
def test_Hashable(self):
# Check some non-hashables
non_samples = [bytearray(), list(), set(), dict()]
for x in non_samples:
self.assertNotIsInstance(x, Hashable)
self.assertFalse(issubclass(type(x), Hashable), repr(type(x)))
# Check some hashables
samples = [None,
int(), float(), complex(),
str(),
tuple(), frozenset(),
int, list, object, type, bytes()
]
for x in samples:
self.assertIsInstance(x, Hashable)
self.assertTrue(issubclass(type(x), Hashable), repr(type(x)))
self.assertRaises(TypeError, Hashable)
# Check direct subclassing
class H(Hashable):
def __hash__(self):
return super().__hash__()
self.assertEqual(hash(H()), 0)
self.assertFalse(issubclass(int, H))
self.validate_abstract_methods(Hashable, '__hash__')
self.validate_isinstance(Hashable, '__hash__')
def test_AsyncIterable(self):
class AI:
def __aiter__(self):
return self
self.assertTrue(isinstance(AI(), AsyncIterable))
self.assertTrue(issubclass(AI, AsyncIterable))
# Check some non-iterables
non_samples = [None, object, []]
for x in non_samples:
self.assertNotIsInstance(x, AsyncIterable)
self.assertFalse(issubclass(type(x), AsyncIterable), repr(type(x)))
self.validate_abstract_methods(AsyncIterable, '__aiter__')
self.validate_isinstance(AsyncIterable, '__aiter__')
def test_AsyncIterator(self):
class AI:
def __aiter__(self):
return self
async def __anext__(self):
raise StopAsyncIteration
self.assertTrue(isinstance(AI(), AsyncIterator))
self.assertTrue(issubclass(AI, AsyncIterator))
non_samples = [None, object, []]
# Check some non-iterables
for x in non_samples:
self.assertNotIsInstance(x, AsyncIterator)
self.assertFalse(issubclass(type(x), AsyncIterator), repr(type(x)))
# Similarly to regular iterators (see issue 10565)
class AnextOnly:
async def __anext__(self):
raise StopAsyncIteration
self.assertNotIsInstance(AnextOnly(), AsyncIterator)
self.validate_abstract_methods(AsyncIterator, '__anext__', '__aiter__')
def test_Iterable(self):
# Check some non-iterables
non_samples = [None, 42, 3.14, 1j]
for x in non_samples:
self.assertNotIsInstance(x, Iterable)
self.assertFalse(issubclass(type(x), Iterable), repr(type(x)))
# Check some iterables
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
_test_gen(),
(x for x in []),
]
for x in samples:
self.assertIsInstance(x, Iterable)
self.assertTrue(issubclass(type(x), Iterable), repr(type(x)))
with torch._dynamo.error_on_graph_break(False):
# Check direct subclassing
class I(Iterable):
def __iter__(self):
return super().__iter__()
self.assertEqual(list(I()), [])
self.assertFalse(issubclass(str, I))
self.validate_abstract_methods(Iterable, '__iter__')
self.validate_isinstance(Iterable, '__iter__')
with torch._dynamo.error_on_graph_break(False):
# Check None blocking
class It:
def __iter__(self): return iter([])
class ItBlocked(It):
__iter__ = None
self.assertTrue(issubclass(It, Iterable))
self.assertTrue(isinstance(It(), Iterable))
self.assertFalse(issubclass(ItBlocked, Iterable))
self.assertFalse(isinstance(ItBlocked(), Iterable))
def test_Reversible(self):
# Check some non-reversibles
non_samples = [None, 42, 3.14, 1j, set(), frozenset()]
for x in non_samples:
self.assertNotIsInstance(x, Reversible)
self.assertFalse(issubclass(type(x), Reversible), repr(type(x)))
# Check some non-reversible iterables
non_reversibles = [_test_gen(), (x for x in []), iter([]), reversed([])]
for x in non_reversibles:
self.assertNotIsInstance(x, Reversible)
self.assertFalse(issubclass(type(x), Reversible), repr(type(x)))
# Check some reversible iterables
samples = [bytes(), str(), tuple(), list(), OrderedDict(),
OrderedDict().keys(), OrderedDict().items(),
OrderedDict().values(), Counter(), Counter().keys(),
Counter().items(), Counter().values(), dict(),
dict().keys(), dict().items(), dict().values()]
for x in samples:
self.assertIsInstance(x, Reversible)
self.assertTrue(issubclass(type(x), Reversible), repr(type(x)))
# Check also Mapping, MutableMapping, and Sequence
self.assertTrue(issubclass(Sequence, Reversible), repr(Sequence))
self.assertFalse(issubclass(Mapping, Reversible), repr(Mapping))
self.assertFalse(issubclass(MutableMapping, Reversible), repr(MutableMapping))
with torch._dynamo.error_on_graph_break(False):
# Check direct subclassing
class R(Reversible):
def __iter__(self):
return iter(list())
def __reversed__(self):
return iter(list())
self.assertEqual(list(reversed(R())), [])
self.assertFalse(issubclass(float, R))
self.validate_abstract_methods(Reversible, '__reversed__', '__iter__')
with torch._dynamo.error_on_graph_break(False):
# Check reversible non-iterable (which is not Reversible)
class RevNoIter:
def __reversed__(self): return reversed([])
class RevPlusIter(RevNoIter):
def __iter__(self): return iter([])
self.assertFalse(issubclass(RevNoIter, Reversible))
self.assertFalse(isinstance(RevNoIter(), Reversible))
self.assertTrue(issubclass(RevPlusIter, Reversible))
self.assertTrue(isinstance(RevPlusIter(), Reversible))
with torch._dynamo.error_on_graph_break(False):
# Check None blocking
class Rev:
def __iter__(self): return iter([])
def __reversed__(self): return reversed([])
class RevItBlocked(Rev):
__iter__ = None
class RevRevBlocked(Rev):
__reversed__ = None
self.assertTrue(issubclass(Rev, Reversible))
self.assertTrue(isinstance(Rev(), Reversible))
self.assertFalse(issubclass(RevItBlocked, Reversible))
self.assertFalse(isinstance(RevItBlocked(), Reversible))
self.assertFalse(issubclass(RevRevBlocked, Reversible))
self.assertFalse(isinstance(RevRevBlocked(), Reversible))
def test_Collection(self):
# Check some non-collections
non_collections = [None, 42, 3.14, 1j, lambda x: 2*x]
for x in non_collections:
self.assertNotIsInstance(x, Collection)
self.assertFalse(issubclass(type(x), Collection), repr(type(x)))
# Check some non-collection iterables
non_col_iterables = [_test_gen(), iter(b''), iter(bytearray()),
(x for x in [])]
for x in non_col_iterables:
self.assertNotIsInstance(x, Collection)
self.assertFalse(issubclass(type(x), Collection), repr(type(x)))
# Check some collections
samples = [set(), frozenset(), dict(), bytes(), str(), tuple(),
list(), dict().keys(), dict().items(), dict().values()]
for x in samples:
self.assertIsInstance(x, Collection)
self.assertTrue(issubclass(type(x), Collection), repr(type(x)))
# Check also Mapping, MutableMapping, etc.
self.assertTrue(issubclass(Sequence, Collection), repr(Sequence))
self.assertTrue(issubclass(Mapping, Collection), repr(Mapping))
self.assertTrue(issubclass(MutableMapping, Collection),
repr(MutableMapping))
self.assertTrue(issubclass(Set, Collection), repr(Set))
self.assertTrue(issubclass(MutableSet, Collection), repr(MutableSet))
self.assertTrue(issubclass(Sequence, Collection), repr(MutableSet))
with torch._dynamo.error_on_graph_break(False):
# Check direct subclassing
class Col(Collection):
def __iter__(self):
return iter(list())
def __len__(self):
return 0
def __contains__(self, item):
return False
class DerCol(Col): pass
self.assertEqual(list(iter(Col())), [])
self.assertFalse(issubclass(list, Col))
self.assertFalse(issubclass(set, Col))
self.assertFalse(issubclass(float, Col))
self.assertEqual(list(iter(DerCol())), [])
self.assertFalse(issubclass(list, DerCol))
self.assertFalse(issubclass(set, DerCol))
self.assertFalse(issubclass(float, DerCol))
self.validate_abstract_methods(Collection, '__len__', '__iter__',
'__contains__')
# Check sized container non-iterable (which is not Collection) etc.
with torch._dynamo.error_on_graph_break(False):
class ColNoIter:
def __len__(self): return 0
def __contains__(self, item): return False
class ColNoSize:
def __iter__(self): return iter([])
def __contains__(self, item): return False
class ColNoCont:
def __iter__(self): return iter([])
def __len__(self): return 0
self.assertFalse(issubclass(ColNoIter, Collection))
self.assertFalse(isinstance(ColNoIter(), Collection))
self.assertFalse(issubclass(ColNoSize, Collection))
self.assertFalse(isinstance(ColNoSize(), Collection))
self.assertFalse(issubclass(ColNoCont, Collection))
self.assertFalse(isinstance(ColNoCont(), Collection))
with torch._dynamo.error_on_graph_break(False):
# Check None blocking
class SizeBlock:
def __iter__(self): return iter([])
def __contains__(self): return False
__len__ = None
class IterBlock:
def __len__(self): return 0
def __contains__(self): return True
__iter__ = None
self.assertFalse(issubclass(SizeBlock, Collection))
self.assertFalse(isinstance(SizeBlock(), Collection))
self.assertFalse(issubclass(IterBlock, Collection))
self.assertFalse(isinstance(IterBlock(), Collection))
with torch._dynamo.error_on_graph_break(False):
# Check None blocking in subclass
class ColImpl:
def __iter__(self):
return iter(list())
def __len__(self):
return 0
def __contains__(self, item):
return False
class NonCol(ColImpl):
__contains__ = None
self.assertFalse(issubclass(NonCol, Collection))
self.assertFalse(isinstance(NonCol(), Collection))
def test_Iterator(self):
non_samples = [None, 42, 3.14, 1j, b"", "", (), [], {}, set()]
for x in non_samples:
self.assertNotIsInstance(x, Iterator)
self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
samples = [iter(bytes()), iter(str()),
iter(tuple()), iter(list()), iter(dict()),
iter(set()), iter(frozenset()),
iter(dict().keys()), iter(dict().items()),
iter(dict().values()),
_test_gen(),
(x for x in []),
]
for x in samples:
self.assertIsInstance(x, Iterator)
self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
self.validate_abstract_methods(Iterator, '__next__', '__iter__')
with torch._dynamo.error_on_graph_break(False):
# Issue 10565
class NextOnly:
def __next__(self):
yield 1
return
self.assertNotIsInstance(NextOnly(), Iterator)
def test_Generator(self):
with torch._dynamo.error_on_graph_break(False):
class NonGen1:
def __iter__(self): return self
def __next__(self): return None
def close(self): pass
def throw(self, typ, val=None, tb=None): pass
class NonGen2:
def __iter__(self): return self
def __next__(self): return None
def close(self): pass
def send(self, value): return value
class NonGen3:
def close(self): pass
def send(self, value): return value
def throw(self, typ, val=None, tb=None): pass
non_samples = [
None, 42, 3.14, 1j, b"", "", (), [], {}, set(),
iter(()), iter([]), NonGen1(), NonGen2(), NonGen3()]
for x in non_samples:
self.assertNotIsInstance(x, Generator)
self.assertFalse(issubclass(type(x), Generator), repr(type(x)))
with torch._dynamo.error_on_graph_break(False):
class Gen:
def __iter__(self): return self
def __next__(self): return None
def close(self): pass
def send(self, value): return value
def throw(self, typ, val=None, tb=None): pass
class MinimalGen(Generator):
def send(self, value):
return value
def throw(self, typ, val=None, tb=None):
super().throw(typ, val, tb)
def gen():
yield 1
samples = [gen(), (lambda: (yield))(), Gen(), MinimalGen()]
for x in samples:
self.assertIsInstance(x, Iterator)
self.assertIsInstance(x, Generator)
self.assertTrue(issubclass(type(x), Generator), repr(type(x)))
self.validate_abstract_methods(Generator, 'send', 'throw')
# mixin tests
mgen = MinimalGen()
self.assertIs(mgen, iter(mgen))
self.assertIs(mgen.send(None), next(mgen))
self.assertEqual(2, mgen.send(2))
self.assertIsNone(mgen.close())
self.assertRaises(ValueError, mgen.throw, ValueError)
self.assertRaisesRegex(ValueError, "^huhu$",
mgen.throw, ValueError, ValueError("huhu"))
self.assertRaises(StopIteration, mgen.throw, StopIteration())
with torch._dynamo.error_on_graph_break(False):
class FailOnClose(Generator):
def send(self, value): return value
def throw(self, *args): raise ValueError
self.assertRaises(ValueError, FailOnClose().close)
with torch._dynamo.error_on_graph_break(False):
class IgnoreGeneratorExit(Generator):
def send(self, value): return value
def throw(self, *args): pass
self.assertRaises(RuntimeError, IgnoreGeneratorExit().close)
def test_AsyncGenerator(self):
class NonAGen1:
def __aiter__(self): return self
def __anext__(self): return None
def aclose(self): pass
def athrow(self, typ, val=None, tb=None): pass
class NonAGen2:
def __aiter__(self): return self
def __anext__(self): return None
def aclose(self): pass
def asend(self, value): return value
class NonAGen3:
def aclose(self): pass
def asend(self, value): return value
def athrow(self, typ, val=None, tb=None): pass
non_samples = [
None, 42, 3.14, 1j, b"", "", (), [], {}, set(),
iter(()), iter([]), NonAGen1(), NonAGen2(), NonAGen3()]
for x in non_samples:
self.assertNotIsInstance(x, AsyncGenerator)
self.assertFalse(issubclass(type(x), AsyncGenerator), repr(type(x)))
class Gen:
def __aiter__(self): return self
async def __anext__(self): return None
async def aclose(self): pass
async def asend(self, value): return value
async def athrow(self, typ, val=None, tb=None): pass
class MinimalAGen(AsyncGenerator):
async def asend(self, value):
return value
async def athrow(self, typ, val=None, tb=None):
await super().athrow(typ, val, tb)
async def gen():
yield 1
samples = [gen(), Gen(), MinimalAGen()]
for x in samples:
self.assertIsInstance(x, AsyncIterator)
self.assertIsInstance(x, AsyncGenerator)
self.assertTrue(issubclass(type(x), AsyncGenerator), repr(type(x)))
self.validate_abstract_methods(AsyncGenerator, 'asend', 'athrow')
def run_async(coro):
result = None
while True:
try:
coro.send(None)
except StopIteration as ex:
result = ex.args[0] if ex.args else None
break
return result
# mixin tests
mgen = MinimalAGen()
self.assertIs(mgen, mgen.__aiter__())
self.assertIs(run_async(mgen.asend(None)), run_async(mgen.__anext__()))
self.assertEqual(2, run_async(mgen.asend(2)))
self.assertIsNone(run_async(mgen.aclose()))
with self.assertRaises(ValueError):
run_async(mgen.athrow(ValueError))
class FailOnClose(AsyncGenerator):
async def asend(self, value): return value
async def athrow(self, *args): raise ValueError
with self.assertRaises(ValueError):
run_async(FailOnClose().aclose())
class IgnoreGeneratorExit(AsyncGenerator):
async def asend(self, value): return value
async def athrow(self, *args): pass
with self.assertRaises(RuntimeError):
run_async(IgnoreGeneratorExit().aclose())
def test_Sized(self):
non_samples = [None, 42, 3.14, 1j,
_test_gen(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Sized)
self.assertFalse(issubclass(type(x), Sized), repr(type(x)))
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(), dict().values(),
]
for x in samples:
self.assertIsInstance(x, Sized)
self.assertTrue(issubclass(type(x), Sized), repr(type(x)))
self.validate_abstract_methods(Sized, '__len__')
self.validate_isinstance(Sized, '__len__')
def test_Container(self):
non_samples = [None, 42, 3.14, 1j,
_test_gen(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Container)
self.assertFalse(issubclass(type(x), Container), repr(type(x)))
samples = [bytes(), str(),
tuple(), list(), set(), frozenset(), dict(),
dict().keys(), dict().items(),
]
for x in samples:
self.assertIsInstance(x, Container)
self.assertTrue(issubclass(type(x), Container), repr(type(x)))
self.validate_abstract_methods(Container, '__contains__')
self.validate_isinstance(Container, '__contains__')
def test_Callable(self):
non_samples = [None, 42, 3.14, 1j,
"", b"", (), [], {}, set(),
_test_gen(),
(x for x in []),
]
for x in non_samples:
self.assertNotIsInstance(x, Callable)
self.assertFalse(issubclass(type(x), Callable), repr(type(x)))
samples = [lambda: None,
type, int, object,
len,
list.append, [].append,
]
for x in samples:
self.assertIsInstance(x, Callable)
self.assertTrue(issubclass(type(x), Callable), repr(type(x)))
self.validate_abstract_methods(Callable, '__call__')
self.validate_isinstance(Callable, '__call__')
def test_direct_subclassing(self):
for B in Hashable, Iterable, Iterator, Reversible, Sized, Container, Callable:
with torch._dynamo.error_on_graph_break(False):
class C(B):
pass
self.assertTrue(issubclass(C, B))
self.assertFalse(issubclass(int, C))
def test_registration(self):
for B in Hashable, Iterable, Iterator, Reversible, Sized, Container, Callable:
with torch._dynamo.error_on_graph_break(False):
class C:
__hash__ = None # Make sure it isn't hashable by default
self.assertFalse(issubclass(C, B), B.__name__)
B.register(C)
self.assertTrue(issubclass(C, B))
| TestOneTrickPonyABCs |
python | giampaolo__psutil | tests/test_contracts.py | {
"start": 1282,
"end": 4127
} | class ____(PsutilTestCase):
def test_PROCFS_PATH(self):
assert hasattr(psutil, "PROCFS_PATH") == (LINUX or SUNOS or AIX)
def test_win_priority(self):
assert hasattr(psutil, "ABOVE_NORMAL_PRIORITY_CLASS") == WINDOWS
assert hasattr(psutil, "BELOW_NORMAL_PRIORITY_CLASS") == WINDOWS
assert hasattr(psutil, "HIGH_PRIORITY_CLASS") == WINDOWS
assert hasattr(psutil, "IDLE_PRIORITY_CLASS") == WINDOWS
assert hasattr(psutil, "NORMAL_PRIORITY_CLASS") == WINDOWS
assert hasattr(psutil, "REALTIME_PRIORITY_CLASS") == WINDOWS
def test_linux_ioprio_linux(self):
assert hasattr(psutil, "IOPRIO_CLASS_NONE") == LINUX
assert hasattr(psutil, "IOPRIO_CLASS_RT") == LINUX
assert hasattr(psutil, "IOPRIO_CLASS_BE") == LINUX
assert hasattr(psutil, "IOPRIO_CLASS_IDLE") == LINUX
def test_linux_ioprio_windows(self):
assert hasattr(psutil, "IOPRIO_HIGH") == WINDOWS
assert hasattr(psutil, "IOPRIO_NORMAL") == WINDOWS
assert hasattr(psutil, "IOPRIO_LOW") == WINDOWS
assert hasattr(psutil, "IOPRIO_VERYLOW") == WINDOWS
@pytest.mark.skipif(
GITHUB_ACTIONS and LINUX,
reason="unsupported on GITHUB_ACTIONS + LINUX",
)
def test_rlimit(self):
assert hasattr(psutil, "RLIM_INFINITY") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_AS") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_CORE") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_CPU") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_DATA") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_FSIZE") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_MEMLOCK") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_NOFILE") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_NPROC") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_RSS") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_STACK") == LINUX or FREEBSD
assert hasattr(psutil, "RLIMIT_LOCKS") == LINUX
if POSIX:
if kernel_version() >= (2, 6, 8):
assert hasattr(psutil, "RLIMIT_MSGQUEUE") == LINUX
if kernel_version() >= (2, 6, 12):
assert hasattr(psutil, "RLIMIT_NICE") == LINUX
if kernel_version() >= (2, 6, 12):
assert hasattr(psutil, "RLIMIT_RTPRIO") == LINUX
if kernel_version() >= (2, 6, 25):
assert hasattr(psutil, "RLIMIT_RTTIME") == LINUX
if kernel_version() >= (2, 6, 8):
assert hasattr(psutil, "RLIMIT_SIGPENDING") == LINUX
assert hasattr(psutil, "RLIMIT_SWAP") == FREEBSD
assert hasattr(psutil, "RLIMIT_SBSIZE") == FREEBSD
assert hasattr(psutil, "RLIMIT_NPTS") == FREEBSD
| TestAvailConstantsAPIs |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels39.py | {
"start": 315,
"end": 1653
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels39.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56179712, 56185600]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": True,
"border": {"color": "red", "width": 1, "dash_type": "dash"},
"gradient": {"colors": ["#DDEBCF", "#9CB86E", "#156B13"]},
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-node-parser-semantic-chunking/llama_index/packs/node_parser_semantic_chunking/base.py | {
"start": 7145,
"end": 8386
} | class ____(BaseLlamaPack):
"""
Semantic Chunking Query Engine Pack.
Takes in a list of documents, parses it with semantic embedding chunker,
and runs a query engine on the resulting chunks.
"""
def __init__(
self,
documents: List[Document],
buffer_size: int = 1,
breakpoint_percentile_threshold: float = 95.0,
) -> None:
"""Init params."""
self.embed_model = OpenAIEmbedding()
self.splitter = SemanticChunker(
buffer_size=buffer_size,
breakpoint_percentile_threshold=breakpoint_percentile_threshold,
embed_model=self.embed_model,
)
nodes = self.splitter.get_nodes_from_documents(documents)
self.vector_index = VectorStoreIndex(nodes)
self.query_engine = self.vector_index.as_query_engine()
def get_modules(self) -> Dict[str, Any]:
return {
"vector_index": self.vector_index,
"query_engine": self.query_engine,
"splitter": self.splitter,
"embed_model": self.embed_model,
}
def run(self, query: str) -> Any:
"""Run the pipeline."""
return self.query_engine.query(query)
| SemanticChunkingQueryEnginePack |
python | django__django | django/views/decorators/csrf.py | {
"start": 1030,
"end": 2323
} | class ____(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super().process_view(request, callback, callback_args, callback_kwargs)
# Force process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = "ensure_csrf_cookie"
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
"""Mark a view function as being exempt from the CSRF view protection."""
# view_func.csrf_exempt = True would also work, but decorators are nicer
# if they don't have side effects, so return a new function.
if iscoroutinefunction(view_func):
async def _view_wrapper(request, *args, **kwargs):
return await view_func(request, *args, **kwargs)
else:
def _view_wrapper(request, *args, **kwargs):
return view_func(request, *args, **kwargs)
_view_wrapper.csrf_exempt = True
return wraps(view_func)(_view_wrapper)
| _EnsureCsrfCookie |
python | FactoryBoy__factory_boy | examples/django_demo/generic_foreignkey/migrations/0001_initial.py | {
"start": 76,
"end": 837
} | class ____(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='TaggedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.SlugField()),
('object_id', models.PositiveIntegerField()),
(
'content_type',
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='contenttypes.ContentType'
)
),
],
),
]
| Migration |
python | getsentry__sentry | tests/sentry/models/test_organizationmemberteam.py | {
"start": 204,
"end": 1184
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization()
self.team = self.create_team(organization=self.organization)
self.member = self.create_member(organization=self.organization, user=self.create_user())
@with_feature("organizations:team-roles")
def test_get_team_role(self) -> None:
omt = OrganizationMemberTeam(organizationmember=self.member, team=self.team)
assert omt.get_team_role() == team_roles.get("contributor")
omt.role = "admin"
assert omt.get_team_role() == team_roles.get("admin")
@with_feature("organizations:team-roles")
def test_get_team_role_derives_minimum_role(self) -> None:
omt = OrganizationMemberTeam(organizationmember=self.member, team=self.team)
for org_role in ("admin", "manager", "owner"):
self.member.role = org_role
assert omt.get_team_role() == team_roles.get("admin")
| OrganizationMemberTest |
python | great-expectations__great_expectations | tests/data_context/abstract_data_context/test_data_docs_config_crud.py | {
"start": 657,
"end": 2486
} | class ____:
@pytest.mark.unit
def test_add_data_docs_site(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Add a new site
new_site_name = "my_new_site"
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
# Check that the new site is present
assert new_site_name in ephemeral_context_with_defaults.get_site_names()
@pytest.mark.unit
def test_add_data_docs_site_persists(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
new_site_name = "my_new_site"
with mock.patch(
"great_expectations.data_context.EphemeralDataContext._save_project_config"
) as mock_save_project_config:
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
mock_save_project_config.assert_called_once()
@pytest.mark.unit
def test_add_data_docs_site_already_existing_site_raises_exception(
self,
ephemeral_context_with_defaults: EphemeralDataContext,
new_site_config: dict,
):
# Check fixture configuration
existing_site_name = "local_site"
assert existing_site_name in ephemeral_context_with_defaults.get_site_names()
with pytest.raises(gx_exceptions.InvalidKeyError) as e:
new_site_name = existing_site_name
ephemeral_context_with_defaults.add_data_docs_site(
site_name=new_site_name, site_config=new_site_config
)
assert "Data Docs Site `local_site` already exists in the Data Context." in str(e.value)
| TestAddDataDocsSite |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/shape_output_test.py | {
"start": 3285,
"end": 4266
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Similar to the previous test, but the ShapeOp output is reshaped to 2D.
This makes the output tensor not compatible with shape tensor.
"""
def setUp(self):
super().setUp()
self.DisableNonTrtOptimizers()
def GraphFn(self, x):
q = 2 * x + 1
q = array_ops.shape(q)
q = gen_array_ops.reshape(q, [2, 2])
q = math_ops.cast(q, dtypes.float32)
q = self.trt_incompatible_op(q)
q = q * 2 + q * q
return array_ops.identity(q, name="output_0")
def GetParams(self):
return self.BuildParamsWithMask(
self.GraphFn,
dtypes.float32, [[2, 2, 5, 3]], [[2, 2]],
extra_inputs=[],
extra_outputs=[],
input_mask=[[False, True, True, True]],
output_mask=[[True, True]])
def ExpectedEnginesToBuild(self, run_params):
"""Returns the expected engines to build."""
return ["TRTEngineOp_000", "TRTEngineOp_001"]
| ShapeOutputWithSingleInputAndReshape |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 25561,
"end": 29844
} | class ____(nn.Module):
"""
Pad frames extracted from videos in the surroundings.
"""
def __init__(self, config):
if config.visual_prompter_apply not in ("add", "replace", "remove"):
raise ValueError("`visual_prompter_apply` must be in (add, replace, remove)")
super().__init__()
self.num_frames = config.num_frames
self.max_img_size = config.max_img_size
self.visual_prompter_apply = config.visual_prompter_apply
self.base_size = config.max_img_size - config.visual_prompt_size * 2
self.pad_up = nn.Parameter(
torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size])
)
self.pad_down = nn.Parameter(
torch.randn([1, config.num_frames, 3, config.visual_prompt_size, config.max_img_size])
)
self.pad_left = nn.Parameter(
torch.randn(
[
1,
config.num_frames,
3,
config.max_img_size - config.visual_prompt_size * 2,
config.visual_prompt_size,
]
)
)
self.pad_right = nn.Parameter(
torch.randn(
[
1,
config.num_frames,
3,
config.max_img_size - config.visual_prompt_size * 2,
config.visual_prompt_size,
]
)
)
def interpolate_pad_encoding(self, prompt: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained pad weights, to be able to use the model on collection of high
resolution images (high resolution videos).
"""
# creates scale factor from height and width of original image wrt to the config.max_img_size
h0, w0 = height / self.max_img_size, width / self.max_img_size
batch, num_frames, channels, prompt_height, prompt_width = prompt.shape
# reshaping the batch and num_frames dimension into a single one (i.e (b,frames,c,h,w)-->(b*frames,c,h,w)), to apply bicubic interpolation
prompt = prompt.reshape(batch * num_frames, channels, prompt_height, prompt_width)
prompt = nn.functional.interpolate(
prompt,
scale_factor=(h0, w0),
mode="bicubic",
align_corners=False,
)
# reversing back to (batch,frames,channels,height,width), where height and width is the new interpolated height and width
prompt = prompt.reshape(batch, num_frames, channels, height, width)
return prompt
def forward(self, pixel_values, interpolate_pad_encoding: bool = False):
height, width = (
(pixel_values.shape[-2], pixel_values.shape[-1])
if interpolate_pad_encoding
else (self.max_img_size, self.max_img_size)
)
if self.visual_prompter_apply not in ("add", "remove", "replace"):
raise ValueError(f"Invalid visual_prompter_apply value {self.visual_prompter_apply}")
if self.visual_prompter_apply in ("replace", "remove"):
visual_prompt_mask = torch.ones([height, width], dtype=pixel_values.dtype, device=pixel_values.device)
pixel_values *= visual_prompt_mask
if self.visual_prompter_apply in ("replace", "add"):
base = torch.zeros(1, self.num_frames, 3, self.base_size, self.base_size, device=pixel_values.device)
prompt = torch.cat([self.pad_left, base, self.pad_right], dim=4)
prompt = torch.cat([self.pad_up, prompt, self.pad_down], dim=3)
prompt = torch.cat(pixel_values.size(0) * [prompt])
if interpolate_pad_encoding:
prompt = self.interpolate_pad_encoding(prompt, height, width)
pixel_values = pixel_values + prompt.to(pixel_values.dtype)
return pixel_values
TVP_PROMPTER_CLASSES_MAPPING = {
"framedownpad": TvpFrameDownPadPrompter,
"framepad": TvpFramePadPrompter,
}
@auto_docstring(
custom_intro="""
The bare Tvp Model transformer outputting BaseModelOutputWithPooling object without any specific head on top.
"""
)
| TvpFramePadPrompter |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 33380,
"end": 33873
} | class ____(utils.Enum):
# status_request is defined in RFC 6066 and is used for what is commonly
# called OCSP Must-Staple when present in the TLS Feature extension in an
# X.509 certificate.
status_request = 5
# status_request_v2 is defined in RFC 6961 and allows multiple OCSP
# responses to be provided. It is not currently in use by clients or
# servers.
status_request_v2 = 17
_TLS_FEATURE_TYPE_TO_ENUM = {x.value: x for x in TLSFeatureType}
| TLSFeatureType |
python | RaRe-Technologies__gensim | gensim/test/test_corpora.py | {
"start": 10456,
"end": 11459
} | class ____(CorpusTestCase):
def setUp(self):
self.corpus_class = mmcorpus.MmCorpus
self.corpus = self.corpus_class(datapath('test_mmcorpus_no_index.mm'))
self.file_extension = '.mm'
def test_serialize_compressed(self):
# MmCorpus needs file write with seek => doesn't support compressed output (only input)
pass
def test_load(self):
self.assertEqual(self.corpus.num_docs, 9)
self.assertEqual(self.corpus.num_terms, 12)
self.assertEqual(self.corpus.num_nnz, 28)
# confirm we can iterate and that document values match expected for first three docs
it = iter(self.corpus)
self.assertEqual(next(it), [(0, 1.0), (1, 1.0), (2, 1.0)])
self.assertEqual(next(it), [])
self.assertEqual(next(it), [(2, 0.42371910849), (5, 0.6625174), (7, 1.0), (8, 1.0)])
# confirm that accessing document by index fails
self.assertRaises(RuntimeError, lambda: self.corpus[3])
| TestMmCorpusNoIndex |
python | ansible__ansible | lib/ansible/executor/module_common.py | {
"start": 28476,
"end": 41211
} | class ____(ModuleMetadata):
serialization_profile: str
metadata_versions: dict[t.Any, type[ModuleMetadata]] = {
1: ModuleMetadataV1,
}
_DEFAULT_LEGACY_METADATA = ModuleMetadataV1(serialization_profile='legacy')
def _get_module_metadata(module: ast.Module) -> ModuleMetadata:
# experimental module metadata; off by default
if not C.config.get_config_value('_MODULE_METADATA'):
return _DEFAULT_LEGACY_METADATA
metadata_nodes: list[ast.Assign] = []
for node in module.body:
if isinstance(node, ast.Assign):
if len(node.targets) == 1:
target = node.targets[0]
if isinstance(target, ast.Name):
if target.id == 'METADATA':
metadata_nodes.append(node)
if not metadata_nodes:
return _DEFAULT_LEGACY_METADATA
if len(metadata_nodes) > 1:
raise ValueError('Module METADATA must defined only once.')
metadata_node = metadata_nodes[0]
if not isinstance(metadata_node.value, ast.Constant):
raise TypeError(f'Module METADATA node must be {ast.Constant} not {type(metadata_node)}.')
unparsed_metadata = metadata_node.value.value
if not isinstance(unparsed_metadata, str):
raise TypeError(f'Module METADATA must be {str} not {type(unparsed_metadata)}.')
try:
parsed_metadata = yaml_load(unparsed_metadata)
except Exception as ex:
raise ValueError('Module METADATA must be valid YAML.') from ex
if not isinstance(parsed_metadata, dict):
raise TypeError(f'Module METADATA must parse to {dict} not {type(parsed_metadata)}.')
schema_version = parsed_metadata.pop('schema_version', None)
if not (metadata_type := metadata_versions.get(schema_version)):
raise ValueError(f'Module METADATA schema_version {schema_version} is unknown.')
try:
metadata = metadata_type(**parsed_metadata) # type: ignore
except Exception as ex:
raise ValueError('Module METADATA is invalid.') from ex
return metadata
def recursive_finder(
name: str,
module_fqn: str,
module_data: str | bytes,
zf: zipfile.ZipFile,
date_time: datetime.datetime,
extension_manager: _builder.ExtensionManager,
) -> ModuleMetadata:
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
the module and its module_utils files needs. (no longer actually recursive)
:arg name: Name of the python module we're examining
:arg module_fqn: Fully qualified name of the python module we're scanning
:arg module_data: string Python code of the module we're scanning
:arg zf: An open :python:class:`zipfile.ZipFile` object that holds the Ansible module payload
which we're assembling
"""
# py_module_cache maps python module names to a tuple of the code in the module
# and the pathname to the module.
# Here we pre-load it with modules which we create without bothering to
# read from actual files (In some cases, these need to differ from what ansible
# ships because they're namespace packages in the module)
# FIXME: do we actually want ns pkg behavior for these? Seems like they should just be forced to emptyish pkg stubs
py_module_cache = {
('ansible',): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n'
b'__version__="' + to_bytes(__version__) +
b'"\n__author__="' + to_bytes(__author__) + b'"\n',
'ansible/__init__.py'),
('ansible', 'module_utils'): (
b'from pkgutil import extend_path\n'
b'__path__=extend_path(__path__,__name__)\n',
'ansible/module_utils/__init__.py')}
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
tree = _compile_module_ast(name, module_data)
module_metadata = _get_module_metadata(tree)
finder = ModuleDepFinder(module_fqn, tree)
if not isinstance(module_metadata, ModuleMetadataV1):
raise NotImplementedError()
profile = module_metadata.serialization_profile
# the format of this set is a tuple of the module name and whether the import is ambiguous as a module name
# or an attribute of a module (e.g. from x.y import z <-- is z a module or an attribute of x.y?)
modules_to_process = [_ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports) for m in finder.submodules]
# include module_utils that are always required
modules_to_process.extend((
_ModuleUtilsProcessEntry.from_module(_loader),
_ModuleUtilsProcessEntry.from_module(_basic),
_ModuleUtilsProcessEntry.from_module_name(_json.get_module_serialization_profile_module_name(profile, True)),
_ModuleUtilsProcessEntry.from_module_name(_json.get_module_serialization_profile_module_name(profile, False)),
))
modules_to_process.extend(_ModuleUtilsProcessEntry.from_module_name(name) for name in extension_manager.module_names)
module_info: ModuleUtilLocatorBase
# we'll be adding new modules inline as we discover them, so just keep going til we've processed them all
while modules_to_process:
modules_to_process.sort() # not strictly necessary, but nice to process things in predictable and repeatable order
entry = modules_to_process.pop(0)
if entry.name_parts in py_module_cache:
# this is normal; we'll often see the same module imported many times, but we only need to process it once
continue
if entry.name_parts[0:2] == ('ansible', 'module_utils'):
module_info = LegacyModuleUtilLocator(entry.name_parts, is_ambiguous=entry.is_ambiguous,
mu_paths=module_utils_paths, child_is_redirected=entry.child_is_redirected)
elif entry.name_parts[0] == 'ansible_collections':
module_info = CollectionModuleUtilLocator(entry.name_parts, is_ambiguous=entry.is_ambiguous,
child_is_redirected=entry.child_is_redirected, is_optional=entry.is_optional)
else:
# FIXME: dot-joined result
display.warning('ModuleDepFinder improperly found a non-module_utils import %s'
% [entry.name_parts])
continue
# Could not find the module. Construct a helpful error message.
if not module_info.found:
if entry.is_optional:
# this was a best-effort optional import that we couldn't find, oh well, move along...
continue
# FIXME: use dot-joined candidate names
msg = 'Could not find imported module support code for {0}. Looked for ({1})'.format(module_fqn, module_info.candidate_names_joined)
raise AnsibleError(msg)
# check the cache one more time with the module we actually found, since the name could be different than the input
# eg, imported name vs module
if module_info.fq_name_parts in py_module_cache:
continue
tree = _compile_module_ast('.'.join(module_info.fq_name_parts), module_info.source_code)
finder = ModuleDepFinder('.'.join(module_info.fq_name_parts), tree, module_info.is_package)
modules_to_process.extend(_ModuleUtilsProcessEntry(m, True, False, is_optional=m in finder.optional_imports)
for m in finder.submodules if m not in py_module_cache)
# we've processed this item, add it to the output list
py_module_cache[module_info.fq_name_parts] = (module_info.source_code, module_info.output_path)
# ensure we process all ancestor package inits
accumulated_pkg_name = []
for pkg in module_info.fq_name_parts[:-1]:
accumulated_pkg_name.append(pkg) # we're accumulating this across iterations
normalized_name = tuple(accumulated_pkg_name) # extra machinations to get a hashable type (list is not)
if normalized_name not in py_module_cache:
modules_to_process.append(_ModuleUtilsProcessEntry(normalized_name, False, module_info.redirected, is_optional=entry.is_optional))
for py_module_name in py_module_cache:
source_code, py_module_file_name = py_module_cache[py_module_name]
zf.writestr(_make_zinfo(py_module_file_name, date_time, zf=zf), source_code)
if extension_manager.debugger_enabled and (origin := Origin.get_tag(source_code)) and origin.path:
extension_manager.source_mapping[origin.path] = py_module_file_name
mu_file = to_text(py_module_file_name, errors='surrogate_or_strict')
display.vvvvv("Including module_utils file %s" % mu_file)
return module_metadata
def _compile_module_ast(module_name: str, source_code: str | bytes) -> ast.Module:
origin = Origin.get_tag(source_code) or Origin.UNKNOWN
# compile the source, process all relevant imported modules
try:
tree = t.cast(ast.Module, compile(source_code, str(origin), 'exec', ast.PyCF_ONLY_AST))
except SyntaxError as ex:
raise AnsibleError(f"Unable to compile {module_name!r}.", obj=origin.replace(line_num=ex.lineno, col_num=ex.offset)) from ex
return tree
def _is_binary(b_module_data):
"""Heuristic to classify a file as binary by sniffing a 1k header; see https://stackoverflow.com/a/7392391"""
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
def _get_ansible_module_fqn(module_path):
"""
Get the fully qualified name for an ansible module based on its pathname
remote_module_fqn is the fully qualified name. Like ansible.modules.system.ping
Or ansible_collections.Namespace.Collection_name.plugins.modules.ping
.. warning:: This function is for ansible modules only. It won't work for other things
(non-module plugins, etc)
"""
remote_module_fqn = None
# Is this a core module?
match = CORE_LIBRARY_PATH_RE.search(module_path)
if not match:
# Is this a module in a collection?
match = COLLECTION_PATH_RE.search(module_path)
# We can tell the FQN for core modules and collection modules
if match:
path = match.group('path')
if '.' in path:
# FQNs must be valid as python identifiers. This sanity check has failed.
# we could check other things as well
raise ValueError('Module name (or path) was not a valid python identifier')
remote_module_fqn = '.'.join(path.split('/'))
else:
# Currently we do not handle modules in roles so we can end up here for that reason
raise ValueError("Unable to determine module's fully qualified name")
return remote_module_fqn
def _add_module_to_zip(
zf: zipfile.ZipFile,
date_time: datetime.datetime,
remote_module_fqn: str,
b_module_data: bytes,
module_path: str,
extension_manager: _builder.ExtensionManager,
) -> None:
"""Add a module from ansible or from an ansible collection into the module zip"""
module_path_parts = remote_module_fqn.split('.')
# Write the module
zip_module_path = '/'.join(module_path_parts) + '.py'
zf.writestr(
_make_zinfo(zip_module_path, date_time, zf=zf),
b_module_data
)
if extension_manager.debugger_enabled:
extension_manager.source_mapping[module_path] = zip_module_path
existing_paths: frozenset[str]
# Write the __init__.py's necessary to get there
if module_path_parts[0] == 'ansible':
# The ansible namespace is setup as part of the module_utils setup...
start = 2
existing_paths = frozenset()
else:
# ... but ansible_collections and other toplevels are not
start = 1
existing_paths = frozenset(zf.namelist())
for idx in range(start, len(module_path_parts)):
package_path = '/'.join(module_path_parts[:idx]) + '/__init__.py'
# If a collections module uses module_utils from a collection then most packages will have already been added by recursive_finder.
if package_path in existing_paths:
continue
# Note: We don't want to include more than one ansible module in a payload at this time
# so no need to fill the __init__.py with namespace code
zf.writestr(
_make_zinfo(package_path, date_time, zf=zf),
b''
)
@dataclasses.dataclass(kw_only=True, slots=True, frozen=True)
| ModuleMetadataV1 |
python | django__django | tests/model_forms/tests.py | {
"start": 2178,
"end": 2283
} | class ____(forms.ModelForm):
class Meta:
model = Product
fields = "__all__"
| ProductForm |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 67527,
"end": 69200
} | class ____(CType):
# Pythran object of a given type
to_py_function = "__Pyx_pythran_to_python"
is_pythran_expr = True
writable = True
has_attributes = 1
def __init__(self, pythran_type, org_buffer=None):
self.org_buffer = org_buffer
self.pythran_type = pythran_type
self.name = self.pythran_type
self.cname = self.pythran_type
self.from_py_function = "from_python<%s>" % (self.pythran_type)
self.scope = None
def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
assert not pyrex
return "%s %s" % (self.cname, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
# FIXME: fake C scope, might be better represented by a struct or C++ class scope
self.scope = scope = Symtab.CClassScope(
'', None, visibility="extern", parent_type=self
)
scope.directives = {}
scope.declare_var("ndim", c_long_type, pos=None, cname="value", is_cdef=True)
scope.declare_cproperty(
"shape", c_ptr_type(c_long_type), "__Pyx_PythranShapeAccessor",
doc="Pythran array shape",
visibility="extern",
nogil=True,
)
return True
def __eq__(self, other):
return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
def __ne__(self, other):
return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
def __hash__(self):
return hash(self.pythran_type)
| PythranExpr |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/path_registry.py | {
"start": 20766,
"end": 24423
} | class ____(_CreatesToken):
__slots__ = (
"key",
"parent",
"is_aliased_class",
"path",
"entity",
"natural_path",
)
has_entity = True
is_entity = True
parent: Union[RootRegistry, _PropRegistry]
key: _InternalEntityType[Any]
entity: _InternalEntityType[Any]
is_aliased_class: bool
def __init__(
self,
parent: Union[RootRegistry, _PropRegistry],
entity: _InternalEntityType[Any],
):
self.key = entity
self.parent = parent
self.is_aliased_class = entity.is_aliased_class
self.entity = entity
self.path = parent.path + (entity,)
# the "natural path" is the path that we get when Query is traversing
# from the lead entities into the various relationships; it corresponds
# to the structure of mappers and relationships. when we are given a
# path that comes from loader options, as of 1.3 it can have ac-hoc
# with_polymorphic() and other AliasedInsp objects inside of it, which
# are usually not present in mappings. So here we track both the
# "enhanced" path in self.path and the "natural" path that doesn't
# include those objects so these two traversals can be matched up.
# the test here for "(self.is_aliased_class or parent.is_unnatural)"
# are to avoid the more expensive conditional logic that follows if we
# know we don't have to do it. This conditional can just as well be
# "if parent.path:", it just is more function calls.
#
# This is basically the only place that the "is_unnatural" flag
# actually changes behavior.
if parent.path and (self.is_aliased_class or parent.is_unnatural):
# this is an infrequent code path used only for loader strategies
# that also make use of of_type().
if entity.mapper.isa(parent.natural_path[-1].mapper): # type: ignore # noqa: E501
self.natural_path = parent.natural_path + (entity.mapper,)
else:
self.natural_path = parent.natural_path + (
parent.natural_path[-1].entity, # type: ignore
)
# it seems to make sense that since these paths get mixed up
# with statements that are cached or not, we should make
# sure the natural path is cacheable across different occurrences
# of equivalent AliasedClass objects. however, so far this
# does not seem to be needed for whatever reason.
# elif not parent.path and self.is_aliased_class:
# self.natural_path = (self.entity._generate_cache_key()[0], )
else:
self.natural_path = self.path
def _truncate_recursive(self) -> _AbstractEntityRegistry:
return self.parent._truncate_recursive()[self.entity]
@property
def root_entity(self) -> _InternalEntityType[Any]:
return self.odd_element(0)
@property
def entity_path(self) -> PathRegistry:
return self
@property
def mapper(self) -> Mapper[Any]:
return self.entity.mapper
def __bool__(self) -> bool:
return True
def _getitem(
self, entity: Any
) -> Union[_PathElementType, _PathRepresentation, PathRegistry]:
if isinstance(entity, (int, slice)):
return self.path[entity]
elif entity in PathToken._intern:
return _TokenRegistry(self, PathToken._intern[entity])
else:
return _PropRegistry(self, entity)
if not TYPE_CHECKING:
__getitem__ = _getitem
| _AbstractEntityRegistry |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/asset_backfill.py | {
"start": 4355,
"end": 4929
} | class ____(
NamedTuple(
"_UnpartitionedAssetBackfillStatus",
[("asset_key", AssetKey), ("backfill_status", Optional[AssetBackfillStatus])],
)
):
def __new__(cls, asset_key: AssetKey, asset_backfill_status: Optional[AssetBackfillStatus]):
return super().__new__(
cls,
check.inst_param(asset_key, "asset_key", AssetKey),
check.opt_inst_param(
asset_backfill_status, "asset_backfill_status", AssetBackfillStatus
),
)
@whitelist_for_serdes
| UnpartitionedAssetBackfillStatus |
python | kamyu104__LeetCode-Solutions | Python/determine-if-string-halves-are-alike.py | {
"start": 29,
"end": 423
} | class ____(object):
def halvesAreAlike(self, s):
"""
:type s: str
:rtype: bool
"""
vowels = set("aeiouAEIOU")
cnt1 = cnt2 = 0
left, right = 0, len(s)-1
while left < right:
cnt1 += s[left] in vowels
cnt2 += s[right] in vowels
left += 1
right -= 1
return cnt1 == cnt2
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 24565,
"end": 24664
} | class ____(DatabaseError):
"""Wraps a DB-API IntegrityError."""
code = "gkpj"
| IntegrityError |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/circular1.py | {
"start": 96,
"end": 290
} | class ____:
# This should generate two errors because "str" refers to itself
# and it is a variable, so it's an illegal annotation.
str: str = ""
int = int
test: int
| Example1 |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 318642,
"end": 320073
} | class ____(Request):
"""
Get the list of task types used in the specified projects
:param projects: The list of projects which tasks will be analyzed. If not
passed or empty then all the company and public tasks will be analyzed
:type projects: Sequence[str]
"""
_service = "tasks"
_action = "get_types"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"projects": {
"description": "The list of projects which tasks will be analyzed. If not passed or empty then all the company and public tasks will be analyzed",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, projects: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetTypesRequest, self).__init__(**kwargs)
self.projects = projects
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
| GetTypesRequest |
python | kamyu104__LeetCode-Solutions | Python/last-visited-integers.py | {
"start": 37,
"end": 484
} | class ____(object):
def lastVisitedIntegers(self, words):
"""
:type words: List[str]
:rtype: List[int]
"""
PREV = "prev"
result, stk = [], []
i = -1
for x in words:
if x == PREV:
result.append(stk[i] if i >= 0 else -1)
i -= 1
continue
stk.append(int(x))
i = len(stk)-1
return result
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/perflint/PERF401.py | {
"start": 1100,
"end": 5812
} | class ____:
def append(self, x):
pass
def f():
items = [1, 2, 3, 4]
result = Foo()
for i in items:
result.append(i) # Ok
async def f():
items = [1, 2, 3, 4]
result = []
async for i in items:
if i % 2:
result.append(i) # PERF401
async def f():
items = [1, 2, 3, 4]
result = []
async for i in items:
result.append(i) # PERF401
async def f():
items = [1, 2, 3, 4]
result = [1, 2]
async for i in items:
result.append(i) # PERF401
def f():
result, _ = [1, 2, 3, 4], ...
for i in range(10):
result.append(i * 2) # PERF401
def f():
result = []
if True:
for i in range(10): # single-line comment 1 should be protected
# single-line comment 2 should be protected
if i % 2: # single-line comment 3 should be protected
result.append(i) # PERF401
def f():
result = [] # comment after assignment should be protected
for i in range(10): # single-line comment 1 should be protected
# single-line comment 2 should be protected
if i % 2: # single-line comment 3 should be protected
result.append(i) # PERF401
def f():
result = []
for i in range(10):
"""block comment stops the fix"""
result.append(i * 2) # Ok
def f(param):
# PERF401
# make sure the fix does not panic if there is no comments
if param:
new_layers = []
for value in param:
new_layers.append(value * 3)
def f():
result = []
var = 1
for _ in range(10):
result.append(var + 1) # PERF401
def f():
# make sure that `tmp` is not deleted
tmp = 1; result = [] # comment should be protected
for i in range(10):
result.append(i + 1) # PERF401
def f():
# make sure that `tmp` is not deleted
result = []; tmp = 1 # comment should be protected
for i in range(10):
result.append(i + 1) # PERF401
def f():
result = [] # comment should be protected
for i in range(10):
result.append(i * 2) # PERF401
def f():
result = []
result.append(1)
for i in range(10):
result.append(i * 2) # PERF401
def f():
result = []
result += [1]
for i in range(10):
result.append(i * 2) # PERF401
def f():
result = []
for val in range(5):
result.append(val * 2) # Ok
print(val)
def f():
result = []
for val in range(5):
result.append(val * 2) # PERF401
val = 1
print(val)
def f():
i = [1, 2, 3]
result = []
for i in i:
result.append(i + 1) # PERF401
def f():
result = []
for i in range( # Comment 1 should not be duplicated
(
2 # Comment 2
+ 1
)
): # Comment 3
if i % 2: # Comment 4
result.append(
(
i + 1,
# Comment 5
2,
)
) # PERF401
def f():
result: list[int] = []
for i in range(10):
result.append(i * 2) # PERF401
def f():
a, b = [1, 2, 3], [4, 5, 6]
result = []
for i in a, b:
result.append(i[0] + i[1]) # PERF401
return result
def f():
values = [1, 2, 3]
result = []
for a in values:
print(a)
for a in values:
result.append(a + 1) # PERF401
def f():
values = [1, 2, 3]
def g():
for a in values:
result.append(a + 1) # PERF401
result = []
def f():
values = [1, 2, 3]
result = []
for i in values:
result.append(i + 1) # Ok
del i
# The fix here must parenthesize the walrus operator
# https://github.com/astral-sh/ruff/issues/15047
def f():
items = []
for i in range(5):
if j := i:
items.append(j)
def f():
values = [1, 2, 3]
result = list() # this should be replaced with a comprehension
for i in values:
result.append(i + 1) # PERF401
def f():
src = [1]
dst = []
for i in src:
if True if True else False:
dst.append(i)
for i in src:
if lambda: 0:
dst.append(i)
def f():
i = "xyz"
result = []
for i in range(3):
result.append(x for x in [i])
def f():
i = "xyz"
result = []
for i in range(3):
result.append((x for x in [i]))
G_INDEX = None
def f():
global G_INDEX
result = []
for G_INDEX in range(3):
result.append(G_INDEX)
def f():
NL_INDEX = None
def x():
nonlocal NL_INDEX
result = []
for NL_INDEX in range(3):
result.append(NL_INDEX) | Foo |
python | huggingface__transformers | src/transformers/models/reformer/modeling_reformer.py | {
"start": 62455,
"end": 62907
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
| ReformerFeedForwardOutput |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py | {
"start": 13774,
"end": 14778
} | class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook):
op = ManagedKafkaDeleteTopicOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.delete_topic.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestManagedKafkaDeleteTopicOperator |
python | joerick__pyinstrument | test/test_cmdline_main.py | {
"start": 203,
"end": 2567
} | class ____(FrameRenderer):
def __init__(self, time=None, **kwargs):
self.time = time
super().__init__(**kwargs)
global fake_renderer_instance
fake_renderer_instance = self
print("instance")
def default_processors(self):
"""
Return a list of processors that this renderer uses by default.
"""
return []
def render(self, session) -> str:
return ""
def test_renderer_option(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
(tmp_path / "test_program.py").write_text(BUSY_WAIT_SCRIPT)
monkeypatch.setattr(
"sys.argv",
[
"pyinstrument",
"-r",
"test.test_cmdline_main.FakeRenderer",
"-p",
"time=percent_of_total",
"test_program.py",
],
)
monkeypatch.chdir(tmp_path)
global fake_renderer_instance
fake_renderer_instance = None
main()
assert fake_renderer_instance is not None
assert fake_renderer_instance.time == "percent_of_total"
def test_json_renderer_option(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
(tmp_path / "test_program.py").write_text(BUSY_WAIT_SCRIPT)
monkeypatch.setattr(
"sys.argv",
[
"pyinstrument",
"-r",
"test.test_cmdline_main.FakeRenderer",
"-p",
'processor_options={"some_option": 44}',
"test_program.py",
],
)
monkeypatch.chdir(tmp_path)
global fake_renderer_instance
fake_renderer_instance = None
main()
assert fake_renderer_instance is not None
assert fake_renderer_instance.processor_options["some_option"] == 44
def test_dotted_renderer_option(monkeypatch: pytest.MonkeyPatch, tmp_path: Path):
(tmp_path / "test_program.py").write_text(BUSY_WAIT_SCRIPT)
monkeypatch.setattr(
"sys.argv",
[
"pyinstrument",
"-r",
"test.test_cmdline_main.FakeRenderer",
"-p",
"processor_options.other_option=13",
"test_program.py",
],
)
monkeypatch.chdir(tmp_path)
global fake_renderer_instance
fake_renderer_instance = None
main()
assert fake_renderer_instance is not None
assert fake_renderer_instance.processor_options["other_option"] == 13
| FakeRenderer |
python | modin-project__modin | modin/tests/pandas/test_io.py | {
"start": 80217,
"end": 88111
} | class ____:
@check_file_leaks
@pytest.mark.parametrize("pathlike", [False, True])
def test_read_excel(self, pathlike, make_excel_file):
unique_filename = make_excel_file()
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=Path(unique_filename) if pathlike else unique_filename,
)
@check_file_leaks
@pytest.mark.parametrize("skiprows", [2, [1, 3], lambda x: x in [0, 2]])
def test_read_excel_skiprows(self, skiprows, make_excel_file):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=make_excel_file(),
skiprows=skiprows,
check_kwargs_callable=False,
)
@check_file_leaks
@pytest.mark.parametrize(
"dtype_backend", [lib.no_default, "numpy_nullable", "pyarrow"]
)
def test_read_excel_dtype_backend(self, make_excel_file, dtype_backend):
def comparator(df1, df2):
df_equals(df1, df2)
df_equals(df1.dtypes, df2.dtypes)
eval_io(
fn_name="read_excel",
# read_csv kwargs
io=make_excel_file(),
dtype_backend=dtype_backend,
comparator=comparator,
)
@check_file_leaks
def test_read_excel_engine(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=(UserWarning if StorageFormat.get() == "Pandas" else None),
# read_excel kwargs
io=make_excel_file(),
engine="openpyxl",
)
@check_file_leaks
def test_read_excel_index_col(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=(UserWarning if StorageFormat.get() == "Pandas" else None),
# read_excel kwargs
io=make_excel_file(),
index_col=0,
)
@check_file_leaks
def test_read_excel_all_sheets(self, make_excel_file):
unique_filename = make_excel_file()
pandas_df = pandas.read_excel(unique_filename, sheet_name=None)
modin_df = pd.read_excel(unique_filename, sheet_name=None)
assert isinstance(pandas_df, dict)
assert isinstance(modin_df, type(pandas_df))
assert pandas_df.keys() == modin_df.keys()
for key in pandas_df.keys():
df_equals(modin_df.get(key), pandas_df.get(key))
# TODO: Check pandas gh-#39250 as it was fixed
@pytest.mark.xfail(
(StorageFormat.get() == "Pandas" and Engine.get() != "Python"),
reason="pandas throws the exception. See pandas issue #39250 for more info",
)
@check_file_leaks
def test_read_excel_sheetname_title(self):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io="modin/tests/pandas/data/excel_sheetname_title.xlsx",
# FIXME: https://github.com/modin-project/modin/issues/7036
expected_exception=False,
)
@check_file_leaks
def test_excel_empty_line(self):
path = "modin/tests/pandas/data/test_emptyline.xlsx"
modin_df = pd.read_excel(path)
assert str(modin_df)
@check_file_leaks
def test_read_excel_empty_rows(self):
# Test parsing empty rows in middle of excel dataframe as NaN values
eval_io(
fn_name="read_excel",
io="modin/tests/pandas/data/test_empty_rows.xlsx",
)
@check_file_leaks
def test_read_excel_border_rows(self):
# Test parsing border rows as NaN values in excel dataframe
eval_io(
fn_name="read_excel",
io="modin/tests/pandas/data/test_border_rows.xlsx",
)
@check_file_leaks
def test_read_excel_every_other_nan(self):
# Test for reading excel dataframe with every other row as a NaN value
eval_io(
fn_name="read_excel",
io="modin/tests/pandas/data/every_other_row_nan.xlsx",
)
@check_file_leaks
def test_read_excel_header_none(self):
eval_io(
fn_name="read_excel",
io="modin/tests/pandas/data/every_other_row_nan.xlsx",
header=None,
)
@pytest.mark.parametrize(
"sheet_name",
[
"Sheet1",
"AnotherSpecialName",
"SpecialName",
"SecondSpecialName",
0,
1,
2,
3,
],
)
@check_file_leaks
def test_read_excel_sheet_name(self, sheet_name):
eval_io(
fn_name="read_excel",
# read_excel kwargs
io="modin/tests/pandas/data/modin_error_book.xlsx",
sheet_name=sheet_name,
# https://github.com/modin-project/modin/issues/5965
comparator_kwargs={"check_dtypes": False},
)
def test_ExcelFile(self, make_excel_file):
unique_filename = make_excel_file()
modin_excel_file = pd.ExcelFile(unique_filename)
pandas_excel_file = pandas.ExcelFile(unique_filename)
try:
df_equals(modin_excel_file.parse(), pandas_excel_file.parse())
assert modin_excel_file.io == unique_filename
finally:
modin_excel_file.close()
pandas_excel_file.close()
def test_ExcelFile_bytes(self, make_excel_file):
unique_filename = make_excel_file()
with open(unique_filename, mode="rb") as f:
content = f.read()
modin_excel_file = pd.ExcelFile(content)
pandas_excel_file = pandas.ExcelFile(content)
df_equals(modin_excel_file.parse(), pandas_excel_file.parse())
def test_read_excel_ExcelFile(self, make_excel_file):
unique_filename = make_excel_file()
with open(unique_filename, mode="rb") as f:
content = f.read()
modin_excel_file = pd.ExcelFile(content)
pandas_excel_file = pandas.ExcelFile(content)
df_equals(pd.read_excel(modin_excel_file), pandas.read_excel(pandas_excel_file))
@pytest.mark.parametrize("use_bytes_io", [False, True])
def test_read_excel_bytes(self, use_bytes_io, make_excel_file):
unique_filename = make_excel_file()
with open(unique_filename, mode="rb") as f:
io_bytes = f.read()
if use_bytes_io:
io_bytes = BytesIO(io_bytes)
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=io_bytes,
)
def test_read_excel_file_handle(self, make_excel_file):
unique_filename = make_excel_file()
with open(unique_filename, mode="rb") as f:
eval_io(
fn_name="read_excel",
# read_excel kwargs
io=f,
)
@pytest.mark.xfail(strict=False, reason="Flaky test, defaults to pandas")
def test_to_excel(self, tmp_path):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
unique_filename_modin = get_unique_filename(extension="xlsx", data_dir=tmp_path)
unique_filename_pandas = get_unique_filename(
extension="xlsx", data_dir=tmp_path
)
modin_writer = pandas.ExcelWriter(unique_filename_modin)
pandas_writer = pandas.ExcelWriter(unique_filename_pandas)
modin_df.to_excel(modin_writer)
pandas_df.to_excel(pandas_writer)
modin_writer.save()
pandas_writer.save()
assert assert_files_eq(unique_filename_modin, unique_filename_pandas)
@check_file_leaks
def test_read_excel_empty_frame(self, make_excel_file):
eval_io(
fn_name="read_excel",
modin_warning=(UserWarning if StorageFormat.get() == "Pandas" else None),
# read_excel kwargs
io=make_excel_file(),
usecols=[0],
index_col=0,
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
| TestExcel |
python | allegroai__clearml | clearml/backend_interface/task/hyperparams.py | {
"start": 264,
"end": 9405
} | class ____(object):
def __init__(self, task: Any) -> None:
self.task = task
def get_hyper_params(
self,
sections: Optional[Sequence[str]] = None,
selector: Optional[Callable[[dict], bool]] = None,
projector: Optional[Callable[[dict], Any]] = None,
return_obj: Optional[bool] = False,
) -> Dict[str, Union[Dict, Any]]:
"""
Get hyper-parameters for this task.
Returns a dictionary mapping user property name to user property details dict.
:param sections: Return only hyper-params in the provided sections
:param selector: A callable selecting which hyper-parameters should be returned
:param projector: A callable to project values before they are returned
:param return_obj: If True, returned dictionary values are API objects (tasks.ParamsItem). If ``projeictor
"""
if not Session.check_min_api_version("2.9"):
raise ValueError("Not supported by server")
task_id = self.task.task_id
res = self.task.session.send(tasks.GetHyperParamsRequest(tasks=[task_id]))
hyperparams = defaultdict(defaultdict)
if res.ok() and res.response.params:
for entry in res.response.params:
if entry.get("task") == task_id:
for item in entry.get("hyperparams", []):
# noinspection PyBroadException
try:
if (sections and item.get("section") not in sections) or (selector and not selector(item)):
continue
if return_obj:
item = tasks.ParamsItem()
hyperparams[item.get("section")][item.get("name")] = (
item if not projector else projector(item)
)
except Exception:
self.task.log.exception("Failed processing hyper-parameter")
return hyperparams
def edit_hyper_params(
self,
iterables: Union[
Mapping[str, Union[str, Dict, None]],
Iterable[Union[Dict, "tasks.ParamsItem"]],
],
replace: Optional[str] = None,
default_section: Optional[str] = None,
force_section: Optional[str] = None,
) -> bool:
"""
Set hyper-parameters for this task.
:param iterables: Hyper parameter iterables, each can be:
* A dictionary of string key (name) to either a string value (value), a tasks.ParamsItem or a dict
(hyperparam details). If ``default_section`` is not provided, each dict must contain a "section" field.
* An iterable of tasks.ParamsItem or dicts (each representing hyperparam details).
Each dict must contain a "name" field. If ``default_section`` is not provided, each dict must
also contain a "section" field.
:param replace: Optional replace strategy, values are:
* 'all' - provided hyper-params replace all existing hyper-params in task
* 'section' - only sections present in the provided hyper-params are replaced
* 'none' (default) - provided hyper-params will be merged into existing task hyper-params (i.e. will be
added or update existing hyper-params)
:param default_section: Optional section name to be used when section is not explicitly provided.
:param force_section: Optional section name to be used for all hyper-params.
"""
if not Session.check_min_api_version("2.9"):
raise ValueError("Not supported by server")
escape_unsafe = not Session.check_min_api_version("2.11")
if not tasks.ReplaceHyperparamsEnum.has_value(replace):
replace = None
def make_item(value: Union["tasks.ParamsItem", dict, tuple], name: Optional[str] = None) -> "tasks.ParamsItem":
if isinstance(value, tasks.ParamsItem):
a_item = value
elif isinstance(value, dict):
a_item = tasks.ParamsItem(**{k: None if v is None else str(v) for k, v in value.items()})
elif isinstance(value, tuple) and len(value) == 2 and isinstance(value[1], dict) and "value" in value[1]:
a_item = tasks.ParamsItem(
name=str(value[0]), **{k: None if v is None else str(v) for k, v in value[1].items()}
)
elif isinstance(value, tuple):
a_item = tasks.ParamsItem(name=str(value[0]), value=str(value[1]))
else:
a_item = tasks.ParamsItem(value=str(value))
if name:
a_item.name = str(name)
if not a_item.name:
raise ValueError("Missing hyper-param name for '{}'".format(value))
section = force_section or a_item.section or default_section
if not section:
raise ValueError("Missing hyper-param section for '{}'".format(value))
# force string value
if escape_unsafe:
a_item.section, a_item.name = self._escape_unsafe_values(section, a_item.name)
else:
a_item.section = section
return a_item
props = {}
if isinstance(iterables, dict):
props.update({name: make_item(name=name, value=value) for name, value in iterables.items()})
else:
for i in iterables:
item = make_item(i)
props.update({item.name: item})
if self.task.is_offline():
hyperparams = self.task.data.hyperparams or {}
hyperparams.setdefault("properties", tasks.SectionParams())
hyperparams["properties"].update(props)
self.task._save_data_to_offline_dir(hyperparams=hyperparams)
return True
res = self.task.session.send(
tasks.EditHyperParamsRequest(
task=self.task.task_id,
hyperparams=props.values(),
replace_hyperparams=replace,
),
)
if res.ok():
self.task.reload()
return True
return False
def delete_hyper_params(
self, *iterables: Iterable[Union[dict, Iterable[str], "tasks.ParamKey", "tasks.ParamsItem"]]
) -> bool:
"""
Delete hyper-parameters for this task.
:param iterables: Hyper parameter key iterables. Each an iterable whose possible values each represent
a hyper-parameter entry to delete, value formats are:
* A dictionary containing a 'section' and 'name' fields
* An iterable (e.g. tuple, list etc.) whose first two items denote 'section' and 'name'
* An API object of type tasks.ParamKey or tasks.ParamsItem whose section and name fields are not empty
"""
if not Session.check_min_api_version("2.9"):
raise ValueError("Not supported by server")
def get_key(value: Union[dict, Iterable[str], tasks.ParamKey, tasks.ParamsItem]) -> Tuple[str, str]:
if isinstance(value, dict):
key = (value.get("section"), value.get("name"))
elif isinstance(value, (tasks.ParamKey, tasks.ParamsItem)):
key = (value.section, value.name)
else:
key = tuple(map(str, value))[:2]
if not all(key):
raise ValueError("Missing section or name in '{}'".format(value))
return key
keys = {get_key(value) for iterable in iterables for value in iterable}
res = self.task.session.send(
tasks.DeleteHyperParamsRequest(
task=self.task.task_id,
hyperparams=[tasks.ParamKey(section=section, name=name) for section, name in keys],
),
)
if res.ok():
self.task.reload()
return True
return False
def _escape_unsafe_values(self, *values: str) -> Generator[str, None, None]:
"""Escape unsafe values (name, section name) for API version 2.10 and below"""
for value in values:
if value not in UNSAFE_NAMES_2_10:
yield value
else:
self.task.log.info(
"Converting unsafe hyper parameter name/section '{}' to '{}'".format(value, "_" + value)
)
yield "_" + value
UNSAFE_NAMES_2_10 = {
"ne",
"gt",
"gte",
"lt",
"lte",
"in",
"nin",
"mod",
"all",
"size",
"exists",
"not",
"elemMatch",
"type",
"within_distance",
"within_spherical_distance",
"within_box",
"within_polygon",
"near",
"near_sphere",
"max_distance",
"min_distance",
"geo_within",
"geo_within_box",
"geo_within_polygon",
"geo_within_center",
"geo_within_sphere",
"geo_intersects",
"contains",
"icontains",
"startswith",
"istartswith",
"endswith",
"iendswith",
"exact",
"iexact",
"match",
}
| HyperParams |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 19613,
"end": 21572
} | class ____(TestCase):
def test_basic(self):
for iterable, expected in (
# easy case
([0, 1, 2, 3], (0, 3)),
# min and max are not in the extremes + we have `int`s and `float`s
([3, 5.5, -1, 2], (-1, 5.5)),
# unordered collection
({3, 5.5, -1, 2}, (-1, 5.5)),
# with repetitions
([3, 5.5, float('-Inf'), 5.5], (float('-Inf'), 5.5)),
# other collections
('banana', ('a', 'n')),
({0: 1, 2: 100, 1: 10}, (0, 2)),
(range(3, 14), (3, 13)),
):
with self.subTest(iterable=iterable, expected=expected):
# check for expected results
self.assertTupleEqual(mi.minmax(iterable), expected)
# check for equality with built-in `min` and `max`
self.assertTupleEqual(
mi.minmax(iterable), (min(iterable), max(iterable))
)
def test_unpacked(self):
self.assertTupleEqual(mi.minmax(2, 3, 1), (1, 3))
self.assertTupleEqual(mi.minmax(12, 3, 4, key=str), (12, 4))
def test_iterables(self):
self.assertTupleEqual(mi.minmax(x for x in [0, 1, 2, 3]), (0, 3))
self.assertTupleEqual(
mi.minmax(map(str, [3, 5.5, 'a', 2])), ('2', 'a')
)
self.assertTupleEqual(
mi.minmax(filter(None, [0, 3, '', None, 10])), (3, 10)
)
def test_key(self):
self.assertTupleEqual(
mi.minmax({(), (1, 4, 2), 'abcde', range(4)}, key=len),
((), 'abcde'),
)
self.assertTupleEqual(
mi.minmax((x for x in [10, 3, 25]), key=str), (10, 3)
)
def test_default(self):
with self.assertRaises(ValueError):
mi.minmax([])
self.assertIs(mi.minmax([], default=None), None)
self.assertListEqual(mi.minmax([], default=[1, 'a']), [1, 'a'])
| MinMaxTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/components.py | {
"start": 13951,
"end": 14754
} | class ____(ValidationStrategy):
"""
Validate that stream names are unique across all report options in `report_options_list`.
"""
def validate(self, value: Any) -> None:
report_options_list = value
if not isinstance(report_options_list, list) or len(report_options_list) == 0:
return
stream_names = []
for report_option in report_options_list:
if report_option["stream_name"] in stream_names:
raise ValueError(
f"Stream names (`stream_name`) should be unique across all report options in `report_options_list`. Duplicate value: {report_option['stream_name']}"
)
stream_names.append(report_option["stream_name"])
@dataclass
| ValidateReportOptionsListStreamNameUniqueness |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 1626,
"end": 1832
} | class ____(RoleNameMixin, UsernameMixin, wtforms.Form):
def __init__(self, *args, user_service, **kwargs):
super().__init__(*args, **kwargs)
self.user_service = user_service
| CreateRoleForm |
python | python-poetry__poetry | src/poetry/mixology/set_relation.py | {
"start": 37,
"end": 211
} | class ____:
"""
An enum of possible relationships between two sets.
"""
SUBSET = "subset"
DISJOINT = "disjoint"
OVERLAPPING = "overlapping"
| SetRelation |
python | Textualize__rich | tests/test_pretty.py | {
"start": 4968,
"end": 5161
} | class ____:
foo: int
bar: str
ignore: int = field(repr=False)
baz: List[str] = field(default_factory=list)
last: int = field(default=1, repr=False)
@dataclass
| ExampleDataclass |
python | ansible__ansible | test/units/_internal/templating/test_access.py | {
"start": 1124,
"end": 1362
} | class ____(LoggingTagAccessNotifier):
_type_interest = frozenset([ExampleSingletonTag])
def _notify(self, o: t.Any) -> t.Any:
super()._log(o) # get parent logging behavior
return o
| ExampleSingletonTagAccessNotifier |
python | getsentry__sentry | src/flagpole/conditions.py | {
"start": 5372,
"end": 5816
} | class ____(ConditionBase):
value: ContainsOperatorValueTypes
operator: str = dataclasses.field(default="not_contains")
def _operator_match(self, condition_property: Any, segment_name: str):
return not self._evaluate_contains(
condition_property=condition_property, segment_name=segment_name
)
EqualsOperatorValueTypes = int | float | str | bool | list[int] | list[float] | list[str]
| NotContainsCondition |
python | getsentry__sentry | src/sentry/integrations/aws_lambda/integration.py | {
"start": 2910,
"end": 7213
} | class ____(IntegrationInstallation, ServerlessMixin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._client = None
@property
def region(self):
return self.metadata["region"]
@property
def client(self):
if not self._client:
region = self.metadata["region"]
account_number = self.metadata["account_number"]
aws_external_id = self.metadata["aws_external_id"]
self._client = gen_aws_client(
account_number=account_number,
region=region,
aws_external_id=aws_external_id,
)
return self._client
def get_client(self) -> Any:
return self.client
def get_one_lambda_function(self, name):
# https://boto3.amazonaws.com/v1/documentation/api/1.22.12/reference/services/lambda.html
return self.client.get_function(FunctionName=name)["Configuration"]
def get_serialized_lambda_function(self, name):
function = self.get_one_lambda_function(name)
return self.serialize_lambda_function(function)
def serialize_lambda_function(self, function):
layers = get_function_layer_arns(function)
layer_arn = get_latest_layer_for_function(function)
function_runtime = function["Runtime"]
# find our sentry layer
sentry_layer_index = get_index_of_sentry_layer(layers, layer_arn)
if sentry_layer_index > -1:
sentry_layer = layers[sentry_layer_index]
# determine the version and if it's out of date
latest_version = get_latest_layer_version(function)
current_version = get_version_of_arn(sentry_layer)
out_of_date = latest_version > current_version
if function_runtime.startswith("python"):
# If env variable "SENTRY_INITIAL_HANDLER" is not present, then
# it is should be assumed that this function is not enabled!
env_variables = function.get("Environment", {}).get("Variables", {})
if "SENTRY_INITIAL_HANDLER" not in env_variables:
current_version = -1
out_of_date = False
else:
current_version = -1
out_of_date = False
return {
"name": function["FunctionName"],
"runtime": function_runtime,
"version": current_version,
"outOfDate": out_of_date,
"enabled": current_version > -1,
}
# ServerlessMixin interface
def get_serverless_functions(self):
"""
Returns a list of serverless functions
"""
functions = get_supported_functions(self.client)
functions.sort(key=lambda x: x["FunctionName"].lower())
return [self.serialize_lambda_function(function) for function in functions]
@wrap_lambda_updater()
def enable_function(self, target):
function = self.get_one_lambda_function(target)
config_data = self.get_config_data()
project_id = config_data["default_project_id"]
sentry_project_dsn = get_dsn_for_project(self.organization_id, project_id)
enable_single_lambda(self.client, function, sentry_project_dsn)
return self.get_serialized_lambda_function(target)
@wrap_lambda_updater()
def disable_function(self, target):
function = self.get_one_lambda_function(target)
layer_arn = get_latest_layer_for_function(function)
disable_single_lambda(self.client, function, layer_arn)
return self.get_serialized_lambda_function(target)
@wrap_lambda_updater()
def update_function_to_latest_version(self, target):
function = self.get_one_lambda_function(target)
layer_arn = get_latest_layer_for_function(function)
layers = get_function_layer_arns(function)
# update our layer if we find it
sentry_layer_index = get_index_of_sentry_layer(layers, layer_arn)
if sentry_layer_index > -1:
layers[sentry_layer_index] = layer_arn
self.client.update_function_configuration(
FunctionName=target,
Layers=layers,
)
return self.get_serialized_lambda_function(target)
| AwsLambdaIntegration |
python | catalyst-team__catalyst | catalyst/callbacks/metrics/segmentation.py | {
"start": 4129,
"end": 8047
} | class ____(BatchMetricCallback):
"""Dice metric callback.
Args:
input_key: input key to use for metric calculation, specifies our `y_pred`
target_key: output key to use for metric calculation, specifies our `y_true`
class_dim: indicates class dimension (K) for ``outputs`` and
``targets`` tensors (default = 1)
weights: class weights
class_names: class names
threshold: threshold for outputs binarization
log_on_batch: boolean flag to log computed metrics every batch
compute_per_class_metrics: boolean flag to compute per-class metrics
(default: SETTINGS.compute_per_class_metrics or False).
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import os
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.data import ToTensor
from catalyst.contrib import MNIST, IoULoss
model = nn.Sequential(
nn.Conv2d(1, 1, 3, 1, 1), nn.ReLU(),
nn.Conv2d(1, 1, 3, 1, 1), nn.Sigmoid(),
)
criterion = IoULoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
loaders = {
"train": DataLoader(
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()),
batch_size=32
),
"valid": DataLoader(
MNIST(os.getcwd(), train=False),
batch_size=32
),
}
class CustomRunner(dl.SupervisedRunner):
def handle_batch(self, batch):
x = batch[self._input_key]
x_noise = (x + torch.rand_like(x)).clamp_(0, 1)
x_ = self.model(x_noise)
self.batch = {
self._input_key: x, self._output_key: x_, self._target_key: x
}
runner = CustomRunner(
input_key="features",
output_key="scores",
target_key="targets",
loss_key="loss"
)
# model training
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
callbacks=[
dl.IOUCallback(input_key="scores", target_key="targets"),
dl.DiceCallback(input_key="scores", target_key="targets"),
dl.TrevskyCallback(input_key="scores", target_key="targets", alpha=0.2),
],
logdir="./logdir",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
verbose=True,
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
input_key: str,
target_key: str,
class_dim: int = 1,
weights: Optional[List[float]] = None,
class_names: Optional[List[str]] = None,
threshold: Optional[float] = None,
log_on_batch: bool = True,
compute_per_class_metrics: bool = SETTINGS.compute_per_class_metrics,
prefix: str = None,
suffix: str = None,
):
"""Init."""
super().__init__(
metric=DiceMetric(
class_dim=class_dim,
weights=weights,
class_names=class_names,
threshold=threshold,
compute_per_class_metrics=compute_per_class_metrics,
prefix=prefix,
suffix=suffix,
),
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
| DiceCallback |
python | PrefectHQ__prefect | tests/server/orchestration/test_rules.py | {
"start": 2847,
"end": 40706
} | class ____:
async def test_orchestration_rules_are_context_managers(self, session, task_run):
side_effect = 0
class IllustrativeRule(BaseOrchestrationRule):
# we implement rules by inheriting from `BaseOrchestrationRule`
# in order to do so, we need to define three methods:
# when creating a rule, we need to specify lists of valid
# state types the rule can operate on
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
# a before-transition hook that fires upon entering the rule, returns None
# and is the only opportunity for a rule to modify the state transition
# by calling a state mutation method like `self.reject_transision`
async def before_transition(
self, initial_state, proposed_state, context
) -> None:
nonlocal side_effect
side_effect += 1
# an after-transition hook that returns None, fires after a state
# is validated and committed to the DB
async def after_transition(
self, initial_state, validated_state, context
) -> None:
nonlocal side_effect
side_effect += 1
# the cleanup step returns None, and allows a rule to revert side-effects caused
# by the before-transition hook in case the transition does not complete
async def cleanup(self, initial_state, validated_state, context) -> None:
nonlocal side_effect
side_effect -= 1
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
rule_as_context_manager = IllustrativeRule(ctx, *intended_transition)
context_call = MagicMock()
# rules govern logic by being used as a context manager
async with rule_as_context_manager as ctx:
context_call()
assert context_call.call_count == 1
async def test_valid_rules_fire_before_and_after_transitions(
self, session, task_run
):
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class MinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
before_transition_hook()
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# rules are valid if the initial and proposed state always match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
minimal_rule = MinimalRule(ctx, *intended_transition)
async with minimal_rule as ctx:
pass
assert await minimal_rule.invalid() is False
assert await minimal_rule.fizzled() is False
# before and after hooks fire for valid rules
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 1
assert cleanup_step.call_count == 0
async def test_invalid_rules_are_noops(self, session, task_run):
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class MinimalRule(BaseOrchestrationRule):
async def before_transition(self, initial_state, proposed_state, context):
before_transition_hook()
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# a rule is invalid if it is applied on initial and proposed states that do not match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (states.StateType.SCHEDULED, states.StateType.COMPLETED)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
# each rule receives a context as an argument and yields it back after
# entering its context--this way we can thread a common context
# through a series of nested rules
minimal_rule = MinimalRule(ctx, *intended_transition)
async with minimal_rule as ctx:
pass
assert await minimal_rule.invalid() is True
assert await minimal_rule.fizzled() is False
# none of the hooks fire for invalid rules
assert before_transition_hook.call_count == 0
assert after_transition_hook.call_count == 0
assert cleanup_step.call_count == 0
@pytest.mark.parametrize("mutating_state", ["initial", "proposed"])
async def test_fizzled_rules_fire_before_hooks_then_cleanup(
self, session, task_run, mutating_state
):
side_effect = 0
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class FizzlingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
# the before transition hook causes a side-effect
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effect
side_effect += 1
before_transition_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effect
side_effect += 1
after_transition_hook()
# the cleanup step allows a rule to revert side-effects caused
# by the before-transition hook in the event of a fizzle
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effect
side_effect -= 1
cleanup_step()
# this rule seems valid because the initial and proposed states match the intended transition
# if either the initial or proposed states change after the rule starts firing, it will fizzle
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
fizzling_rule = FizzlingRule(ctx, *intended_transition)
async with fizzling_rule as ctx:
# within the context, only the before-hook has fired and we can observe the side-effect
assert side_effect == 1
# mutating the proposed state inside the context will fizzle the rule
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(set(states.StateType) - {*intended_transition})
)
if mutating_state == "initial":
ctx.initial_state = mutated_state
elif mutating_state == "proposed":
ctx.proposed_state = mutated_state
# outside of the context the rule will have fizzled and the side effect was cleaned up
assert side_effect == 0
assert await fizzling_rule.invalid() is False
assert await fizzling_rule.fizzled() is True
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 0
assert cleanup_step.call_count == 1
async def test_rules_that_reject_state_do_not_fizzle_themselves(
self, session, task_run
):
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
# this rule mutates the proposed state type, but won't fizzle itself upon exiting
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(
set(states.StateType)
- {initial_state.type, proposed_state.type}
)
)
before_transition_hook()
# `BaseOrchestrationRule` provides hooks designed to mutate the proposed state
await self.reject_transition(
mutated_state, reason="for testing, of course"
)
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# this rule seems valid because the initial and proposed states match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
mutating_rule = StateMutatingRule(ctx, *intended_transition)
async with mutating_rule as ctx:
pass
assert await mutating_rule.invalid() is False
assert await mutating_rule.fizzled() is False
# despite the mutation, this rule is valid so before and after hooks will fire
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 1
assert cleanup_step.call_count == 0
async def test_rules_that_wait_do_not_fizzle_themselves(self, session, task_run):
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
# this rule mutates the proposed state type, but won't fizzle itself upon exiting
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(
set(states.StateType)
- {initial_state.type, proposed_state.type}
)
)
before_transition_hook()
# `BaseOrchestrationRule` provides hooks designed to mutate the proposed state
await self.delay_transition(42, reason="for testing, of course")
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# this rule seems valid because the initial and proposed states match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
mutating_rule = StateMutatingRule(ctx, *intended_transition)
async with mutating_rule as ctx:
pass
assert await mutating_rule.invalid() is False
assert await mutating_rule.fizzled() is False
# despite the mutation, this rule is valid so before and after hooks will fire
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 1
assert cleanup_step.call_count == 0
async def test_rules_that_abort_do_not_fizzle_themselves(self, session, task_run):
before_transition_hook = MagicMock()
after_transition_hook = MagicMock()
cleanup_step = MagicMock()
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
# this rule mutates the proposed state type, but won't fizzle itself upon exiting
mutated_state = proposed_state.model_copy()
mutated_state.type = random.choice(
list(
set(states.StateType)
- {initial_state.type, proposed_state.type}
)
)
before_transition_hook()
# `BaseOrchestrationRule` provides hooks designed to mutate the proposed state
await self.abort_transition(reason="for testing, of course")
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# this rule seems valid because the initial and proposed states match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
mutating_rule = StateMutatingRule(ctx, *intended_transition)
async with mutating_rule as ctx:
pass
assert await mutating_rule.invalid() is False
assert await mutating_rule.fizzled() is False
# despite the mutation, this rule is valid so before and after hooks will fire
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 1
assert cleanup_step.call_count == 0
async def test_rules_can_pass_parameters_via_context(self, session, task_run):
before_transition_hook = MagicMock()
special_message = None
class MessagePassingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
await self.update_context_parameters("a special message", "hello!")
# context parameters should not be sensitive to mutation
context.parameters["a special message"] = "I can't hear you"
class MessageReadingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
before_transition_hook()
nonlocal special_message
special_message = context.parameters["a special message"]
# this rule seems valid because the initial and proposed states match the intended transition
initial_state_type = states.StateType.PENDING
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = states.State(type=proposed_state_type)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
message_passer = MessagePassingRule(ctx, *intended_transition)
async with message_passer as ctx:
message_reader = MessageReadingRule(ctx, *intended_transition)
async with message_reader as ctx:
pass
assert before_transition_hook.call_count == 1
assert special_message == "hello!"
@pytest.mark.parametrize(
"intended_transition",
list(product([*states.StateType, None], [*states.StateType])),
ids=transition_names,
)
async def test_rules_that_raise_exceptions_during_before_transition(
self, session, task_run, intended_transition
):
outer_before_transition_hook = MagicMock()
before_transition_hook = MagicMock()
outer_after_transition_hook = MagicMock()
after_transition_hook = MagicMock()
outer_cleanup_step = MagicMock()
cleanup_step = MagicMock()
class MinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
outer_before_transition_hook()
async def after_transition(self, initial_state, validated_state, context):
outer_after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
outer_cleanup_step()
class RaisingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
before_transition_hook()
raise RuntimeError("Test!")
async def after_transition(self, initial_state, validated_state, context):
after_transition_hook()
async def cleanup(self, initial_state, validated_state, context):
cleanup_step()
# this rule seems valid because the initial and proposed states match the intended transition
initial_state_type, proposed_state_type = intended_transition
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = TaskOrchestrationContext(
session=session,
run=task_run,
initial_state=initial_state,
proposed_state=proposed_state,
)
async with contextlib.AsyncExitStack() as stack:
minimal_rule = MinimalRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(minimal_rule)
raising_rule = RaisingRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(raising_rule)
assert ctx.proposed_state is None, "Proposed state should be None"
assert await minimal_rule.fizzled() is True
assert await raising_rule.invalid() is False, (
"Rules that error on entry should be fizzled so they can try and clean up"
)
assert await raising_rule.fizzled() is True
assert outer_before_transition_hook.call_count == 1
assert outer_after_transition_hook.call_count == 0
assert outer_cleanup_step.call_count == 1, (
"All rules should clean up side effects"
)
assert before_transition_hook.call_count == 1
assert after_transition_hook.call_count == 0, (
"The after-transition hook should not run"
)
assert cleanup_step.call_count == 1, "All rules should clean up side effects"
assert isinstance(ctx.orchestration_error, RuntimeError)
@pytest.mark.parametrize("initial_state_type", ALL_ORCHESTRATION_STATES)
async def test_rules_enforce_initial_state_validity(
self, session, task_run, initial_state_type
):
proposed_state_type = None
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
pre_transition_hook = MagicMock()
post_transition_hook = MagicMock()
class StateEnforcingRule(BaseOrchestrationRule):
FROM_STATES = set(ALL_ORCHESTRATION_STATES) - {initial_state_type}
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
pre_transition_hook()
async def after_transition(self, initial_state, validated_state, context):
post_transition_hook()
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
state_enforcing_rule = StateEnforcingRule(ctx, *intended_transition)
async with state_enforcing_rule as ctx:
pass
assert await state_enforcing_rule.invalid()
assert pre_transition_hook.call_count == 0
assert post_transition_hook.call_count == 0
@pytest.mark.parametrize("proposed_state_type", ALL_ORCHESTRATION_STATES)
async def test_rules_enforce_proposed_state_validity(
self, session, task_run, proposed_state_type
):
initial_state_type = None
intended_transition = (initial_state_type, proposed_state_type)
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
pre_transition_hook = MagicMock()
post_transition_hook = MagicMock()
class StateEnforcingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = set(ALL_ORCHESTRATION_STATES) - {proposed_state_type}
async def before_transition(self, initial_state, proposed_state, context):
pre_transition_hook()
async def after_transition(self, initial_state, validated_state, context):
post_transition_hook()
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
state_enforcing_rule = StateEnforcingRule(ctx, *intended_transition)
async with state_enforcing_rule as ctx:
pass
assert await state_enforcing_rule.invalid()
assert pre_transition_hook.call_count == 0
assert post_transition_hook.call_count == 0
@pytest.mark.parametrize(
"intended_transition",
list(product([*states.StateType, None], [*states.StateType, None])),
ids=transition_names,
)
async def test_nested_valid_rules_fire_hooks(
self, session, task_run, intended_transition
):
side_effects = 0
first_before_hook = MagicMock()
second_before_hook = MagicMock()
first_after_hook = MagicMock()
second_after_hook = MagicMock()
cleanup_step = MagicMock()
# both of the rules produce side-effects on entry and exit, which we can test for
class FirstMinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effects
side_effects += 1
first_before_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects += 1
first_after_hook()
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects -= 1
cleanup_step()
class SecondMinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effects
side_effects += 1
second_before_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects += 1
second_after_hook()
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects -= 1
cleanup_step()
# both rules are valid
initial_state_type, proposed_state_type = intended_transition
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
# an ExitStack is a python builtin construction that allows us to
# nest an arbitrary number of contexts (and therefore, rules), in this test
# we'll enter the contexts one by one so we can follow what's happening
async with contextlib.AsyncExitStack() as stack:
# each rule receives a context as an argument and yields it back after
# entering its context--this way we can thread a common context
# through a series of nested rules
first_rule = FirstMinimalRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(first_rule)
# after entering the first context, only one before hook as fired
assert first_before_hook.call_count == 1
assert first_after_hook.call_count == 0
assert second_before_hook.call_count == 0
assert second_after_hook.call_count == 0
assert cleanup_step.call_count == 0
second_rule = SecondMinimalRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(second_rule)
# the second before hook fires after entering the second context
# note that no after hooks have fired yet
assert first_before_hook.call_count == 1
assert first_after_hook.call_count == 0
assert second_before_hook.call_count == 1
assert second_after_hook.call_count == 0
assert cleanup_step.call_count == 0
assert await first_rule.invalid() is False
assert await second_rule.invalid() is False
assert await first_rule.fizzled() is False
assert await second_rule.fizzled() is False
# both the first and second after hooks fired after exiting the contexts
# none of the rules fizzled, so the cleanup step is never called and side-effects are preserved
assert side_effects == 4
assert first_before_hook.call_count == 1
assert first_after_hook.call_count == 1
assert second_before_hook.call_count == 1
assert second_after_hook.call_count == 1
assert cleanup_step.call_count == 0
@pytest.mark.parametrize(
"intended_transition",
list(product([*states.StateType, None], [*states.StateType, None])),
ids=transition_names,
)
async def test_complex_nested_rules_interact_sensibly(
self, session, task_run, intended_transition
):
side_effects = 0
first_before_hook = MagicMock()
mutator_before_hook = MagicMock()
invalid_before_hook = MagicMock()
first_after_hook = MagicMock()
mutator_after_hook = MagicMock()
invalid_after_hook = MagicMock()
cleanup_after_fizzling = MagicMock()
mutator_cleanup = MagicMock()
invalid_cleanup = MagicMock()
# some of the rules produce side-effects on entry and exit, but also clean up on fizzling
# because one of the rules modifies the intended transition and itself doesn't produce side-effects
# we should see no side effects after exiting the rule contexts
class FirstMinimalRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effects
side_effects += 1
first_before_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects += 1
first_after_hook()
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects -= 1
cleanup_after_fizzling()
class StateMutatingRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
# this rule mutates the proposed state type, but won't fizzle itself upon exiting
mutated_state_type = random.choice(
list(
set(states.StateType)
- {
initial_state.type if initial_state else None,
proposed_state.type if proposed_state else None,
}
)
)
mutated_state = await commit_task_run_state(
session, task_run, mutated_state_type
)
mutator_before_hook()
# `BaseOrchestrationRule` provides hooks designed to mutate the proposed state
await self.reject_transition(
mutated_state, reason="testing my dear watson"
)
async def after_transition(self, initial_state, validated_state, context):
mutator_after_hook()
async def cleanup(self, initial_state, validated_state, context):
mutator_cleanup()
class InvalidatedRule(BaseOrchestrationRule):
FROM_STATES = ALL_ORCHESTRATION_STATES
TO_STATES = ALL_ORCHESTRATION_STATES
async def before_transition(self, initial_state, proposed_state, context):
nonlocal side_effects
side_effects += 1
invalid_before_hook()
async def after_transition(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects += 1
invalid_after_hook()
async def cleanup(self, initial_state, validated_state, context):
nonlocal side_effects
side_effects -= 1
invalid_cleanup()
# all rules start valid
initial_state_type, proposed_state_type = intended_transition
initial_state = await commit_task_run_state(
session, task_run, initial_state_type
)
proposed_state = (
states.State(type=proposed_state_type) if proposed_state_type else None
)
ctx = OrchestrationContext(
session=session,
initial_state=initial_state,
proposed_state=proposed_state,
run=task_run,
)
# an ExitStack is a python builtin construction that allows us to
# nest an arbitrary number of contexts (and therefore, rules), in this test
# we'll enter the contexts one by one so we can follow what's happening
async with contextlib.AsyncExitStack() as stack:
first_rule = FirstMinimalRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(first_rule)
# after entering the first context, only one before hook as fired
assert first_before_hook.call_count == 1
assert mutator_before_hook.call_count == 0
assert invalid_before_hook.call_count == 0
mutator_rule = StateMutatingRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(mutator_rule)
# the mutator fires after entering the second context and changes the proposed state
# this mutation will invalidate any subsequent rules and fizzle previous ones
assert first_before_hook.call_count == 1
assert mutator_before_hook.call_count == 1
assert invalid_before_hook.call_count == 0
invalidated_rule = InvalidatedRule(ctx, *intended_transition)
ctx = await stack.enter_async_context(invalidated_rule)
# invalid rule hooks don't fire, even after entering their context
assert first_before_hook.call_count == 1
assert mutator_before_hook.call_count == 1
assert invalid_before_hook.call_count == 0
# since no rules have had a chance to clean up, we can still
# observe the side-effect produced by the first rule
assert side_effects == 1
# an ExitStack exits contexts in the reverse order in which they were called
# once invalid always invalid--the invalid rule fires no hooks at all
assert await invalidated_rule.invalid() is True
assert await invalidated_rule.fizzled() is False
assert invalid_before_hook.call_count == 0
assert invalid_after_hook.call_count == 0
assert invalid_cleanup.call_count == 0
# the rule responsible for the mutation "knows about" the change to the proposed state, and remains valid
assert await mutator_rule.invalid() is False
assert await mutator_rule.fizzled() is False
assert mutator_before_hook.call_count == 1
assert mutator_after_hook.call_count == 1
assert mutator_cleanup.call_count == 0
# the first rule did not expect the proposed state to change, so the rule fizzles
# instead of firing the after-transition hook, the rule cleans up after itself
assert await first_rule.invalid() is False
assert await first_rule.fizzled() is True
assert first_before_hook.call_count == 1
assert first_after_hook.call_count == 0
assert cleanup_after_fizzling.call_count == 1
# because all fizzled rules cleaned up and invalid rules never fire, side-effects have been undone
assert side_effects == 0
| TestBaseOrchestrationRule |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-weaviate/tests/test_vector_stores_weaviate.py | {
"start": 6958,
"end": 15757
} | class ____:
def test_class(self):
names_of_base_classes = [b.__name__ for b in WeaviateVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
@pytest.fixture(scope="class")
def client(self):
client = weaviate.connect_to_embedded()
yield client
client.close()
@pytest.fixture()
def vector_store(self, client):
vector_store = WeaviateVectorStore(
weaviate_client=client, index_name=TEST_COLLECTION_NAME
)
vector_store.clear() # Make sure that no leftover test collection exists from a previous test session (embedded Weaviate data gets persisted)
yield vector_store
vector_store.clear()
@pytest.fixture()
def vector_store_with_sample_nodes(self, vector_store):
nodes = [
TextNode(text="Hello world.", embedding=[0.0, 0.0, 0.3]),
TextNode(text="This is a test.", embedding=[0.3, 0.0, 0.0]),
]
vector_store.add(nodes)
return vector_store
def test_vector_store_with_custom_batch(self, client):
nodes = [
TextNode(text="Hello world.", embedding=[0.0, 0.0, 0.3]),
TextNode(text="This is a test.", embedding=[0.3, 0.0, 0.0]),
]
# default, dynamic batch
vector_store_default_dynamic = WeaviateVectorStore(
weaviate_client=client, index_name=TEST_COLLECTION_NAME
)
assert isinstance(client.batch._batch_mode, _DynamicBatching)
# custom, with fixed size
custom_batch = client.batch.fixed_size(
batch_size=123,
concurrent_requests=3,
consistency_level=weaviate.classes.config.ConsistencyLevel.ONE,
)
vector_store_fixed = WeaviateVectorStore(
weaviate_client=client,
index_name=TEST_COLLECTION_NAME,
client_kwargs={"custom_batch": custom_batch},
)
assert isinstance(client.batch._batch_mode, _FixedSizeBatching)
assert client.batch._batch_mode.batch_size == 123
assert client.batch._batch_mode.concurrent_requests == 3
assert (
client.batch._consistency_level
== weaviate.classes.config.ConsistencyLevel.ONE
)
vector_store_default_dynamic.clear()
vector_store_fixed.clear()
# test wrong value
try:
WeaviateVectorStore(
weaviate_client=client,
index_name=TEST_COLLECTION_NAME,
client_kwargs={"custom_batch": "wrong_value"},
)
AssertionError()
except ValueError:
assert True
def test_sync_basic_flow(self, vector_store_with_sample_nodes):
query = VectorStoreQuery(
query_embedding=[0.3, 0.0, 0.0],
similarity_top_k=10,
query_str="world",
mode=VectorStoreQueryMode.DEFAULT,
)
results = vector_store_with_sample_nodes.query(query)
assert len(results.nodes) == 2
assert results.nodes[0].text == "This is a test."
assert results.similarities[0] == 1.0
assert results.similarities[0] > results.similarities[1]
def test_hybrid_search(self, vector_store_with_sample_nodes):
query = VectorStoreQuery(
query_embedding=[0.0, 0.3, 0.0],
similarity_top_k=10,
query_str="world",
mode=VectorStoreQueryMode.HYBRID,
)
results = vector_store_with_sample_nodes.query(query)
assert len(results.nodes) == 2
assert results.nodes[0].text == "Hello world."
assert results.nodes[1].text == "This is a test."
assert results.similarities[0] > results.similarities[1]
def test_query_kwargs(self, vector_store_with_sample_nodes):
query = VectorStoreQuery(
query_embedding=[0.0, 0.3, 0.0],
similarity_top_k=2,
query_str="world",
mode=VectorStoreQueryMode.HYBRID,
)
results = vector_store_with_sample_nodes.query(
query,
max_vector_distance=0.0,
)
assert len(results.nodes) == 0
def test_can_query_collection_with_complex_property_types(self, client):
"""Verifies that it is possible to query data from collections that contain complex properties (e.g. a list of nested objects in one of the properties)."""
collection_name = "ComplexTypeInArrayTest"
client.collections.delete(collection_name)
collection = client.collections.create(
name=collection_name,
properties=[
wvc.config.Property(
name="text",
data_type=wvc.config.DataType.TEXT,
),
wvc.config.Property(
name="array_prop",
data_type=wvc.config.DataType.OBJECT_ARRAY,
nested_properties=[
wvc.config.Property(
name="nested_prop",
data_type=wvc.config.DataType.TEXT,
),
],
),
],
)
collection.data.insert(
{
"text": "Text of object containing complex properties",
"array_prop": [{"nested_prop": "nested_prop content"}],
},
vector=[1.0, 0.0, 0.0],
)
vector_store = WeaviateVectorStore(
weaviate_client=client,
index_name=collection_name,
)
query = VectorStoreQuery(
query_embedding=[1.0, 0.0, 0.0],
similarity_top_k=2,
query_str="world",
mode=VectorStoreQueryMode.DEFAULT,
)
results = vector_store.query(query)
assert len(results.nodes) == 1
assert results.nodes[0].text == "Text of object containing complex properties"
def test_sync_delete(self, vector_store):
node_to_be_deleted = TextNode(
text="Hello world.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="to_be_deleted")
},
embedding=[0.0, 0.0, 0.3],
)
node_to_keep = TextNode(
text="This is a test.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="to_be_kept")
},
embedding=[0.3, 0.0, 0.0],
)
nodes = [node_to_be_deleted, node_to_keep]
vector_store.add(nodes)
# First check that nothing gets deleted if no matching nodes are present
vector_store.delete(ref_doc_id="no_match_in_db")
query = VectorStoreQuery(
query_embedding=[0.3, 0.0, 0.0],
similarity_top_k=10,
query_str="test",
mode=VectorStoreQueryMode.DEFAULT,
)
results = vector_store.query(query)
assert len(results.nodes) == 2
# Now test actual deletion
vector_store.delete(ref_doc_id="to_be_deleted")
query = VectorStoreQuery(
query_embedding=[0.3, 0.0, 0.0],
similarity_top_k=10,
query_str="test",
mode=VectorStoreQueryMode.DEFAULT,
)
results = vector_store.query(query)
assert len(results.nodes) == 1
results.nodes[0].node_id == node_to_keep.node_id
async def test_async_methods_called_without_async_client(self, vector_store):
"""Makes sure that we present an easy to understand error message to the user if he did not not provide an async client, but tried to call async methods."""
with pytest.raises(AsyncClientNotProvidedError):
await vector_store.async_add(
[TextNode(text="Hello world.", embedding=[0.0, 0.0, 0.3])]
)
with pytest.raises(AsyncClientNotProvidedError):
await vector_store.adelete(ref_doc_id="no_match_in_db")
with pytest.raises(AsyncClientNotProvidedError):
await vector_store.adelete_nodes(node_ids=["sample_node_id"])
with pytest.raises(AsyncClientNotProvidedError):
await vector_store.aclear()
with pytest.raises(AsyncClientNotProvidedError):
query = VectorStoreQuery(
query_embedding=[0.3, 0.0, 0.0],
similarity_top_k=10,
query_str="test",
mode=VectorStoreQueryMode.DEFAULT,
)
results = await vector_store.aquery(query)
def test_sync_client_properties(self, vector_store):
assert isinstance(vector_store.client, weaviate.WeaviateClient)
with pytest.raises(AsyncClientNotProvidedError):
vector_store.async_client
| TestWeaviateSync |
python | PrefectHQ__prefect | tests/blocks/test_core.py | {
"start": 82500,
"end": 82589
} | class ____(Block):
_block_type_name = "No code Example"
message: str
| NoCodeExample |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 9742,
"end": 11160
} | class ____(ModelOutput):
r"""
input_ids (`torch.Tensor `of shape `(batch_size, sequence_length), *optional*):
The sequence used as a text prompt for the generation.
user_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*):
The audio codes used as audio user prompt for the generation. Has priority over `user_input_values` and represents the audio "tokens" of `user_input_values` once passed through the audio encoder.
moshi_audio_codes (`torch.Tensor `of shape `(batch_size, num_codebooks, sequence_length), *optional*):
The audio codes used as audio Moshi prompt for the generation. Has priority over `moshi_input_values` and represents the audio "tokens" of `moshi_input_values` once passed through the audio encoder.
attention_mask (`torch.LongTensor`) of shape `(batch_size, sequence_length)`, *optional*):
Attention mask to avoid performing attention on padding token indices. Mask values selected in `[0,
1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.
"""
input_ids: Optional[torch.LongTensor] = None
user_audio_codes: Optional[torch.Tensor] = None
moshi_audio_codes: Optional[torch.Tensor] = None
attention_mask: Optional[torch.LongTensor] = None
# Copied from transformers.models.gemma.modeling_gemma.GemmaRMSNorm with Gemma->Moshi
| MoshiUnconditionalInput |
python | sqlalchemy__sqlalchemy | test/orm/test_selectin_relations.py | {
"start": 107129,
"end": 110993
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class GenericParent(Base):
__tablename__ = "generic_parent"
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "generic_parent",
}
class ParentA(GenericParent):
__tablename__ = "parent_a"
id = Column(
Integer, ForeignKey("generic_parent.id"), primary_key=True
)
children = relationship("ChildA", back_populates="parent")
__mapper_args__ = {"polymorphic_identity": "parent_a"}
class ParentB(GenericParent):
__tablename__ = "parent_b"
id = Column(
Integer, ForeignKey("generic_parent.id"), primary_key=True
)
children = relationship("ChildB", back_populates="parent")
__mapper_args__ = {"polymorphic_identity": "parent_b"}
class ChildA(Base):
__tablename__ = "child_a"
id = Column(Integer, primary_key=True)
parent_id = Column(
Integer, ForeignKey("parent_a.id"), nullable=False
)
parent = relationship("ParentA", back_populates="children")
class ChildB(Base):
__tablename__ = "child_b"
id = Column(Integer, primary_key=True)
parent_id = Column(
Integer, ForeignKey("parent_b.id"), nullable=False
)
parent = relationship("ParentB", back_populates="children")
@classmethod
def insert_data(cls, connection):
ParentA, ParentB, ChildA, ChildB = cls.classes(
"ParentA", "ParentB", "ChildA", "ChildB"
)
session = Session(connection)
parent_a = ParentA(id=1)
parent_b = ParentB(id=2)
for i in range(10):
parent_a.children.append(ChildA())
parent_b.children.append(ChildB())
session.add_all([parent_a, parent_b])
session.commit()
def test_load_both_wpoly(self):
GenericParent, ParentA, ParentB, ChildA, ChildB = self.classes(
"GenericParent", "ParentA", "ParentB", "ChildA", "ChildB"
)
session = fixture_session()
parent_types = with_polymorphic(GenericParent, [ParentA, ParentB])
with assert_engine(testing.db) as asserter_:
session.query(parent_types).options(
selectinload(parent_types.ParentA.children),
selectinload(parent_types.ParentB.children),
).all()
asserter_.assert_(
CompiledSQL(
"SELECT generic_parent.id AS generic_parent_id, "
"generic_parent.type AS generic_parent_type, "
"parent_a.id AS parent_a_id, parent_b.id AS parent_b_id "
"FROM generic_parent LEFT OUTER JOIN parent_a "
"ON generic_parent.id = parent_a.id LEFT OUTER JOIN parent_b "
"ON generic_parent.id = parent_b.id"
),
AllOf(
CompiledSQL(
"SELECT child_a.parent_id, "
"child_a.id FROM child_a "
"WHERE child_a.parent_id IN "
"(__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [1]}],
),
CompiledSQL(
"SELECT child_b.parent_id, "
"child_b.id FROM child_b "
"WHERE child_b.parent_id IN "
"(__[POSTCOMPILE_primary_keys])",
[{"primary_keys": [2]}],
),
),
)
| SameNamePolymorphicTest |
python | celery__celery | t/unit/concurrency/test_prefork.py | {
"start": 5572,
"end": 16274
} | class ____:
def setup_method(self):
pytest.importorskip('multiprocessing')
def test_gen_not_started(self):
def gen():
yield 1
assert not asynpool.gen_not_started(g)
yield 2
g = gen()
assert asynpool.gen_not_started(g)
next(g)
assert not asynpool.gen_not_started(g)
list(g)
assert not asynpool.gen_not_started(g)
def gen2():
yield 1
raise RuntimeError('generator error')
g = gen2()
assert asynpool.gen_not_started(g)
next(g)
assert not asynpool.gen_not_started(g)
with pytest.raises(RuntimeError):
next(g)
assert not asynpool.gen_not_started(g)
@patch('select.select', create=True)
def test_select(self, __select):
ebadf = socket.error()
ebadf.errno = errno.EBADF
with patch('select.poll', create=True) as poller:
poll = poller.return_value = Mock(name='poll.poll')
poll.return_value = {3}, set(), 0
assert asynpool._select({3}, poll=poll) == ({3}, set(), 0)
poll.return_value = {3}, set(), 0
assert asynpool._select({3}, None, {3}, poll=poll) == (
{3}, set(), 0,
)
eintr = socket.error()
eintr.errno = errno.EINTR
poll.side_effect = eintr
readers = {3}
assert asynpool._select(readers, poll=poll) == (set(), set(), 1)
assert 3 in readers
with patch('select.poll', create=True) as poller:
poll = poller.return_value = Mock(name='poll.poll')
poll.side_effect = ebadf
with patch('select.select') as selcheck:
selcheck.side_effect = ebadf
readers = {3}
assert asynpool._select(readers, poll=poll) == (
set(), set(), 1,
)
assert 3 not in readers
with patch('select.poll', create=True) as poller:
poll = poller.return_value = Mock(name='poll.poll')
poll.side_effect = MemoryError()
with pytest.raises(MemoryError):
asynpool._select({1}, poll=poll)
with patch('select.poll', create=True) as poller:
poll = poller.return_value = Mock(name='poll.poll')
with patch('select.select') as selcheck:
def se(*args):
selcheck.side_effect = MemoryError()
raise ebadf
poll.side_effect = se
with pytest.raises(MemoryError):
asynpool._select({3}, poll=poll)
with patch('select.poll', create=True) as poller:
poll = poller.return_value = Mock(name='poll.poll')
with patch('select.select') as selcheck:
def se2(*args):
selcheck.side_effect = socket.error()
selcheck.side_effect.errno = 1321
raise ebadf
poll.side_effect = se2
with pytest.raises(socket.error):
asynpool._select({3}, poll=poll)
with patch('select.poll', create=True) as poller:
poll = poller.return_value = Mock(name='poll.poll')
poll.side_effect = socket.error()
poll.side_effect.errno = 34134
with pytest.raises(socket.error):
asynpool._select({3}, poll=poll)
def test_select_unpatched(self):
with tempfile.TemporaryFile('w') as f:
_, writeable, _ = asynpool._select(writers={f, }, err={f, })
assert f.fileno() in writeable
with tempfile.TemporaryFile('r') as f:
readable, _, _ = asynpool._select(readers={f, }, err={f, })
assert f.fileno() in readable
def test_promise(self):
fun = Mock()
x = asynpool.promise(fun, (1,), {'foo': 1})
x()
assert x.ready
fun.assert_called_with(1, foo=1)
def test_Worker(self):
w = asynpool.Worker(Mock(), Mock())
w.on_loop_start(1234)
w.outq.put.assert_called_with((asynpool.WORKER_UP, (1234,)))
def test_iterate_file_descriptors_safely_source_data_list(self):
# Given: a list of integers that could be file descriptors
fd_iter = [1, 2, 3, 4, 5]
# Given: a mock hub method that does nothing to call
def _fake_hub(*args, **kwargs):
raise OSError
# When Calling the helper to iterate_file_descriptors_safely
iterate_file_descriptors_safely(
fd_iter, fd_iter, _fake_hub,
"arg1", "arg2", kw1="kw1", kw2="kw2",
)
# Then: all items were removed from the managed data source
assert fd_iter == [], "Expected all items removed from managed list"
def test_iterate_file_descriptors_safely_source_data_set(self):
# Given: a list of integers that could be file descriptors
fd_iter = {1, 2, 3, 4, 5}
# Given: a mock hub method that does nothing to call
def _fake_hub(*args, **kwargs):
raise OSError
# When Calling the helper to iterate_file_descriptors_safely
iterate_file_descriptors_safely(
fd_iter, fd_iter, _fake_hub,
"arg1", "arg2", kw1="kw1", kw2="kw2",
)
# Then: all items were removed from the managed data source
assert fd_iter == set(), "Expected all items removed from managed set"
def test_iterate_file_descriptors_safely_source_data_dict(self):
# Given: a list of integers that could be file descriptors
fd_iter = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5}
# Given: a mock hub method that does nothing to call
def _fake_hub(*args, **kwargs):
raise OSError
# When Calling the helper to iterate_file_descriptors_safely
iterate_file_descriptors_safely(
fd_iter, fd_iter, _fake_hub,
"arg1", "arg2", kw1="kw1", kw2="kw2",
)
# Then: all items were removed from the managed data source
assert fd_iter == {}, "Expected all items removed from managed dict"
def _get_hub(self):
hub = Hub()
hub.readers = {}
hub.writers = {}
hub.timer = Mock(name='hub.timer')
hub.timer._queue = [Mock()]
hub.fire_timers = Mock(name='hub.fire_timers')
hub.fire_timers.return_value = 1.7
hub.poller = Mock(name='hub.poller')
hub.close = Mock(name='hub.close()')
return hub
@t.skip.if_pypy
def test_schedule_writes_hub_remove_writer_ready_fd_not_in_all_inqueues(self):
pool = asynpool.AsynPool(threads=False)
hub = self._get_hub()
writer = Mock(name='writer')
reader = Mock(name='reader')
# add 2 fake fds with the same id
hub.add_reader(6, reader, 6)
hub.add_writer(6, writer, 6)
pool._all_inqueues.clear()
pool._create_write_handlers(hub)
# check schedule_writes write fds remove not remove the reader one from the hub.
hub.consolidate_callback(ready_fds=[6])
assert 6 in hub.readers
assert 6 not in hub.writers
@t.skip.if_pypy
def test_schedule_writes_hub_remove_writers_from_active_writers_when_get_index_error(self):
pool = asynpool.AsynPool(threads=False)
hub = self._get_hub()
writer = Mock(name='writer')
reader = Mock(name='reader')
# add 3 fake fds with the same id to reader and writer
hub.add_reader(6, reader, 6)
hub.add_reader(8, reader, 8)
hub.add_reader(9, reader, 9)
hub.add_writer(6, writer, 6)
hub.add_writer(8, writer, 8)
hub.add_writer(9, writer, 9)
# add fake fd to pool _all_inqueues to make sure we try to read from outbound_buffer
# set active_writes to 6 to make sure we remove all write fds except 6
pool._active_writes = {6}
pool._all_inqueues = {2, 6, 8, 9}
pool._create_write_handlers(hub)
# clear outbound_buffer to get IndexError when trying to pop any message
# in this case all active_writers fds will be removed from the hub
pool.outbound_buffer.clear()
hub.consolidate_callback(ready_fds=[2])
if {6, 8, 9} <= hub.readers.keys() and not {8, 9} <= hub.writers.keys():
assert True
else:
assert False
assert 6 in hub.writers
@t.skip.if_pypy
def test_schedule_writes_hub_remove_fd_only_from_writers_when_write_job_is_done(self):
pool = asynpool.AsynPool(threads=False)
hub = self._get_hub()
writer = Mock(name='writer')
reader = Mock(name='reader')
# add one writer and one reader with the same fd
hub.add_writer(2, writer, 2)
hub.add_reader(2, reader, 2)
assert 2 in hub.writers
# For test purposes to reach _write_job in schedule writes
pool._all_inqueues = {2}
worker = Mock("worker")
# this lambda need to return a number higher than 4
# to pass the while loop in _write_job function and to reach the hub.remove_writer
worker.send_job_offset = lambda header, HW: 5
pool._fileno_to_inq[2] = worker
pool._create_write_handlers(hub)
result = ApplyResult({}, lambda x: True)
result._payload = [None, None, -1]
pool.outbound_buffer.appendleft(result)
hub.consolidate_callback(ready_fds=[2])
assert 2 not in hub.writers
assert 2 in hub.readers
@t.skip.if_pypy
def test_register_with_event_loop__no_on_tick_dupes(self):
"""Ensure AsynPool's register_with_event_loop only registers
on_poll_start in the event loop the first time it's called. This
prevents a leak when the Consumer is restarted.
"""
pool = asynpool.AsynPool(threads=False)
hub = Mock(name='hub')
pool.register_with_event_loop(hub)
pool.register_with_event_loop(hub)
hub.on_tick.add.assert_called_once()
@t.skip.if_pypy
@patch('billiard.pool.Pool._create_worker_process')
def test_before_create_process_signal(self, create_process):
from celery import signals
on_worker_before_create_process = Mock()
signals.worker_before_create_process.connect(on_worker_before_create_process)
pool = asynpool.AsynPool(processes=1, threads=False)
create_process.assert_called_once_with(0)
on_worker_before_create_process.assert_any_call(
signal=signals.worker_before_create_process,
sender=pool,
)
@t.skip.if_win32
| test_AsynPool |
python | kamyu104__LeetCode-Solutions | Python/palindrome-partitioning.py | {
"start": 933,
"end": 1656
} | class ____(object):
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
result = []
self.partitionRecu(result, [], s, 0)
return result
def partitionRecu(self, result, cur, s, i):
if i == len(s):
result.append(list(cur))
else:
for j in xrange(i, len(s)):
if self.isPalindrome(s[i: j + 1]):
cur.append(s[i: j + 1])
self.partitionRecu(result, cur, s, j + 1)
cur.pop()
def isPalindrome(self, s):
for i in xrange(len(s) / 2):
if s[i] != s[-(i + 1)]:
return False
return True
| Solution2 |
python | langchain-ai__langchain | libs/core/tests/unit_tests/test_tools.py | {
"start": 2934,
"end": 23282
} | class ____(BaseTool):
name: str = "structured_api"
args_schema: type[BaseModel] = _MockSchema
description: str = "A Structured Tool"
@override
def _run(self, *, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
return f"{arg1} {arg2} {arg3}"
async def _arun(self, *, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
raise NotImplementedError
def test_structured_args() -> None:
"""Test functionality with structured arguments."""
structured_api = _MockStructuredTool()
assert isinstance(structured_api, BaseTool)
assert structured_api.name == "structured_api"
expected_result = "1 True {'foo': 'bar'}"
args = {"arg1": 1, "arg2": True, "arg3": {"foo": "bar"}}
assert structured_api.run(args) == expected_result
def test_misannotated_base_tool_raises_error() -> None:
"""Test that a BaseTool with the incorrect typehint raises an exception."""
with pytest.raises(SchemaAnnotationError):
class _MisAnnotatedTool(BaseTool):
name: str = "structured_api"
# This would silently be ignored without the custom metaclass
args_schema: BaseModel = _MockSchema # type: ignore[assignment]
description: str = "A Structured Tool"
@override
def _run(self, *, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
return f"{arg1} {arg2} {arg3}"
async def _arun(
self, *, arg1: int, arg2: bool, arg3: dict | None = None
) -> str:
raise NotImplementedError
def test_forward_ref_annotated_base_tool_accepted() -> None:
"""Test that a using forward ref annotation syntax is accepted."""
class _ForwardRefAnnotatedTool(BaseTool):
name: str = "structured_api"
args_schema: "type[BaseModel]" = _MockSchema
description: str = "A Structured Tool"
@override
def _run(self, *, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
return f"{arg1} {arg2} {arg3}"
async def _arun(
self, *, arg1: int, arg2: bool, arg3: dict | None = None
) -> str:
raise NotImplementedError
def test_subclass_annotated_base_tool_accepted() -> None:
"""Test BaseTool child w/ custom schema isn't overwritten."""
class _ForwardRefAnnotatedTool(BaseTool):
name: str = "structured_api"
args_schema: type[_MockSchema] = _MockSchema
description: str = "A Structured Tool"
@override
def _run(self, *, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
return f"{arg1} {arg2} {arg3}"
async def _arun(
self, *, arg1: int, arg2: bool, arg3: dict | None = None
) -> str:
raise NotImplementedError
assert issubclass(_ForwardRefAnnotatedTool, BaseTool)
tool = _ForwardRefAnnotatedTool()
assert tool.args_schema == _MockSchema
def test_decorator_with_specified_schema() -> None:
"""Test that manually specified schemata are passed through to the tool."""
@tool(args_schema=_MockSchema)
def tool_func(*, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
return f"{arg1} {arg2} {arg3}"
assert isinstance(tool_func, BaseTool)
assert tool_func.args_schema == _MockSchema
@pytest.mark.skipif(
sys.version_info >= (3, 14),
reason="pydantic.v1 namespace not supported with Python 3.14+",
)
def test_decorator_with_specified_schema_pydantic_v1() -> None:
"""Test that manually specified schemata are passed through to the tool."""
class _MockSchemaV1(BaseModelV1):
"""Return the arguments directly."""
arg1: int
arg2: bool
arg3: dict | None = None
@tool(args_schema=cast("ArgsSchema", _MockSchemaV1))
def tool_func_v1(*, arg1: int, arg2: bool, arg3: dict | None = None) -> str:
return f"{arg1} {arg2} {arg3}"
assert isinstance(tool_func_v1, BaseTool)
assert tool_func_v1.args_schema == cast("ArgsSchema", _MockSchemaV1)
def test_decorated_function_schema_equivalent() -> None:
"""Test that a BaseTool without a schema meets expectations."""
@tool
def structured_tool_input(
*, arg1: int, arg2: bool, arg3: dict | None = None
) -> str:
"""Return the arguments directly."""
return f"{arg1} {arg2} {arg3}"
assert isinstance(structured_tool_input, BaseTool)
assert structured_tool_input.args_schema is not None
assert (
_schema(structured_tool_input.args_schema)["properties"]
== _schema(_MockSchema)["properties"]
== _normalize_schema(structured_tool_input.args)
)
def test_args_kwargs_filtered() -> None:
class _SingleArgToolWithKwargs(BaseTool):
name: str = "single_arg_tool"
description: str = "A single arged tool with kwargs"
@override
def _run(
self,
some_arg: str,
run_manager: CallbackManagerForToolRun | None = None,
**kwargs: Any,
) -> str:
return "foo"
async def _arun(
self,
some_arg: str,
run_manager: AsyncCallbackManagerForToolRun | None = None,
**kwargs: Any,
) -> str:
raise NotImplementedError
tool = _SingleArgToolWithKwargs()
assert tool.is_single_input
class _VarArgToolWithKwargs(BaseTool):
name: str = "single_arg_tool"
description: str = "A single arged tool with kwargs"
@override
def _run(
self,
*args: Any,
run_manager: CallbackManagerForToolRun | None = None,
**kwargs: Any,
) -> str:
return "foo"
async def _arun(
self,
*args: Any,
run_manager: AsyncCallbackManagerForToolRun | None = None,
**kwargs: Any,
) -> str:
raise NotImplementedError
tool2 = _VarArgToolWithKwargs()
assert tool2.is_single_input
def test_structured_args_decorator_no_infer_schema() -> None:
"""Test functionality with structured arguments parsed as a decorator."""
@tool(infer_schema=False)
def structured_tool_input(
arg1: int, arg2: float | datetime, opt_arg: dict | None = None
) -> str:
"""Return the arguments directly."""
return f"{arg1}, {arg2}, {opt_arg}"
assert isinstance(structured_tool_input, BaseTool)
assert structured_tool_input.name == "structured_tool_input"
args = {"arg1": 1, "arg2": 0.001, "opt_arg": {"foo": "bar"}}
with pytest.raises(ToolException):
assert structured_tool_input.run(args)
def test_structured_single_str_decorator_no_infer_schema() -> None:
"""Test functionality with structured arguments parsed as a decorator."""
@tool(infer_schema=False)
def unstructured_tool_input(tool_input: str) -> str:
"""Return the arguments directly."""
assert isinstance(tool_input, str)
return f"{tool_input}"
assert isinstance(unstructured_tool_input, BaseTool)
assert unstructured_tool_input.args_schema is None
assert unstructured_tool_input.run("foo") == "foo"
def test_structured_tool_types_parsed() -> None:
"""Test the non-primitive types are correctly passed to structured tools."""
class SomeEnum(Enum):
A = "a"
B = "b"
class SomeBaseModel(BaseModel):
foo: str
@tool
def structured_tool(
some_enum: SomeEnum,
some_base_model: SomeBaseModel,
) -> dict:
"""Return the arguments directly."""
return {
"some_enum": some_enum,
"some_base_model": some_base_model,
}
assert isinstance(structured_tool, StructuredTool)
args = {
"some_enum": SomeEnum.A.value,
"some_base_model": SomeBaseModel(foo="bar").model_dump(),
}
result = structured_tool.run(json.loads(json.dumps(args)))
expected = {
"some_enum": SomeEnum.A,
"some_base_model": SomeBaseModel(foo="bar"),
}
assert result == expected
@pytest.mark.skipif(
sys.version_info >= (3, 14),
reason="pydantic.v1 namespace not supported with Python 3.14+",
)
def test_structured_tool_types_parsed_pydantic_v1() -> None:
"""Test the non-primitive types are correctly passed to structured tools."""
class SomeBaseModel(BaseModelV1):
foo: str
class AnotherBaseModel(BaseModelV1):
bar: str
@tool
def structured_tool(some_base_model: SomeBaseModel) -> AnotherBaseModel:
"""Return the arguments directly."""
return AnotherBaseModel(bar=some_base_model.foo)
assert isinstance(structured_tool, StructuredTool)
expected = AnotherBaseModel(bar="baz")
for arg in [
SomeBaseModel(foo="baz"),
SomeBaseModel(foo="baz").dict(),
]:
args = {"some_base_model": arg}
result = structured_tool.run(args)
assert result == expected
def test_structured_tool_types_parsed_pydantic_mixed() -> None:
"""Test handling of tool with mixed Pydantic version arguments."""
class SomeBaseModel(BaseModelV1):
foo: str
class AnotherBaseModel(BaseModel):
bar: str
with pytest.raises(NotImplementedError):
@tool
def structured_tool(
some_base_model: SomeBaseModel, another_base_model: AnotherBaseModel
) -> None:
"""Return the arguments directly."""
def test_base_tool_inheritance_base_schema() -> None:
"""Test schema is correctly inferred when inheriting from BaseTool."""
class _MockSimpleTool(BaseTool):
name: str = "simple_tool"
description: str = "A Simple Tool"
@override
def _run(self, tool_input: str) -> str:
return f"{tool_input}"
@override
async def _arun(self, tool_input: str) -> str:
raise NotImplementedError
simple_tool = _MockSimpleTool()
assert simple_tool.args_schema is None
expected_args = {"tool_input": {"title": "Tool Input", "type": "string"}}
assert simple_tool.args == expected_args
def test_tool_lambda_args_schema() -> None:
"""Test args schema inference when the tool argument is a lambda function."""
tool = Tool(
name="tool",
description="A tool",
func=lambda tool_input: tool_input,
)
assert tool.args_schema is None
expected_args = {"tool_input": {"type": "string"}}
assert tool.args == expected_args
def test_structured_tool_from_function_docstring() -> None:
"""Test that structured tools can be created from functions."""
def foo(bar: int, baz: str) -> str:
"""Docstring.
Args:
bar: the bar value
baz: the baz value
"""
raise NotImplementedError
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.name == "foo"
assert structured_tool.args == {
"bar": {"title": "Bar", "type": "integer"},
"baz": {"title": "Baz", "type": "string"},
}
assert _schema(structured_tool.args_schema) == {
"properties": {
"bar": {"title": "Bar", "type": "integer"},
"baz": {"title": "Baz", "type": "string"},
},
"description": inspect.getdoc(foo),
"title": "foo",
"type": "object",
"required": ["bar", "baz"],
}
assert foo.__doc__ is not None
assert structured_tool.description == textwrap.dedent(foo.__doc__.strip())
def test_structured_tool_from_function_docstring_complex_args() -> None:
"""Test that structured tools can be created from functions."""
def foo(bar: int, baz: list[str]) -> str:
"""Docstring.
Args:
bar: int
baz: list[str]
"""
raise NotImplementedError
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.name == "foo"
assert structured_tool.args == {
"bar": {"title": "Bar", "type": "integer"},
"baz": {
"title": "Baz",
"type": "array",
"items": {"type": "string"},
},
}
assert _schema(structured_tool.args_schema) == {
"properties": {
"bar": {"title": "Bar", "type": "integer"},
"baz": {
"title": "Baz",
"type": "array",
"items": {"type": "string"},
},
},
"description": inspect.getdoc(foo),
"title": "foo",
"type": "object",
"required": ["bar", "baz"],
}
assert foo.__doc__ is not None
assert structured_tool.description == textwrap.dedent(foo.__doc__).strip()
def test_structured_tool_lambda_multi_args_schema() -> None:
"""Test args schema inference when the tool argument is a lambda function."""
tool = StructuredTool.from_function(
name="tool",
description="A tool",
func=lambda tool_input, other_arg: f"{tool_input}{other_arg}",
)
assert tool.args_schema is not None
expected_args = {
"tool_input": {"title": "Tool Input"},
"other_arg": {"title": "Other Arg"},
}
assert tool.args == expected_args
def test_tool_partial_function_args_schema() -> None:
"""Test args schema inference when the tool argument is a partial function."""
def func(tool_input: str, other_arg: str) -> str:
assert isinstance(tool_input, str)
assert isinstance(other_arg, str)
return tool_input + other_arg
tool = Tool(
name="tool",
description="A tool",
func=partial(func, other_arg="foo"),
)
assert tool.run("bar") == "barfoo"
def test_empty_args_decorator() -> None:
"""Test inferred schema of decorated fn with no args."""
@tool
def empty_tool_input() -> str:
"""Return a constant."""
return "the empty result"
assert isinstance(empty_tool_input, BaseTool)
assert empty_tool_input.name == "empty_tool_input"
assert empty_tool_input.args == {}
assert empty_tool_input.run({}) == "the empty result"
def test_tool_from_function_with_run_manager() -> None:
"""Test run of tool when using run_manager."""
def foo(bar: str, callbacks: CallbackManagerForToolRun | None = None) -> str: # noqa: D417
"""Docstring.
Args:
bar: str.
"""
assert callbacks is not None
return "foo" + bar
handler = FakeCallbackHandler()
tool = Tool.from_function(foo, name="foo", description="Docstring")
assert tool.run(tool_input={"bar": "bar"}, run_manager=[handler]) == "foobar"
assert tool.run("baz", run_manager=[handler]) == "foobaz"
def test_structured_tool_from_function_with_run_manager() -> None:
"""Test args and schema of structured tool when using callbacks."""
def foo( # noqa: D417
bar: int, baz: str, callbacks: CallbackManagerForToolRun | None = None
) -> str:
"""Docstring.
Args:
bar: int
baz: str
"""
assert callbacks is not None
return str(bar) + baz
handler = FakeCallbackHandler()
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.args == {
"bar": {"title": "Bar", "type": "integer"},
"baz": {"title": "Baz", "type": "string"},
}
assert _schema(structured_tool.args_schema) == {
"properties": {
"bar": {"title": "Bar", "type": "integer"},
"baz": {"title": "Baz", "type": "string"},
},
"description": inspect.getdoc(foo),
"title": "foo",
"type": "object",
"required": ["bar", "baz"],
}
assert (
structured_tool.run(
tool_input={"bar": "10", "baz": "baz"}, run_manger=[handler]
)
== "10baz"
)
def test_structured_tool_from_parameterless_function() -> None:
"""Test parameterless function of structured tool."""
def foo() -> str:
"""Docstring."""
return "invoke foo"
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.run({}) == "invoke foo"
assert structured_tool.run("") == "invoke foo"
def test_named_tool_decorator() -> None:
"""Test functionality when arguments are provided as input to decorator."""
@tool("search")
def search_api(query: str) -> str:
"""Search the API for the query."""
assert isinstance(query, str)
return f"API result - {query}"
assert isinstance(search_api, BaseTool)
assert search_api.name == "search"
assert not search_api.return_direct
assert search_api.run({"query": "foo"}) == "API result - foo"
def test_named_tool_decorator_return_direct() -> None:
"""Test functionality when arguments and return direct are provided as input."""
@tool("search", return_direct=True)
def search_api(query: str, *args: Any) -> str:
"""Search the API for the query."""
return "API result"
assert isinstance(search_api, BaseTool)
assert search_api.name == "search"
assert search_api.return_direct
assert search_api.run({"query": "foo"}) == "API result"
def test_unnamed_tool_decorator_return_direct() -> None:
"""Test functionality when only return direct is provided."""
@tool(return_direct=True)
def search_api(query: str) -> str:
"""Search the API for the query."""
assert isinstance(query, str)
return "API result"
assert isinstance(search_api, BaseTool)
assert search_api.name == "search_api"
assert search_api.return_direct
assert search_api.run({"query": "foo"}) == "API result"
def test_tool_with_kwargs() -> None:
"""Test functionality when only return direct is provided."""
@tool(return_direct=True)
def search_api(
arg_0: str,
arg_1: float = 4.3,
ping: str = "hi",
) -> str:
"""Search the API for the query."""
return f"arg_0={arg_0}, arg_1={arg_1}, ping={ping}"
assert isinstance(search_api, BaseTool)
result = search_api.run(
tool_input={
"arg_0": "foo",
"arg_1": 3.2,
"ping": "pong",
}
)
assert result == "arg_0=foo, arg_1=3.2, ping=pong"
result = search_api.run(
tool_input={
"arg_0": "foo",
}
)
assert result == "arg_0=foo, arg_1=4.3, ping=hi"
# For backwards compatibility, we still accept a single str arg
result = search_api.run("foobar")
assert result == "arg_0=foobar, arg_1=4.3, ping=hi"
def test_missing_docstring() -> None:
"""Test error is raised when docstring is missing."""
# expect to throw a value error if there's no docstring
with pytest.raises(ValueError, match="Function must have a docstring"):
@tool
def search_api(query: str) -> str:
return "API result"
@tool
class MyTool(BaseModel):
foo: str
assert not MyTool.description # type: ignore[attr-defined]
def test_create_tool_positional_args() -> None:
"""Test that positional arguments are allowed."""
test_tool = Tool("test_name", lambda x: x, "test_description")
assert test_tool.invoke("foo") == "foo"
assert test_tool.name == "test_name"
assert test_tool.description == "test_description"
assert test_tool.is_single_input
def test_create_tool_keyword_args() -> None:
"""Test that keyword arguments are allowed."""
test_tool = Tool(name="test_name", func=lambda x: x, description="test_description")
assert test_tool.is_single_input
assert test_tool.invoke("foo") == "foo"
assert test_tool.name == "test_name"
assert test_tool.description == "test_description"
async def test_create_async_tool() -> None:
"""Test that async tools are allowed."""
async def _test_func(x: str) -> str:
return x
test_tool = Tool(
name="test_name",
func=lambda x: x,
description="test_description",
coroutine=_test_func,
)
assert test_tool.is_single_input
assert test_tool.invoke("foo") == "foo"
assert test_tool.name == "test_name"
assert test_tool.description == "test_description"
assert test_tool.coroutine is not None
assert await test_tool.arun("foo") == "foo"
| _MockStructuredTool |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_reflection.py | {
"start": 30103,
"end": 31120
} | class ____(fixtures.TablesTest):
__only_on__ = "postgresql >= 10"
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"array_table",
metadata,
Column("id", INTEGER, primary_key=True),
Column("datas", ARRAY(INTEGER)),
Column("datass", ARRAY(INTEGER, dimensions=2)),
Column("datasss", ARRAY(INTEGER, dimensions=3)),
)
def test_array_table_is_reflected(self, connection):
metadata = MetaData()
table = Table("array_table", metadata, autoload_with=connection)
def assert_is_integer_array(data_type):
assert isinstance(data_type, ARRAY)
# posgres treats all arrays as one-dimensional arrays
assert isinstance(data_type.item_type, INTEGER)
assert_is_integer_array(table.c.datas.type)
assert_is_integer_array(table.c.datass.type)
assert_is_integer_array(table.c.datasss.type)
| ArrayReflectionTest |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 5909,
"end": 9443
} | class ____(AsyncHTTPTestCase):
def get_handlers(self):
return [
("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler),
]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body, newline=b"\r\n"):
with closing(IOStream(socket.socket())) as stream:
self.io_loop.run_sync(
lambda: stream.connect(("127.0.0.1", self.get_http_port()))
)
stream.write(
newline.join(headers + [utf8("Content-Length: %d" % len(body))])
+ newline
+ newline
+ body
)
start_line, headers, body = self.io_loop.run_sync(
lambda: read_stream_body(stream)
)
return body
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch(
[
b"POST /multipart HTTP/1.0",
b"Content-Type: multipart/form-data; boundary=1234567890",
b"X-Header-encoding-test: \xe9",
],
b"\r\n".join(
[
b"Content-Disposition: form-data; name=argument",
b"",
"\u00e1".encode(),
b"--1234567890",
'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode(),
b"",
"\u00fa".encode(),
b"--1234567890--",
b"",
]
),
)
data = json_decode(response)
self.assertEqual("\u00e9", data["header"])
self.assertEqual("\u00e1", data["argument"])
self.assertEqual("\u00f3", data["filename"])
self.assertEqual("\u00fa", data["filebody"])
def test_newlines(self):
# We support both CRLF and bare LF as line separators.
for newline in (b"\r\n", b"\n"):
response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"", newline=newline)
self.assertEqual(response, b"Hello world")
@gen_test
def test_100_continue(self):
# Run through a 100-continue interaction by hand:
# When given Expect: 100-continue, we get a 100 response after the
# headers, and then the real response after the body.
stream = IOStream(socket.socket())
yield stream.connect(("127.0.0.1", self.get_http_port()))
yield stream.write(
b"\r\n".join(
[
b"POST /hello HTTP/1.1",
b"Host: 127.0.0.1",
b"Content-Length: 1024",
b"Expect: 100-continue",
b"Connection: close",
b"\r\n",
]
)
)
data = yield stream.read_until(b"\r\n\r\n")
self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
stream.write(b"a" * 1024)
first_line = yield stream.read_until(b"\r\n")
self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
header_data = yield stream.read_until(b"\r\n\r\n")
headers = HTTPHeaders.parse(native_str(header_data.decode("latin1")))
body = yield stream.read_bytes(int(headers["Content-Length"]))
self.assertEqual(body, b"Got 1024 bytes in POST")
stream.close()
| HTTPConnectionTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/markup.py | {
"start": 958,
"end": 1750
} | class ____(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
.. versionadded:: 0.6
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
| BBCodeLexer |
python | sympy__sympy | sympy/functions/special/hyper.py | {
"start": 27090,
"end": 29364
} | class ____(DefinedFunction):
"""
A base class for "hyper representation functions".
This is used exclusively in ``hyperexpand()``, but fits more logically here.
pFq is branched at 1 if p == q+1. For use with slater-expansion, we want
define an "analytic continuation" to all polar numbers, which is
continuous on circles and on the ray t*exp_polar(I*pi). Moreover, we want
a "nice" expression for the various cases.
This base class contains the core logic, concrete derived classes only
supply the actual functions.
"""
@classmethod
def eval(cls, *args):
newargs = tuple(map(unpolarify, args[:-1])) + args[-1:]
if args != newargs:
return cls(*newargs)
@classmethod
def _expr_small(cls, x):
""" An expression for F(x) which holds for |x| < 1. """
raise NotImplementedError
@classmethod
def _expr_small_minus(cls, x):
""" An expression for F(-x) which holds for |x| < 1. """
raise NotImplementedError
@classmethod
def _expr_big(cls, x, n):
""" An expression for F(exp_polar(2*I*pi*n)*x), |x| > 1. """
raise NotImplementedError
@classmethod
def _expr_big_minus(cls, x, n):
""" An expression for F(exp_polar(2*I*pi*n + pi*I)*x), |x| > 1. """
raise NotImplementedError
def _eval_rewrite_as_nonrep(self, *args, **kwargs):
x, n = self.args[-1].extract_branch_factor(allow_half=True)
minus = False
newargs = self.args[:-1] + (x,)
if not n.is_Integer:
minus = True
n -= S.Half
newerargs = newargs + (n,)
if minus:
small = self._expr_small_minus(*newargs)
big = self._expr_big_minus(*newerargs)
else:
small = self._expr_small(*newargs)
big = self._expr_big(*newerargs)
if big == small:
return small
return Piecewise((big, abs(x) > 1), (small, True))
def _eval_rewrite_as_nonrepsmall(self, *args, **kwargs):
x, n = self.args[-1].extract_branch_factor(allow_half=True)
args = self.args[:-1] + (x,)
if not n.is_Integer:
return self._expr_small_minus(*args)
return self._expr_small(*args)
| HyperRep |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 12547,
"end": 12760
} | class ____(GreatExpectationsError):
def __init__(self, message: str) -> None:
self.message = f"Cannot initialize data asset: {message}"
super().__init__(self.message)
| DataAssetInitializationError |
python | pyodide__pyodide | src/tests/test_package_loading.py | {
"start": 17573,
"end": 44560
} | class ____:
def __init__(
self,
name: str,
source: str | None = None,
direct_url: dict[str, str] | None = None,
installer: str | None = None,
version: str = "0.0.1",
):
self.name = name
self.version = version
direct_url_json = json.dumps(direct_url) if direct_url else None
self._files: dict[str, str | None] = {
"PYODIDE_SOURCE": source,
"direct_url.json": direct_url_json,
"INSTALLER": installer,
}
@property
def dist_info_name(self):
# https://packaging.python.org/en/latest/specifications/name-normalization/#normalization
normalized_name = re.sub(r"[-_.]+", "-", self.name).lower()
return f"{normalized_name}-{self.version}.dist-info"
def write(self, base_dir: Path) -> None:
dist_info_dir = base_dir / self.dist_info_name
dist_info_dir.mkdir(exist_ok=True)
for key, value in self._files.items():
if value is not None:
(dist_info_dir / key).write_text(value)
with (dist_info_dir / "METADATA").open("w") as f:
# fmt: off
f.write(
"Metadata-Version: 2.1\n"
f"Name: {self.name}\n"
f"Version: {self.version}\n"
)
# fmt: on
def __repr__(self):
return self.name
result_dist_pairs = [
("default channel", DummyDistribution("A", source="pyodide")),
(
"default channel",
DummyDistribution(
"B",
source="pyodide",
direct_url={"url": "http://some.pkg.src/a/b/c.whl"},
installer="pip",
),
),
(
"http://some.pkg.src/a/b/c.whl",
DummyDistribution("C", source="http://some.pkg.src/a/b/c.whl"),
),
(
"http://some.pkg.src/a/b/c.whl",
DummyDistribution(
"D",
source="http://some.pkg.src/a/b/c.whl",
direct_url={"url": "http://a.b.c/x/y/z.whl"},
installer="pip",
),
),
(
"http://a.b.c/x/y/z.whl",
DummyDistribution(
"E", direct_url={"url": "http://a.b.c/x/y/z.whl"}, installer="pip"
),
),
("pip (index unknown)", DummyDistribution("F", installer="pip")),
("other (index unknown)", DummyDistribution("G", installer="other")),
("Unknown", DummyDistribution("H-H")),
]
@pytest.mark.parametrize("result,dist", result_dist_pairs)
def test_get_dist_source(result, dist, tmp_path):
from pyodide._package_loader import get_dist_source
dist.write(tmp_path)
assert (dist.name, result) == get_dist_source(tmp_path / dist.dist_info_name)
def test_init_loaded_packages(monkeypatch, tmp_path):
from pyodide import _package_loader
class loadedPackagesCls:
pass
loadedPackages = loadedPackagesCls()
monkeypatch.setattr(_package_loader, "SITE_PACKAGES", tmp_path)
monkeypatch.setattr(_package_loader, "loadedPackages", loadedPackages)
dists = [dist for [_, dist] in result_dist_pairs]
for dist in dists:
dist.write(tmp_path)
_package_loader.init_loaded_packages()
for [result, dist] in result_dist_pairs:
assert hasattr(loadedPackages, dist.name)
assert getattr(loadedPackages, dist.name) == result
@pytest.mark.xfail_browsers(node="Some fetch trouble")
@pytest.mark.skip_refcount_check
@pytest.mark.skip_pyproxy_check
@pytest.mark.requires_dynamic_linking
def test_custom_lockfile(selenium_standalone_noload):
selenium = selenium_standalone_noload
lock = selenium.run_js(
"""
let pyodide = await loadPyodide({fullStdLib: false, packages: ["micropip"]});
await pyodide.loadPackage("micropip")
return pyodide.runPythonAsync(`
import micropip
await micropip.install("hypothesis==6.47.3")
micropip.freeze()
`);
"""
)
custom_lockfile = DIST_PATH / "custom_lockfile.json"
custom_lockfile.write_text(lock)
try:
assert (
selenium.run_js(
"""
let pyodide = await loadPyodide({fullStdLib: false, lockFileURL: "custom_lockfile.json", packages: ["hypothesis"] });
return pyodide.runPython("import hypothesis; hypothesis.__version__")
"""
)
== "6.47.3"
)
finally:
custom_lockfile.unlink()
@pytest.mark.xfail_browsers(node="Some fetch trouble")
@pytest.mark.skip_refcount_check
@pytest.mark.skip_pyproxy_check
@pytest.mark.requires_dynamic_linking
def test_custom_lockfile_from_indexedDB(selenium_standalone_noload):
selenium = selenium_standalone_noload
lock = selenium.run_js(
"""
let pyodide = await loadPyodide({fullStdLib: false, packages: ["micropip"]});
await pyodide.loadPackage("micropip")
return pyodide.runPython(`
import micropip
micropip.freeze()
`);
"""
)
selenium.run_js(
f"""
localStorage.setItem("pyodide-lock.json", {json.dumps(lock)});
"""
)
selenium.run_js(
"""
lockfile = localStorage.getItem("pyodide-lock.json");
lockfileURL = URL.createObjectURL(new Blob([lockfile], {type: "application/json"}));
let pyodide2 = await loadPyodide({
fullStdLib: false,
lockFileURL: lockfileURL,
packages: ["micropip"],
});
await pyodide2.runPython(`import micropip`)
"""
)
def test_custom_lockfile_different_dir(
selenium_standalone_noload, tmp_path, httpserver
):
selenium = selenium_standalone_noload
orig_lockfile = DIST_PATH / "pyodide-lock.json"
custom_lockfile_name = "custom-lockfile.json"
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
test_file_path = Path(__file__).parent / "wheels" / test_file_name
lockfile_content = json.loads(orig_lockfile.read_text())
lockfile_content["packages"] = {
"dummy-pkg": {
"name": "dummy_pkg",
"version": "0.1.0",
"unvendor_tests": False,
"sha256": "22fc6330153be71220aea157ab135c53c7d34ff1a6d1d1a4705c95eef1a6f262",
"depends": [],
"file_name": test_file_name,
"install_dir": "site",
"package_type": "package",
"imports": [],
}
}
custom_lockfile_content = json.dumps(lockfile_content)
test_file_data = test_file_path.read_bytes()
# Setup httpserver to serve lockfile and wheel file
httpserver.expect_oneshot_request(f"/{custom_lockfile_name}").respond_with_data(
custom_lockfile_content.encode(),
content_type="application/json",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
httpserver.expect_oneshot_request(f"/{test_file_name}").respond_with_data(
test_file_data,
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
if selenium.browser == "node":
lockfile_url = f"{(tmp_path / custom_lockfile_name).resolve()}"
# For node, we still need to write the file locally
(tmp_path / custom_lockfile_name).write_text(custom_lockfile_content)
shutil.copy(test_file_path, tmp_path / test_file_name)
else:
lockfile_url = httpserver.url_for(f"/{custom_lockfile_name}")
selenium.run_js(
f"""
let pyodide = await loadPyodide({{fullStdLib: false, lockFileURL: {lockfile_url!r} }});
await pyodide.loadPackage("dummy_pkg", {{ checkIntegrity: false }});
return pyodide.runPython("import dummy_pkg")
"""
)
def test_lock_file_contents_error(selenium_standalone_noload):
selenium = selenium_standalone_noload
message = "Error: Can't pass both lockFileContents and lockFileURL"
with pytest.raises(selenium.JavascriptException, match=message):
selenium.run_js(
"""
await loadPyodide({
lockFileContents: "x",
lockFileURL: "y"
});
"""
)
def test_lock_file_contents_relative_file_name(selenium_standalone_noload, tmp_path):
selenium = selenium_standalone_noload
orig_lockfile = DIST_PATH / "pyodide-lock.json"
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
lockfile_content = json.loads(orig_lockfile.read_text())
lockfile_content["packages"] = {
"dummy-pkg": {
"name": "dummy_pkg",
"version": "0.1.0",
"unvendor_tests": False,
"sha256": "22fc6330153be71220aea157ab135c53c7d34ff1a6d1d1a4705c95eef1a6f262",
"depends": [],
"file_name": test_file_name,
"install_dir": "site",
"package_type": "package",
"imports": [],
}
}
message = 'Lock file file_name for package "dummy_pkg" is relative path "dummy_pkg-0.1.0-py3-none-any.whl" but no packageBaseUrl provided'
content = json.dumps(lockfile_content)
selenium.run_js(
"""
const py = await loadPyodide({
lockFileContents: %s,
});
await py.loadPackage("dummy_pkg");
"""
% content
)
assert message in selenium.logs
def test_lockfilecontents_package_base_url(
selenium_standalone_noload, tmp_path, httpserver
):
selenium = selenium_standalone_noload
orig_lockfile = DIST_PATH / "pyodide-lock.json"
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
test_file_path = Path(__file__).parent / "wheels" / test_file_name
lockfile_content = json.loads(orig_lockfile.read_text())
lockfile_content["packages"] = {
"dummy-pkg": {
"name": "dummy_pkg",
"version": "0.1.0",
"unvendor_tests": False,
"sha256": "22fc6330153be71220aea157ab135c53c7d34ff1a6d1d1a4705c95eef1a6f262",
"depends": [],
"file_name": test_file_name,
"install_dir": "site",
"package_type": "package",
"imports": [],
}
}
lockfile_content_json = json.dumps(lockfile_content)
test_file_data = test_file_path.read_bytes()
# Setup httpserver to serve the wheel file
httpserver.expect_oneshot_request(f"/{test_file_name}").respond_with_data(
test_file_data,
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
if selenium.browser == "node":
base_url = str(tmp_path)
# For node, we still need to copy the file locally
shutil.copy(test_file_path, tmp_path / test_file_name)
else:
base_url = f"http://{httpserver.host}:{httpserver.port}"
selenium.run_js(
f"""
let pyodide = await loadPyodide({{fullStdLib: false, lockFileContents: {lockfile_content_json!r}, packageBaseUrl: {base_url!r} }});
await pyodide.loadPackage("dummy_pkg", {{ checkIntegrity: false }});
return pyodide.runPython("import dummy_pkg")
"""
)
def test_lockfilecontents_absolute_file_name(
selenium_standalone_noload, tmp_path, httpserver
):
selenium = selenium_standalone_noload
orig_lockfile = DIST_PATH / "pyodide-lock.json"
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
test_file_path = Path(__file__).parent / "wheels" / test_file_name
dummy_pkg = {
"name": "dummy_pkg",
"version": "0.1.0",
"unvendor_tests": False,
"sha256": "22fc6330153be71220aea157ab135c53c7d34ff1a6d1d1a4705c95eef1a6f262",
"depends": [],
"install_dir": "site",
"package_type": "package",
"imports": [],
}
test_file_data = test_file_path.read_bytes()
# Setup httpserver to serve the wheel file
httpserver.expect_oneshot_request(f"/{test_file_name}").respond_with_data(
test_file_data,
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
if selenium.browser == "node":
base_url = str(tmp_path / test_file_name)
# For node, we still need to copy the file locally
shutil.copy(test_file_path, tmp_path / test_file_name)
else:
base_url = httpserver.url_for(f"/{test_file_name}")
dummy_pkg["file_name"] = base_url
lockfile_content = json.loads(orig_lockfile.read_text())
lockfile_content["packages"] = {"dummy-pkg": dummy_pkg}
lockfile_content_json = json.dumps(lockfile_content)
selenium.run_js(
f"""
let pyodide = await loadPyodide({{fullStdLib: false, lockFileContents: {lockfile_content_json!r} }});
await pyodide.loadPackage("dummy_pkg", {{ checkIntegrity: false }});
return pyodide.runPython("import dummy_pkg")
"""
)
@pytest.mark.parametrize(
"load_name, normalized_name, real_name",
[
(
"test-dummy-unNormalized",
"test-dummy-unnormalized",
"test-dummy-unNormalized",
),
(
"test-dummy_unnormalized",
"test-dummy-unnormalized",
"test-dummy-unNormalized",
),
],
)
@pytest.mark.requires_dynamic_linking # only required for fpcast-test
def test_normalized_name(selenium_standalone, load_name, normalized_name, real_name):
selenium = selenium_standalone
selenium.run_js(
f"""
const msgs = [];
await pyodide.loadPackage(
"{load_name}",
{{
messageCallback: (msg) => msgs.push(msg),
}}
)
const loaded = Object.keys(pyodide.loadedPackages);
assert(() => loaded.includes("{real_name}"));
const loadStartMsgs = msgs.filter((msg) => msg.startsWith("Loading"));
const loadEndMsgs = msgs.filter((msg) => msg.startsWith("Loaded"));
assert(() => loadStartMsgs.some((msg) => msg.includes("{real_name}")));
assert(() => loadEndMsgs.some((msg) => msg.includes("{real_name}")));
"""
)
def test_data_files_support(selenium_standalone, httpserver):
selenium = selenium_standalone
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
test_file_path = Path(__file__).parent / "wheels" / test_file_name
test_file_data = test_file_path.read_bytes()
httpserver.expect_oneshot_request("/" + test_file_name).respond_with_data(
test_file_data,
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
request_url = httpserver.url_for("/" + test_file_name)
selenium.run_js(
f"""
await pyodide.loadPackage("{request_url}");
"""
)
@run_in_pyodide
def _run(selenium):
import sys
from pathlib import Path
import dummy_pkg
assert dummy_pkg
assert (Path(sys.prefix) / "share" / "datafile").is_file(), "datafile not found"
assert (Path(sys.prefix) / "etc" / "datafile2").is_file(), "datafile2 not found"
_run(selenium)
def test_install_api(selenium_standalone, httpserver):
selenium = selenium_standalone
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
test_file_path = Path(__file__).parent / "wheels" / test_file_name
test_file_data = test_file_path.read_bytes()
install_dir = "/random_install_dir"
httpserver.expect_oneshot_request("/" + test_file_name).respond_with_data(
test_file_data,
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
request_url = httpserver.url_for("/" + test_file_name)
selenium.run_js(
f"""
wheelData = await fetch("{request_url}");
wheelDataArr = new Uint8Array(await wheelData.arrayBuffer());
await pyodide._api.install(
wheelDataArr,
"{test_file_name}",
"{install_dir}",
new Map([["INSTALLER", "pytest"]])
);
"""
)
@run_in_pyodide
def _run(selenium, pkg_dir):
import pathlib
d = pathlib.Path(pkg_dir)
assert d.is_dir(), f"Directory {d} not found"
assert (d / "dummy_pkg-0.1.0.dist-info").is_dir(), (
"dist-info directory not found"
)
assert (d / "dummy_pkg-0.1.0.dist-info" / "INSTALLER").is_file(), (
"INSTALLER file not found"
)
assert (d / "dummy_pkg").is_dir(), "package directory not found"
_run(selenium, install_dir)
def test_load_package_stream(selenium_standalone, httpserver):
selenium = selenium_standalone
test_file_name = "dummy_pkg-0.1.0-py3-none-any.whl"
test_file_path = Path(__file__).parent / "wheels" / test_file_name
test_file_data = test_file_path.read_bytes()
httpserver.expect_oneshot_request("/" + test_file_name).respond_with_data(
test_file_data,
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
url = httpserver.url_for("/" + test_file_name)
selenium.run_js(
"""
const logs = [];
const stdout = (msg) => { logs.push(msg); };
pyodide.setStdout({ batched: stdout });
await pyodide.loadPackage("%s");
assert(() => logs.length > 0);
assert(() => logs[0].startsWith("Loading dummy_pkg"));
assert(() => logs[1].startsWith("Loaded dummy_pkg"));
"""
% url
)
@pytest.mark.skip_refcount_check
def test_load_package_stream_and_callback(selenium_standalone, httpserver):
# messageCallback and errorCallback should still take precedence over stdout stream
selenium = selenium_standalone
micropip_path = get_micropip_wheel()
httpserver.expect_oneshot_request("/" + micropip_path.name).respond_with_data(
micropip_path.read_bytes(),
content_type="application/zip",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
url = httpserver.url_for("/" + micropip_path.name)
selenium.run_js(
"""
const logs = [];
const stdout = (msg) => { logs.push(msg); };
pyodide.setStdout({ batched: stdout });
await pyodide.loadPackage(["micropip", "%s"], { messageCallback: (msg) => logs.push(msg), errorCallback: (err) => logs.push(err) });
console.log(logs);
assert(() => logs.length > 0);
assert(() => logs[0].startsWith("Loading same package micropip from"));
assert(() => logs[1].startsWith("Loading micropip"));
assert(() => logs[2].startsWith("Loaded micropip"));
"""
% url
)
@pytest.mark.skip_refcount_check
def test_micropip_list_pyodide_package(selenium_standalone):
selenium = selenium_standalone
selenium.load_package("micropip")
selenium.run_js(
"""
await pyodide.runPythonAsync(`
import micropip
await micropip.install(
"test-dummy"
);
`);
"""
)
selenium.run_js(
"""
await pyodide.runPythonAsync(`
import micropip
pkgs = micropip.list()
assert "test-dummy" in pkgs
assert pkgs["test-dummy"].source.lower() == "pyodide"
`);
"""
)
@pytest.mark.skip_refcount_check
def test_micropip_list_loaded_from_js(selenium_standalone):
selenium = selenium_standalone
selenium.load_package("micropip")
selenium.run_js(
"""
await pyodide.loadPackage("test-dummy");
await pyodide.runPythonAsync(`
import micropip
pkgs = micropip.list()
assert "test-dummy" in pkgs
assert pkgs["test-dummy"].source.lower() == "pyodide"
`);
"""
)
@pytest.mark.skip_refcount_check
def test_micropip_install_non_normalized_package(selenium_standalone):
selenium = selenium_standalone
selenium.load_package("micropip")
selenium.run_async(
"""
import micropip
await micropip.install("test-dummy-unNormalized")
import dummy_unnormalized
"""
)
@only_node
def test_package_cache_dir(selenium_standalone_noload, tmp_path):
selenium = selenium_standalone_noload
package_cache_dir = tmp_path / "package_cache"
package_cache_dir.mkdir()
# copy one package in the distribution to the package cache dir
dummy_wheel = DIST_PATH / "test_dummy-1.0.0-py2.py3-none-any.whl"
shutil.copy(
dummy_wheel,
package_cache_dir / dummy_wheel.name,
)
selenium.run_js(
f"""
pyodide = await loadPyodide({{"packageCacheDir": "{package_cache_dir}"}});
"""
)
selenium.run_js(
"""
await pyodide.loadPackage("test-dummy");
return pyodide.runPython("import dummy");
"""
)
assert "Loaded test-dummy" in selenium.logs
# should not fallback to the cdn as the wheel is already in the package cache dir
assert "caching the wheel" not in selenium.logs
@only_node
def test_micropip_freeze_with_package_cache_dir(selenium_standalone_noload, tmp_path):
selenium = selenium_standalone_noload
package_cache_dir = tmp_path / "package_cache"
package_cache_dir.mkdir()
micropip_path = get_micropip_wheel()
shutil.copy(
micropip_path,
package_cache_dir / micropip_path.name,
)
selenium.run_js(
f"""
pyodide = await loadPyodide({{"packageCacheDir": "{package_cache_dir}"}});
"""
)
freezed_lockfile = selenium.run_js(
"""
await pyodide.loadPackage("micropip");
return pyodide.runPython("import micropip; micropip.freeze()");
"""
)
assert "Loaded micropip" in selenium.logs
# should not fallback to the cdn as the wheel is already in the package cache dir
assert "caching the wheel" not in selenium.logs
lockfile_content = json.loads(freezed_lockfile)
assert lockfile_content["packages"]["micropip"]["file_name"] == str(
package_cache_dir / micropip_path.name
)
@only_node
def test_package_manager_urls_node(selenium_standalone_noload, tmp_path):
selenium = selenium_standalone_noload
def with_slash(path: str | Path) -> str:
return str(path).rstrip("/") + "/"
version = selenium.run_js(
"""
pyodide = await loadPyodide();
return pyodide._api.version;
"""
)
jsdelivr_url = f"https://cdn.jsdelivr.net/pyodide/v{version}/full/"
# no option
selenium.run_js(
f"""
pyodide = await loadPyodide();
assert(() => pyodide._api.packageManager.cdnURL === `{jsdelivr_url}`);
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(DIST_PATH)}');
"""
)
# with packageCacheDir
selenium.run_js(
f"""
pyodide = await loadPyodide({{"packageCacheDir": "{tmp_path}"}});
assert(() => pyodide._api.packageManager.cdnURL === `{jsdelivr_url}`);
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(tmp_path)}');
"""
)
# with lockfileURL
lockfile_url = with_slash(tmp_path) + "pyodide-lock.json"
shutil.copy(DIST_PATH / "pyodide-lock.json", lockfile_url)
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileURL": "{lockfile_url}"}});
assert(() => pyodide._api.packageManager.cdnURL === `{jsdelivr_url}`);
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(tmp_path)}');
"""
)
# with lockfileContents
lockfile_contents = (DIST_PATH / "pyodide-lock.json").read_text()
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileContents": '{lockfile_contents}'}});
assert(() => pyodide._api.packageManager.cdnURL === `{jsdelivr_url}`);
assert(() => pyodide._api.packageManager.installBaseUrl === undefined);
"""
)
# with lockfileContents and packageBaseUrl
# cdn url should be replaced to the packageBaseUrl if packageBaseUrl is provided
lockfile_contents = (DIST_PATH / "pyodide-lock.json").read_text()
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileContents": '{lockfile_contents}', "packageBaseUrl": "{tmp_path}"}});
assert(() => pyodide._api.packageManager.cdnURL === `{with_slash(tmp_path)}`);
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(tmp_path)}');
"""
)
# with lockfileURL and packageCacheDir
package_cache_dir = tmp_path / "package_cache"
package_cache_dir.mkdir()
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileURL": "{lockfile_url}", "packageCacheDir": "{package_cache_dir}"}});
assert(() => pyodide._api.packageManager.cdnURL === `{jsdelivr_url}`);
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(package_cache_dir)}');
"""
)
@pytest.mark.xfail_browsers(node="no node")
def test_package_manager_urls_browsers(selenium_standalone_noload, httpserver):
selenium = selenium_standalone_noload
base_url = selenium.base_url
def with_slash(path: str | Path) -> str:
return str(path).rstrip("/") + "/"
# no option
selenium.run_js(
f"""
pyodide = await loadPyodide();
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(base_url)}');
"""
)
# with lockfileURL
httpserver.expect_oneshot_request(
"/pyodide-lock.json",
).respond_with_data(
(DIST_PATH / "pyodide-lock.json").read_text(),
content_type="application/json",
headers={"Access-Control-Allow-Origin": "*"},
status=200,
)
lockfile_url = httpserver.url_for("/pyodide-lock.json")
lockfile_base = lockfile_url.rsplit("/", 1)[0]
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileURL": "{lockfile_url}"}});
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(lockfile_base)}');
"""
)
# with lockfileContents
lockfile_contents = (DIST_PATH / "pyodide-lock.json").read_text()
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileContents": '{lockfile_contents}'}});
assert(() => pyodide._api.packageManager.installBaseUrl === undefined);
"""
)
# with lockfileContents and packageBaseUrl
# cdn url should be replaced to the packageBaseUrl if packageBaseUrl is provided
lockfile_contents = (DIST_PATH / "pyodide-lock.json").read_text()
base_url = "http://example.com/pyodide"
selenium.run_js(
f"""
pyodide = await loadPyodide({{"lockFileContents": '{lockfile_contents}', "packageBaseUrl": "{base_url}"}});
assert(() => pyodide._api.packageManager.installBaseUrl === '{with_slash(base_url)}');
"""
)
| DummyDistribution |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/expectation_diagnostics.py | {
"start": 1261,
"end": 19023
} | class ____(SerializableDictDot):
"""An immutable object created by Expectation.run_diagnostics.
It contains information introspected from the Expectation class, in formats that can be renderered at the command line, and by the Gallery.
It has three external-facing use cases:
1. `ExpectationDiagnostics.to_dict()` creates the JSON object that populates the Gallery.
2. `ExpectationDiagnostics.generate_checklist()` creates CLI-type string output to assist with development.
""" # noqa: E501 # FIXME CoP
# This object is taken directly from the Expectation class, without modification
examples: List[ExpectationTestDataCases]
gallery_examples: List[ExpectationTestDataCases]
# These objects are derived from the Expectation class
# They're a combination of direct introspection of existing properties,
# and instantiating the Expectation with test data and actually executing
# methods.
# For example, we can verify the existence of certain Renderers through
# introspection alone, but in order to see what they return, we need to
# instantiate the Expectation and actually run the method.
library_metadata: Union[AugmentedLibraryMetadata, ExpectationDescriptionDiagnostics]
description: ExpectationDescriptionDiagnostics
execution_engines: ExpectationExecutionEngineDiagnostics
renderers: List[ExpectationRendererDiagnostics]
metrics: List[ExpectationMetricDiagnostics]
tests: List[ExpectationTestDiagnostics]
backend_test_result_counts: List[ExpectationBackendTestResultCounts]
errors: List[ExpectationErrorDiagnostics]
maturity_checklist: ExpectationDiagnosticMaturityMessages
coverage_score: float
@override
def to_json_dict(self) -> dict:
result = convert_to_json_serializable(data=asdict(self))
result["execution_engines_list"] = sorted(
[engine for engine, _bool in result["execution_engines"].items() if _bool is True]
)
return result
def generate_checklist(self) -> str:
"""Generates the checklist in CLI-appropriate string format."""
str_ = self._convert_checks_into_output_message(
self.description["camel_name"],
self.library_metadata.maturity, # type: ignore[union-attr] # could be ExpectationDescriptionDiagnostics
self.maturity_checklist,
)
return str_
@staticmethod
def _check_library_metadata(
library_metadata: Union[AugmentedLibraryMetadata, ExpectationDescriptionDiagnostics],
) -> ExpectationDiagnosticCheckMessage:
"""Check whether the Expectation has a library_metadata object"""
sub_messages: list[ExpectationDiagnosticCheckMessageDict] = []
for problem in library_metadata.problems: # type: ignore[union-attr] # could be ExpectationDescriptionDiagnostics
sub_messages.append(
{
"message": problem,
"passed": False,
}
)
return ExpectationDiagnosticCheckMessage(
message="Has a valid library_metadata object",
passed=library_metadata.library_metadata_passed_checks, # type: ignore[union-attr] # could be ExpectationDescriptionDiagnostics
sub_messages=sub_messages,
)
@staticmethod
def _check_docstring(
description: ExpectationDescriptionDiagnostics,
) -> ExpectationDiagnosticCheckMessage:
"""Check whether the Expectation has an informative docstring"""
message = 'Has a docstring, including a one-line short description that begins with "Expect" and ends with a period' # noqa: E501 # FIXME CoP
if "short_description" in description:
short_description = description["short_description"]
else:
short_description = None
if short_description in {"", "\n", "TODO: Add a docstring here", None}:
return ExpectationDiagnosticCheckMessage(
message=message,
passed=False,
)
elif short_description.startswith("Expect ") and short_description.endswith("."):
return ExpectationDiagnosticCheckMessage(
message=message,
sub_messages=[
{
"message": f'"{short_description}"',
"passed": True,
}
],
passed=True,
)
else:
return ExpectationDiagnosticCheckMessage(
message=message,
sub_messages=[
{
"message": f'"{short_description}"',
"passed": False,
}
],
passed=False,
)
@classmethod
def _check_example_cases(
cls,
examples: List[ExpectationTestDataCases],
tests: List[ExpectationTestDiagnostics],
) -> ExpectationDiagnosticCheckMessage:
"""Check whether this Expectation has at least one positive and negative example case (and all test cases return the expected output)""" # noqa: E501 # FIXME CoP
message = "Has at least one positive and negative example case, and all test cases pass"
(
positive_case_count,
negative_case_count,
) = cls._count_positive_and_negative_example_cases(examples)
unexpected_case_count = cls._count_unexpected_test_cases(tests)
passed = (
(positive_case_count > 0) and (negative_case_count > 0) and (unexpected_case_count == 0)
)
print(positive_case_count, negative_case_count, unexpected_case_count, passed)
return ExpectationDiagnosticCheckMessage(
message=message,
passed=passed,
)
@staticmethod
def _check_core_logic_for_at_least_one_execution_engine(
backend_test_result_counts: List[ExpectationBackendTestResultCounts],
) -> ExpectationDiagnosticCheckMessage:
"""Check whether core logic for this Expectation exists and passes tests on at least one Execution Engine""" # noqa: E501 # FIXME CoP
sub_messages: List[ExpectationDiagnosticCheckMessageDict] = []
passed = False
message = "Has core logic and passes tests on at least one Execution Engine"
all_passing = [
backend_test_result
for backend_test_result in backend_test_result_counts
if backend_test_result.failing_names is None and backend_test_result.num_passed >= 1
]
if len(all_passing) > 0:
passed = True
for result in all_passing:
sub_messages.append(
{
"message": f"All {result.num_passed} tests for {result.backend} are passing", # noqa: E501 # FIXME CoP
"passed": True,
}
)
if not backend_test_result_counts:
sub_messages.append(
{
"message": "There are no test results",
"passed": False,
}
)
return ExpectationDiagnosticCheckMessage(
message=message,
passed=passed,
sub_messages=sub_messages,
)
@staticmethod
def _get_backends_from_test_results(
test_results: List[ExpectationTestDiagnostics],
) -> List[ExpectationBackendTestResultCounts]:
"""Has each tested backend and the number of passing/failing tests"""
backend_results = defaultdict(list)
backend_failing_names = defaultdict(list)
results: List[ExpectationBackendTestResultCounts] = []
for test_result in test_results:
backend_results[test_result.backend].append(test_result.test_passed)
if test_result.test_passed is False:
backend_failing_names[test_result.backend].append(test_result.test_title)
for backend in backend_results:
result_counts = ExpectationBackendTestResultCounts(
backend=backend,
num_passed=backend_results[backend].count(True),
num_failed=backend_results[backend].count(False),
failing_names=backend_failing_names.get(backend),
)
results.append(result_counts)
return results
@staticmethod
def _check_core_logic_for_all_applicable_execution_engines(
backend_test_result_counts: List[ExpectationBackendTestResultCounts],
) -> ExpectationDiagnosticCheckMessage:
"""Check whether core logic for this Expectation exists and passes tests on all applicable Execution Engines""" # noqa: E501 # FIXME CoP
sub_messages: list[ExpectationDiagnosticCheckMessageDict] = []
passed = False
message = (
"Has core logic that passes tests for all applicable Execution Engines and SQL dialects"
)
all_passing = [
backend_test_result
for backend_test_result in backend_test_result_counts
if backend_test_result.failing_names is None and backend_test_result.num_passed >= 1
]
some_failing = [
backend_test_result
for backend_test_result in backend_test_result_counts
if backend_test_result.failing_names is not None
]
if len(all_passing) > 0 and len(some_failing) == 0:
passed = True
for result in all_passing:
sub_messages.append(
{
"message": f"All {result.num_passed} tests for {result.backend} are passing",
"passed": True,
}
)
for result in some_failing:
sub_messages.append(
{
"message": f"Only {result.num_passed} / {result.num_passed + result.num_failed} tests for {result.backend} are passing", # noqa: E501 # FIXME CoP
"passed": False,
}
)
sub_messages.append(
{
"message": f" - Failing: {', '.join(result.failing_names)}", # type: ignore[arg-type] # FIXME CoP
"passed": False,
}
)
if not backend_test_result_counts:
sub_messages.append(
{
"message": "There are no test results",
"passed": False,
}
)
return ExpectationDiagnosticCheckMessage(
message=message,
passed=passed,
sub_messages=sub_messages,
)
@staticmethod
def _count_positive_and_negative_example_cases(
examples: List[ExpectationTestDataCases],
) -> Tuple[int, int]:
"""Scans examples and returns a 2-ple with the numbers of cases with success == True and success == False""" # noqa: E501 # FIXME CoP
positive_cases: int = 0
negative_cases: int = 0
for test_data_cases in examples:
for test in test_data_cases["tests"]:
success = test["output"].get("success")
if success is True:
positive_cases += 1
elif success is False:
negative_cases += 1
return positive_cases, negative_cases
@staticmethod
def _count_unexpected_test_cases(
test_diagnostics: Sequence[ExpectationTestDiagnostics],
) -> int:
"""Scans test_diagnostics and returns the number of cases that did not pass."""
unexpected_cases: int = 0
for test in test_diagnostics:
passed = test["test_passed"] is True
if not passed:
unexpected_cases += 1
return unexpected_cases
    @staticmethod
    def _convert_checks_into_output_message(
        class_name: str,
        maturity_level: str,
        maturity_messages: ExpectationDiagnosticMaturityMessages,
    ) -> str:
        """Converts a list of checks into an output string (potentially nested), with ✔ to indicate checks that passed."""  # noqa: E501 # FIXME CoP
        output_message = f"Completeness checklist for {class_name} ({maturity_level}):"
        # Checks are rendered in maturity order: experimental, then beta, then production.
        checks = (
            maturity_messages.experimental + maturity_messages.beta + maturity_messages.production
        )
        for check in checks:
            # Passed checks get a ✔ marker; failed ones are listed without it.
            if check["passed"]:
                output_message += f"\n ✔ {check['message']}"
            else:
                output_message += f"\n {check['message']}"
            # Optional nested detail lines follow their parent check, same marker rule.
            if "sub_messages" in check:
                for sub_message in check["sub_messages"]:
                    if sub_message["passed"]:
                        output_message += f"\n ✔ {sub_message['message']}"
                    else:
                        output_message += f"\n {sub_message['message']}"
        output_message += "\n"
        return output_message
    @staticmethod
    def _check_input_validation(
        expectation_instance,
        examples: List[ExpectationTestDataCases],
    ) -> ExpectationDiagnosticCheckMessage:
        """Check that the validate_configuration exists and doesn't raise a config error"""
        passed = False
        sub_messages: list[ExpectationDiagnosticCheckMessageDict] = []
        # Matches indented `assert` statements, i.e. custom validation logic in the method source.
        rx = re.compile(r"^[\s]+assert", re.MULTILINE)
        try:
            # The first test's input kwargs are reused to build a sample configuration.
            first_test = examples[0]["tests"][0]
        except IndexError:
            sub_messages.append(
                {
                    "message": "No example found to get kwargs for ExpectationConfiguration",
                    "passed": passed,
                }
            )
        else:
            # Look only at the subclass' own __dict__ so an inherited default doesn't count
            # as the subclass providing its own validate_configuration.
            if "validate_configuration" not in expectation_instance.__class__.__dict__:
                sub_messages.append(
                    {
                        "message": "No validate_configuration method defined on subclass",
                        "passed": passed,
                    }
                )
            else:
                expectation_config = ExpectationConfiguration(
                    type=expectation_instance.expectation_type,
                    kwargs=first_test.input,
                )
                validate_configuration_source = inspect.getsource(
                    expectation_instance.__class__.validate_configuration
                )
                # Custom asserts in the source are treated as evidence of real validation.
                if rx.search(validate_configuration_source):
                    sub_messages.append(
                        {
                            "message": "Custom 'assert' statements in validate_configuration",
                            "passed": True,
                        }
                    )
                else:
                    sub_messages.append(
                        {
                            "message": "Using default validate_configuration from template",
                            "passed": False,
                        }
                    )
                # The overall check passes only if validating a real example config
                # does not raise InvalidExpectationConfigurationError.
                try:
                    expectation_instance.validate_configuration(expectation_config)
                except InvalidExpectationConfigurationError:
                    pass
                else:
                    passed = True
        return ExpectationDiagnosticCheckMessage(
            message="Has basic input validation and type checking",
            passed=passed,
            sub_messages=sub_messages,
        )
@staticmethod
def _check_renderer_methods(
expectation_instance,
) -> ExpectationDiagnosticCheckMessage:
"""Check if all statment renderers are defined"""
passed = False
# For now, don't include the "question", "descriptive", or "answer"
# types since they are so sparsely implemented
# all_renderer_types = {"diagnostic", "prescriptive", "question", "descriptive", "answer"}
all_renderer_types = {"diagnostic", "prescriptive"}
renderer_names = [
name
for name in dir(expectation_instance)
if name.endswith("renderer") and name.startswith("_")
]
renderer_types = {name.split("_")[1] for name in renderer_names}
if all_renderer_types & renderer_types == all_renderer_types:
passed = True
return ExpectationDiagnosticCheckMessage(
# message="Has all four statement Renderers: question, descriptive, prescriptive, diagnostic", # noqa: E501 # FIXME CoP
message="Has both statement Renderers: prescriptive and diagnostic",
passed=passed,
)
@staticmethod
def _check_full_test_suite(
library_metadata: Union[AugmentedLibraryMetadata, ExpectationDescriptionDiagnostics],
) -> ExpectationDiagnosticCheckMessage:
"""Check library_metadata to see if Expectation has a full test suite"""
return ExpectationDiagnosticCheckMessage(
message="Has a full suite of tests, as determined by a code owner",
passed=library_metadata.has_full_test_suite, # type: ignore[union-attr] # could be ExpectationDescriptionDiagnostics
)
@staticmethod
def _check_manual_code_review(
library_metadata: Union[AugmentedLibraryMetadata, ExpectationDescriptionDiagnostics],
) -> ExpectationDiagnosticCheckMessage:
"""Check library_metadata to see if a manual code review has been performed"""
return ExpectationDiagnosticCheckMessage(
message="Has passed a manual review by a code owner for code standards and style guides", # noqa: E501 # FIXME CoP
passed=library_metadata.manually_reviewed_code, # type: ignore[union-attr] # could be ExpectationDescriptionDiagnostics
)
| ExpectationDiagnostics |
python | RaRe-Technologies__gensim | gensim/models/atmodel.py | {
"start": 5177,
"end": 54236
} | class ____(LdaModel):
"""The constructor estimates the author-topic model parameters based on a training corpus."""
    def __init__(self, corpus=None, num_topics=100, id2word=None, author2doc=None, doc2author=None,
                 chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
                 alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
                 gamma_threshold=0.001, serialized=False, serialization_path=None,
                 minimum_probability=0.01, random_state=None):
        """Build the author-topic model and, if a corpus plus author mappings are given, train it immediately.

        Parameters
        ----------
        corpus : iterable of list of (int, float), optional
            Corpus in BoW format
        num_topics : int, optional
            Number of topics to be extracted from the training corpus.
        id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
            A mapping from word ids (integers) to words (strings).
        author2doc : dict of (str, list of int), optional
            A dictionary where keys are the names of authors and values are lists of document IDs that the author
            contributes to.
        doc2author : dict of (int, list of str), optional
            A dictionary where the keys are document IDs and the values are lists of author names.
        chunksize : int, optional
            Controls the size of the mini-batches.
        passes : int, optional
            Number of times the model makes a pass over the entire training data.
        iterations : int, optional
            Maximum number of times the model loops over each document.
        decay : float, optional
            A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
            when each new document is examined. Corresponds to :math:`\\kappa` from
            `'Online Learning for LDA' by Hoffman et al.`_
        offset : float, optional
            Hyper-parameter that controls how much we will slow down the first steps the first few iterations.
            Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
        alpha : {float, numpy.ndarray of float, list of float, str}, optional
            A-priori belief on document-topic distribution, this can be:
                * scalar for a symmetric prior over document-topic distribution,
                * 1D array of length equal to num_topics to denote an asymmetric user defined prior for each topic.

            Alternatively default prior selecting strategies can be employed by supplying a string:
                * 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
                * 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / (topic_index + sqrt(num_topics))`,
                * 'auto': Learns an asymmetric prior from the corpus (not available if `distributed==True`).
        eta : {float, numpy.ndarray of float, list of float, str}, optional
            A-priori belief on topic-word distribution, this can be:
                * scalar for a symmetric prior over topic-word distribution,
                * 1D array of length equal to num_words to denote an asymmetric user defined prior for each word,
                * matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination.

            Alternatively default prior selecting strategies can be employed by supplying a string:
                * 'symmetric': (default) Uses a fixed symmetric prior of `1.0 / num_topics`,
                * 'auto': Learns an asymmetric prior from the corpus.
        update_every : int, optional
            Make updates in topic probability for latest mini-batch.
        eval_every : int, optional
            Calculate and estimate log perplexity for latest mini-batch.
        gamma_threshold : float, optional
            Threshold value of gamma(topic difference between consecutive two topics)
            until which the iterations continue.
        serialized : bool, optional
            Indicates whether the input corpora to the model are simple lists
            or saved to the hard-drive.
        serialization_path : str, optional
            Must be set to a filepath, if `serialized = True` is used.
        minimum_probability : float, optional
            Controls filtering the topics returned for a document (bow).
        random_state : {int, numpy.random.RandomState}, optional
            Set the state of the random number generator inside the author-topic model.

        Raises
        ------
        ValueError
            If neither `corpus` nor `id2word` is given, if the resulting vocabulary is empty,
            or if `serialized` is True without a `serialization_path`.

        """
        # NOTE: this doesn't call constructor of a base class, but duplicates most of this code
        # so we have to set dtype to float64 default here
        self.dtype = np.float64

        # NOTE: as distributed version of this model is not implemented, "distributed" is set to false. Some of the
        # infrastructure to implement a distributed author-topic model is already in place,
        # such as the AuthorTopicState.
        distributed = False
        self.dispatcher = None
        self.numworkers = 1

        self.id2word = id2word
        if corpus is None and self.id2word is None:
            raise ValueError(
                "at least one of corpus/id2word must be specified, to establish input space dimensionality"
            )

        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        elif len(self.id2word) > 0:
            # Highest word id + 1, so that ids form a dense index into the topic-word matrix.
            self.num_terms = 1 + max(self.id2word.keys())
        else:
            self.num_terms = 0

        if self.num_terms == 0:
            raise ValueError("cannot compute the author-topic model over an empty collection (no terms)")

        logger.info('Vocabulary consists of %d words.', self.num_terms)

        # Author mappings start empty; they are populated (and num_authors grown) in update().
        self.author2doc = {}
        self.doc2author = {}

        self.distributed = distributed
        self.num_topics = num_topics
        self.num_authors = 0
        self.chunksize = chunksize
        self.decay = decay
        self.offset = offset
        self.minimum_probability = minimum_probability
        self.num_updates = 0
        self.total_docs = 0

        self.passes = passes
        self.update_every = update_every
        self.eval_every = eval_every

        # Bidirectional author-name <-> integer-id maps, filled in update().
        self.author2id = {}
        self.id2author = {}

        self.serialized = serialized
        if serialized and not serialization_path:
            raise ValueError(
                "If serialized corpora are used, a the path to a folder "
                "where the corpus should be saved must be provided (serialized_path)."
            )
        if serialized and serialization_path:
            # Refuse to overwrite an existing serialized corpus file.
            assert not isfile(serialization_path), \
                "A file already exists at the serialization_path path; " \
                "choose a different serialization_path, or delete the file."
        self.serialization_path = serialization_path

        # Initialize an empty self.corpus.
        self.init_empty_corpus()

        self.alpha, self.optimize_alpha = self.init_dir_prior(alpha, 'alpha')

        assert self.alpha.shape == (self.num_topics,), \
            "Invalid alpha shape. Got shape %s, but expected (%d, )" % (str(self.alpha.shape), self.num_topics)

        self.eta, self.optimize_eta = self.init_dir_prior(eta, 'eta')

        assert (self.eta.shape == (self.num_terms,) or self.eta.shape == (self.num_topics, self.num_terms)), (
            "Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)" %
            (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms)
        )

        self.random_state = utils.get_random_state(random_state)

        # VB constants
        self.iterations = iterations
        self.gamma_threshold = gamma_threshold

        # Initialize the variational distributions q(beta|lambda) and q(theta|gamma)
        self.state = AuthorTopicState(self.eta, (self.num_topics, self.num_terms), (self.num_authors, self.num_topics))
        self.state.sstats = self.random_state.gamma(100., 1. / 100., (self.num_topics, self.num_terms))
        self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))

        # if a training corpus was provided, start estimating the model right away
        if corpus is not None and (author2doc is not None or doc2author is not None):
            use_numpy = self.dispatcher is not None
            self.update(corpus, author2doc, doc2author, chunks_as_numpy=use_numpy)
def __str__(self):
"""Get a string representation of object.
Returns
-------
str
String representation of current instance.
"""
return "%s<num_terms=%s, num_topics=%s, num_authors=%s, decay=%s, chunksize=%s>" % \
(self.__class__.__name__, self.num_terms, self.num_topics, self.num_authors, self.decay, self.chunksize)
def init_empty_corpus(self):
"""Initialize an empty corpus.
If the corpora are to be treated as lists, simply initialize an empty list.
If serialization is used, initialize an empty corpus using :class:`~gensim.corpora.mmcorpus.MmCorpus`.
"""
if self.serialized:
# Initialize the corpus as a serialized empty list.
# This corpus will be extended in self.update.
MmCorpus.serialize(self.serialization_path, []) # Serialize empty corpus.
self.corpus = MmCorpus(self.serialization_path) # Store serialized corpus object in self.corpus.
else:
# All input corpora are assumed to just be lists.
self.corpus = []
def extend_corpus(self, corpus):
"""Add new documents from `corpus` to `self.corpus`.
If serialization is used, then the entire corpus (`self.corpus`) is re-serialized and the new documents
are added in the process. If serialization is not used, the corpus, as a list of documents, is simply extended.
Parameters
----------
corpus : iterable of list of (int, float)
Corpus in BoW format
Raises
------
AssertionError
If serialized == False and corpus isn't list.
"""
if self.serialized:
# Re-serialize the entire corpus while appending the new documents.
if isinstance(corpus, MmCorpus):
# Check that we are not attempting to overwrite the serialized corpus.
assert self.corpus.input != corpus.input, \
'Input corpus cannot have the same file path as the model corpus (serialization_path).'
corpus_chain = chain(self.corpus, corpus) # A generator with the old and new documents.
# Make a temporary copy of the file where the corpus is serialized.
copyfile(self.serialization_path, self.serialization_path + '.tmp')
self.corpus.input = self.serialization_path + '.tmp' # Point the old corpus at this temporary file.
# Re-serialize the old corpus, and extend it with the new corpus.
MmCorpus.serialize(self.serialization_path, corpus_chain)
self.corpus = MmCorpus(self.serialization_path) # Store the new serialized corpus object in self.corpus.
remove(self.serialization_path + '.tmp') # Remove the temporary file again.
else:
# self.corpus and corpus are just lists, just extend the list.
# First check that corpus is actually a list.
assert isinstance(corpus, list), "If serialized == False, all input corpora must be lists."
self.corpus.extend(corpus)
def compute_phinorm(self, expElogthetad, expElogbetad):
r"""Efficiently computes the normalizing factor in phi.
Parameters
----------
expElogthetad: numpy.ndarray
Value of variational distribution :math:`q(\theta|\gamma)`.
expElogbetad: numpy.ndarray
Value of variational distribution :math:`q(\beta|\lambda)`.
Returns
-------
float
Value of normalizing factor.
"""
expElogtheta_sum = expElogthetad.sum(axis=0)
phinorm = expElogtheta_sum.dot(expElogbetad) + 1e-100
return phinorm
    def inference(self, chunk, author2doc, doc2author, rhot, collect_sstats=False, chunk_doc_idx=None):
        """Given a `chunk` of sparse document vectors, update gamma for each author corresponding to the `chunk`.

        Warnings
        --------
        The whole input chunk of document is assumed to fit in RAM, chunking of a large corpus must be done earlier
        in the pipeline.

        Avoids computing the `phi` variational parameter directly using the
        optimization presented in `Lee, Seung: "Algorithms for non-negative matrix factorization", NIPS 2001
        <https://papers.nips.cc/paper/1861-algorithms-for-non-negative-matrix-factorization.pdf>`_.

        Parameters
        ----------
        chunk : iterable of list of (int, float)
            Corpus in BoW format.
        author2doc : dict of (str, list of int), optional
            A dictionary where keys are the names of authors and values are lists of document IDs that the author
            contributes to.
        doc2author : dict of (int, list of str), optional
            A dictionary where the keys are document IDs and the values are lists of author names.
        rhot : float
            Value of rho for conducting inference on documents.
        collect_sstats : boolean, optional
            If True - collect sufficient statistics needed to update the model's topic-word distributions, and return
            `(gamma_chunk, sstats)`. Otherwise, return `(gamma_chunk, None)`. `gamma_chunk` is of shape
            `len(chunk_authors) x self.num_topics`,where `chunk_authors` is the number of authors in the documents in
            the current chunk.
        chunk_doc_idx : numpy.ndarray, optional
            Assigns the value for document index.

        Returns
        -------
        (numpy.ndarray, numpy.ndarray)
            gamma_chunk and sstats (if `collect_sstats == True`, otherwise - None)

        """
        try:
            len(chunk)
        except TypeError:
            # convert iterators/generators to plain list, so we have len() etc.
            chunk = list(chunk)
        if len(chunk) > 1:
            logger.debug("performing inference on a chunk of %i documents", len(chunk))

        # Initialize the variational distribution q(theta|gamma) for the chunk
        if collect_sstats:
            sstats = np.zeros_like(self.expElogbeta)
        else:
            sstats = None
        # Number of documents whose gamma update converged within self.iterations.
        converged = 0

        # Stack all the computed gammas into this output array.
        gamma_chunk = np.zeros((0, self.num_topics))

        # Now, for each document d update gamma and phi w.r.t. all authors in those documents.
        for d, doc in enumerate(chunk):
            if chunk_doc_idx is not None:
                # Map the chunk-local position to the global document id so the right
                # authors are looked up in self.doc2author.
                doc_no = chunk_doc_idx[d]
            else:
                doc_no = d
            # Get the IDs and counts of all the words in the current document.
            # TODO: this is duplication of code in LdaModel. Refactor.
            if doc and not isinstance(doc[0][0], (int, np.integer,)):
                # make sure the term IDs are ints, otherwise np will get upset
                ids = [int(idx) for idx, _ in doc]
            else:
                ids = [idx for idx, _ in doc]
            ids = np.array(ids, dtype=int)
            cts = np.fromiter((cnt for _, cnt in doc), dtype=int, count=len(doc))

            # Get all authors in current document, and convert the author names to integer IDs.
            authors_d = np.fromiter((self.author2id[a] for a in self.doc2author[doc_no]), dtype=int)

            gammad = self.state.gamma[authors_d, :]  # gamma of document d before update.
            tilde_gamma = gammad.copy()  # gamma that will be updated.

            # Compute the expectation of the log of the Dirichlet parameters theta and beta.
            Elogthetad = dirichlet_expectation(tilde_gamma)
            expElogthetad = np.exp(Elogthetad)
            expElogbetad = self.expElogbeta[:, ids]

            # Compute the normalizing constant of phi for the current document.
            phinorm = self.compute_phinorm(expElogthetad, expElogbetad)

            # Iterate between gamma and phi until convergence
            for _ in range(self.iterations):
                lastgamma = tilde_gamma.copy()

                # Update gamma.
                # phi is computed implicitly below,
                dot = np.dot(cts / phinorm, expElogbetad.T)
                for ai, a in enumerate(authors_d):
                    tilde_gamma[ai, :] = (
                        self.alpha
                        + len(self.author2doc[self.id2author[a]]) * expElogthetad[ai, :] * dot
                    )

                # Update gamma.
                # Interpolation between document d's "local" gamma (tilde_gamma),
                # and "global" gamma (gammad).
                tilde_gamma = (1 - rhot) * gammad + rhot * tilde_gamma

                # Update Elogtheta and Elogbeta, since gamma and lambda have been updated.
                Elogthetad = dirichlet_expectation(tilde_gamma)
                expElogthetad = np.exp(Elogthetad)

                # Update the normalizing constant in phi.
                phinorm = self.compute_phinorm(expElogthetad, expElogbetad)

                # Check for convergence.
                # Criterion is mean change in "local" gamma.
                meanchange_gamma = mean_absolute_difference(tilde_gamma.ravel(), lastgamma.ravel())
                gamma_condition = meanchange_gamma < self.gamma_threshold
                if gamma_condition:
                    converged += 1
                    break
            # End of iterations loop.

            # Store the updated gammas in the model state.
            self.state.gamma[authors_d, :] = tilde_gamma

            # Stack the new gammas into the output array.
            gamma_chunk = np.vstack([gamma_chunk, tilde_gamma])

            if collect_sstats:
                # Contribution of document d to the expected sufficient
                # statistics for the M step.
                expElogtheta_sum_a = expElogthetad.sum(axis=0)
                sstats[:, ids] += np.outer(expElogtheta_sum_a.T, cts / phinorm)

        if len(chunk) > 1:
            logger.debug(
                "%i/%i documents converged within %i iterations",
                converged, len(chunk), self.iterations
            )

        if collect_sstats:
            # This step finishes computing the sufficient statistics for the
            # M step, so that
            # sstats[k, w] = \sum_d n_{dw} * \sum_a phi_{dwak}
            # = \sum_d n_{dw} * exp{Elogtheta_{ak} + Elogbeta_{kw}} / phinorm_{dw}.
            sstats *= self.expElogbeta
        return gamma_chunk, sstats
def do_estep(self, chunk, author2doc, doc2author, rhot, state=None, chunk_doc_idx=None):
"""Performs inference (E-step) on a chunk of documents, and accumulate the collected sufficient statistics.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of document IDs that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
rhot : float
Value of rho for conducting inference on documents.
state : int, optional
Initializes the state for a new E iteration.
chunk_doc_idx : numpy.ndarray, optional
Assigns the value for document index.
Returns
-------
float
Value of gamma for training of model.
"""
# TODO: this method is somewhat similar to the one in LdaModel. Refactor if possible.
if state is None:
state = self.state
gamma, sstats = self.inference(
chunk, author2doc, doc2author, rhot,
collect_sstats=True, chunk_doc_idx=chunk_doc_idx
)
state.sstats += sstats
state.numdocs += len(chunk)
return gamma
def log_perplexity(self, chunk, chunk_doc_idx=None, total_docs=None):
"""Calculate per-word likelihood bound, using the `chunk` of documents as evaluation corpus.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
chunk_doc_idx : numpy.ndarray, optional
Assigns the value for document index.
total_docs : int, optional
Initializes the value for total number of documents.
Returns
-------
float
Value of per-word likelihood bound.
"""
# TODO: This method is very similar to the one in LdaModel. Refactor.
if total_docs is None:
total_docs = len(chunk)
corpus_words = sum(cnt for document in chunk for _, cnt in document)
subsample_ratio = 1.0 * total_docs / len(chunk)
perwordbound = self.bound(chunk, chunk_doc_idx, subsample_ratio=subsample_ratio) / \
(subsample_ratio * corpus_words)
logger.info(
"%.3f per-word bound, %.1f perplexity estimate based on a corpus of %i documents with %i words",
perwordbound, np.exp2(-perwordbound), len(chunk), corpus_words
)
return perwordbound
def update(self, corpus=None, author2doc=None, doc2author=None, chunksize=None, decay=None, offset=None,
passes=None, update_every=None, eval_every=None, iterations=None,
gamma_threshold=None, chunks_as_numpy=False):
"""Train the model with new documents, by EM-iterating over `corpus` until the topics converge (or until the
maximum number of allowed iterations is reached).
Notes
-----
This update also supports updating an already trained model (`self`) with new documents from `corpus`;
the two models are then merged in proportion to the number of old vs. new documents.
This feature is still experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand, this equals the
online update of `'Online Learning for LDA' by Hoffman et al.`_
and is guaranteed to converge for any `decay` in (0.5, 1]. Additionally, for smaller corpus sizes, an
increasing `offset` may be beneficial (see Table 1 in the same paper).
If update is called with authors that already exist in the model, it will resume training on not only new
documents for that author, but also the previously seen documents. This is necessary for those authors' topic
distributions to converge.
Every time `update(corpus, author2doc)` is called, the new documents are to appended to all the previously seen
documents, and author2doc is combined with the previously seen authors.
To resume training on all the data seen by the model, simply call
:meth:`~gensim.models.atmodel.AuthorTopicModel.update`.
It is not possible to add new authors to existing documents, as all documents in `corpus` are assumed to be
new documents.
Parameters
----------
corpus : iterable of list of (int, float)
The corpus in BoW format.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of document IDs that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
chunksize : int, optional
Controls the size of the mini-batches.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to :math:`\\kappa` from
`'Online Learning for LDA' by Hoffman et al.`_
offset : float, optional
Hyper-parameter that controls how much we will slow down the first steps the first few iterations.
Corresponds to :math:`\\tau_0` from `'Online Learning for LDA' by Hoffman et al.`_
passes : int, optional
Number of times the model makes a pass over the entire training data.
update_every : int, optional
Make updates in topic probability for latest mini-batch.
eval_every : int, optional
Calculate and estimate log perplexity for latest mini-batch.
iterations : int, optional
Maximum number of times the model loops over each document
gamma_threshold : float, optional
Threshold value of gamma(topic difference between consecutive two topics)
until which the iterations continue.
chunks_as_numpy : bool, optional
Whether each chunk passed to :meth:`~gensim.models.atmodel.AuthorTopicModel.inference` should be a numpy
array of not. Numpy can in some settings turn the term IDs into floats, these will be converted back into
integers in inference, which incurs a performance hit. For distributed computing (not supported now)
it may be desirable to keep the chunks as numpy arrays.
"""
# use parameters given in constructor, unless user explicitly overrode them
if decay is None:
decay = self.decay
if offset is None:
offset = self.offset
if passes is None:
passes = self.passes
if update_every is None:
update_every = self.update_every
if eval_every is None:
eval_every = self.eval_every
if iterations is None:
iterations = self.iterations
if gamma_threshold is None:
gamma_threshold = self.gamma_threshold
# TODO: if deepcopy is not used here, something goes wrong. When unit tests are run (specifically "testPasses"),
# the process simply gets killed.
author2doc = deepcopy(author2doc)
doc2author = deepcopy(doc2author)
# TODO: it is not possible to add new authors to an existing document (all input documents are treated
# as completely new documents). Perhaps this functionality could be implemented.
# If it's absolutely necessary, the user can delete the documents that have new authors, and call update
# on them with the new and old authors.
if corpus is None:
# Just keep training on the already available data.
# Assumes self.update() has been called before with input documents and corresponding authors.
assert self.total_docs > 0, 'update() was called with no documents to train on.'
train_corpus_idx = [d for d in range(self.total_docs)]
num_input_authors = len(self.author2doc)
else:
if doc2author is None and author2doc is None:
raise ValueError(
'at least one of author2doc/doc2author must be specified, to establish input space dimensionality'
)
# If either doc2author or author2doc is missing, construct them from the other.
if doc2author is None:
doc2author = construct_doc2author(corpus, author2doc)
elif author2doc is None:
author2doc = construct_author2doc(doc2author)
# Number of authors that need to be updated.
num_input_authors = len(author2doc)
try:
len_input_corpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
logger.warning("AuthorTopicModel.update() called with an empty corpus")
return
self.total_docs += len_input_corpus
# Add new documents in corpus to self.corpus.
self.extend_corpus(corpus)
# Obtain a list of new authors.
new_authors = []
# Sorting the author names makes the model more reproducible.
for a in sorted(author2doc.keys()):
if not self.author2doc.get(a):
new_authors.append(a)
num_new_authors = len(new_authors)
# Add new authors do author2id/id2author dictionaries.
for a_id, a_name in enumerate(new_authors):
self.author2id[a_name] = a_id + self.num_authors
self.id2author[a_id + self.num_authors] = a_name
# Increment the number of total authors seen.
self.num_authors += num_new_authors
# Initialize the variational distributions q(theta|gamma)
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Combine author2doc with self.author2doc.
# First, increment the document IDs by the number of previously seen documents.
for a, doc_ids in author2doc.items():
doc_ids = [d + self.total_docs - len_input_corpus for d in doc_ids]
# For all authors in the input corpus, add the new documents.
for a, doc_ids in author2doc.items():
if self.author2doc.get(a):
# This is not a new author, append new documents.
self.author2doc[a].extend(doc_ids)
else:
# This is a new author, create index.
self.author2doc[a] = doc_ids
# Add all new documents to self.doc2author.
for d, a_list in doc2author.items():
self.doc2author[d] = a_list
# Train on all documents of authors in input_corpus.
train_corpus_idx = set()
# Collect all documents of authors.
for doc_ids in self.author2doc.values():
train_corpus_idx.update(doc_ids)
# Make the list of training documents unique.
train_corpus_idx = sorted(train_corpus_idx)
# train_corpus_idx is only a list of indexes, so "len" is valid.
lencorpus = len(train_corpus_idx)
if chunksize is None:
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = "online"
updateafter = min(lencorpus, update_every * self.numworkers * chunksize)
else:
updatetype = "batch"
updateafter = lencorpus
evalafter = min(lencorpus, (eval_every or 0) * self.numworkers * chunksize)
updates_per_pass = max(1, lencorpus / updateafter)
logger.info(
"running %s author-topic training, %s topics, %s authors, "
"%i passes over the supplied corpus of %i documents, updating model once "
"every %i documents, evaluating perplexity every %i documents, "
"iterating %ix with a convergence threshold of %f",
updatetype, self.num_topics, num_input_authors, passes, lencorpus, updateafter,
evalafter, iterations, gamma_threshold
)
if updates_per_pass * passes < 10:
logger.warning(
"too few updates, training might not converge; "
"consider increasing the number of passes or iterations to improve accuracy"
)
# rho is the "speed" of updating; TODO try other fncs
# pass_ + num_updates handles increasing the starting t for each pass,
# while allowing it to "reset" on the first pass of each update
def rho():
return pow(offset + pass_ + (self.num_updates / chunksize), -decay)
for pass_ in range(passes):
if self.dispatcher:
logger.info('initializing %s workers', self.numworkers)
self.dispatcher.reset(self.state)
else:
# gamma is not needed in "other", thus its shape is (0, 0).
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
reallen = 0
for chunk_no, chunk_doc_idx in enumerate(
utils.grouper(train_corpus_idx, chunksize, as_numpy=chunks_as_numpy)):
chunk = [self.corpus[d] for d in chunk_doc_idx]
reallen += len(chunk) # keep track of how many documents we've processed so far
if eval_every and ((reallen == lencorpus) or ((chunk_no + 1) % (eval_every * self.numworkers) == 0)):
# log_perplexity requires the indexes of the documents being evaluated, to know what authors
# correspond to the documents.
self.log_perplexity(chunk, chunk_doc_idx, total_docs=lencorpus)
if self.dispatcher:
# add the chunk to dispatcher's job queue, so workers can munch on it
logger.info(
"PROGRESS: pass %i, dispatching documents up to #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# this will eventually block until some jobs finish, because the queue has a small finite length
self.dispatcher.putjob(chunk)
else:
logger.info(
"PROGRESS: pass %i, at document #%i/%i",
pass_, chunk_no * chunksize + len(chunk), lencorpus
)
# do_estep requires the indexes of the documents being trained on, to know what authors
# correspond to the documents.
gammat = self.do_estep(chunk, self.author2doc, self.doc2author, rho(), other, chunk_doc_idx)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
# perform an M step. determine when based on update_every, don't do this after every chunk
if update_every and (chunk_no + 1) % (update_every * self.numworkers) == 0:
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other # frees up memory
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
# endfor single corpus iteration
if reallen != lencorpus:
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
# finish any remaining updates
if self.dispatcher:
# distributed mode: wait for all workers to finish
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, pass_ > 0)
del other
def bound(self, chunk, chunk_doc_idx=None, subsample_ratio=1.0, author2doc=None, doc2author=None):
r"""Estimate the variational bound of documents from `corpus`.
:math:`\mathbb{E_{q}}[\log p(corpus)] - \mathbb{E_{q}}[\log q(corpus)]`
Notes
-----
There are basically two use cases of this method:
#. `chunk` is a subset of the training corpus, and `chunk_doc_idx` is provided,
indicating the indexes of the documents in the training corpus.
#. `chunk` is a test set (held-out data), and `author2doc` and `doc2author` corresponding to this test set
are provided. There must not be any new authors passed to this method, `chunk_doc_idx` is not needed
in this case.
Parameters
----------
chunk : iterable of list of (int, float)
Corpus in BoW format.
chunk_doc_idx : numpy.ndarray, optional
Assigns the value for document index.
subsample_ratio : float, optional
Used for calculation of word score for estimation of variational bound.
author2doc : dict of (str, list of int), optional
A dictionary where keys are the names of authors and values are lists of documents that the author
contributes to.
doc2author : dict of (int, list of str), optional
A dictionary where the keys are document IDs and the values are lists of author names.
Returns
-------
float
Value of variational bound score.
"""
# TODO: enable evaluation of documents with new authors. One could, for example, make it
# possible to pass a list of documents to self.inference with no author dictionaries,
# assuming all the documents correspond to one (unseen) author, learn the author's
# gamma, and return gamma (without adding it to self.state.gamma). Of course,
# collect_sstats should be set to false, so that the model is not updated w.r.t. these
# new documents.
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
expElogbeta = np.exp(Elogbeta)
gamma = self.state.gamma
if author2doc is None and doc2author is None:
# Evaluating on training documents (chunk of self.corpus).
author2doc = self.author2doc
doc2author = self.doc2author
if not chunk_doc_idx:
# If author2doc and doc2author are not provided, chunk is assumed to be a subset of
# self.corpus, and chunk_doc_idx is thus required.
raise ValueError(
'Either author dictionaries or chunk_doc_idx must be provided. '
'Consult documentation of bound method.'
)
elif author2doc is not None and doc2author is not None:
# Training on held-out documents (documents not seen during training).
# All authors in dictionaries must still be seen during training.
for a in author2doc.keys():
if not self.author2doc.get(a):
raise ValueError('bound cannot be called with authors not seen during training.')
if chunk_doc_idx:
raise ValueError(
'Either author dictionaries or chunk_doc_idx must be provided, not both. '
'Consult documentation of bound method.'
)
else:
raise ValueError(
'Either both author2doc and doc2author should be provided, or neither. '
'Consult documentation of bound method.'
)
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
word_score = 0.0
theta_score = 0.0
for d, doc in enumerate(chunk):
if chunk_doc_idx:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
# Get all authors in current document, and convert the author names to integer IDs.
authors_d = np.fromiter((self.author2id[a] for a in self.doc2author[doc_no]), dtype=int)
ids = np.fromiter((id for id, _ in doc), dtype=int, count=len(doc)) # Word IDs in doc.
cts = np.fromiter((cnt for _, cnt in doc), dtype=int, count=len(doc)) # Word counts.
if d % self.chunksize == 0:
logger.debug("bound: at document #%i in chunk", d)
# Computing the bound requires summing over expElogtheta[a, k] * expElogbeta[k, v], which
# is the same computation as in normalizing phi.
phinorm = self.compute_phinorm(expElogtheta[authors_d, :], expElogbeta[:, ids])
word_score += np.log(1.0 / len(authors_d)) * sum(cts) + cts.dot(np.log(phinorm))
# Compensate likelihood for when `chunk` above is only a sample of the whole corpus. This ensures
# that the likelihood is always roughly on the same scale.
word_score *= subsample_ratio
# E[log p(theta | alpha) - log q(theta | gamma)]
for a in author2doc.keys():
a = self.author2id[a]
theta_score += np.sum((self.alpha - gamma[a, :]) * Elogtheta[a, :])
theta_score += np.sum(gammaln(gamma[a, :]) - gammaln(self.alpha))
theta_score += gammaln(np.sum(self.alpha)) - gammaln(np.sum(gamma[a, :]))
# theta_score is rescaled in a similar fashion.
# TODO: treat this in a more general way, similar to how it is done with word_score.
theta_score *= self.num_authors / len(author2doc)
# E[log p(beta | eta) - log q (beta | lambda)]
beta_score = 0.0
beta_score += np.sum((self.eta - _lambda) * Elogbeta)
beta_score += np.sum(gammaln(_lambda) - gammaln(self.eta))
sum_eta = np.sum(self.eta)
beta_score += np.sum(gammaln(sum_eta) - gammaln(np.sum(_lambda, 1)))
total_score = word_score + theta_score + beta_score
return total_score
def get_document_topics(self, word_id, minimum_probability=None):
"""Override :meth:`~gensim.models.ldamodel.LdaModel.get_document_topics` and simply raises an exception.
Warnings
--------
This method invalid for model, use :meth:`~gensim.models.atmodel.AuthorTopicModel.get_author_topics` or
:meth:`~gensim.models.atmodel.AuthorTopicModel.get_new_author_topics` instead.
Raises
------
NotImplementedError
Always.
"""
raise NotImplementedError(
'Method "get_document_topics" is not valid for the author-topic model. '
'Use the "get_author_topics" method.'
)
def get_new_author_topics(self, corpus, minimum_probability=None):
"""Infers topics for new author.
Infers a topic distribution for a new author over the passed corpus of docs,
assuming that all documents are from this single new author.
Parameters
----------
corpus : iterable of list of (int, float)
Corpus in BoW format.
minimum_probability : float, optional
Ignore topics with probability below this value, if None - 1e-8 is used.
Returns
-------
list of (int, float)
Topic distribution for the given `corpus`.
"""
def rho():
return pow(self.offset + 1 + 1, -self.decay)
def rollback_new_author_chages():
self.state.gamma = self.state.gamma[0:-1]
del self.author2doc[new_author_name]
a_id = self.author2id[new_author_name]
del self.id2author[a_id]
del self.author2id[new_author_name]
for new_doc_id in corpus_doc_idx:
del self.doc2author[new_doc_id]
try:
len_input_corpus = len(corpus)
except TypeError:
logger.warning("input corpus stream has no len(); counting documents")
len_input_corpus = sum(1 for _ in corpus)
if len_input_corpus == 0:
raise ValueError("AuthorTopicModel.get_new_author_topics() called with an empty corpus")
new_author_name = "placeholder_name"
# indexes representing the documents in the input corpus
corpus_doc_idx = list(range(self.total_docs, self.total_docs + len_input_corpus))
# Add the new placeholder author to author2id/id2author dictionaries.
num_new_authors = 1
author_id = self.num_authors
if new_author_name in self.author2id:
raise ValueError("self.author2id already has 'placeholder_name' author")
self.author2id[new_author_name] = author_id
self.id2author[author_id] = new_author_name
# Add new author in author2doc and doc into doc2author.
self.author2doc[new_author_name] = corpus_doc_idx
for new_doc_id in corpus_doc_idx:
self.doc2author[new_doc_id] = [new_author_name]
gamma_new = self.random_state.gamma(100., 1. / 100., (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
# Should not record the sstats, as we are going to delete the new author after calculated.
try:
gammat, _ = self.inference(
corpus, self.author2doc, self.doc2author, rho(),
collect_sstats=False, chunk_doc_idx=corpus_doc_idx
)
new_author_topics = self.get_author_topics(new_author_name, minimum_probability)
finally:
rollback_new_author_chages()
return new_author_topics
def get_author_topics(self, author_name, minimum_probability=None):
"""Get topic distribution the given author.
Parameters
----------
author_name : str
Name of the author for which the topic distribution needs to be estimated.
minimum_probability : float, optional
Sets the minimum probability value for showing the topics of a given author, topics with probability <
`minimum_probability` will be ignored.
Returns
-------
list of (int, float)
Topic distribution of an author.
Example
-------
.. sourcecode:: pycon
>>> from gensim.models import AuthorTopicModel
>>> from gensim.corpora import mmcorpus
>>> from gensim.test.utils import common_dictionary, datapath, temporary_file
>>> author2doc = {
... 'john': [0, 1, 2, 3, 4, 5, 6],
... 'jane': [2, 3, 4, 5, 6, 7, 8],
... 'jack': [0, 2, 4, 6, 8]
... }
>>>
>>> corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
>>>
>>> with temporary_file("serialized") as s_path:
... model = AuthorTopicModel(
... corpus, author2doc=author2doc, id2word=common_dictionary, num_topics=4,
... serialized=True, serialization_path=s_path
... )
...
... model.update(corpus, author2doc) # update the author-topic model with additional documents
>>>
>>> # construct vectors for authors
>>> author_vecs = [model.get_author_topics(author) for author in model.id2author.values()]
"""
author_id = self.author2id[author_name]
if minimum_probability is None:
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-8) # never allow zero values in sparse output
topic_dist = self.state.gamma[author_id, :] / sum(self.state.gamma[author_id, :])
author_topics = [
(topicid, topicvalue) for topicid, topicvalue in enumerate(topic_dist)
if topicvalue >= minimum_probability
]
return author_topics
def __getitem__(self, author_names, eps=None):
"""Get topic distribution for input `author_names`.
Parameters
----------
author_names : {str, list of str}
Name(s) of the author for which the topic distribution needs to be estimated.
eps : float, optional
The minimum probability value for showing the topics of a given author, topics with probability < `eps`
will be ignored.
Returns
-------
list of (int, float) **or** list of list of (int, float)
Topic distribution for the author(s), type depends on type of `author_names`.
"""
if isinstance(author_names, list):
items = []
for a in author_names:
items.append(self.get_author_topics(a, minimum_probability=eps))
else:
items = self.get_author_topics(author_names, minimum_probability=eps)
return items
| AuthorTopicModel |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 42061,
"end": 44867
} | class ____(unittest.TestCase):
"""Tests person in the ja_JP locale"""
def setUp(self):
self.fake = Faker("ja")
Faker.seed(0)
def test_person(self):
name = self.fake.name()
assert name
assert isinstance(name, str)
first_name = self.fake.first_name()
assert first_name
assert isinstance(first_name, str)
last_name = self.fake.last_name()
assert last_name
assert isinstance(last_name, str)
kana_name = self.fake.kana_name()
assert kana_name
assert isinstance(kana_name, str)
first_kana_name = self.fake.first_kana_name()
assert first_kana_name
assert isinstance(first_kana_name, str)
first_kana_name_male = self.fake.first_kana_name_male()
assert first_kana_name_male
assert isinstance(first_kana_name_male, str)
first_kana_name_female = self.fake.first_kana_name_female()
assert first_kana_name_female
assert isinstance(first_kana_name_female, str)
last_kana_name = self.fake.last_kana_name()
assert last_kana_name
assert isinstance(last_kana_name, str)
romanized_name = self.fake.romanized_name()
assert romanized_name
assert isinstance(romanized_name, str)
first_romanized_name = self.fake.first_romanized_name()
assert first_romanized_name
assert isinstance(first_romanized_name, str)
first_romanized_name_male = self.fake.first_romanized_name_male()
assert first_romanized_name_male
assert isinstance(first_romanized_name_male, str)
first_romanized_name_female = self.fake.first_romanized_name_female()
assert first_romanized_name_female
assert isinstance(first_romanized_name_female, str)
last_romanized_name = self.fake.last_romanized_name()
assert last_romanized_name
assert isinstance(last_romanized_name, str)
first_name_pair = self.fake.first_name_pair()
assert first_name_pair
assert len(first_name_pair) == 3
assert all(s for s in first_name_pair if isinstance(s, str))
first_name_male_pair = self.fake.first_name_male_pair()
assert first_name_male_pair
assert len(first_name_male_pair) == 3
assert all(s for s in first_name_male_pair if isinstance(s, str))
first_name_female_pair = self.fake.first_name_female_pair()
assert first_name_female_pair
assert len(first_name_female_pair) == 3
assert all(isinstance(s, str) for s in first_name_female_pair)
last_name_pair = self.fake.last_name_pair()
assert last_name_pair
assert len(last_name_pair) == 3
assert all(isinstance(s, str) for s in last_name_pair)
| TestJaJP |
python | getsentry__sentry | src/sentry/notifications/platform/types.py | {
"start": 1199,
"end": 1430
} | class ____(StrEnum):
"""
Avenues for a notification to be sent to that can be understood by a provider.
"""
EMAIL = "email"
CHANNEL = "channel"
DIRECT_MESSAGE = "direct_message"
| NotificationTargetResourceType |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py | {
"start": 1616,
"end": 14957
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
self.error = None
self.repeat_count = 2
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
self.read_coordination_events = {}
self.write_coordination_events = {}
# input values [4, 5, 6] are the common case for the tests; set defaults
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i] = threading.Event()
def dataset_fn(self, input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
def map_py_fn(x):
self.write_coordination_events[x].wait()
self.write_coordination_events[x].clear()
self.read_coordination_events[x].release()
if self.error:
err = self.error
self.error = None
raise err # pylint: disable=raising-bad-type
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
def _interleave(self, lists, cycle_length, block_length):
"""Python implementation of interleave used for testing."""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
@combinations.generate(
combinations.times(
combinations.combine(
input_lists=[[[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]],
expected_elements=[[
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6
]],
cycle_length=1,
block_length=1) +
combinations.combine(
input_lists=[[[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]],
expected_elements=[[
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6,
5, 6, 5, 6, 5, 6, 5, 6, 6
]],
cycle_length=2,
block_length=1) + combinations.combine(
input_lists=[[[4] * 4, [5] * 5, [6] * 6] * 2],
expected_elements=[[
4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6,
5, 5, 6, 6, 5, 5, 6, 6, 5, 6, 6
]],
cycle_length=2,
block_length=2) +
combinations.combine(
input_lists=[[[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4],
[], [6, 6, 6, 6, 6, 6]]],
expected_elements=[[
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]],
cycle_length=2,
block_length=1)))
def testPythonImplementation(self, input_lists, expected_elements,
cycle_length, block_length):
for index, (expected, produced) in enumerate(
itertools.zip_longest(
expected_elements,
self._interleave(input_lists, cycle_length, block_length))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def _clear_coordination_events(self):
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i].clear()
def _allow_all_map_threads(self):
for i in range(4, 7):
self.write_coordination_events[i].set()
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testEmptyInput(self, sloppy):
# Empty input.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def _testNonEmptyInputIntoEmptyOutputs(self, sloppy):
# Non-empty input leading to empty output.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([0, 0, 0]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testTooManyReaders(self, sloppy=False):
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64))
return dataset
dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6])
dataset = dataset.repeat(self.repeat_count)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy))
get_next = self.getNext(dataset)
output_values = []
for _ in range(30):
output_values.append(self.evaluate(get_next()))
expected_values = self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2)
self.assertCountEqual(output_values, expected_values)
@combinations.generate(test_base.default_test_combinations())
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, cycle_length=1))
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testErrorsInInputFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).map(
map_fn).repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testErrorsInInterleaveFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
y = script_ops.py_func(map_py_fn, [x], x.dtype)
dataset = dataset.repeat(y)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testShutdownRace(self):
dataset = dataset_ops.Dataset.range(20)
map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1))
dataset = dataset.apply(
interleave_ops.parallel_interleave(
map_fn,
cycle_length=3,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
dataset = dataset.batch(32)
results = []
for _ in range(2):
elements = []
next_element = self.getNext(dataset)
try:
while True:
elements.extend(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
results.append(elements)
self.assertAllEqual(results[0], results[1])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
sloppy=[None, True, False], global_determinism=[True, False])))
def testDeterminismConfiguration(self, sloppy, global_determinism):
if sloppy is None:
expect_determinism = global_determinism
else:
expect_determinism = not sloppy
elements = list(range(1000))
def dataset_fn(delay_ms):
def interleave_fn(x):
ds = dataset_ops.Dataset.from_tensors(x)
if math_ops.equal(x, 0):
ds = ds.apply(testing.sleep(delay_ms * 1000))
else:
ds = ds.apply(testing.sleep(0))
return ds
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=10, sloppy=sloppy))
opts = options_lib.Options()
opts.deterministic = global_determinism
dataset = dataset.with_options(opts)
return dataset
self.checkDeterminism(dataset_fn, expect_determinism, elements)
| ParallelInterleaveTest |
python | getsentry__sentry | src/sentry/types/group.py | {
"start": 63,
"end": 1814
} | class ____:
# GroupStatus.IGNORED
UNTIL_ESCALATING = 1
# Group is ignored/archived for a count/user count/duration
UNTIL_CONDITION_MET = 4
# Group is ignored/archived forever
FOREVER = 5
# GroupStatus.UNRESOLVED
ESCALATING = 2
ONGOING = 3
REGRESSED = 6
NEW = 7
UNRESOLVED_SUBSTATUS_CHOICES = {
GroupSubStatus.ONGOING,
GroupSubStatus.ESCALATING,
GroupSubStatus.REGRESSED,
GroupSubStatus.NEW,
}
IGNORED_SUBSTATUS_CHOICES = {
GroupSubStatus.UNTIL_ESCALATING,
GroupSubStatus.FOREVER,
GroupSubStatus.UNTIL_CONDITION_MET,
}
SUBSTATUS_UPDATE_CHOICES: Mapping[str, int] = {
"archived_until_escalating": GroupSubStatus.UNTIL_ESCALATING,
"archived_until_condition_met": GroupSubStatus.UNTIL_CONDITION_MET,
"archived_forever": GroupSubStatus.FOREVER,
"escalating": GroupSubStatus.ESCALATING,
"ongoing": GroupSubStatus.ONGOING,
"regressed": GroupSubStatus.REGRESSED,
"new": GroupSubStatus.NEW,
}
SUBSTATUS_TO_STR: Mapping[int, str] = {
GroupSubStatus.UNTIL_ESCALATING: "archived_until_escalating",
GroupSubStatus.UNTIL_CONDITION_MET: "archived_until_condition_met",
GroupSubStatus.FOREVER: "archived_forever",
GroupSubStatus.ESCALATING: "escalating",
GroupSubStatus.ONGOING: "ongoing",
GroupSubStatus.REGRESSED: "regressed",
GroupSubStatus.NEW: "new",
}
GROUP_SUBSTATUS_TO_GROUP_HISTORY_STATUS = {
GroupSubStatus.ESCALATING: "escalating",
GroupSubStatus.REGRESSED: "regressed",
GroupSubStatus.ONGOING: "unresolved",
GroupSubStatus.UNTIL_ESCALATING: "archived_until_escalating",
GroupSubStatus.FOREVER: "archived_forever",
GroupSubStatus.UNTIL_CONDITION_MET: "archived_until_condition_met",
}
| GroupSubStatus |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_extra_links.py | {
"start": 1678,
"end": 1901
} | class ____(BaseOperatorLink):
name = "S3"
operators = [CustomOperator]
def get_link(self, operator, ti_key):
return f"https://s3.amazonaws.com/airflow-logs/{operator.dag_id}/{operator.task_id}/"
| S3LogLink |
python | huggingface__transformers | tests/repo_utils/test_check_copies.py | {
"start": 1983,
"end": 2463
} | class ____(BertPreTrainedModel):
def __init__(self, config):
super().__init__()
self.bert = BertEncoder(config)
@add_docstring(BERT_DOCSTRING)
def forward(self, x):
return self.bert(x)
"""
MOCK_BERT_COPY_CODE = """from ...modeling_utils import PreTrainedModel
# Copied from transformers.models.bert.modeling_bert.bert_function
def bert_copy_function(x):
return x
# Copied from transformers.models.bert.modeling_bert.BertAttention
| BertModel |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 47085,
"end": 49064
} | class ____:
def test_ca_not_boolean(self):
with pytest.raises(TypeError):
x509.BasicConstraints(
ca="notbool", # type:ignore[arg-type]
path_length=None,
)
def test_path_length_not_ca(self):
with pytest.raises(ValueError):
x509.BasicConstraints(ca=False, path_length=0)
def test_path_length_not_int(self):
with pytest.raises(TypeError):
x509.BasicConstraints(
ca=True,
path_length=1.1, # type:ignore[arg-type]
)
with pytest.raises(TypeError):
x509.BasicConstraints(
ca=True,
path_length="notint", # type:ignore[arg-type]
)
def test_path_length_negative(self):
with pytest.raises(TypeError):
x509.BasicConstraints(ca=True, path_length=-1)
def test_repr(self):
na = x509.BasicConstraints(ca=True, path_length=None)
assert repr(na) == ("<BasicConstraints(ca=True, path_length=None)>")
def test_hash(self):
na = x509.BasicConstraints(ca=True, path_length=None)
na2 = x509.BasicConstraints(ca=True, path_length=None)
na3 = x509.BasicConstraints(ca=True, path_length=0)
assert hash(na) == hash(na2)
assert hash(na) != hash(na3)
def test_eq(self):
na = x509.BasicConstraints(ca=True, path_length=None)
na2 = x509.BasicConstraints(ca=True, path_length=None)
assert na == na2
def test_ne(self):
na = x509.BasicConstraints(ca=True, path_length=None)
na2 = x509.BasicConstraints(ca=True, path_length=1)
na3 = x509.BasicConstraints(ca=False, path_length=None)
assert na != na2
assert na != na3
assert na != object()
def test_public_bytes(self):
ext = x509.BasicConstraints(ca=True, path_length=None)
assert ext.public_bytes() == b"0\x03\x01\x01\xff"
| TestBasicConstraints |
python | wandb__wandb | wandb/sdk/artifacts/exceptions.py | {
"start": 2170,
"end": 2323
} | class ____(ValueError):
"""Raised when there are more items than expected in a collection.
Intended for internal use only.
"""
| TooManyItemsError |
python | openai__openai-python | src/openai/resources/vector_stores/vector_stores.py | {
"start": 32174,
"end": 33317
} | class ____:
def __init__(self, vector_stores: AsyncVectorStores) -> None:
self._vector_stores = vector_stores
self.create = _legacy_response.async_to_raw_response_wrapper(
vector_stores.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
vector_stores.retrieve,
)
self.update = _legacy_response.async_to_raw_response_wrapper(
vector_stores.update,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
vector_stores.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
vector_stores.delete,
)
self.search = _legacy_response.async_to_raw_response_wrapper(
vector_stores.search,
)
@cached_property
def files(self) -> AsyncFilesWithRawResponse:
return AsyncFilesWithRawResponse(self._vector_stores.files)
@cached_property
def file_batches(self) -> AsyncFileBatchesWithRawResponse:
return AsyncFileBatchesWithRawResponse(self._vector_stores.file_batches)
| AsyncVectorStoresWithRawResponse |
python | pypa__warehouse | warehouse/i18n/__init__.py | {
"start": 2695,
"end": 6255
} | class ____:
def _fail(self):
raise RuntimeError("Cannot use localizer without has_translations=True")
@property
def locale_name(self):
self._fail()
def pluralize(self, *args, **kwargs):
self._fail()
def translate(self, *args, **kwargs):
self._fail()
def translated_view(view, info):
if info.options.get("has_translations"):
# If this page can be translated, then we'll add a Vary: PyPI-Locale
# Vary header.
# Note: This will give weird results if hitting PyPI directly instead of through
# the Fastly VCL which sets PyPI-Locale.
return add_vary("PyPI-Locale")(view)
elif info.exception_only:
return view
else:
# If we're not using translations on this view, then we'll wrap the view
# with a wrapper that just ensures that the localizer cannot be used.
@functools.wraps(view)
def wrapped(context, request):
# This whole method is a little bit of an odd duck, we want to make
# sure that we don't actually *access* request.localizer, because
# doing so triggers the machinery to create a new localizer. So
# instead we will dig into the request object __dict__ to
# effectively do the same thing, just without triggering an access
# on request.localizer.
# Save the original session so that we can restore it once the
# inner views have been called.
nothing = object()
original_localizer = request.__dict__.get("localizer", nothing)
# This particular view hasn't been set to allow access to the
# translations, so we'll just assign an InvalidLocalizer to
# request.localizer
request.__dict__["localizer"] = InvalidLocalizer()
try:
# Invoke the real view
return view(context, request)
finally:
# Restore the original session so that things like
# pyramid_debugtoolbar can access it.
if original_localizer is nothing:
del request.__dict__["localizer"]
else:
request.__dict__["localizer"] = original_localizer
return wrapped
translated_view.options = {"has_translations"} # type: ignore
def includeme(config):
# Add the request attributes
config.add_request_method(_locale, name="locale", reify=True)
config.add_request_method(_localize, name="_")
# Register our translation directory.
config.add_translation_dirs("warehouse:locale/")
config.set_locale_negotiator(_negotiate_locale)
config.get_settings().setdefault(
"jinja2.i18n_extension", FallbackInternationalizationExtension
)
# Register our i18n/l10n filters for Jinja2
filters = config.get_settings().setdefault("jinja2.filters", {})
filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
filters.setdefault("format_datetime", "warehouse.i18n.filters:format_datetime")
filters.setdefault(
"format_rfc822_datetime", "warehouse.i18n.filters:format_rfc822_datetime"
)
filters.setdefault("format_number", "warehouse.i18n.filters:format_number")
jglobals = config.get_settings().setdefault("jinja2.globals", {})
jglobals.setdefault("KNOWN_LOCALES", "warehouse.i18n:KNOWN_LOCALES")
config.add_view_deriver(
translated_view, over="rendered_view", under=viewderivers.INGRESS
)
| InvalidLocalizer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 1933,
"end": 2212
} | class ____(Model):
new_field = models.CharField(max_length=10)
class Meta:
abstract = True
@property
def my_brand_new_property(self):
return 1
def my_beautiful_method(self):
return 2
# Abstract models with __str__
| AbstractTestModel2 |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 32958,
"end": 36500
} | class ____(BaseFormatter):
"""A Formatter for arbitrary mime-types.
Unlike other `_repr_<mimetype>_` methods,
`_repr_mimebundle_` should return mime-bundle data,
either the mime-keyed `data` dictionary or the tuple `(data, metadata)`.
Any mime-type is valid.
To define the callables that compute the mime-bundle representation of your
objects, define a :meth:`_repr_mimebundle_` method or use the :meth:`for_type`
or :meth:`for_type_by_name` methods to register functions that handle
this.
.. versionadded:: 6.1
"""
print_method = ObjectName('_repr_mimebundle_')
_return_type = dict
def _check_return(self, r, obj):
r = super(MimeBundleFormatter, self)._check_return(r, obj)
# always return (data, metadata):
if r is None:
return {}, {}
if not isinstance(r, tuple):
return r, {}
return r
@catch_format_error
def __call__(self, obj, include=None, exclude=None):
"""Compute the format for an object.
Identical to parent's method but we pass extra parameters to the method.
Unlike other _repr_*_ `_repr_mimebundle_` should allow extra kwargs, in
particular `include` and `exclude`.
"""
if self.enabled:
# lookup registered printer
try:
printer = self.lookup(obj)
except KeyError:
pass
else:
return printer(obj)
# Finally look for special method names
method = get_real_method(obj, self.print_method)
if method is not None:
return method(include=include, exclude=exclude)
return None
else:
return None
FormatterABC.register(BaseFormatter)
FormatterABC.register(PlainTextFormatter)
FormatterABC.register(HTMLFormatter)
FormatterABC.register(MarkdownFormatter)
FormatterABC.register(SVGFormatter)
FormatterABC.register(PNGFormatter)
FormatterABC.register(PDFFormatter)
FormatterABC.register(JPEGFormatter)
FormatterABC.register(LatexFormatter)
FormatterABC.register(JSONFormatter)
FormatterABC.register(JavascriptFormatter)
FormatterABC.register(IPythonDisplayFormatter)
FormatterABC.register(MimeBundleFormatter)
def format_display_data(obj, include=None, exclude=None):
"""Return a format data dict for an object.
By default all format types will be computed.
Parameters
----------
obj : object
The Python object whose format data will be computed.
Returns
-------
format_dict : dict
A dictionary of key/value pairs, one or each format that was
generated for the object. The keys are the format types, which
will usually be MIME type strings and the values and JSON'able
data structure containing the raw data for the representation in
that format.
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type string (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
"""
from .interactiveshell import InteractiveShell
return InteractiveShell.instance().display_formatter.format(
obj,
include,
exclude
)
| MimeBundleFormatter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 212235,
"end": 212696
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteDiscussion"""
__schema__ = github_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The id of the discussion to delete."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteDiscussionInput |
python | python__mypy | mypy/traverser.py | {
"start": 27680,
"end": 27991
} | class ____(TraverserVisitor):
def __init__(self) -> None:
self.inside_func = False
def visit_func_def(self, defn: FuncDef) -> None:
if not self.inside_func:
self.inside_func = True
super().visit_func_def(defn)
self.inside_func = False
| FuncCollectorBase |
python | sympy__sympy | sympy/plotting/intervalmath/interval_membership.py | {
"start": 73,
"end": 2385
} | class ____:
"""Represents a boolean expression returned by the comparison of
the interval object.
Parameters
==========
(a, b) : (bool, bool)
The first value determines the comparison as follows:
- True: If the comparison is True throughout the intervals.
- False: If the comparison is False throughout the intervals.
- None: If the comparison is True for some part of the intervals.
The second value is determined as follows:
- True: If both the intervals in comparison are valid.
- False: If at least one of the intervals is False, else
- None
"""
def __init__(self, a, b):
self._wrapped = (a, b)
def __getitem__(self, i):
try:
return self._wrapped[i]
except IndexError:
raise IndexError(
"{} must be a valid indexing for the 2-tuple."
.format(i))
def __len__(self):
return 2
def __iter__(self):
return iter(self._wrapped)
def __str__(self):
return "intervalMembership({}, {})".format(*self)
__repr__ = __str__
def __and__(self, other):
if not isinstance(other, intervalMembership):
raise ValueError(
"The comparison is not supported for {}.".format(other))
a1, b1 = self
a2, b2 = other
return intervalMembership(fuzzy_and([a1, a2]), fuzzy_and([b1, b2]))
def __or__(self, other):
if not isinstance(other, intervalMembership):
raise ValueError(
"The comparison is not supported for {}.".format(other))
a1, b1 = self
a2, b2 = other
return intervalMembership(fuzzy_or([a1, a2]), fuzzy_and([b1, b2]))
def __invert__(self):
a, b = self
return intervalMembership(fuzzy_not(a), b)
def __xor__(self, other):
if not isinstance(other, intervalMembership):
raise ValueError(
"The comparison is not supported for {}.".format(other))
a1, b1 = self
a2, b2 = other
return intervalMembership(fuzzy_xor([a1, a2]), fuzzy_and([b1, b2]))
def __eq__(self, other):
return self._wrapped == other
def __ne__(self, other):
return self._wrapped != other
| intervalMembership |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/requires_clang_or_gcc/package.py | {
"start": 217,
"end": 590
} | class ____(Package):
"""Simple package with no dependencies"""
homepage = "http://www.example.com"
url = "http://www.example.com/b-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
version("0.9", md5="abcd456789abcdef0123456789abcdef")
requires("%gcc", "%clang", policy="one_of")
depends_on("c", type="build")
| RequiresClangOrGcc |
python | bokeh__bokeh | src/bokeh/models/ui/panes.py | {
"start": 1436,
"end": 2813
} | class ____(UIElement):
""" A UI element that can hold other DOM-based UI elements.
``Pane`` is a basic building block of DOM-based UIs, and as such it
doesn't include any properties for controlling its position and other
visual aspects. These must be configured up by using CSS stylesheets.
If finer control is needed, use ``Panel`` or ``LayoutDOM`` derived
models instead.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
elements = List(Either(Instance(UIElement), Instance(DOMNode)), default=[], help="""
A collection of DOM-based UI elements attached to this pane.
This can include floating elements like tooltips, allowing to establish
a parent-child relationship between this and other UI elements.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Pane |
python | falconry__falcon | tests/test_request_media.py | {
"start": 10429,
"end": 11620
} | class ____:
async def on_get(self, req, resp):
await _check_error(req, True)
@pytest.mark.parametrize('body', ('{', ''))
def test_repeated_error(asgi, body):
client = create_client(
asgi, resource=RepeatedErrorAsync() if asgi else RepeatedError()
)
res = client.simulate_get('/', body=body, params={'empty': not bool(body)})
assert res.status == falcon.HTTP_IM_A_TEAPOT
def test_error_after_first_default(asgi):
async def _check_error(req, isasync):
assert (await req.get_media(42)) if isasync else req.get_media(42) == 42
try:
(await req.get_media()) if isasync else req.get_media()
except falcon.MediaNotFoundError:
raise falcon.HTTPStatus(falcon.HTTP_749)
raise falcon.HTTPStatus(falcon.HTTP_703)
class Res:
def on_get(self, req, resp):
util.async_to_sync(_check_error, req, False)
class ResAsync:
async def on_get(self, req, resp):
await _check_error(req, True)
client = create_client(asgi, resource=ResAsync() if asgi else Res())
res = client.simulate_get('/', body='')
assert res.status == falcon.HTTP_749
| RepeatedErrorAsync |
python | crytic__slither | slither/core/expressions/type_conversion.py | {
"start": 657,
"end": 1517
} | class ____(Expression):
def __init__(
self,
expression: Union[
"MemberAccess", "Literal", "CallExpression", "TypeConversion", "Identifier"
],
expression_type: Union["ElementaryType", "UserDefinedType", "TypeAliasContract"],
) -> None:
super().__init__()
assert isinstance(expression, Expression)
assert isinstance(expression_type, Type)
self._expression: Expression = expression
self._type: Type = expression_type
@property
def type(self) -> Type:
return self._type
@type.setter
def type(self, new_type: Type) -> None:
self._type = new_type
@property
def expression(self) -> Expression:
return self._expression
def __str__(self) -> str:
return str(self.type) + "(" + str(self.expression) + ")"
| TypeConversion |
python | getsentry__sentry | tests/sentry/hybridcloud/models/test_outbox.py | {
"start": 21250,
"end": 23725
} | class ____(TestCase):
def test_bulk_operations(self) -> None:
org = self.create_organization()
team = self.create_team(organization=org)
members = [
self.create_member(user_id=i + 1000, organization_id=org.id) for i in range(0, 10)
]
do_not_touch = OrganizationMemberTeam(
organizationmember=self.create_member(user_id=99, organization_id=org.id),
team=team,
role="ploy",
)
do_not_touch.save()
OrganizationMemberTeam.objects.bulk_create(
OrganizationMemberTeam(organizationmember=member, team=team) for member in members
)
with outbox_runner():
assert RegionOutbox.objects.count() == 10
assert OrganizationMemberTeam.objects.count() == 11
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert OrganizationMemberTeamReplica.objects.count() == 1
assert RegionOutbox.objects.count() == 0
assert OrganizationMemberTeam.objects.count() == 11
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert OrganizationMemberTeamReplica.objects.count() == 11
existing = OrganizationMemberTeam.objects.all().exclude(id=do_not_touch.id).all()
for obj in existing:
obj.role = "cow"
OrganizationMemberTeam.objects.bulk_update(existing, ["role"])
with outbox_runner():
assert RegionOutbox.objects.count() == 10
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert OrganizationMemberTeamReplica.objects.filter(role="cow").count() == 0
assert RegionOutbox.objects.count() == 0
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert OrganizationMemberTeamReplica.objects.filter(role="cow").count() == 10
OrganizationMemberTeam.objects.bulk_delete(existing)
with outbox_runner():
assert RegionOutbox.objects.count() == 10
assert OrganizationMemberTeam.objects.count() == 1
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert OrganizationMemberTeamReplica.objects.count() == 11
assert RegionOutbox.objects.count() == 0
with assume_test_silo_mode_of(OrganizationMemberTeamReplica):
assert OrganizationMemberTeamReplica.objects.count() == 1
@control_silo_test
| TestOutboxesManager |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/players.py | {
"start": 1540,
"end": 1702
} | class ____(ComputerPlayer):
def get_computer_move(self, game_state: GameState) -> Move | None:
return find_best_move(game_state)
| MinimaxComputerPlayerV1 |
python | getsentry__sentry | tests/snuba/test_snql_snuba.py | {
"start": 482,
"end": 3415
} | class ____(TestCase, SnubaTestCase):
def _insert_event_for_time(
self, ts: datetime, group_hash: str = "a" * 32, group_id: int | None = None
) -> str:
event_id = uuid.uuid4().hex
self.snuba_insert(
(
2,
"insert",
{
"event_id": event_id,
"primary_hash": group_hash,
"group_id": group_id if group_id else int(group_hash[:16], 16),
"project_id": self.project.id,
"message": "message",
"platform": "python",
"datetime": ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"data": {"received": time.mktime(ts.timetuple())},
},
{},
)
)
return event_id
@mock.patch("sentry.utils.metrics.incr")
def test_basic(self, mock_metrics_incr: mock.MagicMock) -> None:
now = datetime.now()
self._insert_event_for_time(now)
query = (
Query(match=Entity("events"))
.set_select([Function("count", [], "count")])
.set_groupby([Column("project_id")])
.set_where(
[
Condition(Column("project_id"), Op.EQ, self.project.id),
Condition(Column("timestamp"), Op.GTE, now - timedelta(days=1)),
Condition(Column("timestamp"), Op.LT, now + timedelta(days=1)),
]
)
)
request = Request(
dataset="events",
app_id="tests",
query=query,
tenant_ids={"referrer": "testing.test", "organization_id": 1},
)
result = snuba.raw_snql_query(request, referrer="referrer_not_in_enum")
assert len(result["data"]) == 1
assert result["data"][0] == {"count": 1, "project_id": self.project.id}
mock_metrics_incr.assert_any_call(
"snql.sdk.api.new_referrers", tags={"referrer": "referrer_not_in_enum"}
)
def test_cache(self) -> None:
"""Minimal test to verify if use_cache works"""
results = snuba.raw_snql_query(
Request(
dataset="events",
app_id="tests",
tenant_ids={"referrer": "testing.test", "organization_id": 1},
query=Query(
Entity("events"),
select=[Column("event_id")],
where=[
Condition(Column("project_id"), Op.EQ, self.project.id),
Condition(Column("timestamp"), Op.GTE, timezone.now() - timedelta(days=1)),
Condition(Column("timestamp"), Op.LT, timezone.now()),
],
limit=Limit(1),
),
),
use_cache=True,
)
assert results["data"] == []
| SnQLTest |
python | pytorch__pytorch | test/inductor/extension_backends/triton/device_interface.py | {
"start": 369,
"end": 3042
} | class ____(device_interface.DeviceInterface):
class Event(torch.Event):
def __init__(
self,
enable_timing: bool = False,
blocking: bool = False,
interprocess: bool = False,
) -> None:
self.enable_timing = enable_timing
self.recorded_time: int | None = None
def record(self, stream) -> None:
if not self.enable_timing:
return
assert self.recorded_time is None
self.recorded_time = time.perf_counter_ns()
def elapsed_time(self, end_event: DeviceInterface.Event) -> float:
assert self.recorded_time
assert end_event.recorded_time
# convert to ms
return (end_event.recorded_time - self.recorded_time) / 1000000
def wait(self, stream) -> None:
pass
def query(self) -> None:
pass
def synchronize(self) -> None:
pass
class device: # noqa: N801 invalid-class-name # pyright: ignore [reportIncompatibleVariableOverride]
def __init__(self, device) -> None:
self.device = device
class Worker(device_interface.DeviceInterface.Worker):
@staticmethod
def set_device(device: int) -> None:
# No device index for our backend
pass
@staticmethod
def current_device() -> int:
# No device index for our backend
return 0
@staticmethod
def get_device_properties(
device=None,
) -> DeviceProperties:
return DeviceProperties()
@staticmethod
def current_device() -> int:
return 0
@staticmethod
def set_device(device) -> None:
pass
@staticmethod
def device_count() -> int:
return 1
@staticmethod
def maybe_exchange_device(device: int) -> int:
assert device == 0, (
f"Only device index 0 is supported, tried to set index to {device}"
)
return 0 # previous device is always 0
@staticmethod
def exchange_device(device: int) -> int:
assert device == 0, (
f"Only device index 0 is supported, tried to set index to {device}"
)
return 0 # previous device is always 0
@staticmethod
def get_raw_stream(device_index: int):
return None
@staticmethod
def synchronize(device) -> None:
pass
# Can be mock patched by @patch decorator.
@staticmethod
def is_available() -> bool:
return True
@staticmethod
def get_compute_capability(device) -> int:
return 0
| DeviceInterface |
python | Textualize__textual | tests/command_palette/test_declare_sources.py | {
"start": 594,
"end": 712
} | class ____(App[None]):
def on_mount(self) -> None:
self.action_command_palette()
| AppWithActiveCommandPalette |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.