language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py | {
"start": 1223,
"end": 3381
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_center_crop=True,
crop_size=None,
do_reduce_labels=False,
):
size = size if size is not None else {"shortest_edge": 20}
crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_reduce_labels = do_reduce_labels
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_reduce_labels": self.do_reduce_labels,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
| MobileNetV2ImageProcessingTester |
python | kubernetes-client__python | kubernetes/client/models/v1_object_reference.py | {
"start": 383,
"end": 10311
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'field_path': 'str',
'kind': 'str',
'name': 'str',
'namespace': 'str',
'resource_version': 'str',
'uid': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'field_path': 'fieldPath',
'kind': 'kind',
'name': 'name',
'namespace': 'namespace',
'resource_version': 'resourceVersion',
'uid': 'uid'
}
def __init__(self, api_version=None, field_path=None, kind=None, name=None, namespace=None, resource_version=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1ObjectReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._field_path = None
self._kind = None
self._name = None
self._namespace = None
self._resource_version = None
self._uid = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if field_path is not None:
self.field_path = field_path
if kind is not None:
self.kind = kind
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
if resource_version is not None:
self.resource_version = resource_version
if uid is not None:
self.uid = uid
@property
def api_version(self):
"""Gets the api_version of this V1ObjectReference. # noqa: E501
API version of the referent. # noqa: E501
:return: The api_version of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ObjectReference.
API version of the referent. # noqa: E501
:param api_version: The api_version of this V1ObjectReference. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def field_path(self):
"""Gets the field_path of this V1ObjectReference. # noqa: E501
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. # noqa: E501
:return: The field_path of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._field_path
@field_path.setter
def field_path(self, field_path):
"""Sets the field_path of this V1ObjectReference.
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. # noqa: E501
:param field_path: The field_path of this V1ObjectReference. # noqa: E501
:type: str
"""
self._field_path = field_path
@property
def kind(self):
"""Gets the kind of this V1ObjectReference. # noqa: E501
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ObjectReference.
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ObjectReference. # noqa: E501
:type: str
"""
self._kind = kind
@property
def name(self):
"""Gets the name of this V1ObjectReference. # noqa: E501
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ObjectReference.
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this V1ObjectReference. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ObjectReference. # noqa: E501
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ # noqa: E501
:return: The namespace of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ObjectReference.
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ # noqa: E501
:param namespace: The namespace of this V1ObjectReference. # noqa: E501
:type: str
"""
self._namespace = namespace
@property
def resource_version(self):
"""Gets the resource_version of this V1ObjectReference. # noqa: E501
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
:return: The resource_version of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""Sets the resource_version of this V1ObjectReference.
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency # noqa: E501
:param resource_version: The resource_version of this V1ObjectReference. # noqa: E501
:type: str
"""
self._resource_version = resource_version
@property
def uid(self):
"""Gets the uid of this V1ObjectReference. # noqa: E501
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids # noqa: E501
:return: The uid of this V1ObjectReference. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1ObjectReference.
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids # noqa: E501
:param uid: The uid of this V1ObjectReference. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ObjectReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ObjectReference):
return True
return self.to_dict() != other.to_dict()
| V1ObjectReference |
python | pypa__warehouse | warehouse/helpdesk/services.py | {
"start": 5598,
"end": 6632
} | class ____:
"""
An AdminNotificationService that sends notifications to a Slack webhook.
https://api.slack.com/messaging/webhooks
"""
def __init__(self, *, session: Session, webhook_url: str) -> None:
self.http = session
self.webhook_url = webhook_url
@classmethod
def create_service(
cls, _context, request: Request
) -> SlackAdminNotificationService:
"""
Create the service, given the context and request
"""
logging.debug("Creating SlackAdminNotificationService")
return cls(
session=request.http,
webhook_url=request.registry.settings["helpdesk.notification_service_url"],
)
def send_notification(self, *, payload: dict) -> None:
"""
Send a notification to a Slack webhook
"""
resp = self.http.post(
self.webhook_url,
json=payload,
timeout=10,
)
resp.raise_for_status()
return
| SlackAdminNotificationService |
python | spyder-ide__spyder | spyder/plugins/completion/providers/snippets/conftabs.py | {
"start": 1009,
"end": 9372
} | class ____(SpyderPreferencesTab):
TITLE = _('Snippets')
def __init__(self, parent):
super().__init__(parent)
self.snippets_language = 'python'
grammar_url = (
"<a href=\"{0}/specifications/specification-current#snippet_syntax\">"
"{1}</a>".format(LSP_URL, _('the LSP grammar')))
snippets_info_label = QLabel(
_("Spyder allows to define custom completion snippets to use "
"in addition to the ones offered by the Language Server "
"Protocol (LSP). Each snippet should follow {}.<br><br> "
"<b>Note:</b> All changes will be effective only when applying "
"the settings").format(grammar_url))
snippets_info_label.setOpenExternalLinks(True)
snippets_info_label.setWordWrap(True)
snippets_info_label.setAlignment(Qt.AlignJustify)
self.snippets_language_cb = SpyderComboBox(self)
self.snippets_language_cb.setToolTip(
_('Programming language provided by the LSP server'))
self.snippets_language_cb.setMinimumWidth(250)
self.snippets_language_cb.addItems(SUPPORTED_LANGUAGES_PY)
self.snippets_language_cb.setCurrentIndex(PYTHON_POS)
self.snippets_language_cb.currentTextChanged.connect(
self.change_language_snippets)
snippet_lang_label = QLabel(_('Language:'))
snippet_lang_layout = QHBoxLayout()
snippet_lang_layout.addWidget(snippet_lang_label)
snippet_lang_layout.addWidget(self.snippets_language_cb)
snippet_lang_layout.addStretch()
snippet_table_label = QLabel(_('Available snippets:'))
self.snippets_proxy = SnippetModelsProxy(self)
self.snippets_table = SnippetTable(
self, self.snippets_proxy, language=self.snippets_language)
self.snippets_table.setMaximumHeight(200)
snippets_table_layout = QHBoxLayout()
snippets_table_layout.addSpacing(2 * AppStyle.MarginSize)
snippets_table_layout.addWidget(self.snippets_table)
snippets_table_layout.addSpacing(2 * AppStyle.MarginSize)
# Buttons
self.new_snippet_btn = QPushButton(icon=ima.icon("edit_add"))
self.new_snippet_btn.setToolTip(_("Create a new snippet"))
self.delete_snippet_btn = QPushButton(icon=ima.icon("editclear"))
self.delete_snippet_btn.setToolTip(
_("Delete currently selected snippet")
)
self.delete_snippet_btn.setEnabled(False)
self.reset_snippets_btn = QPushButton(icon=ima.icon("restart"))
self.reset_snippets_btn.setToolTip(_("Reset to default values"))
self.export_snippets_btn = QPushButton(icon=ima.icon("fileexport"))
self.export_snippets_btn.setToolTip(_("Export snippets to JSON"))
self.import_snippets_btn = QPushButton(icon=ima.icon("fileimport"))
self.import_snippets_btn.setToolTip(_("Import snippets from JSON"))
# Slots connected to buttons
self.new_snippet_btn.clicked.connect(self.create_new_snippet)
self.reset_snippets_btn.clicked.connect(self.reset_default_snippets)
self.delete_snippet_btn.clicked.connect(self.delete_snippet)
self.export_snippets_btn.clicked.connect(self.export_snippets)
self.import_snippets_btn.clicked.connect(self.import_snippets)
# Buttons layout
btns = [
self.new_snippet_btn,
self.delete_snippet_btn,
self.reset_snippets_btn,
self.export_snippets_btn,
self.import_snippets_btn
]
sn_buttons_layout = QHBoxLayout()
sn_buttons_layout.addStretch()
for btn in btns:
btn.setIconSize(
QSize(AppStyle.ConfigPageIconSize, AppStyle.ConfigPageIconSize)
)
sn_buttons_layout.addWidget(btn)
sn_buttons_layout.addStretch()
# Snippets layout
snippets_layout = QVBoxLayout()
snippets_layout.addWidget(snippets_info_label)
snippets_layout.addSpacing(3 * AppStyle.MarginSize)
snippets_layout.addLayout(snippet_lang_layout)
snippets_layout.addSpacing(3 * AppStyle.MarginSize)
snippets_layout.addWidget(snippet_table_label)
snippets_layout.addLayout(snippets_table_layout)
snippets_layout.addSpacing(AppStyle.MarginSize)
snippets_layout.addLayout(sn_buttons_layout)
self.setLayout(snippets_layout)
def create_new_snippet(self):
self.snippets_table.show_editor(new_snippet=True)
def delete_snippet(self):
idx = self.snippets_table.currentIndex().row()
self.snippets_table.delete_snippet(idx)
self.set_modified(True)
self.delete_snippet_btn.setEnabled(False)
def reset_default_snippets(self):
language = self.snippets_language_cb.currentText()
default_snippets_lang = copy.deepcopy(
SNIPPETS.get(language.lower(), {}))
self.snippets_proxy.reload_model(
language.lower(), default_snippets_lang)
self.snippets_table.reset_plain()
self.set_modified(True)
def change_language_snippets(self, language):
self.snippets_table.update_language_model(language)
def export_snippets(self):
filename, _selfilter = getsavefilename(
self, _("Save snippets"),
'spyder_snippets.json',
filters='JSON (*.json)',
selectedfilter='',
options=QFileDialog.HideNameFilterDetails)
if filename:
filename = osp.normpath(filename)
self.snippets_proxy.export_snippets(filename)
def import_snippets(self):
filename, _sf = getopenfilename(
self,
_("Load snippets"),
filters='JSON (*.json)',
selectedfilter='',
options=QFileDialog.HideNameFilterDetails,
)
if filename:
filename = osp.normpath(filename)
valid, total, errors = self.snippets_proxy.import_snippets(
filename)
modified = True
if len(errors) == 0:
QMessageBox.information(
self,
_('All snippets imported'),
_('{0} snippets were loaded successfully').format(valid),
QMessageBox.Ok)
else:
if 'loading' in errors:
modified = False
QMessageBox.critical(
self,
_('JSON malformed'),
_('There was an error when trying to load the '
'provided JSON file: <tt>{0}</tt>').format(
errors['loading']),
QMessageBox.Ok
)
elif 'validation' in errors:
modified = False
QMessageBox.critical(
self,
_('Invalid snippet file'),
_('The provided snippet file does not comply with '
'the Spyder JSON snippets spec and therefore it '
'cannot be loaded.<br><br><tt>{}</tt>').format(
errors['validation']),
QMessageBox.Ok
)
elif 'syntax' in errors:
syntax_errors = errors['syntax']
msg = []
for syntax_key in syntax_errors:
syntax_err = syntax_errors[syntax_key]
msg.append('<b>{0}</b>: {1}'.format(
syntax_key, syntax_err))
err_msg = '<br>'.join(msg)
QMessageBox.warning(
self,
_('Incorrect snippet format'),
_('Spyder was able to load {0}/{1} snippets '
'correctly, please check the following snippets '
'for any syntax errors: '
'<br><br>{2}').format(valid, total, err_msg),
QMessageBox.Ok
)
self.set_modified(modified)
def apply_settings(self):
return self.snippets_proxy.save_snippets()
| SnippetsConfigTab |
python | doocs__leetcode | solution/1800-1899/1801.Number of Orders in the Backlog/Solution.py | {
"start": 0,
"end": 917
} | class ____:
def getNumberOfBacklogOrders(self, orders: List[List[int]]) -> int:
buy, sell = [], []
for p, a, t in orders:
if t == 0:
while a and sell and sell[0][0] <= p:
x, y = heappop(sell)
if a >= y:
a -= y
else:
heappush(sell, (x, y - a))
a = 0
if a:
heappush(buy, (-p, a))
else:
while a and buy and -buy[0][0] >= p:
x, y = heappop(buy)
if a >= y:
a -= y
else:
heappush(buy, (x, y - a))
a = 0
if a:
heappush(sell, (p, a))
mod = 10**9 + 7
return sum(v[1] for v in buy + sell) % mod
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/list/base.py | {
"start": 838,
"end": 5113
} | class ____(BaseIndex[IndexList]):
"""
Summary Index.
The summary index is a simple data structure where nodes are stored in
a sequence. During index construction, the document texts are
chunked up, converted to nodes, and stored in a list.
During query time, the summary index iterates through the nodes
with some optional filter parameters, and synthesizes an
answer from all the nodes.
Args:
text_qa_template (Optional[BasePromptTemplate]): A Question-Answer Prompt
(see :ref:`Prompt-Templates`).
NOTE: this is a deprecated field.
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
"""
index_struct_cls = IndexList
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexList] = None,
show_progress: bool = False,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(
nodes=nodes,
index_struct=index_struct,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[str, ListRetrieverMode] = ListRetrieverMode.DEFAULT,
llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
from llama_index.core.indices.list.retrievers import (
SummaryIndexEmbeddingRetriever,
SummaryIndexLLMRetriever,
SummaryIndexRetriever,
)
if retriever_mode == ListRetrieverMode.DEFAULT:
return SummaryIndexRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == ListRetrieverMode.EMBEDDING:
embed_model = embed_model or Settings.embed_model
return SummaryIndexEmbeddingRetriever(
self, object_map=self._object_map, embed_model=embed_model, **kwargs
)
elif retriever_mode == ListRetrieverMode.LLM:
llm = llm or Settings.llm
return SummaryIndexLLMRetriever(
self, object_map=self._object_map, llm=llm, **kwargs
)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
def _build_index_from_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**build_kwargs: Any,
) -> IndexList:
"""
Build the index from documents.
Args:
documents (List[BaseDocument]): A list of documents.
Returns:
IndexList: The created summary index.
"""
index_struct = IndexList()
nodes_with_progress = get_tqdm_iterable(
nodes, show_progress, "Processing nodes"
)
for n in nodes_with_progress:
index_struct.add_node(n)
return index_struct
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
for n in nodes:
self._index_struct.add_node(n)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
cur_node_ids = self._index_struct.nodes
cur_nodes = self._docstore.get_nodes(cur_node_ids)
nodes_to_keep = [n for n in cur_nodes if n.node_id != node_id]
self._index_struct.nodes = [n.node_id for n in nodes_to_keep]
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids = self._index_struct.nodes
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
# Legacy
GPTListIndex = SummaryIndex
# New name
ListIndex = SummaryIndex
| SummaryIndex |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 10228,
"end": 10313
} | class ____(OpcodeWithArg):
_FLAGS = HAS_JREL | HAS_ARGUMENT
__slots__ = ()
| FOR_ITER |
python | Pylons__pyramid | tests/test_scripts/dummy.py | {
"start": 84,
"end": 303
} | class ____:
def __init__(self, implicit, explicit):
self._implicit = implicit
self.explicit = explicit
self.name_to_alias = {}
def implicit(self):
return self._implicit
| DummyTweens |
python | Textualize__textual | examples/five_by_five.py | {
"start": 2083,
"end": 3320
} | class ____(Widget):
"""Header for the game.
Comprises of the title (``#app-title``), the number of moves ``#moves``
and the count of how many cells are turned on (``#progress``).
"""
moves = reactive(0)
"""int: Keep track of how many moves the player has made."""
filled = reactive(0)
"""int: Keep track of how many cells are filled."""
def compose(self) -> ComposeResult:
"""Compose the game header.
Returns:
ComposeResult: The result of composing the game header.
"""
with Horizontal():
yield Label(self.app.title, id="app-title")
yield Label(id="moves")
yield Label(id="progress")
def watch_moves(self, moves: int):
"""Watch the moves reactive and update when it changes.
Args:
moves (int): The number of moves made.
"""
self.query_one("#moves", Label).update(f"Moves: {moves}")
def watch_filled(self, filled: int):
"""Watch the on-count reactive and update when it changes.
Args:
filled (int): The number of cells that are currently on.
"""
self.query_one("#progress", Label).update(f"Filled: {filled}")
| GameHeader |
python | getsentry__sentry | src/sentry/api/serializers/models/discoversavedquery.py | {
"start": 1330,
"end": 6496
} | class ____(Serializer):
def partial_serialize_explore_query(self, query: ExploreSavedQuery) -> dict:
query_keys = [
"environment",
"query",
"range",
"start",
"end",
"interval",
]
data = {
"id": str(query.id),
"name": query.name,
"projects": [project.id for project in query.projects.all()],
"dataset": ExploreSavedQueryDataset.get_type_name(query.dataset),
"expired": False,
"isPrebuilt": query.prebuilt_id is not None,
"changedReason": query.changed_reason,
}
for key in query_keys:
if query.query.get(key) is not None:
data[key] = query.query[key]
# expire queries that are beyond the retention period
if "start" in query.query:
start, end = parse_timestamp(query.query["start"]), parse_timestamp(query.query["end"])
if start and end:
expired, modified_start = outside_retention_with_modified_start(
start, end, query.organization
)
data["expired"] = expired
data["start"] = modified_start.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if query.query.get("all_projects"):
data["projects"] = list(ALL_ACCESS_PROJECTS)
return data
def get_attrs(self, item_list, user, **kwargs):
result: DefaultDict[str, dict] = defaultdict(
lambda: {"created_by": {}, "explore_query": None}
)
service_serialized = user_service.serialize_many(
filter={
"user_ids": [
discover_saved_query.created_by_id
for discover_saved_query in item_list
if discover_saved_query.created_by_id
]
},
as_user=user if user.id else None,
)
serialized_users = {user["id"]: user for user in service_serialized}
# Batch fetch and serialize explore queries
explore_query_ids = [
discover_query.explore_query_id
for discover_query in item_list
if discover_query.explore_query_id is not None
and discover_query.dataset == DiscoverSavedQueryTypes.TRANSACTION_LIKE
]
if explore_query_ids:
explore_queries = ExploreSavedQuery.objects.filter(
id__in=explore_query_ids
).prefetch_related("projects")
serialized_explore_queries = {
query.id: self.partial_serialize_explore_query(query) for query in explore_queries
}
else:
serialized_explore_queries = {}
for discover_saved_query in item_list:
result[discover_saved_query]["created_by"] = serialized_users.get(
str(discover_saved_query.created_by_id)
)
if discover_saved_query.explore_query_id in serialized_explore_queries:
result[discover_saved_query]["explore_query"] = serialized_explore_queries.get(
discover_saved_query.explore_query_id
)
return result
def serialize(self, obj, attrs, user, **kwargs) -> DiscoverSavedQueryResponse:
query_keys = [
"environment",
"query",
"fields",
"widths",
"conditions",
"aggregations",
"range",
"start",
"end",
"orderby",
"limit",
"yAxis",
"display",
"topEvents",
"interval",
]
data: DiscoverSavedQueryResponse = {
"id": str(obj.id),
"name": obj.name,
"projects": [project.id for project in obj.projects.all()],
"version": obj.version or obj.query.get("version", 1),
"queryDataset": DiscoverSavedQueryTypes.get_type_name(obj.dataset),
"datasetSource": DATASET_SOURCES[obj.dataset_source],
"expired": False,
"dateCreated": obj.date_created,
"dateUpdated": obj.date_updated,
"createdBy": attrs.get("created_by"),
}
for key in query_keys:
if obj.query.get(key) is not None:
data[key] = obj.query[key] # type: ignore[literal-required]
# expire queries that are beyond the retention period
if "start" in obj.query:
start, end = parse_timestamp(obj.query["start"]), parse_timestamp(obj.query["end"])
if start and end:
expired, modified_start = outside_retention_with_modified_start(
start, end, obj.organization
)
data["expired"] = expired
data["start"] = modified_start.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if obj.query.get("all_projects"):
data["projects"] = list(ALL_ACCESS_PROJECTS)
if attrs.get("explore_query") is not None:
data["exploreQuery"] = attrs.get("explore_query")
return data
| DiscoverSavedQueryModelSerializer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/operator4.py | {
"start": 93,
"end": 279
} | class ____:
def __rmul__(self, a: A):
pass
def __rmatmul__(self, a: A):
pass
def __matmul__(self, a: A):
pass
a, b = A(), B()
v1 = a @ b
v2 = b @ a
| B |
python | django__django | django/db/migrations/state.py | {
"start": 25490,
"end": 30279
} | class ____(Apps):
"""
Subclass of the global Apps registry class to better handle dynamic model
additions and removals.
"""
def __init__(self, real_apps, models, ignore_swappable=False):
# Any apps in self.real_apps should have all their models included
# in the render. We don't use the original model instances as there
# are some variables that refer to the Apps object.
# FKs/M2Ms from real apps are also not included as they just
# mess things up with partial states (due to lack of dependencies)
self.real_models = []
for app_label in real_apps:
app = global_apps.get_app_config(app_label)
for model in app.get_models():
self.real_models.append(ModelState.from_model(model, exclude_rels=True))
# Populate the app registry with a stub for each application.
app_labels = {model_state.app_label for model_state in models.values()}
app_configs = [
AppConfigStub(label) for label in sorted([*real_apps, *app_labels])
]
super().__init__(app_configs)
# These locks get in the way of copying as implemented in clone(),
# which is called whenever Django duplicates a StateApps before
# updating it.
self._lock = None
self.ready_event = None
self.render_multiple([*models.values(), *self.real_models])
# There shouldn't be any operations pending at this point.
from django.core.checks.model_checks import _check_lazy_references
ignore = (
{make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set()
)
errors = _check_lazy_references(self, ignore=ignore)
if errors:
raise ValueError("\n".join(error.msg for error in errors))
@contextmanager
def bulk_update(self):
# Avoid clearing each model's cache for each change. Instead, clear
# all caches when we're finished updating the model instances.
ready = self.ready
self.ready = False
try:
yield
finally:
self.ready = ready
self.clear_cache()
def render_multiple(self, model_states):
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
if not model_states:
return
# Prevent that all model caches are expired for each render.
with self.bulk_update():
unrendered_models = model_states
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError(
"Cannot resolve bases for %r\nThis can happen if you are "
"inheriting models from an app with migrations (e.g. "
"contrib.auth)\n in an app with no migrations; see "
"https://docs.djangoproject.com/en/%s/topics/migrations/"
"#dependencies for more"
% (new_unrendered_models, get_docs_version())
)
unrendered_models = new_unrendered_models
def clone(self):
"""Return a clone of this registry."""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
for app_label in self.app_configs:
app_config = AppConfigStub(app_label)
app_config.apps = clone
app_config.import_models()
clone.app_configs[app_label] = app_config
# No need to actually clone them, they'll never change
clone.real_models = self.real_models
return clone
def register_model(self, app_label, model):
self.all_models[app_label][model._meta.model_name] = model
if app_label not in self.app_configs:
self.app_configs[app_label] = AppConfigStub(app_label)
self.app_configs[app_label].apps = self
self.app_configs[app_label].models[model._meta.model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def unregister_model(self, app_label, model_name):
try:
del self.all_models[app_label][model_name]
del self.app_configs[app_label].models[model_name]
except KeyError:
pass
| StateApps |
python | eventlet__eventlet | eventlet/pools.py | {
"start": 121,
"end": 5971
} | class ____:
"""
Pool class implements resource limitation and construction.
There are two ways of using Pool: passing a `create` argument or
subclassing. In either case you must provide a way to create
the resource.
When using `create` argument, pass a function with no arguments::
http_pool = pools.Pool(create=httplib2.Http)
If you need to pass arguments, build a nullary function with either
`lambda` expression::
http_pool = pools.Pool(create=lambda: httplib2.Http(timeout=90))
or :func:`functools.partial`::
from functools import partial
http_pool = pools.Pool(create=partial(httplib2.Http, timeout=90))
When subclassing, define only the :meth:`create` method
to implement the desired resource::
class MyPool(pools.Pool):
def create(self):
return MyObject()
If using 2.5 or greater, the :meth:`item` method acts as a context manager;
that's the best way to use it::
with mypool.item() as thing:
thing.dostuff()
The maximum size of the pool can be modified at runtime via
the :meth:`resize` method.
Specifying a non-zero *min-size* argument pre-populates the pool with
*min_size* items. *max-size* sets a hard limit to the size of the pool --
it cannot contain any more items than *max_size*, and if there are already
*max_size* items 'checked out' of the pool, the pool will cause any
greenthread calling :meth:`get` to cooperatively yield until an item
is :meth:`put` in.
"""
def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None):
"""*order_as_stack* governs the ordering of the items in the free pool.
If ``False`` (the default), the free items collection (of items that
were created and were put back in the pool) acts as a round-robin,
giving each item approximately equal utilization. If ``True``, the
free pool acts as a FILO stack, which preferentially re-uses items that
have most recently been used.
"""
self.min_size = min_size
self.max_size = max_size
self.order_as_stack = order_as_stack
self.current_size = 0
self.channel = queue.LightQueue(0)
self.free_items = collections.deque()
if create is not None:
self.create = create
for x in range(min_size):
self.current_size += 1
self.free_items.append(self.create())
def get(self):
"""Return an item from the pool, when one is available. This may
cause the calling greenthread to block.
"""
if self.free_items:
return self.free_items.popleft()
self.current_size += 1
if self.current_size <= self.max_size:
try:
created = self.create()
except:
self.current_size -= 1
raise
return created
self.current_size -= 1 # did not create
return self.channel.get()
@contextmanager
def item(self):
""" Get an object out of the pool, for use with with statement.
>>> from eventlet import pools
>>> pool = pools.TokenPool(max_size=4)
>>> with pool.item() as obj:
... print("got token")
...
got token
>>> pool.free()
4
"""
obj = self.get()
try:
yield obj
finally:
self.put(obj)
def put(self, item):
"""Put an item back into the pool, when done. This may
cause the putting greenthread to block.
"""
if self.current_size > self.max_size:
self.current_size -= 1
return
if self.waiting():
try:
self.channel.put(item, block=False)
return
except queue.Full:
pass
if self.order_as_stack:
self.free_items.appendleft(item)
else:
self.free_items.append(item)
def resize(self, new_size):
"""Resize the pool to *new_size*.
Adjusting this number does not affect existing items checked out of
the pool, nor on any greenthreads who are waiting for an item to free
up. Some indeterminate number of :meth:`get`/:meth:`put`
cycles will be necessary before the new maximum size truly matches
the actual operation of the pool.
"""
self.max_size = new_size
def free(self):
"""Return the number of free items in the pool. This corresponds
to the number of :meth:`get` calls needed to empty the pool.
"""
return len(self.free_items) + self.max_size - self.current_size
def waiting(self):
"""Return the number of routines waiting for a pool item.
"""
return max(0, self.channel.getting() - self.channel.putting())
def create(self):
"""Generate a new pool item. In order for the pool to
function, either this method must be overriden in a subclass
or the pool must be constructed with the `create` argument.
It accepts no arguments and returns a single instance of
whatever thing the pool is supposed to contain.
In general, :meth:`create` is called whenever the pool exceeds its
previous high-water mark of concurrently-checked-out-items. In other
words, in a new pool with *min_size* of 0, the very first call
to :meth:`get` will result in a call to :meth:`create`. If the first
caller calls :meth:`put` before some other caller calls :meth:`get`,
then the first item will be returned, and :meth:`create` will not be
called a second time.
"""
raise NotImplementedError("Implement in subclass")
| Pool |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 113845,
"end": 114955
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.write(self.xsrf_token)
def get_app_kwargs(self):
return dict(
xsrf_cookies=True, xsrf_cookie_kwargs=dict(httponly=True, expires_days=2)
)
def test_xsrf_httponly(self):
response = self.fetch("/")
self.assertIn("httponly;", response.headers["Set-Cookie"].lower())
self.assertIn("expires=", response.headers["Set-Cookie"].lower())
header = response.headers.get("Set-Cookie")
assert header is not None
match = re.match(".*; expires=(?P<expires>.+);.*", header)
assert match is not None
expires = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
days=2
)
header_expires = email.utils.parsedate_to_datetime(match.groupdict()["expires"])
if header_expires.tzinfo is None:
header_expires = header_expires.replace(tzinfo=datetime.timezone.utc)
self.assertTrue(abs((expires - header_expires).total_seconds()) < 10)
| XSRFCookieKwargsTest |
python | google__pytype | pytype/pytd/booleq.py | {
"start": 9738,
"end": 17698
} | class ____:
"""Solver for boolean equations.
This solver computes the union of all solutions. I.e. rather than assigning
exactly one value to each variable, it will create a list of values for each
variable: All the values this variable has in any of the solutions.
To accomplish this, we use the following rewriting rules:
[1] (t in X && ...) || (t in Y && ...) --> t in (X | Y)
[2] t in X && t in Y --> t in (X & Y)
Applying these iteratively for each variable in turn ("extracting pivots")
reduces the system to one where we can "read off" the possible values for each
variable.
Attributes:
ANY_VALUE: A special value assigned to variables with no constraints.
variables: A list of all variables.
implications: A nested dictionary mapping variable names to values to
BooleanTerm instances. This is used to specify rules like "if x is 1, then
..."
ground_truth: An equation that needs to always be TRUE. If this is FALSE, or
can be reduced to FALSE, the system is unsolvable.
assignments: The solutions, a mapping of variables to values.
"""
ANY_VALUE = "?"
def __init__(self):
self.variables = set()
self.implications = collections.defaultdict(dict)
self.ground_truth = TRUE
self.assignments = None
def __str__(self):
lines = []
count_false, count_true = 0, 0
if self.ground_truth is not TRUE:
lines.append(f"always: {self.ground_truth}")
for var, value, implication in self._iter_implications():
# only print the "interesting" lines
if implication is FALSE:
count_false += 1
elif implication is TRUE:
count_true += 1
else:
lines.append(f"if {_Eq(var, value)} then {implication}")
return "%s\n(not shown: %d always FALSE, %d always TRUE)\n" % (
"\n".join(lines),
count_false,
count_true,
)
def __repr__(self):
lines = []
for var in self.variables:
lines.append(f"solver.register_variable({var!r})")
if self.ground_truth is not TRUE:
lines.append(f"solver.always_true({self.ground_truth!r})")
for var, value, implication in self._iter_implications():
lines.append(f"solver.implies({_Eq(var, value)!r}, {implication!r})")
return "\n" + "".join(line + "\n" for line in lines)
def register_variable(self, variable):
"""Register a variable. Call before calling solve()."""
self.variables.add(variable)
def always_true(self, formula):
"""Register a ground truth. Call before calling solve()."""
assert formula is not FALSE
self.ground_truth = And([self.ground_truth, formula])
def implies(self, e: BooleanTerm, implication: BooleanTerm) -> None:
"""Register an implication. Call before calling solve()."""
# COV_NF_START
if e is FALSE or e is TRUE:
raise AssertionError("Illegal equation")
# COV_NF_END
assert isinstance(e, _Eq)
assert e.right not in self.implications[e.left]
# Since _Eq sorts its arguments in reverse and variables start with "~"
# (ASCII value 126), e.left should always be the variable.
self.implications[e.left][e.right] = implication
def _iter_implications(self):
for var, value_to_implication in self.implications.items():
for value, implication in value_to_implication.items():
yield (var, value, implication)
def _get_nonfalse_values(self, var):
return {
value
for value, implication in self.implications[var].items()
if implication is not FALSE
}
def _get_first_approximation(self):
"""Get all (variable, value) combinations to consider.
This gets the (variable, value) combinations that the solver needs to
consider based on the equalities that appear in the implications. E.g.,
with the following implication:
t1 = v1 => t1 = t2 | t3 = v2
the combinations to consider are
(t1, v1) because t1 = v1 appears,
(t2, v1) because t1 = t2 and t1 = v1 appear, and
(t3, v2) because t3 = v2 appears.
Returns:
A dictionary D mapping strings (variables) to sets of strings
(values). For two variables t1 and t2, if t1 = t2 is a possible
assignment (by first approximation), then D[t1] and D[t2] point
to the same memory location.
"""
equalities = set(
chain(
implication.extract_equalities()
for (_, _, implication) in self._iter_implications()
)
).union(self.ground_truth.extract_equalities())
var_assignments = {}
value_assignments = {}
for var in self.variables:
var_assignments[var] = {var}
value_assignments[var] = self._get_nonfalse_values(var)
for var, value in equalities:
if value in self.variables:
other_var = value
value_assignments[var] |= value_assignments[other_var]
for var_assignment in var_assignments[other_var]:
var_assignments[var].add(var_assignment)
# Make the two variables point to the same sets of assignments so
# that further possible assignments for either are added to both.
var_assignments[var_assignment] = var_assignments[var]
value_assignments[var_assignment] = value_assignments[var]
else:
value_assignments[var].add(value)
return value_assignments
def _complete(self):
"""Insert missing implications.
Insert all implications needed to have one implication for every
(variable, value) combination returned by _get_first_approximation().
"""
for var, values in self._get_first_approximation().items():
for value in values:
if value not in self.implications[var]:
# Missing implications are typically needed for variable/value
# combinations not considered by the user, e.g. for auxiliary
# variables introduced when setting up the "main" equations.
self.implications[var][value] = TRUE
if not self.implications[var]:
# If a variable does not have any constraints, it can be anything.
self.implications[var][Solver.ANY_VALUE] = TRUE
def solve(self):
"""Solve the system of equations.
Returns:
An assignment, mapping strings (variables) to sets of strings (values).
"""
if self.assignments:
return self.assignments
self._complete()
assignments = {
var: self._get_nonfalse_values(var) for var in self.variables
}
ground_pivots = self.ground_truth.simplify(assignments).extract_pivots(
assignments
)
for pivot, possible_values in ground_pivots.items():
if pivot in assignments:
assignments[pivot] &= set(possible_values)
something_changed = True
while something_changed:
something_changed = False
and_terms = []
for var in self.variables:
or_terms = []
for value in assignments[var].copy():
implication = self.implications[var][value].simplify(assignments)
if implication is FALSE:
# As an example of what kind of code triggers this,
# see TestBoolEq.testFilter
assignments[var].remove(value)
something_changed = True
else:
or_terms.append(implication)
self.implications[var][value] = implication
and_terms.append(Or(or_terms))
d = And(and_terms)
for pivot, possible_values in d.extract_pivots(assignments).items():
if pivot in assignments:
length_before = len(assignments[pivot])
assignments[pivot] &= set(possible_values)
length_after = len(assignments[pivot])
something_changed |= length_before != length_after
self.register_variable = pytd_utils.disabled_function # pylint: disable=g-missing-from-attributes
self.implies = pytd_utils.disabled_function # pylint: disable=g-missing-from-attributes
self.assignments = assignments
return assignments
| Solver |
python | pytest-dev__pytest | src/_pytest/cacheprovider.py | {
"start": 8630,
"end": 10546
} | class ____:
def __init__(self, lfplugin: LFPlugin) -> None:
self.lfplugin = lfplugin
self._collected_at_least_one_failure = False
@hookimpl(wrapper=True)
def pytest_make_collect_report(
self, collector: nodes.Collector
) -> Generator[None, CollectReport, CollectReport]:
res = yield
if isinstance(collector, Session | Directory):
# Sort any lf-paths to the beginning.
lf_paths = self.lfplugin._last_failed_paths
# Use stable sort to prioritize last failed.
def sort_key(node: nodes.Item | nodes.Collector) -> bool:
return node.path in lf_paths
res.result = sorted(
res.result,
key=sort_key,
reverse=True,
)
elif isinstance(collector, File):
if collector.path in self.lfplugin._last_failed_paths:
result = res.result
lastfailed = self.lfplugin.lastfailed
# Only filter with known failures.
if not self._collected_at_least_one_failure:
if not any(x.nodeid in lastfailed for x in result):
return res
self.lfplugin.config.pluginmanager.register(
LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
)
self._collected_at_least_one_failure = True
session = collector.session
result[:] = [
x
for x in result
if x.nodeid in lastfailed
# Include any passed arguments (not trivial to filter).
or session.isinitpath(x.path)
# Keep all sub-collectors.
or isinstance(x, nodes.Collector)
]
return res
| LFPluginCollWrapper |
python | pandas-dev__pandas | pandas/tests/extension/base/printing.py | {
"start": 48,
"end": 1110
} | class ____:
"""Tests checking the formatting of your EA when printed."""
@pytest.mark.parametrize("size", ["big", "small"])
def test_array_repr(self, data, size):
if size == "small":
data = data[:5]
else:
data = type(data)._concat_same_type([data] * 20)
result = repr(data)
assert type(data).__name__ in result
assert f"Length: {len(data)}" in result
assert str(data.dtype) in result
if size == "big":
assert "..." in result
def test_array_repr_unicode(self, data):
result = str(data)
assert isinstance(result, str)
def test_series_repr(self, data):
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
def test_dataframe_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
def test_dtype_name_in_info(self, data):
buf = io.StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
result = buf.getvalue()
assert data.dtype.name in result
| BasePrintingTests |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/embedding_ops.py | {
"start": 3038,
"end": 8956
} | class ____(torch.nn.Module):
r"""
A quantized Embedding module with quantized packed weights as inputs.
We adopt the same interface as `torch.nn.Embedding`, please see
https://pytorch.org/docs/stable/generated/torch.nn.Embedding.html for documentation.
Similar to :class:`~torch.nn.Embedding`, attributes will be randomly
initialized at module creation time and will be overwritten later
Attributes:
weight (Tensor): the non-learnable quantized weights of the module of
shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.
Examples::
>>> m = nn.quantized.Embedding(num_embeddings=10, embedding_dim=12)
>>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
>>> output = m(indices)
>>> print(output.size())
torch.Size([9, 12])
"""
_version = 1
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: Optional[int] = None,
max_norm: Optional[float] = None,
norm_type: float = 2.0,
scale_grad_by_freq: bool = False,
sparse: bool = False,
_weight: Optional[Tensor] = None,
dtype=torch.quint8,
) -> None:
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
self.dtype = dtype
if _weight is None:
scales = torch.ones(num_embeddings, dtype=torch.float)
zero_points = torch.zeros(num_embeddings, dtype=torch.float)
qweight = torch._empty_per_channel_affine_quantized(
[num_embeddings, embedding_dim],
scales=scales,
zero_points=zero_points,
axis=0,
dtype=torch.quint8,
)
else:
assert list(_weight.shape) == [
num_embeddings,
embedding_dim,
], "Shape of weight does not match num_embeddings and embedding_dim"
qweight = _weight
self._packed_params = EmbeddingPackedParams(
num_embeddings, embedding_dim, dtype
)
self._packed_params.set_weight(qweight)
def forward(self, indices: Tensor) -> Tensor:
if self.dtype == torch.quint4x2:
return torch.ops.quantized.embedding_4bit(
self._packed_params._packed_weight, indices
)
else:
return torch.ops.quantized.embedding_byte(
self._packed_params._packed_weight, indices
)
def _get_name(self):
return "QuantizedEmbedding"
def __repr__(self):
return _hide_packed_params_repr(self, EmbeddingPackedParams)
def extra_repr(self):
extra_repr_str = (
f"num_embeddings={self.num_embeddings}, embedding_dim={self.embedding_dim}, "
f"dtype={self._packed_params.dtype}, qscheme={self.weight().qscheme()}"
)
return extra_repr_str
def set_weight(self, w: torch.Tensor) -> None:
self._packed_params.set_weight(w)
def weight(self):
return self._packed_params._weight()
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a quantized embedding module from a float module
Args:
mod (Module): a float module, either produced by torch.ao.quantization
utilities or provided by user
"""
if hasattr(mod, "weight_fake_quant"):
assert type(mod) is torch.ao.nn.qat.Embedding, (
"nnq."
+ cls.__name__
+ ".from_float "
+ "with fake quant only works for "
+ torch.ao.nn.qat.Embedding.__name__
)
weight_observer = mod.weight_fake_quant
else:
assert type(mod) is nn.Embedding, (
"nnq."
+ cls.__name__
+ ".from_float only works for "
+ nn.Embedding.__name__
)
assert hasattr(mod, "qconfig"), (
"Embedding input float module must have qconfig defined"
)
from torch.ao.quantization import float_qparams_weight_only_qconfig
if mod.qconfig is not None and mod.qconfig.weight is not None: # type: ignore[union-attr]
weight_observer = mod.qconfig.weight() # type: ignore[union-attr, operator]
else:
weight_observer = float_qparams_weight_only_qconfig.weight()
dtype = weight_observer.dtype
is_float_qparams_qconfig = (
weight_observer.qscheme == torch.per_channel_affine_float_qparams
)
assert is_float_qparams_qconfig, (
"Embedding quantization is only supported with float_qparams_weight_only_qconfig."
)
assert dtype == torch.quint8 or dtype == torch.quint4x2, (
f"The only supported dtype for nnq.Embedding is torch.quint8 and torch.quint4x2, got {dtype}"
)
# Run the observer to calculate qparams.
weight_observer(mod.weight)
qweight = _quantize_weight(mod.weight.float(), weight_observer)
# Create quantized Embedding module and pass in the quantized weight
qembedding = Embedding(mod.num_embeddings, mod.embedding_dim)
qembedding.set_weight(qweight)
return qembedding
@classmethod
def from_reference(cls, ref_embedding):
qembedding = cls(
ref_embedding.num_embeddings,
ref_embedding.embedding_dim,
ref_embedding.padding_idx,
ref_embedding.max_norm,
ref_embedding.norm_type,
ref_embedding.scale_grad_by_freq,
ref_embedding.sparse,
ref_embedding.get_quantized_weight(),
ref_embedding.weight_dtype,
)
return qembedding
| Embedding |
python | ansible__ansible | lib/ansible/module_utils/facts/hardware/aix.py | {
"start": 843,
"end": 11817
} | class ____(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_count
- processor_cores
- processor_threads_per_core
- processor_vcpus
"""
platform = 'AIX'
def populate(self, collected_facts=None):
hardware_facts = {}
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
dmi_facts = self.get_dmi_facts()
vgs_facts = self.get_vgs_facts()
mount_facts = self.get_mount_facts()
devices_facts = self.get_device_facts()
uptime_facts = self.get_uptime_facts()
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(dmi_facts)
hardware_facts.update(vgs_facts)
hardware_facts.update(mount_facts)
hardware_facts.update(devices_facts)
hardware_facts.update(uptime_facts)
return hardware_facts
def get_cpu_facts(self):
cpu_facts = {}
cpu_facts['processor'] = []
# FIXME: not clear how to detect multi-sockets
cpu_facts['processor_count'] = 1
rc, out, err = self.module.run_command(
"/usr/sbin/lsdev -Cc processor"
)
if out:
i = 0
for line in out.splitlines():
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
cpu_facts['processor_cores'] = int(i)
rc, out, err = self.module.run_command(
"/usr/sbin/lsattr -El " + cpudev + " -a type"
)
data = out.split(' ')
cpu_facts['processor'] = [data[1]]
cpu_facts['processor_threads_per_core'] = 1
rc, out, err = self.module.run_command(
"/usr/sbin/lsattr -El " + cpudev + " -a smt_threads"
)
if out:
data = out.split(' ')
cpu_facts['processor_threads_per_core'] = int(data[1])
cpu_facts['processor_vcpus'] = (
cpu_facts['processor_cores'] * cpu_facts['processor_threads_per_core']
)
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
pagesize = 4096
rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
for line in out.splitlines():
data = line.split()
if 'memory pages' in line:
pagecount = int(data[0])
if 'free pages' in line:
freecount = int(data[0])
memory_facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
memory_facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.splitlines()
data = lines[1].split()
swaptotal_mb = int(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
memory_facts['swaptotal_mb'] = swaptotal_mb
memory_facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) / 100)
return memory_facts
def get_uptime_facts(self):
uptime_facts = {}
# On AIX, there are no options to get the uptime directly in seconds.
# Your options are to parse the output of "who", "uptime", or "ps".
# Only "ps" always provides a field with seconds.
ps_bin = self.module.get_bin_path("ps")
if ps_bin is None:
return uptime_facts
ps_cmd = [ps_bin, "-p", "1", "-o", "etime="]
rc, out, err = self.module.run_command(ps_cmd)
if rc != 0:
return uptime_facts
# Parse out
if out:
lines = out.splitlines()
data = lines[0].replace(':', '-').split('-')
try:
days = int(data[0])
hours = int(data[1])
minutes = int(data[2])
seconds = int(data[3])
except (IndexError, ValueError):
return uptime_facts
# Calculate uptime in seconds
uptime_seconds = (days * 86400) + (hours * 3600) + (minutes * 60) + seconds
uptime_facts['uptime_seconds'] = int(uptime_seconds)
return uptime_facts
def get_dmi_facts(self):
dmi_facts = {}
rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
dmi_facts['firmware_version'] = data[1].strip('IBM,')
lsconf_path = self.module.get_bin_path("lsconf")
if lsconf_path:
rc, out, err = self.module.run_command(lsconf_path)
if rc == 0 and out:
for line in out.splitlines():
data = line.split(':')
if 'Machine Serial Number' in line:
dmi_facts['product_serial'] = data[1].strip()
if 'LPAR Info' in line:
dmi_facts['lpar_info'] = data[1].strip()
if 'System Model' in line:
dmi_facts['product_name'] = data[1].strip()
return dmi_facts
def get_vgs_facts(self):
"""
Get vg and pv Facts
rootvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk0 active 546 0 00..00..00..00..00
hdisk1 active 546 113 00..00..00..21..92
realsyncvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk74 active 1999 6 00..00..00..00..06
testvg:
PV_NAME PV STATE TOTAL PPs FREE PPs FREE DISTRIBUTION
hdisk105 active 999 838 200..39..199..200..200
hdisk106 active 999 599 200..00..00..199..200
"""
vgs_facts = {}
lsvg_path = self.module.get_bin_path("lsvg")
xargs_path = self.module.get_bin_path("xargs")
cmd = "%s -o | %s %s -p" % (lsvg_path, xargs_path, lsvg_path)
if lsvg_path and xargs_path:
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc == 0 and out:
vgs_facts['vgs'] = {}
for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
vgs_facts['vgs'][m.group(1)] = []
pp_size = 0
cmd = "%s %s" % (lsvg_path, m.group(1))
rc, out, err = self.module.run_command(cmd)
if rc == 0 and out:
pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)', out).group(1)
for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*', m.group(0)):
pv_info = {'pv_name': n.group(1),
'pv_state': n.group(2),
'total_pps': n.group(3),
'free_pps': n.group(4),
'pp_size': pp_size
}
vgs_facts['vgs'][m.group(1)].append(pv_info)
return vgs_facts
def get_mount_facts(self):
mount_facts = {}
mount_facts['mounts'] = []
mounts = []
# AIX does not have mtab but mount command is only source of info (or to use
# api calls to get same info)
mount_path = self.module.get_bin_path('mount')
if mount_path:
rc, mount_out, err = self.module.run_command(mount_path)
if mount_out:
for line in mount_out.split('\n'):
fields = line.split()
if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
if re.match('^/', fields[0]):
# normal mount
mount = fields[1]
mount_info = {'mount': mount,
'device': fields[0],
'fstype': fields[2],
'options': fields[6],
'time': '%s %s %s' % (fields[3], fields[4], fields[5])}
mount_info.update(get_mount_size(mount))
else:
# nfs or cifs based mount
# in case of nfs if no mount options are provided on command line
# add into fields empty string...
if len(fields) < 8:
fields.append("")
mount_info = {'mount': fields[2],
'device': '%s:%s' % (fields[0], fields[1]),
'fstype': fields[3],
'options': fields[7],
'time': '%s %s %s' % (fields[4], fields[5], fields[6])}
mounts.append(mount_info)
mount_facts['mounts'] = mounts
return mount_facts
def get_device_facts(self):
device_facts = {}
device_facts['devices'] = {}
lsdev_cmd = self.module.get_bin_path('lsdev')
lsattr_cmd = self.module.get_bin_path('lsattr')
if lsdev_cmd and lsattr_cmd:
rc, out_lsdev, err = self.module.run_command(lsdev_cmd)
for line in out_lsdev.splitlines():
field = line.split()
device_attrs = {}
device_name = field[0]
device_state = field[1]
device_type = field[2:]
lsattr_cmd_args = [lsattr_cmd, '-E', '-l', device_name]
rc, out_lsattr, err = self.module.run_command(lsattr_cmd_args)
for attr in out_lsattr.splitlines():
attr_fields = attr.split()
attr_name = attr_fields[0]
attr_parameter = attr_fields[1]
device_attrs[attr_name] = attr_parameter
device_facts['devices'][device_name] = {
'state': device_state,
'type': ' '.join(device_type),
'attributes': device_attrs
}
return device_facts
| AIXHardware |
python | django__django | tests/admin_inlines/admin.py | {
"start": 6880,
"end": 6948
} | class ____(admin.TabularInline):
model = SottoCapo
| SottoCapoInline |
python | jina-ai__jina | jina/serve/runtimes/gateway/health_model.py | {
"start": 477,
"end": 732
} | class ____(BaseModel):
"""Pydantic BaseModel for Jina status, used as the response model in REST app."""
jina: Dict
envs: Dict
class Config:
alias_generator = _to_camel_case
allow_population_by_field_name = True
| JinaInfoModel |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/agent.py | {
"start": 1049,
"end": 1342
} | class ____(BaseEvent):
"""
AgentChatWithStepStartEvent.
Args:
user_msg (str): User input message.
"""
user_msg: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentChatWithStepStartEvent"
| AgentChatWithStepStartEvent |
python | pytorch__pytorch | test/test_serialization.py | {
"start": 3772,
"end": 4256
} | class ____(torch.Tensor):
@staticmethod
def __new__(cls, elem, **kwargs):
assert elem.dtype is torch.uint8
assert not kwargs.get("requires_grad", False)
kwargs["requires_grad"] = False
return torch.Tensor._make_wrapper_subclass(cls, up_size(elem.shape), dtype=torch.uint4, **kwargs)
def __init__(self, elem):
self.elem = elem
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs=None):
pass
| UInt4Tensor |
python | pydata__xarray | asv_bench/benchmarks/indexing.py | {
"start": 2306,
"end": 3071
} | class ____:
def setup(self, key):
self.ds = xr.Dataset(
{
"var1": (("x", "y"), randn((nx, ny), frac_nan=0.1)),
"var2": (("x", "t"), randn((nx, nt))),
"var3": (("t",), randn(nt)),
},
coords={
"x": np.arange(nx),
"y": np.linspace(0, 1, ny),
"t": pd.date_range("1970-01-01", periods=nt, freq="D"),
"x_coords": ("x", np.linspace(1.1, 2.1, nx)),
},
)
# Benchmark how indexing is slowed down by adding many scalar variable
# to the dataset
# https://github.com/pydata/xarray/pull/9003
self.ds_large = self.ds.merge({f"extra_var{i}": i for i in range(400)})
| Base |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 117471,
"end": 117794
} | class ____(sgqlc.types.Enum):
"""Properties by which team member connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order team members by creation time
* `LOGIN`: Order team members by login
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT", "LOGIN")
| TeamMemberOrderField |
python | tensorflow__tensorflow | tensorflow/compiler/tests/runtime_shape_check_test.py | {
"start": 1118,
"end": 2624
} | class ____(xla_test.XLATestCase):
def testUniqueDifferentSizes(self):
"""Test that we correctly check for shape mismatches at runtime."""
if 'tpu' in self.device.lower():
self.skipTest('We do not check shapes on TPU')
with ops.device(f'device:{self.device}:0'):
@def_function.function(jit_compile=True)
def f(x, y):
return array_ops.unique(x).y + array_ops.unique(y).y
f(constant_op.constant([3.1, 3.2]), constant_op.constant([3.3, 3.2]))
with self.assertRaisesRegex(errors.InternalError, 'different size'):
f(
constant_op.constant([3.1, 3.2]),
constant_op.constant([3.1, 3.2, 3.3]))
def testWhereOpDifferentSizes(self):
"""Test shape mismatches with multiple dimensions."""
if 'tpu' in self.device.lower():
self.skipTest('We do not check shapes on TPU')
with ops.device(f'device:{self.device}:0'):
@def_function.function(jit_compile=True)
def f(x, y):
return array_ops.where(x) + array_ops.where(y)
f(
constant_op.constant([[3.1, 3.2, 0], [3.1, 3.2, 0]]),
constant_op.constant([[3.3, 3.2, 0, 0, 0], [3.3, 3.2, 0, 0, 0]]))
with self.assertRaisesRegex(errors.InternalError, 'different size'):
f(
constant_op.constant([[3.1, 3.2, 0], [3.1, 3.2, 0]]),
constant_op.constant([[3.3, 3.2, 0, 0, 0], [3.3, 3.2, 3.3, 0, 0]]))
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| RuntimeShapeCheckTest |
python | chroma-core__chroma | chromadb/api/collection_configuration.py | {
"start": 626,
"end": 890
} | class ____(TypedDict, total=False):
search_nprobe: int
write_nprobe: int
space: Space
ef_construction: int
ef_search: int
max_neighbors: int
reassign_neighbor_count: int
split_threshold: int
merge_threshold: int
| SpannConfiguration |
python | sqlalchemy__sqlalchemy | test/orm/test_relationship_criteria.py | {
"start": 84336,
"end": 90461
} | class ____(fixtures.DeclarativeMappedTest):
"""test #10223"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Temperature(Base):
__tablename__ = "temperature"
id: Mapped[int] = mapped_column(primary_key=True)
pointless_flag: Mapped[bool]
class Color(Base):
__tablename__ = "color"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column(String(50))
temperature_id: Mapped[int] = mapped_column(
ForeignKey("temperature.id")
)
temperature: Mapped[Temperature] = relationship()
room_connections = Table(
"room_connections",
Base.metadata,
Column(
"room_a_id",
Integer,
# mariadb does not like this FK constraint
# ForeignKey("room.id"),
primary_key=True,
),
Column(
"room_b_id",
Integer,
# mariadb does not like this FK constraint
# ForeignKey("room.id"),
primary_key=True,
),
)
class Room(Base):
__tablename__ = "room"
id: Mapped[int] = mapped_column(primary_key=True)
token: Mapped[str] = mapped_column(String(50))
color_id: Mapped[int] = mapped_column(ForeignKey("color.id"))
color: Mapped[Color] = relationship()
connected_rooms: Mapped[List["Room"]] = relationship( # noqa: F821
secondary=room_connections,
primaryjoin=id == room_connections.c.room_a_id,
secondaryjoin=id == room_connections.c.room_b_id,
)
@classmethod
def insert_data(cls, connection):
Room, Temperature, Color = cls.classes("Room", "Temperature", "Color")
with Session(connection) as session:
warm = Temperature(pointless_flag=True)
cool = Temperature(pointless_flag=True)
session.add_all([warm, cool])
red = Color(name="red", temperature=warm)
orange = Color(name="orange", temperature=warm)
blue = Color(name="blue", temperature=cool)
green = Color(name="green", temperature=cool)
session.add_all([red, orange, blue, green])
red1 = Room(token="Red-1", color=red)
red2 = Room(token="Red-2", color=red)
orange2 = Room(token="Orange-2", color=orange)
blue1 = Room(token="Blue-1", color=blue)
blue2 = Room(token="Blue-2", color=blue)
green1 = Room(token="Green-1", color=green)
red1.connected_rooms = [red2, blue1, green1]
red2.connected_rooms = [red1, blue2, orange2]
blue1.connected_rooms = [red1, blue2, green1]
blue2.connected_rooms = [red2, blue1, orange2]
session.add_all([red1, red2, blue1, blue2, green1, orange2])
session.commit()
@testing.variation(
"join_on_relationship", ["alone", "with_and", "no", "omit"]
)
def test_selectinload(self, join_on_relationship):
Room, Temperature, Color = self.classes("Room", "Temperature", "Color")
similar_color = aliased(Color)
subquery = (
select(Color.id)
.join(
similar_color,
similar_color.temperature_id == Color.temperature_id,
)
.where(similar_color.name == "red")
)
if join_on_relationship.alone:
subquery = subquery.join(Color.temperature).where(
Temperature.pointless_flag == True
)
elif join_on_relationship.with_and:
subquery = subquery.join(
Color.temperature.and_(Temperature.pointless_flag == True)
)
elif join_on_relationship.no:
subquery = subquery.join(
Temperature, Color.temperature_id == Temperature.id
).where(Temperature.pointless_flag == True)
elif join_on_relationship.omit:
pass
else:
join_on_relationship.fail()
session = fixture_session()
room_result = session.scalars(
select(Room)
.order_by(Room.id)
.join(Room.color.and_(Color.name == "red"))
.options(
selectinload(
Room.connected_rooms.and_(Room.color_id.in_(subquery))
)
)
).unique()
self._assert_result(room_result)
def test_contains_eager(self):
Room, Temperature, Color = self.classes("Room", "Temperature", "Color")
similar_color = aliased(Color)
subquery = (
select(Color.id)
.join(
similar_color,
similar_color.temperature_id == Color.temperature_id,
)
.join(Color.temperature.and_(Temperature.pointless_flag == True))
.where(similar_color.name == "red")
)
room_alias = aliased(Room)
session = fixture_session()
room_result = session.scalars(
select(Room)
.order_by(Room.id, room_alias.id)
.join(Room.color.and_(Color.name == "red"))
.join(
room_alias,
Room.connected_rooms.of_type(room_alias).and_(
room_alias.color_id.in_(subquery)
),
)
.options(contains_eager(Room.connected_rooms.of_type(room_alias)))
).unique()
self._assert_result(room_result)
def _assert_result(self, room_result):
eq_(
[
(
each_room.token,
[room.token for room in each_room.connected_rooms],
)
for each_room in room_result
],
[
("Red-1", ["Red-2"]),
("Red-2", ["Red-1", "Orange-2"]),
],
)
| SubqueryCriteriaTest |
python | sphinx-doc__sphinx | sphinx/search/__init__.py | {
"start": 5212,
"end": 6428
} | class ____:
"""The search index as JavaScript file that calls a function
on the documentation search object to register the index.
"""
PREFIX = 'Search.setIndex('
SUFFIX = ')'
def dumps(self, data: Any) -> str:
data_json = json.dumps(data, separators=(',', ':'), sort_keys=True)
return self.PREFIX + data_json + self.SUFFIX
def loads(self, s: str) -> Any:
data = s[len(self.PREFIX) : -len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not s.endswith(self.SUFFIX):
msg = 'invalid data'
raise ValueError(msg)
return json.loads(data)
def dump(self, data: Any, f: _WritableStream[str]) -> None:
f.write(self.dumps(data))
def load(self, f: _ReadableStream[str]) -> Any:
return self.loads(f.read())
js_index = _JavaScriptIndex()
def _is_meta_keywords(
node: nodes.meta,
lang: str | None,
) -> bool:
if node.get('name') == 'keywords':
meta_lang = node.get('lang')
if meta_lang is None or meta_lang == lang:
# lang not specified or matched to html_search_language
return True
return False
@dataclasses.dataclass
| _JavaScriptIndex |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/api_endpoints/test_auth.py | {
"start": 1262,
"end": 1744
} | class ____:
@pytest.fixture(autouse=True)
def set_attrs(self, minimal_app_for_auth_api):
self.app = minimal_app_for_auth_api
sm = self.app.appbuilder.sm
delete_user(self.app, "test")
role_admin = sm.find_role("Admin")
sm.add_user(
username="test",
first_name="test",
last_name="test",
email="test@fab.org",
role=role_admin,
password="test",
)
| BaseTestAuth |
python | django__django | tests/select_for_update/models.py | {
"start": 138,
"end": 201
} | class ____(Country):
join_date = models.DateField()
| EUCountry |
python | doocs__leetcode | solution/1500-1599/1563.Stone Game V/Solution.py | {
"start": 63,
"end": 908
} | class ____:
def stoneGameV(self, stoneValue: List[int]) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= j:
return 0
ans = l = 0
r = s[j + 1] - s[i]
for k in range(i, j):
l += stoneValue[k]
r -= stoneValue[k]
if l < r:
if ans >= l * 2:
continue
ans = max(ans, l + dfs(i, k))
elif l > r:
if ans >= r * 2:
break
ans = max(ans, r + dfs(k + 1, j))
else:
ans = max(ans, max(l + dfs(i, k), r + dfs(k + 1, j)))
return ans
s = list(accumulate(stoneValue, initial=0))
return dfs(0, len(stoneValue) - 1)
| Solution |
python | ray-project__ray | python/ray/dashboard/modules/job/job_agent.py | {
"start": 692,
"end": 7832
} | class ____(dashboard_utils.DashboardAgentModule):
def __init__(self, dashboard_agent):
super().__init__(dashboard_agent)
self._job_manager = None
@routes.post("/api/job_agent/jobs/")
@optional_utils.deny_browser_requests()
@optional_utils.init_ray_and_catch_exceptions()
async def submit_job(self, req: Request) -> Response:
result = await parse_and_validate_request(req, JobSubmitRequest)
# Request parsing failed, returned with Response object.
if isinstance(result, Response):
return result
else:
submit_request = result
request_submission_id = submit_request.submission_id or submit_request.job_id
try:
ray._common.usage.usage_lib.record_library_usage("job_submission")
submission_id = await self.get_job_manager().submit_job(
entrypoint=submit_request.entrypoint,
submission_id=request_submission_id,
runtime_env=submit_request.runtime_env,
metadata=submit_request.metadata,
entrypoint_num_cpus=submit_request.entrypoint_num_cpus,
entrypoint_num_gpus=submit_request.entrypoint_num_gpus,
entrypoint_memory=submit_request.entrypoint_memory,
entrypoint_resources=submit_request.entrypoint_resources,
)
resp = JobSubmitResponse(job_id=submission_id, submission_id=submission_id)
except (TypeError, ValueError):
return Response(
text=traceback.format_exc(),
status=aiohttp.web.HTTPBadRequest.status_code,
)
except Exception:
return Response(
text=traceback.format_exc(),
status=aiohttp.web.HTTPInternalServerError.status_code,
)
return Response(
text=json.dumps(dataclasses.asdict(resp)),
content_type="application/json",
status=aiohttp.web.HTTPOk.status_code,
)
@routes.post("/api/job_agent/jobs/{job_or_submission_id}/stop")
@optional_utils.deny_browser_requests()
@optional_utils.init_ray_and_catch_exceptions()
async def stop_job(self, req: Request) -> Response:
job_or_submission_id = req.match_info["job_or_submission_id"]
job = await find_job_by_ids(
self._dashboard_agent.gcs_client,
self.get_job_manager().job_info_client(),
job_or_submission_id,
)
if not job:
return Response(
text=f"Job {job_or_submission_id} does not exist",
status=aiohttp.web.HTTPNotFound.status_code,
)
if job.type is not JobType.SUBMISSION:
return Response(
text="Can only stop submission type jobs",
status=aiohttp.web.HTTPBadRequest.status_code,
)
try:
stopped = self.get_job_manager().stop_job(job.submission_id)
resp = JobStopResponse(stopped=stopped)
except Exception:
return Response(
text=traceback.format_exc(),
status=aiohttp.web.HTTPInternalServerError.status_code,
)
return Response(
text=json.dumps(dataclasses.asdict(resp)), content_type="application/json"
)
@routes.delete("/api/job_agent/jobs/{job_or_submission_id}")
@optional_utils.init_ray_and_catch_exceptions()
async def delete_job(self, req: Request) -> Response:
job_or_submission_id = req.match_info["job_or_submission_id"]
job = await find_job_by_ids(
self._dashboard_agent.gcs_client,
self.get_job_manager().job_info_client(),
job_or_submission_id,
)
if not job:
return Response(
text=f"Job {job_or_submission_id} does not exist",
status=aiohttp.web.HTTPNotFound.status_code,
)
if job.type is not JobType.SUBMISSION:
return Response(
text="Can only delete submission type jobs",
status=aiohttp.web.HTTPBadRequest.status_code,
)
try:
deleted = await self.get_job_manager().delete_job(job.submission_id)
resp = JobDeleteResponse(deleted=deleted)
except Exception:
return Response(
text=traceback.format_exc(),
status=aiohttp.web.HTTPInternalServerError.status_code,
)
return Response(
text=json.dumps(dataclasses.asdict(resp)), content_type="application/json"
)
@routes.get("/api/job_agent/jobs/{job_or_submission_id}/logs")
@optional_utils.init_ray_and_catch_exceptions()
async def get_job_logs(self, req: Request) -> Response:
job_or_submission_id = req.match_info["job_or_submission_id"]
job = await find_job_by_ids(
self._dashboard_agent.gcs_client,
self.get_job_manager().job_info_client(),
job_or_submission_id,
)
if not job:
return Response(
text=f"Job {job_or_submission_id} does not exist",
status=aiohttp.web.HTTPNotFound.status_code,
)
if job.type is not JobType.SUBMISSION:
return Response(
text="Can only get logs of submission type jobs",
status=aiohttp.web.HTTPBadRequest.status_code,
)
resp = JobLogsResponse(
logs=self.get_job_manager().get_job_logs(job.submission_id)
)
return Response(
text=json.dumps(dataclasses.asdict(resp)), content_type="application/json"
)
@routes.get("/api/job_agent/jobs/{job_or_submission_id}/logs/tail")
@optional_utils.init_ray_and_catch_exceptions()
async def tail_job_logs(self, req: Request) -> Response:
job_or_submission_id = req.match_info["job_or_submission_id"]
job = await find_job_by_ids(
self._dashboard_agent.gcs_client,
self.get_job_manager().job_info_client(),
job_or_submission_id,
)
if not job:
return Response(
text=f"Job {job_or_submission_id} does not exist",
status=aiohttp.web.HTTPNotFound.status_code,
)
if job.type is not JobType.SUBMISSION:
return Response(
text="Can only get logs of submission type jobs",
status=aiohttp.web.HTTPBadRequest.status_code,
)
ws = aiohttp.web.WebSocketResponse()
await ws.prepare(req)
async for lines in self._job_manager.tail_job_logs(job.submission_id):
await ws.send_str(lines)
return ws
def get_job_manager(self):
if not self._job_manager:
self._job_manager = JobManager(
self._dashboard_agent.gcs_client, self._dashboard_agent.log_dir
)
return self._job_manager
async def run(self, server):
pass
@staticmethod
def is_minimal_module():
return False
| JobAgent |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/json_viewer.py | {
"start": 3478,
"end": 7113
} | class ____(MetaflowCardComponent):
"""
A component for displaying YAML data with syntax highlighting and collapsible sections.
This component provides a rich view of YAML data with proper formatting and syntax highlighting.
Example:
```python
from metaflow.cards import YAMLViewer
from metaflow import current
data = {
"database": {
"host": "localhost",
"port": 5432,
"credentials": {"username": "admin", "password": "secret"}
},
"features": ["auth", "logging", "monitoring"]
}
yaml_viewer = YAMLViewer(data, collapsible=True)
current.card.append(yaml_viewer)
```
Parameters
----------
data : Any
The data to display as YAML. Will be serialized to YAML format.
collapsible : bool, default True
Whether to make the YAML viewer collapsible.
max_height : str, optional
Maximum height for the viewer (CSS value like "300px" or "20rem").
show_copy_button : bool, default True
Whether to show a copy-to-clipboard button.
"""
type = "yamlViewer"
REALTIME_UPDATABLE = True
def __init__(
self,
data: Any,
collapsible: bool = True,
max_height: Optional[str] = None,
show_copy_button: bool = True,
title: Optional[str] = None,
):
self._data = data
self._collapsible = collapsible
self._max_height = max_height
self._show_copy_button = show_copy_button
self._title = title
def update(self, data: Any):
"""
Update the YAML data.
Parameters
----------
data : Any
New data to display as YAML.
"""
self._data = data
def _to_yaml_string(self, data: Any) -> str:
"""
Convert data to YAML string format using vendored YAML module.
"""
try:
if isinstance(data, str):
# Try to parse as JSON first, then convert to YAML
try:
import json
parsed = json.loads(data)
yaml_result = yaml.dump(
parsed, default_flow_style=False, indent=2, sort_keys=False
)
return (
str(yaml_result)
if yaml_result is not None
else "# Empty YAML result"
)
except json.JSONDecodeError:
# If not JSON, return as-is
return data
else:
yaml_result = yaml.dump(
data, default_flow_style=False, indent=2, sort_keys=False
)
return (
str(yaml_result)
if yaml_result is not None
else "# Empty YAML result"
)
except Exception as e:
# Fallback to JSON on any error
import json
return f"# Error converting to YAML: {str(e)}\n{json.dumps(data, indent=2, default=str)}"
@with_default_component_id
@render_safely
def render(self):
yaml_string = self._to_yaml_string(self._data)
data = {
"type": self.type,
"id": self.component_id,
"yaml_string": yaml_string,
"collapsible": self._collapsible,
"show_copy_button": self._show_copy_button,
"title": self._title or "YAML",
}
if self._max_height:
data["max_height"] = self._max_height
return data
| YAMLViewer |
python | getsentry__sentry-python | tests/integrations/celery/integration_tests/__init__.py | {
"start": 143,
"end": 1499
} | class ____(Scheduler):
"""
A custom scheduler that starts tasks immediately after starting Celery beat.
"""
def setup_schedule(self):
super().setup_schedule()
for _, entry in self.schedule.items():
self.apply_entry(entry)
def tick(self):
# Override tick to prevent the normal schedule cycle
return 1
def kill_beat(beat_pid_file, delay_seconds=1):
"""
Terminates Celery Beat after the given `delay_seconds`.
"""
logger.info("Starting Celery Beat killer...")
time.sleep(delay_seconds)
pid = int(open(beat_pid_file, "r").read())
logger.info("Terminating Celery Beat...")
os.kill(pid, signal.SIGTERM)
def run_beat(celery_app, runtime_seconds=1, loglevel="warning", quiet=True):
"""
Run Celery Beat that immediately starts tasks.
The Celery Beat instance is automatically terminated after `runtime_seconds`.
"""
logger.info("Starting Celery Beat...")
pid_file = os.path.join(tempfile.mkdtemp(), f"celery-beat-{os.getpid()}.pid")
t = threading.Thread(
target=kill_beat,
args=(pid_file,),
kwargs={"delay_seconds": runtime_seconds},
)
t.start()
beat_instance = celery_app.Beat(
loglevel=loglevel,
quiet=quiet,
pidfile=pid_file,
)
beat_instance.run()
| ImmediateScheduler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/hashability2.py | {
"start": 360,
"end": 397
} | class ____:
__hash__: None = None
| C |
python | wepe__MachineLearning | DeepLearning Tutorials/FaceRecognition_CNN(olivettifaces)/use_CNN_olivettifaces.py | {
"start": 2213,
"end": 2656
} | class ____(object):
def __init__(self, input, params_W,params_b, n_in, n_out,
activation=T.tanh):
self.input = input
self.W = params_W
self.b = params_b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
self.params = [self.W, self.b]
#卷积+采样层(conv+maxpooling)
| HiddenLayer |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 43058,
"end": 43559
} | class ____(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Django/Jinja'
aliases = ['css+django', 'css+jinja']
alias_filenames = ['*.css']
mimetypes = ['text/css+django', 'text/css+jinja']
def __init__(self, **options):
super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
| CssDjangoLexer |
python | huggingface__transformers | src/transformers/models/dinov2/modeling_dinov2.py | {
"start": 13643,
"end": 14355
} | class ____(nn.Module):
def __init__(self, config) -> None:
super().__init__()
in_features = out_features = config.hidden_size
hidden_features = int(config.hidden_size * config.mlp_ratio)
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.weights_in(hidden_state)
x1, x2 = hidden_state.chunk(2, dim=-1)
hidden = nn.functional.silu(x1) * x2
return self.weights_out(hidden)
| Dinov2SwiGLUFFN |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 14416,
"end": 16114
} | class ____(themeable):
"""
Make themeable also accept a sequence to values
This makes it possible to apply a different style value similar artists.
e.g.
theme(axis_text_x=element_text(color=("red", "green", "blue")))
The number of values in the list must match the number of objects
targeted by the themeable..
"""
def set(
self, artists: Sequence[Artist], props: Optional[dict[str, Any]] = None
):
if props is None:
props = self.properties
n = len(artists)
sequence_props = {}
for name, value in props.items():
if (
isinstance(value, (list, tuple, np.ndarray))
and len(value) == n
):
sequence_props[name] = value
for key in sequence_props:
del props[key]
for a in artists:
a.set(**props)
for name, values in sequence_props.items():
for a, value in zip(artists, values):
a.set(**{name: value})
def blend_alpha(
properties: dict[str, Any], key: str = "color"
) -> dict[str, Any]:
"""
Blend color with alpha
When setting color property values of matplotlib objects,
for a color with an alpha channel, we don't want the alpha
property if any to have any effect on that color.
"""
if (color := properties.get(key)) is not None:
if "alpha" in properties:
properties[key] = to_rgba(color, properties["alpha"])
properties["alpha"] = None
elif has_alpha_channel(color):
properties["alpha"] = None
return properties
# element_text themeables
| MixinSequenceOfValues |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 74154,
"end": 77453
} | class ____(_BlendedMixin, Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to transform the
*x*-axis and *y_transform* to transform the *y*-axis.
You will generally not call this constructor directly but use the
`blended_transform_factory` function instead, which can determine
automatically which kind of blended transform to create.
"""
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
@property
def depth(self):
return max(self._x.depth, self._y.depth)
def contains_branch(self, other):
# A blended transform cannot possibly contain a branch from two
# different transforms.
return False
is_affine = property(lambda self: self._x.is_affine and self._y.is_affine)
has_inverse = property(
lambda self: self._x.has_inverse and self._y.has_inverse)
def frozen(self):
# docstring inherited
return blended_transform_factory(self._x.frozen(), self._y.frozen())
def transform_non_affine(self, values):
# docstring inherited
if self._x.is_affine and self._y.is_affine:
return values
x = self._x
y = self._y
if x == y and x.input_dims == 2:
return x.transform_non_affine(values)
if x.input_dims == 2:
x_points = x.transform_non_affine(values)[:, 0:1]
else:
x_points = x.transform_non_affine(values[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform_non_affine(values)[:, 1:]
else:
y_points = y.transform_non_affine(values[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if (isinstance(x_points, np.ma.MaskedArray) or
isinstance(y_points, np.ma.MaskedArray)):
return np.ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
def inverted(self):
# docstring inherited
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
def get_affine(self):
# docstring inherited
if self._invalid or self._affine is None:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# We already know the transforms are separable, so we can skip
# setting b and c to zero.
mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])
self._affine = Affine2D(mtx)
self._invalid = 0
return self._affine
| BlendedGenericTransform |
python | spack__spack | lib/spack/spack/solver/input_analysis.py | {
"start": 15142,
"end": 16107
} | class ____(Counter):
def _compute_cache_values(self) -> None:
self._possible_dependencies, virtuals, _ = self.possible_graph.possible_dependencies(
*self.specs, allowed_deps=self.all_types
)
self._possible_virtuals.update(virtuals)
def possible_packages_facts(self, gen: "spack.solver.asp.ProblemInstanceBuilder", fn) -> None:
gen.h2("Maximum number of nodes (packages)")
for package_name in sorted(self.possible_dependencies()):
gen.fact(fn.max_dupes(package_name, 1))
gen.newline()
gen.h2("Maximum number of nodes (virtual packages)")
for package_name in sorted(self.possible_virtuals()):
gen.fact(fn.max_dupes(package_name, 1))
gen.newline()
gen.h2("Possible package in link-run subDAG")
for name in sorted(self.possible_dependencies()):
gen.fact(fn.possible_in_link_run(name))
gen.newline()
| NoDuplicatesCounter |
python | huggingface__transformers | src/transformers/models/blt/configuration_blt.py | {
"start": 2874,
"end": 4888
} | class ____(PreTrainedConfig):
"""
Configuration class for the Blt Local Decoder component.
"""
model_type = "blt_local_decoder"
default_theta = 500000.0
def __init__(
self,
vocab_size: Optional[int] = 260,
cross_attn_all_layers: Optional[bool] = True,
cross_attn_k: Optional[int] = 2,
hidden_size_global: Optional[int] = 2048,
hidden_size: Optional[int] = 1024,
num_attention_heads: Optional[int] = 16,
num_key_value_heads: Optional[int] = None,
num_hidden_layers: Optional[int] = 9,
rms_norm_eps: Optional[float] = 1e-5,
dropout: Optional[float] = 0.0,
max_position_embeddings: Optional[int] = 24576,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
hidden_act: Optional[str] = "silu",
intermediate_size: Optional[int] = 2816,
initializer_range: Optional[float] = 0.02,
**kwargs,
):
self.vocab_size = vocab_size
self.cross_attn_all_layers = cross_attn_all_layers
self.cross_attn_k = cross_attn_k
self.hidden_size_global = hidden_size_global
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads or num_attention_heads
self.head_dim = hidden_size // num_attention_heads
self.intermediate_size = intermediate_size or int(8 * hidden_size / 3)
self.num_hidden_layers = num_hidden_layers
self.rms_norm_eps = rms_norm_eps
self.dropout = dropout
self.max_position_embeddings = max_position_embeddings
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rope_parameters = rope_parameters
# Remove tie_word_embeddings from kwargs to avoid duplicate parameter error
kwargs.pop("tie_word_embeddings", None)
super().__init__(**kwargs, tie_word_embeddings=False)
| BltLocalDecoderConfig |
python | pytest-dev__pytest | testing/test_cacheprovider.py | {
"start": 9385,
"end": 39068
} | class ____:
def test_lastfailed_usecase(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.setattr("sys.dont_write_bytecode", True)
p = pytester.makepyfile(
"""
def test_1(): assert 0
def test_2(): assert 0
def test_3(): assert 1
"""
)
result = pytester.runpytest(str(p))
result.stdout.fnmatch_lines(["*2 failed*"])
p = pytester.makepyfile(
"""
def test_1(): assert 1
def test_2(): assert 1
def test_3(): assert 0
"""
)
result = pytester.runpytest(str(p), "--lf")
result.stdout.fnmatch_lines(
[
"collected 3 items / 1 deselected / 2 selected",
"run-last-failure: rerun previous 2 failures",
"*= 2 passed, 1 deselected in *",
]
)
result = pytester.runpytest(str(p), "--lf")
result.stdout.fnmatch_lines(
[
"collected 3 items",
"run-last-failure: no previously failed tests, not deselecting items.",
"*1 failed*2 passed*",
]
)
pytester.path.joinpath(".pytest_cache", ".git").mkdir(parents=True)
result = pytester.runpytest(str(p), "--lf", "--cache-clear")
result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
assert pytester.path.joinpath(".pytest_cache", "README.md").is_file()
assert pytester.path.joinpath(".pytest_cache", ".git").is_dir()
# Run this again to make sure clear-cache is robust
if os.path.isdir(".pytest_cache"):
shutil.rmtree(".pytest_cache")
result = pytester.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
def test_failedfirst_order(self, pytester: Pytester) -> None:
pytester.makepyfile(
test_a="def test_always_passes(): pass",
test_b="def test_always_fails(): assert 0",
)
result = pytester.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines(["test_a.py*", "test_b.py*"])
result = pytester.runpytest("--ff")
# Test order will be failing tests first
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: rerun previous 1 failure first",
"test_b.py*",
"test_a.py*",
]
)
def test_lastfailed_failedfirst_order(self, pytester: Pytester) -> None:
pytester.makepyfile(
test_a="def test_always_passes(): assert 1",
test_b="def test_always_fails(): assert 0",
)
result = pytester.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines(["test_a.py*", "test_b.py*"])
result = pytester.runpytest("--lf", "--ff")
# Test order will be failing tests first
result.stdout.fnmatch_lines(["test_b.py*"])
result.stdout.no_fnmatch_line("*test_a.py*")
def test_lastfailed_difference_invocations(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.setattr("sys.dont_write_bytecode", True)
pytester.makepyfile(
test_a="""
def test_a1(): assert 0
def test_a2(): assert 1
""",
test_b="def test_b1(): assert 0",
)
p = pytester.path.joinpath("test_a.py")
p2 = pytester.path.joinpath("test_b.py")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*2 failed*"])
result = pytester.runpytest("--lf", p2)
result.stdout.fnmatch_lines(["*1 failed*"])
pytester.makepyfile(test_b="def test_b1(): assert 1")
result = pytester.runpytest("--lf", p2)
result.stdout.fnmatch_lines(["*1 passed*"])
result = pytester.runpytest("--lf", p)
result.stdout.fnmatch_lines(
[
"collected 2 items / 1 deselected / 1 selected",
"run-last-failure: rerun previous 1 failure",
"*= 1 failed, 1 deselected in *",
]
)
def test_lastfailed_usecase_splice(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.setattr("sys.dont_write_bytecode", True)
pytester.makepyfile(
"def test_1(): assert 0", test_something="def test_2(): assert 0"
)
p2 = pytester.path.joinpath("test_something.py")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*2 failed*"])
result = pytester.runpytest("--lf", p2)
result.stdout.fnmatch_lines(["*1 failed*"])
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(["*2 failed*"])
def test_lastfailed_xpass(self, pytester: Pytester) -> None:
pytester.inline_runsource(
"""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
"""
)
config = pytester.parseconfigure()
assert config.cache is not None
lastfailed = config.cache.get("cache/lastfailed", -1)
assert lastfailed == -1
def test_non_serializable_parametrize(self, pytester: Pytester) -> None:
"""Test that failed parametrized tests with unmarshable parameters
don't break pytest-cache.
"""
pytester.makepyfile(
r"""
import pytest
@pytest.mark.parametrize('val', [
b'\xac\x10\x02G',
])
def test_fail(val):
assert False
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 failed in*"])
@pytest.mark.parametrize("parent", ("directory", "package"))
def test_terminal_report_lastfailed(self, pytester: Pytester, parent: str) -> None:
if parent == "package":
pytester.makepyfile(
__init__="",
)
test_a = pytester.makepyfile(
test_a="""
def test_a1(): pass
def test_a2(): pass
"""
)
test_b = pytester.makepyfile(
test_b="""
def test_b1(): assert 0
def test_b2(): assert 0
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 4 items", "*2 failed, 2 passed in*"])
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: rerun previous 2 failures (skipped 1 file)",
"*2 failed in*",
]
)
result = pytester.runpytest(test_a, "--lf")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: 2 known failures not in selected tests",
"*2 passed in*",
]
)
result = pytester.runpytest(test_b, "--lf")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: rerun previous 2 failures",
"*2 failed in*",
]
)
result = pytester.runpytest("test_b.py::test_b1", "--lf")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: rerun previous 1 failure",
"*1 failed in*",
]
)
def test_terminal_report_failedfirst(self, pytester: Pytester) -> None:
pytester.makepyfile(
test_a="""
def test_a1(): assert 0
def test_a2(): pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 2 items", "*1 failed, 1 passed in*"])
result = pytester.runpytest("--ff")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: rerun previous 1 failure first",
"*1 failed, 1 passed in*",
]
)
def test_lastfailed_collectfailure(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
test_maybe="""
import os
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
"""
)
def rlf(fail_import: int, fail_run: int) -> Any:
monkeypatch.setenv("FAILIMPORT", str(fail_import))
monkeypatch.setenv("FAILTEST", str(fail_run))
pytester.runpytest("-q")
config = pytester.parseconfigure()
assert config.cache is not None
lastfailed = config.cache.get("cache/lastfailed", -1)
return lastfailed
lastfailed = rlf(fail_import=0, fail_run=0)
assert lastfailed == -1
lastfailed = rlf(fail_import=1, fail_run=0)
assert list(lastfailed) == ["test_maybe.py"]
lastfailed = rlf(fail_import=0, fail_run=1)
assert list(lastfailed) == ["test_maybe.py::test_hello"]
def test_lastfailed_failure_subset(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
test_maybe="""
import os
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
"""
)
pytester.makepyfile(
test_maybe2="""
import os
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
def test_pass():
pass
"""
)
def rlf(
fail_import: int, fail_run: int, args: Sequence[str] = ()
) -> tuple[Any, Any]:
monkeypatch.setenv("FAILIMPORT", str(fail_import))
monkeypatch.setenv("FAILTEST", str(fail_run))
result = pytester.runpytest("-q", "--lf", *args)
config = pytester.parseconfigure()
assert config.cache is not None
lastfailed = config.cache.get("cache/lastfailed", -1)
return result, lastfailed
result, lastfailed = rlf(fail_import=0, fail_run=0)
assert lastfailed == -1
result.stdout.fnmatch_lines(["*3 passed*"])
result, lastfailed = rlf(fail_import=1, fail_run=0)
assert sorted(list(lastfailed)) == ["test_maybe.py", "test_maybe2.py"]
result, lastfailed = rlf(fail_import=0, fail_run=0, args=("test_maybe2.py",))
assert list(lastfailed) == ["test_maybe.py"]
# edge case of test selection - even if we remember failures
# from other tests we still need to run all tests if no test
# matches the failures
result, lastfailed = rlf(fail_import=0, fail_run=0, args=("test_maybe2.py",))
assert list(lastfailed) == ["test_maybe.py"]
result.stdout.fnmatch_lines(["*2 passed*"])
def test_lastfailed_creates_cache_when_needed(self, pytester: Pytester) -> None:
# Issue #1342
pytester.makepyfile(test_empty="")
pytester.runpytest("-q", "--lf")
assert not os.path.exists(".pytest_cache/v/cache/lastfailed")
pytester.makepyfile(test_successful="def test_success():\n assert True")
pytester.runpytest("-q", "--lf")
assert not os.path.exists(".pytest_cache/v/cache/lastfailed")
pytester.makepyfile(test_errored="def test_error():\n assert False")
pytester.runpytest("-q", "--lf")
assert os.path.exists(".pytest_cache/v/cache/lastfailed")
def test_xfail_not_considered_failure(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail
def test(): assert 0
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 xfailed*"])
assert self.get_cached_last_failed(pytester) == []
def test_xfail_strict_considered_failure(self, pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.xfail(strict=True)
def test(): pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*1 failed*"])
assert self.get_cached_last_failed(pytester) == [
"test_xfail_strict_considered_failure.py::test"
]
@pytest.mark.parametrize("mark", ["mark.xfail", "mark.skip"])
def test_failed_changed_to_xfail_or_skip(
self, pytester: Pytester, mark: str
) -> None:
pytester.makepyfile(
"""
import pytest
def test(): assert 0
"""
)
result = pytester.runpytest()
assert self.get_cached_last_failed(pytester) == [
"test_failed_changed_to_xfail_or_skip.py::test"
]
assert result.ret == 1
pytester.makepyfile(
f"""
import pytest
@pytest.{mark}
def test(): assert 0
"""
)
result = pytester.runpytest()
assert result.ret == 0
assert self.get_cached_last_failed(pytester) == []
assert result.ret == 0
@pytest.mark.parametrize("quiet", [True, False])
@pytest.mark.parametrize("opt", ["--ff", "--lf"])
def test_lf_and_ff_prints_no_needless_message(
self, quiet: bool, opt: str, pytester: Pytester
) -> None:
# Issue 3853
pytester.makepyfile("def test(): assert 0")
args = [opt]
if quiet:
args.append("-q")
result = pytester.runpytest(*args)
result.stdout.no_fnmatch_line("*run all*")
result = pytester.runpytest(*args)
if quiet:
result.stdout.no_fnmatch_line("*run all*")
else:
assert "rerun previous" in result.stdout.str()
def get_cached_last_failed(self, pytester: Pytester) -> list[str]:
config = pytester.parseconfigure()
assert config.cache is not None
return sorted(config.cache.get("cache/lastfailed", {}))
def test_cache_cumulative(self, pytester: Pytester) -> None:
"""Test workflow where user fixes errors gradually file by file using --lf."""
# 1. initial run
test_bar = pytester.makepyfile(
test_bar="""
def test_bar_1(): pass
def test_bar_2(): assert 0
"""
)
test_foo = pytester.makepyfile(
test_foo="""
def test_foo_3(): pass
def test_foo_4(): assert 0
"""
)
pytester.runpytest()
assert self.get_cached_last_failed(pytester) == [
"test_bar.py::test_bar_2",
"test_foo.py::test_foo_4",
]
# 2. fix test_bar_2, run only test_bar.py
pytester.makepyfile(
test_bar="""
def test_bar_1(): pass
def test_bar_2(): pass
"""
)
result = pytester.runpytest(test_bar)
result.stdout.fnmatch_lines(["*2 passed*"])
# ensure cache does not forget that test_foo_4 failed once before
assert self.get_cached_last_failed(pytester) == ["test_foo.py::test_foo_4"]
result = pytester.runpytest("--last-failed")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: rerun previous 1 failure (skipped 1 file)",
"*= 1 failed in *",
]
)
assert self.get_cached_last_failed(pytester) == ["test_foo.py::test_foo_4"]
# 3. fix test_foo_4, run only test_foo.py
test_foo = pytester.makepyfile(
test_foo="""
def test_foo_3(): pass
def test_foo_4(): pass
"""
)
result = pytester.runpytest(test_foo, "--last-failed")
result.stdout.fnmatch_lines(
[
"collected 2 items / 1 deselected / 1 selected",
"run-last-failure: rerun previous 1 failure",
"*= 1 passed, 1 deselected in *",
]
)
assert self.get_cached_last_failed(pytester) == []
result = pytester.runpytest("--last-failed")
result.stdout.fnmatch_lines(["*4 passed*"])
assert self.get_cached_last_failed(pytester) == []
def test_lastfailed_no_failures_behavior_all_passed(
self, pytester: Pytester
) -> None:
pytester.makepyfile(
"""
def test_1(): pass
def test_2(): pass
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["*2 passed*"])
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(["*2 passed*"])
result = pytester.runpytest("--lf", "--lfnf", "all")
result.stdout.fnmatch_lines(["*2 passed*"])
# Ensure the list passed to pytest_deselected is a copy,
# and not a reference which is cleared right after.
pytester.makeconftest(
"""
deselected = []
def pytest_deselected(items):
global deselected
deselected = items
def pytest_sessionfinish():
print("\\ndeselected={}".format(len(deselected)))
"""
)
result = pytester.runpytest("--lf", "--lfnf", "none")
result.stdout.fnmatch_lines(
[
"collected 2 items / 2 deselected / 0 selected",
"run-last-failure: no previously failed tests, deselecting all items.",
"deselected=2",
"* 2 deselected in *",
]
)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_lastfailed_no_failures_behavior_empty_cache(
self, pytester: Pytester
) -> None:
pytester.makepyfile(
"""
def test_1(): pass
def test_2(): assert 0
"""
)
result = pytester.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines(["*1 failed*1 passed*"])
result = pytester.runpytest("--lf", "--cache-clear", "--lfnf", "all")
result.stdout.fnmatch_lines(["*1 failed*1 passed*"])
result = pytester.runpytest("--lf", "--cache-clear", "--lfnf", "none")
result.stdout.fnmatch_lines(["*2 desel*"])
def test_lastfailed_skip_collection(self, pytester: Pytester) -> None:
"""
Test --lf behavior regarding skipping collection of files that are not marked as
failed in the cache (#5172).
"""
pytester.makepyfile(
**{
"pkg1/test_1.py": """
import pytest
@pytest.mark.parametrize('i', range(3))
def test_1(i): pass
""",
"pkg2/test_2.py": """
import pytest
@pytest.mark.parametrize('i', range(5))
def test_1(i):
assert i not in (1, 3)
""",
}
)
# first run: collects 8 items (test_1: 3, test_2: 5)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 8 items", "*2 failed*6 passed*"])
# second run: collects only 5 items from test_2, because all tests from test_1 have passed
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: rerun previous 2 failures (skipped 1 file)",
"*= 2 failed in *",
]
)
# add another file and check if message is correct when skipping more than 1 file
pytester.makepyfile(
**{
"pkg1/test_3.py": """
def test_3(): pass
"""
}
)
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: rerun previous 2 failures (skipped 2 files)",
"*= 2 failed in *",
]
)
def test_lastfailed_skip_collection_with_nesting(self, pytester: Pytester) -> None:
"""Check that file skipping works even when the file with failures is
nested at a different level of the collection tree."""
pytester.makepyfile(
**{
"test_1.py": """
def test_1(): pass
""",
"pkg/__init__.py": "",
"pkg/test_2.py": """
def test_2(): assert False
""",
}
)
# first run
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 2 items", "*1 failed*1 passed*"])
# second run - test_1.py is skipped.
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: rerun previous 1 failure (skipped 1 file)",
"*= 1 failed in *",
]
)
def test_lastfailed_with_known_failures_not_being_selected(
self, pytester: Pytester
) -> None:
pytester.makepyfile(
**{
"pkg1/test_1.py": """def test_1(): assert 0""",
"pkg1/test_2.py": """def test_2(): pass""",
}
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 2 items", "* 1 failed, 1 passed in *"])
Path("pkg1/test_1.py").unlink()
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: 1 known failures not in selected tests",
"* 1 passed in *",
]
)
# Recreate file with known failure.
pytester.makepyfile(**{"pkg1/test_1.py": """def test_1(): assert 0"""})
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: rerun previous 1 failure (skipped 1 file)",
"* 1 failed in *",
]
)
# Remove/rename test: collects the file again.
pytester.makepyfile(**{"pkg1/test_1.py": """def test_renamed(): assert 0"""})
result = pytester.runpytest("--lf", "-rf")
result.stdout.fnmatch_lines(
[
"collected 2 items",
"run-last-failure: 1 known failures not in selected tests",
"pkg1/test_1.py F *",
"pkg1/test_2.py . *",
"FAILED pkg1/test_1.py::test_renamed - assert 0",
"* 1 failed, 1 passed in *",
]
)
result = pytester.runpytest("--lf", "--co")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: rerun previous 1 failure (skipped 1 file)",
"",
"<Dir *>",
" <Dir pkg1>",
" <Module test_1.py>",
" <Function test_renamed>",
]
)
def test_lastfailed_args_with_deselected(self, pytester: Pytester) -> None:
"""Test regression with --lf running into NoMatch error.
This was caused by it not collecting (non-failed) nodes given as
arguments.
"""
pytester.makepyfile(
**{
"pkg1/test_1.py": """
def test_pass(): pass
def test_fail(): assert 0
""",
}
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 2 items", "* 1 failed, 1 passed in *"])
assert result.ret == 1
result = pytester.runpytest("pkg1/test_1.py::test_pass", "--lf", "--co")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*collected 1 item",
"run-last-failure: 1 known failures not in selected tests",
"",
"<Dir *>",
" <Dir pkg1>",
" <Module test_1.py>",
" <Function test_pass>",
],
consecutive=True,
)
result = pytester.runpytest(
"pkg1/test_1.py::test_pass", "pkg1/test_1.py::test_fail", "--lf", "--co"
)
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"collected 2 items / 1 deselected / 1 selected",
"run-last-failure: rerun previous 1 failure",
"",
"<Dir *>",
" <Dir pkg1>",
" <Module test_1.py>",
" <Function test_fail>",
"*= 1/2 tests collected (1 deselected) in *",
],
)
def test_lastfailed_with_class_items(self, pytester: Pytester) -> None:
"""Test regression with --lf deselecting whole classes."""
pytester.makepyfile(
**{
"pkg1/test_1.py": """
class TestFoo:
def test_pass(self): pass
def test_fail(self): assert 0
def test_other(): assert 0
""",
}
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 3 items", "* 2 failed, 1 passed in *"])
assert result.ret == 1
result = pytester.runpytest("--lf", "--co")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"collected 3 items / 1 deselected / 2 selected",
"run-last-failure: rerun previous 2 failures",
"",
"<Dir *>",
" <Dir pkg1>",
" <Module test_1.py>",
" <Class TestFoo>",
" <Function test_fail>",
" <Function test_other>",
"",
"*= 2/3 tests collected (1 deselected) in *",
],
consecutive=True,
)
def test_lastfailed_with_all_filtered(self, pytester: Pytester) -> None:
pytester.makepyfile(
**{
"pkg1/test_1.py": """
def test_fail(): assert 0
def test_pass(): pass
""",
}
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 2 items", "* 1 failed, 1 passed in *"])
assert result.ret == 1
# Remove known failure.
pytester.makepyfile(
**{
"pkg1/test_1.py": """
def test_pass(): pass
""",
}
)
result = pytester.runpytest("--lf", "--co")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: 1 known failures not in selected tests",
"",
"<Dir *>",
" <Dir pkg1>",
" <Module test_1.py>",
" <Function test_pass>",
"",
"*= 1 test collected in*",
],
consecutive=True,
)
assert result.ret == 0
def test_packages(self, pytester: Pytester) -> None:
"""Regression test for #7758.
The particular issue here was that Package nodes were included in the
filtering, being themselves Modules for the __init__.py, even if they
had failed Modules in them.
The tests includes a test in an __init__.py file just to make sure the
fix doesn't somehow regress that, it is not critical for the issue.
"""
pytester.makepyfile(
**{
"__init__.py": "",
"a/__init__.py": "def test_a_init(): assert False",
"a/test_one.py": "def test_1(): assert False",
"b/__init__.py": "",
"b/test_two.py": "def test_2(): assert False",
},
)
pytester.makeini(
"""
[pytest]
python_files = *.py
"""
)
result = pytester.runpytest()
result.assert_outcomes(failed=3)
result = pytester.runpytest("--lf")
result.assert_outcomes(failed=3)
def test_non_python_file_skipped(
self,
pytester: Pytester,
dummy_yaml_custom_test: None,
) -> None:
pytester.makepyfile(
**{
"test_bad.py": """def test_bad(): assert False""",
},
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(["collected 2 items", "* 1 failed, 1 passed in *"])
result = pytester.runpytest("--lf")
result.stdout.fnmatch_lines(
[
"collected 1 item",
"run-last-failure: rerun previous 1 failure (skipped 1 file)",
"* 1 failed in *",
]
)
| TestLastFailed |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_returned.py | {
"start": 840,
"end": 999
} | class ____:
""" __getnewargs__ returns str """
def __getnewargs__(self): # [invalid-getnewargs-returned]
return "(1, 2, 3)"
| SecondBadGetNewArgs |
python | walkccc__LeetCode | solutions/2056. Number of Valid Move Combinations On Chessboard/2056.py | {
"start": 0,
"end": 1639
} | class ____:
def countCombinations(
self,
pieces: list[str],
positions: list[list[int]],
) -> int:
n = len(pieces)
moves = {"rook": [(1, 0), (-1, 0), (0, 1), (0, -1)],
"bishop": [(1, 1), (1, -1), (-1, 1), (-1, -1)],
"queen": [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)]}
hashedBoards = set()
def getHash(board: list[list[int]]) -> Tuple:
return tuple([tuple(pos) for pos in board])
def dfs(
board: list[list[int]],
pieceMoves: list[tuple[int, int]],
activeMask: int,
) -> None:
"""Performs a depth-first search to explore all possible board states."""
if activeMask == 0:
return
hashedBoards.add(getHash(board))
for nextActiveMask in range(1, 1 << n):
if activeMask & nextActiveMask != nextActiveMask:
continue
# Copy the board.
nextBoard = [pos.copy() for pos in board]
# Move the pieces that are active in this turn.
for i in range(n):
if nextActiveMask >> i & 1:
nextBoard[i][0] += pieceMoves[i][0]
nextBoard[i][1] += pieceMoves[i][1]
# No two or more pieces occupy the same square.
if len(set(getHash(nextBoard))) < n:
continue
# Every piece needs to be in the boundary.
if all(1 <= x <= 8 and 1 <= y <= 8 for x, y in nextBoard):
dfs(nextBoard, pieceMoves, nextActiveMask)
for pieceMoves in itertools.product(*(moves[piece] for piece in pieces)):
dfs(positions, pieceMoves, (1 << n) - 1)
return len(hashedBoards)
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_key.py | {
"start": 972,
"end": 7089
} | class ____(IHaveNew):
"""Object representing the structure of an asset key. Takes in a sanitized string, list of
strings, or tuple of strings.
Example usage:
.. code-block:: python
from dagster import AssetKey
AssetKey("asset1")
AssetKey(["asset1"]) # same as the above
AssetKey(["prefix", "asset1"])
AssetKey(["prefix", "subprefix", "asset1"])
Args:
path (Union[str, Sequence[str]]): String, list of strings, or tuple of strings. A list of
strings represent the hierarchical structure of the asset_key.
"""
# Originally AssetKey contained "path" as a list. In order to change to using a tuple, we now have
parts: Sequence[str] # with path available as a property defined below still returning a list.
def __new__(
cls,
path: Union[str, Sequence[str]],
):
if isinstance(path, str):
parts = (path,)
else:
parts = tuple(check.sequence_param(path, "path", of_type=str))
return super().__new__(cls, parts=parts)
@public
@cached_property
def path(self) -> Sequence[str]:
return list(self.parts)
def __str__(self):
return f"AssetKey({self.path})"
def __repr__(self):
return f"AssetKey({self.path})"
def to_string(self) -> str:
"""E.g. '["first_component", "second_component"]'."""
return self.to_db_string()
def to_db_string(self) -> str:
return seven.json.dumps(self.path)
def to_user_string(self) -> str:
"""E.g. "first_component/second_component"."""
return ASSET_KEY_DELIMITER.join(self.path)
def to_escaped_user_string(self) -> str:
r"""Similar to to_user_string, but escapes slashes in the path components with backslashes.
E.g. ["first_component", "second/component"] -> "first_component/second\/component"
"""
return ASSET_KEY_DELIMITER.join([part.replace("/", "\\/") for part in self.path])
def to_python_identifier(self, suffix: Optional[str] = None) -> str:
"""Build a valid Python identifier based on the asset key that can be used for
operation names or I/O manager keys.
"""
path = list(self.path)
if suffix is not None:
path.append(suffix)
return "__".join(path).replace("-", "_").replace(".", "_")
@staticmethod
def from_user_string(asset_key_string: str) -> "AssetKey":
return AssetKey(asset_key_string.split(ASSET_KEY_DELIMITER))
@staticmethod
def from_escaped_user_string(asset_key_string: str) -> "AssetKey":
"""Inverse of to_escaped_user_string."""
return AssetKey(to_assey_key_path(asset_key_string))
@staticmethod
def from_db_string(asset_key_string: Optional[str]) -> Optional["AssetKey"]:
if not asset_key_string:
return None
if asset_key_string[0] == "[":
# is a json string
try:
path = seven.json.loads(asset_key_string)
except seven.JSONDecodeError:
path = parse_asset_key_string(asset_key_string)
else:
path = parse_asset_key_string(asset_key_string)
return AssetKey(path)
@staticmethod
def get_db_prefix(path: Sequence[str]):
check.sequence_param(path, "path", of_type=str)
return seven.json.dumps(path)[:-2] # strip trailing '"]' from json string
@staticmethod
def from_graphql_input(graphql_input_asset_key: Mapping[str, Sequence[str]]) -> "AssetKey":
return AssetKey(graphql_input_asset_key["path"])
def to_graphql_input(self) -> Mapping[str, Sequence[str]]:
return {"path": self.path}
@staticmethod
def from_coercible(arg: "CoercibleToAssetKey") -> "AssetKey":
if isinstance(arg, AssetKey):
return check.inst_param(arg, "arg", AssetKey)
elif isinstance(arg, str):
return AssetKey([arg])
elif isinstance(arg, list):
check.list_param(arg, "arg", of_type=str)
return AssetKey(arg)
elif isinstance(arg, tuple):
check.tuple_param(arg, "arg", of_type=str)
return AssetKey(arg)
else:
check.failed(f"Unexpected type for AssetKey: {type(arg)}")
@staticmethod
def from_coercible_or_definition(
arg: Union["CoercibleToAssetKey", "AssetsDefinition", "SourceAsset"],
) -> "AssetKey":
from dagster._core.definitions.assets.definition.assets_definition import AssetsDefinition
from dagster._core.definitions.source_asset import SourceAsset
if isinstance(arg, AssetsDefinition):
return arg.key
elif isinstance(arg, SourceAsset):
return arg.key
else:
return AssetKey.from_coercible(arg)
def has_prefix(self, prefix: Sequence[str]) -> bool:
return len(self.path) >= len(prefix) and self.path[: len(prefix)] == prefix
def with_prefix(self, prefix: "CoercibleToAssetKeyPrefix") -> "AssetKey":
prefix = key_prefix_from_coercible(prefix)
return AssetKey(list(prefix) + list(self.path))
CoercibleToAssetKey = Union[AssetKey, str, Sequence[str]]
CoercibleToAssetKeyPrefix = Union[str, Sequence[str]]
CoercibleToAssetKeySubset = Union[str, Sequence[str]]
def check_opt_coercible_to_asset_key_prefix_param(
prefix: Optional[CoercibleToAssetKeyPrefix], param_name: str
) -> Optional[Sequence[str]]:
try:
return key_prefix_from_coercible(prefix) if prefix is not None else None
except check.CheckError:
raise check.ParameterCheckError(
f'Param "{param_name}" is not a string or a sequence of strings'
)
def key_prefix_from_coercible(key_prefix: CoercibleToAssetKeyPrefix) -> Sequence[str]:
if isinstance(key_prefix, str):
return [key_prefix]
elif isinstance(key_prefix, Sequence):
return key_prefix
else:
check.failed(f"Unexpected type for key_prefix: {type(key_prefix)}")
@public
@whitelist_for_serdes(old_storage_names={"AssetCheckHandle"})
| AssetKey |
python | Netflix__metaflow | test/core/tests/switch_in_branch.py | {
"start": 63,
"end": 1018
} | class ____(MetaflowTest):
PRIORITY = 2
ONLY_GRAPHS = ["switch_in_branch"]
@steps(0, ["start-split"], required=True)
def step_start(self):
self.condition = "case1"
@steps(0, ["switch-a"], required=True)
def step_a(self):
pass
@steps(0, ["branch-b"], required=True)
def step_b(self):
self.data = "from_b"
@steps(0, ["branch-c"], required=True)
def step_c(self):
self.data = "from_a_c"
@steps(0, ["branch-d"], required=True)
def step_d(self):
self.data = "from_a_d"
@steps(0, ["join"], required=True)
def step_join(self, inputs):
self.final_data = sorted([inp.data for inp in inputs])
@steps(1, ["end"], required=True)
def step_end(self):
assert_equals(self.final_data, ["from_a_c", "from_b"])
def check_results(self, flow, checker):
checker.assert_artifact("join", "final_data", ["from_a_c", "from_b"])
| SwitchInBranchTest |
python | django__django | tests/delete/models.py | {
"start": 6477,
"end": 6565
} | class ____(models.Model):
delete_top = models.ForeignKey(DeleteTop, models.CASCADE)
| B1 |
python | django__django | tests/auth_tests/test_remote_user.py | {
"start": 15051,
"end": 15191
} | class ____(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
| RemoteUserNoCreateBackend |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple2.py | {
"start": 331,
"end": 1166
} | class ____(Generic[_T, Unpack[_Xs]]):
def __init__(self, *shape: Unpack[_Xs]):
self.x: tuple[Unpack[_Xs]] = shape
# This should generate an error
self.y: _Xs = shape
# This should generate two errors
def func1(self) -> Union[Unpack[_Xs]]: ...
# This should generate an error
def func2(self) -> tuple[Unpack[_T]]: ...
# This should generate an error
def func3(self) -> tuple[Unpack[int]]: ...
# This should generate an error
def func4(self) -> tuple[Unpack[_Xs, _Xs]]: ...
# This should generate an error.
a: list[Unpack[_Xs]] = []
# This should generate an error.
b: Unpack[_Xs] = ()
# This should generate an error.
x: list[Unpack[_Xs]] = []
# This should generate an error.
y: Unpack[_Xs] = ()
# This should generate an error.
z: Unpack = ()
| ClassA |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/initsubclass1.py | {
"start": 1279,
"end": 1311
} | class ____(a=3):
a: int
| ClassI |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/alloy_db.py | {
"start": 52623,
"end": 57876
} | class ____(AlloyDBWriteBaseOperator):
"""
Create a Backup in an Alloy DB cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AlloyDBCreateBackupOperator`
:param backup_id: Required. ID of the backup to create.
:param backup_configuration: Required. Backup to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Backup
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_request: Optional. If set, performs request validation, but does not actually
execute the request.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the backups should be saved.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"backup_id", "backup_configuration"} | set(AlloyDBWriteBaseOperator.template_fields)
)
operator_extra_links = (AlloyDBBackupsLink(),)
def __init__(
self,
backup_id: str,
backup_configuration: alloydb_v1.Backup | dict,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.backup_id = backup_id
self.backup_configuration = backup_configuration
def _get_backup(self) -> proto.Message | None:
self.log.info("Checking if the backup %s exists already...", self.backup_id)
try:
backup = self.hook.get_backup(
backup_id=self.backup_id,
location=self.location,
project_id=self.project_id,
)
except NotFound:
self.log.info("The backup %s does not exist yet.", self.backup_id)
except Exception as ex:
raise AirflowException(ex) from ex
else:
self.log.info("AlloyDB backup %s already exists.", self.backup_id)
result = alloydb_v1.Backup.to_dict(backup)
return result
return None
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
}
def execute(self, context: Context) -> dict | None:
AlloyDBBackupsLink.persist(context=context)
if backup := self._get_backup():
return backup
if self.validate_request:
self.log.info("Validating a Create AlloyDB backup request.")
else:
self.log.info("Creating an AlloyDB backup.")
try:
operation = self.hook.create_backup(
backup_id=self.backup_id,
backup=self.backup_configuration,
location=self.location,
project_id=self.project_id,
request_id=self.request_id,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except Exception as ex:
raise AirflowException(ex)
else:
operation_result = self.get_operation_result(operation)
result = alloydb_v1.Backup.to_dict(operation_result) if operation_result else None
return result
| AlloyDBCreateBackupOperator |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/session.py | {
"start": 186465,
"end": 198921
} | class ____(_SessionClassMethods, Generic[_S]):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# an Engine, which the Session will use for connection
# resources
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/")
Session = sessionmaker(engine)
with Session() as session:
session.add(some_object)
session.add(some_other_object)
session.commit()
Context manager use is optional; otherwise, the returned
:class:`_orm.Session` object may be closed explicitly via the
:meth:`_orm.Session.close` method. Using a
``try:/finally:`` block is optional, however will ensure that the close
takes place even if there are database errors::
session = Session()
try:
session.add(some_object)
session.add(some_other_object)
session.commit()
finally:
session.close()
:class:`.sessionmaker` acts as a factory for :class:`_orm.Session`
objects in the same way as an :class:`_engine.Engine` acts as a factory
for :class:`_engine.Connection` objects. In this way it also includes
a :meth:`_orm.sessionmaker.begin` method, that provides a context
manager which both begins and commits a transaction, as well as closes
out the :class:`_orm.Session` when complete, rolling back the transaction
if any errors occur::
Session = sessionmaker(engine)
with Session.begin() as session:
session.add(some_object)
session.add(some_other_object)
# commits transaction, closes session
.. versionadded:: 1.4
When calling upon :class:`_orm.sessionmaker` to construct a
:class:`_orm.Session`, keyword arguments may also be passed to the
method; these arguments will override that of the globally configured
parameters. Below we use a :class:`_orm.sessionmaker` bound to a certain
:class:`_engine.Engine` to produce a :class:`_orm.Session` that is instead
bound to a specific :class:`_engine.Connection` procured from that engine::
Session = sessionmaker(engine)
# bind an individual session to a connection
with engine.connect() as connection:
with Session(bind=connection) as session:
... # work with session
The class also includes a method :meth:`_orm.sessionmaker.configure`, which
can be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated. This
is usually used to associate one or more :class:`_engine.Engine` objects
with an existing
:class:`.sessionmaker` factory before it is first used::
# application starts, sessionmaker does not have
# an engine bound yet
Session = sessionmaker()
# ... later, when an engine URL is read from a configuration
# file or other events allow the engine to be created
engine = create_engine("sqlite:///foo.db")
Session.configure(bind=engine)
sess = Session()
# work with session
.. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
class_: Type[_S]
@overload
def __init__(
self,
bind: Optional[_SessionBind] = ...,
*,
class_: Type[_S],
autoflush: bool = ...,
expire_on_commit: bool = ...,
info: Optional[_InfoType] = ...,
**kw: Any,
): ...
@overload
def __init__(
self: "sessionmaker[Session]",
bind: Optional[_SessionBind] = ...,
*,
autoflush: bool = ...,
expire_on_commit: bool = ...,
info: Optional[_InfoType] = ...,
**kw: Any,
): ...
def __init__(
self,
bind: Optional[_SessionBind] = None,
*,
class_: Type[_S] = Session, # type: ignore
autoflush: bool = True,
expire_on_commit: bool = True,
info: Optional[_InfoType] = None,
**kw: Any,
):
r"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`_engine.Engine` or other :class:`.Connectable`
with
which newly created :class:`.Session` objects will be associated.
:param class\_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
.. seealso::
:ref:`session_flushing` - additional background on autoflush
:param expire_on_commit=True: the
:paramref:`_orm.Session.expire_on_commit` setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
:param \**kw: all other keyword arguments are passed to the
constructor of newly created :class:`.Session` objects.
"""
kw["bind"] = bind
kw["autoflush"] = autoflush
kw["expire_on_commit"] = expire_on_commit
if info is not None:
kw["info"] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def begin(self) -> contextlib.AbstractContextManager[_S]:
"""Produce a context manager that both provides a new
:class:`_orm.Session` as well as a transaction that commits.
e.g.::
Session = sessionmaker(some_engine)
with Session.begin() as session:
session.add(some_object)
# commits transaction, closes session
.. versionadded:: 1.4
"""
session = self()
return session._maker_context_manager()
def __call__(self, **local_kw: Any) -> _S:
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker(some_engine)
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == "info" and "info" in local_kw:
d = v.copy()
d.update(local_kw["info"])
local_kw["info"] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw: Any) -> None:
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine("sqlite://"))
"""
self.kw.update(new_kw)
def __repr__(self) -> str:
return "%s(class_=%r, %s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items()),
)
def close_all_sessions() -> None:
"""Close all sessions in memory.
This function consults a global registry of all :class:`.Session` objects
and calls :meth:`.Session.close` on them, which resets them to a clean
state.
This function is not for general use but may be useful for test suites
within the teardown scheme.
"""
for sess in _sessions.values():
sess.close()
def make_transient(instance: object) -> None:
"""Alter the state of the given instance so that it is :term:`transient`.
.. note::
:func:`.make_transient` is a special-case function for
advanced use cases only.
The given mapped instance is assumed to be in the :term:`persistent` or
:term:`detached` state. The function will remove its association with any
:class:`.Session` as well as its :attr:`.InstanceState.identity`. The
effect is that the object will behave as though it were newly constructed,
except retaining any attribute / collection values that were loaded at the
time of the call. The :attr:`.InstanceState.deleted` flag is also reset
if this object had been deleted as a result of using
:meth:`.Session.delete`.
.. warning::
:func:`.make_transient` does **not** "unexpire" or otherwise eagerly
load ORM-mapped attributes that are not currently loaded at the time
the function is called. This includes attributes which:
* were expired via :meth:`.Session.expire`
* were expired as the natural effect of committing a session
transaction, e.g. :meth:`.Session.commit`
* are normally :term:`lazy loaded` but are not currently loaded
* are "deferred" (see :ref:`orm_queryguide_column_deferral`) and are
not yet loaded
* were not present in the query which loaded this object, such as that
which is common in joined table inheritance and other scenarios.
After :func:`.make_transient` is called, unloaded attributes such
as those above will normally resolve to the value ``None`` when
accessed, or an empty collection for a collection-oriented attribute.
As the object is transient and un-associated with any database
identity, it will no longer retrieve these values.
.. seealso::
:func:`.make_transient_to_detached`
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_states([state])
# remove expired state
state.expired_attributes.clear()
# remove deferred callables
if state.callables:
del state.callables
if state.key:
del state.key
if state._deleted:
del state._deleted
def make_transient_to_detached(instance: object) -> None:
"""Make the given transient instance :term:`detached`.
.. note::
:func:`.make_transient_to_detached` is a special-case function for
advanced use cases only.
All attribute history on the given instance
will be reset as though the instance were freshly loaded
from a query. Missing attributes will be marked as expired.
The primary key attributes of the object, which are required, will be made
into the "key" of the instance.
The object can then be added to a session, or merged
possibly with the load=False flag, at which point it will look
as if it were loaded that way, without emitting SQL.
This is a special use case function that differs from a normal
call to :meth:`.Session.merge` in that a given persistent state
can be manufactured without any SQL calls.
.. seealso::
:func:`.make_transient`
:meth:`.Session.enable_relationship_loading`
"""
state = attributes.instance_state(instance)
if state.session_id or state.key:
raise sa_exc.InvalidRequestError("Given object must be transient")
state.key = state.mapper._identity_key_from_state(state)
if state._deleted:
del state._deleted
state._commit_all(state.dict)
state._expire_attributes(state.dict, state.unloaded)
def object_session(instance: object) -> Optional[Session]:
"""Return the :class:`.Session` to which the given instance belongs.
This is essentially the same as the :attr:`.InstanceState.session`
accessor. See that attribute for details.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE as err:
raise exc.UnmappedInstanceError(instance) from err
else:
return _state_session(state)
_new_sessionid = util.counter()
| sessionmaker |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 22369,
"end": 22855
} | class ____(StructModel):
_element_type = NotImplemented
def __init__(self, dmm, fe_type):
members = [
('real', fe_type.underlying_float),
('imag', fe_type.underlying_float),
]
super(ComplexModel, self).__init__(dmm, fe_type, members)
@register_default(types.LiteralList)
@register_default(types.LiteralStrKeyDict)
@register_default(types.Tuple)
@register_default(types.NamedTuple)
@register_default(types.StarArgTuple)
| ComplexModel |
python | simonw__datasette | datasette/database.py | {
"start": 25191,
"end": 25416
} | class ____(Exception):
def __init__(self, e, sql, params):
self.e = e
self.sql = sql
self.params = params
def __str__(self):
return "QueryInterrupted: {}".format(self.e)
| QueryInterrupted |
python | PyCQA__bandit | bandit/core/config.py | {
"start": 429,
"end": 9840
} | class ____:
def __init__(self, config_file=None):
"""Attempt to initialize a config dictionary from a yaml file.
Error out if loading the yaml file fails for any reason.
:param config_file: The Bandit yaml config file
:raises bandit.utils.ConfigError: If the config is invalid or
unreadable.
"""
self.config_file = config_file
self._config = {}
if config_file:
try:
f = open(config_file, "rb")
except OSError:
raise utils.ConfigError(
"Could not read config file.", config_file
)
if config_file.endswith(".toml"):
if tomllib is None:
raise utils.ConfigError(
"toml parser not available, reinstall with toml extra",
config_file,
)
try:
with f:
self._config = (
tomllib.load(f).get("tool", {}).get("bandit", {})
)
except tomllib.TOMLDecodeError as err:
LOG.error(err)
raise utils.ConfigError("Error parsing file.", config_file)
else:
try:
with f:
self._config = yaml.safe_load(f)
except yaml.YAMLError as err:
LOG.error(err)
raise utils.ConfigError("Error parsing file.", config_file)
self.validate(config_file)
# valid config must be a dict
if not isinstance(self._config, dict):
raise utils.ConfigError("Error parsing file.", config_file)
self.convert_legacy_config()
else:
# use sane defaults
self._config["plugin_name_pattern"] = "*.py"
self._config["include"] = ["*.py", "*.pyw"]
self._init_settings()
def get_option(self, option_string):
"""Returns the option from the config specified by the option_string.
'.' can be used to denote levels, for example to retrieve the options
from the 'a' profile you can use 'profiles.a'
:param option_string: The string specifying the option to retrieve
:return: The object specified by the option_string, or None if it can't
be found.
"""
option_levels = option_string.split(".")
cur_item = self._config
for level in option_levels:
if cur_item and (level in cur_item):
cur_item = cur_item[level]
else:
return None
return cur_item
def get_setting(self, setting_name):
if setting_name in self._settings:
return self._settings[setting_name]
else:
return None
@property
def config(self):
"""Property to return the config dictionary
:return: Config dictionary
"""
return self._config
def _init_settings(self):
"""This function calls a set of other functions (one per setting)
This function calls a set of other functions (one per setting) to build
out the _settings dictionary. Each other function will set values from
the config (if set), otherwise use defaults (from constants if
possible).
:return: -
"""
self._settings = {}
self._init_plugin_name_pattern()
def _init_plugin_name_pattern(self):
"""Sets settings['plugin_name_pattern'] from default or config file."""
plugin_name_pattern = constants.plugin_name_pattern
if self.get_option("plugin_name_pattern"):
plugin_name_pattern = self.get_option("plugin_name_pattern")
self._settings["plugin_name_pattern"] = plugin_name_pattern
def convert_legacy_config(self):
updated_profiles = self.convert_names_to_ids()
bad_calls, bad_imports = self.convert_legacy_blacklist_data()
if updated_profiles:
self.convert_legacy_blacklist_tests(
updated_profiles, bad_calls, bad_imports
)
self._config["profiles"] = updated_profiles
def convert_names_to_ids(self):
"""Convert test names to IDs, unknown names are left unchanged."""
extman = extension_loader.MANAGER
updated_profiles = {}
for name, profile in (self.get_option("profiles") or {}).items():
# NOTE(tkelsey): can't use default of get() because value is
# sometimes explicitly 'None', for example when the list is given
# in yaml but not populated with any values.
include = {
(extman.get_test_id(i) or i)
for i in (profile.get("include") or [])
}
exclude = {
(extman.get_test_id(i) or i)
for i in (profile.get("exclude") or [])
}
updated_profiles[name] = {"include": include, "exclude": exclude}
return updated_profiles
def convert_legacy_blacklist_data(self):
"""Detect legacy blacklist data and convert it to new format."""
bad_calls_list = []
bad_imports_list = []
bad_calls = self.get_option("blacklist_calls") or {}
bad_calls = bad_calls.get("bad_name_sets", {})
for item in bad_calls:
for key, val in item.items():
val["name"] = key
val["message"] = val["message"].replace("{func}", "{name}")
bad_calls_list.append(val)
bad_imports = self.get_option("blacklist_imports") or {}
bad_imports = bad_imports.get("bad_import_sets", {})
for item in bad_imports:
for key, val in item.items():
val["name"] = key
val["message"] = val["message"].replace("{module}", "{name}")
val["qualnames"] = val["imports"]
del val["imports"]
bad_imports_list.append(val)
if bad_imports_list or bad_calls_list:
LOG.warning(
"Legacy blacklist data found in config, overriding "
"data plugins"
)
return bad_calls_list, bad_imports_list
@staticmethod
def convert_legacy_blacklist_tests(profiles, bad_imports, bad_calls):
"""Detect old blacklist tests, convert to use new builtin."""
def _clean_set(name, data):
if name in data:
data.remove(name)
data.add("B001")
for name, profile in profiles.items():
blacklist = {}
include = profile["include"]
exclude = profile["exclude"]
name = "blacklist_calls"
if name in include and name not in exclude:
blacklist.setdefault("Call", []).extend(bad_calls)
_clean_set(name, include)
_clean_set(name, exclude)
name = "blacklist_imports"
if name in include and name not in exclude:
blacklist.setdefault("Import", []).extend(bad_imports)
blacklist.setdefault("ImportFrom", []).extend(bad_imports)
blacklist.setdefault("Call", []).extend(bad_imports)
_clean_set(name, include)
_clean_set(name, exclude)
_clean_set("blacklist_import_func", include)
_clean_set("blacklist_import_func", exclude)
# This can happen with a legacy config that includes
# blacklist_calls but exclude blacklist_imports for example
if "B001" in include and "B001" in exclude:
exclude.remove("B001")
profile["blacklist"] = blacklist
def validate(self, path):
"""Validate the config data."""
legacy = False
message = (
"Config file has an include or exclude reference "
"to legacy test '{0}' but no configuration data for "
"it. Configuration data is required for this test. "
"Please consider switching to the new config file "
"format, the tool 'bandit-config-generator' can help "
"you with this."
)
def _test(key, block, exclude, include):
if key in exclude or key in include:
if self._config.get(block) is None:
raise utils.ConfigError(message.format(key), path)
if "profiles" in self._config:
legacy = True
for profile in self._config["profiles"].values():
inc = profile.get("include") or set()
exc = profile.get("exclude") or set()
_test("blacklist_imports", "blacklist_imports", inc, exc)
_test("blacklist_import_func", "blacklist_imports", inc, exc)
_test("blacklist_calls", "blacklist_calls", inc, exc)
# show deprecation message
if legacy:
LOG.warning(
"Config file '%s' contains deprecated legacy config "
"data. Please consider upgrading to the new config "
"format. The tool 'bandit-config-generator' can help "
"you with this. Support for legacy configs will be "
"removed in a future bandit version.",
path,
)
| BanditConfig |
python | walkccc__LeetCode | solutions/1685. Sum of Absolute Differences in a Sorted Array/1685.py | {
"start": 0,
"end": 306
} | class ____:
def getSumAbsoluteDifferences(self, nums: list[int]) -> list[int]:
prefix = list(itertools.accumulate(nums))
suffix = list(itertools.accumulate(nums[::-1]))[::-1]
return [num * (i + 1) - prefix[i] + suffix[i] - num * (len(nums) - i)
for i, num in enumerate(nums)]
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/test_mask_user_code_errors.py | {
"start": 888,
"end": 9780
} | class ____:
pass
@pytest.fixture(scope="function")
def enable_masking_user_code_errors() -> Any:
with environ({"DAGSTER_REDACT_USER_CODE_ERRORS": "1"}):
yield
def test_masking_basic(enable_masking_user_code_errors):
try:
with user_code_error_boundary(
error_cls=DagsterUserCodeExecutionError,
msg_fn=lambda: "hunter2",
):
def hunter2():
raise UserError()
hunter2()
except Exception:
exc_info = sys.exc_info()
err_info = serializable_error_info_from_exc_info(exc_info)
assert "hunter2" not in str(err_info) # pyright: ignore[reportPossiblyUnboundVariable]
def test_masking_nested_user_code_err_boundaries(enable_masking_user_code_errors):
try:
with user_code_error_boundary(
error_cls=DagsterUserCodeExecutionError,
msg_fn=lambda: "hunter2 as well",
):
with user_code_error_boundary(
error_cls=DagsterUserCodeExecutionError,
msg_fn=lambda: "hunter2",
):
def hunter2():
raise UserError()
hunter2()
except Exception:
exc_info = sys.exc_info()
err_info = serializable_error_info_from_exc_info(exc_info)
assert "hunter2" not in str(err_info) # pyright: ignore[reportPossiblyUnboundVariable]
def test_masking_nested_user_code_err_boundaries_reraise(enable_masking_user_code_errors):
try:
try:
with user_code_error_boundary(
error_cls=DagsterUserCodeExecutionError,
msg_fn=lambda: "hunter2",
):
def hunter2():
raise UserError()
hunter2()
except Exception as e:
# Mimics behavior of resource teardown, which runs in a
# user_code_error_boundary after the user code raises an error
with user_code_error_boundary(
error_cls=DagsterUserCodeExecutionError,
msg_fn=lambda: "teardown after we raised hunter2 error",
):
# do teardown stuff
raise e
except Exception:
exc_info = sys.exc_info()
err_info = serializable_error_info_from_exc_info(exc_info)
assert "hunter2" not in str(err_info) # pyright: ignore[reportPossiblyUnboundVariable]
ERROR_ID_REGEX = r"[Ee]rror ID ([a-z0-9\-]+)"
@pytest.mark.parametrize(
"exc_name, expect_exc_name_in_error, build_exc",
[
("UserError", False, lambda: UserError()),
("TypeError", False, lambda: TypeError("hunter2")),
("KeyboardInterrupt", True, lambda: KeyboardInterrupt()),
("DagsterExecutionInterruptedError", True, lambda: dg.DagsterExecutionInterruptedError()),
("Failure", True, lambda: dg.Failure("asdf")),
],
)
def test_masking_op_execution(
enable_masking_user_code_errors,
exc_name: str,
expect_exc_name_in_error: bool,
build_exc: Callable[[], BaseException],
caplog,
) -> Any:
@dg.op
def throws_user_error(_):
def hunter2():
raise build_exc()
hunter2()
@dg.job
def job_def():
throws_user_error()
with caplog.at_level(logging.ERROR):
result = job_def.execute_in_process(raise_on_error=False)
assert not result.success
# Ensure error message and contents of user code don't leak (e.g. hunter2 text or function name)
assert not any("hunter2" in str(event).lower() for event in result.all_events), [
str(event) for event in result.all_events if "hunter2" in str(event)
]
step_error = next(event for event in result.all_events if event.is_step_failure)
# Certain exceptions will not be fully redacted, just the stack trace
# For example, system errors and interrupts may contain useful information
# or information that the framework itself relies on
if expect_exc_name_in_error:
assert (
step_error.step_failure_data.error
and step_error.step_failure_data.error.cls_name == exc_name
)
else:
assert (
step_error.step_failure_data.error
and step_error.step_failure_data.error.cls_name == "DagsterRedactedUserCodeError"
)
# Ensures we can match the error ID in the Dagster+ UI surfaced message to the rich error message
# in logs which includes the redacted error message
assert "Search in logs for this error ID for more details" in str(step_error)
error_id = re.search(ERROR_ID_REGEX, str(step_error)).group(1) # type: ignore
assert f"Error occurred during user code execution, error ID {error_id}" in caplog.text
assert "hunter2" in caplog.text
def test_masking_sensor_execution(instance, enable_masking_user_code_errors, capsys) -> None:
from dagster._api.snapshot_sensor import sync_get_external_sensor_execution_data_ephemeral_grpc
with get_bar_repo_handle(instance) as repository_handle:
try:
sync_get_external_sensor_execution_data_ephemeral_grpc(
instance, repository_handle, "sensor_error", None, None, None, None
)
assert False, "Should have thrown an DagsterUserCodeProcessError!"
except DagsterUserCodeProcessError as e:
assert "womp womp" not in str(e)
assert "Search in logs for this error ID for more details" in str(e)
error_id = re.search(ERROR_ID_REGEX, str(e)).group(1) # type: ignore
captured_stderr = capsys.readouterr().err
assert (
f"Error occurred during user code execution, error ID {error_id}" in captured_stderr
)
# assert "Search in logs for this error ID for more details" not in captured_stderr TODO: fix this
def test_masking_schedule_execution(instance, enable_masking_user_code_errors, capsys) -> None:
from dagster._api.snapshot_schedule import (
sync_get_external_schedule_execution_data_ephemeral_grpc,
)
with get_bar_repo_handle(instance) as repository_handle:
try:
sync_get_external_schedule_execution_data_ephemeral_grpc(
instance,
repository_handle,
"schedule_error",
TimestampWithTimezone(time.time(), "UTC"),
None,
None,
)
assert False, "Should have thrown an DagsterUserCodeProcessError!"
except DagsterUserCodeProcessError as e:
assert "womp womp" not in str(e)
assert "Search in logs for this error ID for more details" in str(e)
error_id = re.search(ERROR_ID_REGEX, str(e)).group(1) # type: ignore
captured_stderr = capsys.readouterr().err
assert (
f"Error occurred during user code execution, error ID {error_id}" in captured_stderr
)
# assert "Search in logs for this error ID for more details" not in captured_stderr TODO: fix this
def test_config_mapping_error(enable_masking_user_code_errors, caplog) -> None:
class DoSomethingConfig(dg.Config):
config_param: str
@dg.op
def do_something(config: DoSomethingConfig) -> str:
return config.config_param
class ConfigMappingConfig(dg.Config):
simplified_param: str
# New, fancy config mapping takes in a Pythonic config object and returns a RunConfig
@dg.config_mapping
def simplified_config(config_in: ConfigMappingConfig) -> dg.RunConfig:
if config_in.simplified_param != "foo":
raise Exception("my password is hunter2")
return dg.RunConfig(
ops={"do_something": DoSomethingConfig(config_param=config_in.simplified_param)}
)
@dg.job(config=simplified_config)
def do_it_all_with_simplified_config() -> None:
do_something()
result = do_it_all_with_simplified_config.execute_in_process(
raise_on_error=False, run_config={"simplified_param": "foo"}
)
assert result.success
err_info = None
try:
result = do_it_all_with_simplified_config.execute_in_process(
raise_on_error=False, run_config={"simplified_param": "bar"}
)
except Exception:
# serialize, as in get_external_execution_plan_snapshot (which wraps config mapping execution)
err_info = serializable_error_info_from_exc_info(sys.exc_info())
assert err_info
assert err_info.cls_name == "DagsterRedactedUserCodeError"
assert "hunter2" not in str(err_info.message)
assert "Search in logs for this error ID for more details" in str(err_info.message)
assert any(
"hunter2"
in str(SerializableErrorInfo.from_traceback(traceback.TracebackException(*record.exc_info)))
for record in caplog.records
if record.exc_info
)
| hunter2 |
python | huggingface__transformers | tests/cli/test_serve.py | {
"start": 13939,
"end": 20723
} | class ____(unittest.TestCase):
def test_processor_inputs_from_inbound_messages_llm(self):
modality = Modality.LLM
messages = expected_outputs = [
{"role": "user", "content": "How are you doing?"},
{"role": "assistant", "content": "I'm doing great, thank you for asking! How can I assist you today?"},
{"role": "user", "content": "Can you help me write tests?"},
]
outputs = Serve.get_processor_inputs_from_inbound_messages(messages, modality)
self.assertListEqual(expected_outputs, outputs)
messages_with_type = [
{"role": "user", "content": [{"type": "text", "text": "How are you doing?"}]},
{
"role": "assistant",
"content": [
{"type": "text", "text": "I'm doing great, thank you for asking! How can I assist you today?"}
],
},
{"role": "user", "content": [{"type": "text", "text": "Can you help me write tests?"}]},
]
outputs = Serve.get_processor_inputs_from_inbound_messages(messages_with_type, modality)
self.assertListEqual(expected_outputs, outputs)
messages_multiple_text = [
{
"role": "user",
"content": [
{"type": "text", "text": "How are you doing?"},
{"type": "text", "text": "I'm doing great, thank you for asking! How can I assist you today?"},
],
},
]
expected_outputs_multiple_text = [
{
"role": "user",
"content": "How are you doing? I'm doing great, thank you for asking! How can I assist you today?",
},
]
outputs = Serve.get_processor_inputs_from_inbound_messages(messages_multiple_text, modality)
self.assertListEqual(expected_outputs_multiple_text, outputs)
def test_processor_inputs_from_inbound_messages_vlm_text_only(self):
modality = Modality.VLM
messages = [
{"role": "user", "content": "How are you doing?"},
{"role": "assistant", "content": "I'm doing great, thank you for asking! How can I assist you today?"},
{"role": "user", "content": "Can you help me write tests?"},
]
expected_outputs = [
{"role": "user", "content": [{"type": "text", "text": "How are you doing?"}]},
{
"role": "assistant",
"content": [
{"type": "text", "text": "I'm doing great, thank you for asking! How can I assist you today?"}
],
},
{"role": "user", "content": [{"type": "text", "text": "Can you help me write tests?"}]},
]
outputs = Serve.get_processor_inputs_from_inbound_messages(messages, modality)
self.assertListEqual(expected_outputs, outputs)
def test_processor_inputs_from_inbound_messages_vlm_text_and_image_in_base_64(self):
modality = Modality.VLM
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "How many pixels are in the image?"},
{
"type": "image_url",
"image_url": {
"url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAASABIAAD/4QBARXhpZgAATU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAAqACAAQAAAABAAAABaADAAQAAAABAAAABQAAAAD/7QA4UGhvdG9zaG9wIDMuMAA4QklNBAQAAAAAAAA4QklNBCUAAAAAABDUHYzZjwCyBOmACZjs+EJ+/8AAEQgABQAFAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/bAEMAAQEBAQEBAgEBAgICAgICAwICAgIDBAMDAwMDBAUEBAQEBAQFBQUFBQUFBQYGBgYGBgcHBwcHCAgICAgICAgICP/bAEMBAQEBAgICAwICAwgFBAUICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICP/dAAQAAf/aAAwDAQACEQMRAD8A/v4ooooA/9k="
},
},
],
},
{
"role": "assistant",
"content": "The number of pixels in the image cannot be determined from the provided information.",
},
{"role": "user", "content": "Alright"},
]
expected_outputs = [
{
"role": "user",
"content": [
{"type": "text", "text": "How many pixels are in the image?"},
{"type": "image", "url": "/var/folders/4v/64sxdhsd3gz3r8vhhnyc0mqw0000gn/T/tmp50oyghk6.png"},
],
},
{
"role": "assistant",
"content": [
{
"type": "text",
"text": "The number of pixels in the image cannot be determined from the provided information.",
}
],
},
{"role": "user", "content": [{"type": "text", "text": "Alright"}]},
]
outputs = Serve.get_processor_inputs_from_inbound_messages(messages, modality)
for expected_output, output in zip(expected_outputs, outputs):
expected_output_content = expected_output["content"]
output_content = output["content"]
self.assertEqual(type(expected_output_content), type(output_content))
if isinstance(expected_output_content, list):
for expected_output_content_item, output_content_item in zip(expected_output_content, output_content):
self.assertIn("type", expected_output_content_item)
self.assertIn("type", output_content_item)
self.assertTrue(expected_output_content_item["type"] == output_content_item["type"])
if expected_output_content_item["type"] == "text":
self.assertEqual(expected_output_content_item["text"], output_content_item["text"])
if expected_output_content_item["type"] == "image":
self.assertTrue(os.path.exists(output_content_item["url"]))
else:
raise ValueError("VLMs should only receive content as lists.")
@slow # server startup time is slow on our push CI
@require_openai
| ServeCompletionsGenerateMockTests |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 20639,
"end": 20810
} | class ____(str, Enum):
NAV = "nav"
DAG = "dag"
DAG_RUN = "dag_run"
TASK = "task"
TASK_INSTANCE = "task_instance"
DASHBOARD = "dashboard"
| Destination1 |
python | justquick__django-activity-stream | actstream/drf/views.py | {
"start": 1044,
"end": 2038
} | class ____(viewsets.ReadOnlyModelViewSet):
def get_permissions(self):
if isinstance(DRF_SETTINGS['PERMISSIONS'], (tuple, list)):
return [import_obj(permission)() for permission in DRF_SETTINGS['PERMISSIONS']]
if isinstance(DRF_SETTINGS['PERMISSIONS'], dict):
lookup = {key.lower(): value for key, value in DRF_SETTINGS['PERMISSIONS'].items()}
serializer = self.get_serializer()
if hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
model_label = label(serializer.Meta.model).lower()
if model_label in lookup:
permissions = lookup[model_label]
if isinstance(permissions, str):
permissions = [import_obj(permissions)()]
else:
permissions = [import_obj(permission)() for permission in permissions]
return permissions
return []
| DefaultModelViewSet |
python | django__django | django/core/exceptions.py | {
"start": 1361,
"end": 1536
} | class ____(SuspiciousOperation):
"""
The number of fields in a GET or POST request exceeded
settings.DATA_UPLOAD_MAX_NUMBER_FILES.
"""
pass
| TooManyFilesSent |
python | pytorch__pytorch | test/distributed/fsdp/test_wrap.py | {
"start": 1591,
"end": 1890
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin = nn.Linear(10, 10, bias=False)
self.bn1 = nn.BatchNorm1d(10)
self.bn2 = nn.BatchNorm2d(10)
self.bn3 = nn.BatchNorm3d(10)
self.sync_bn = nn.SyncBatchNorm(10)
| BatchNormNet |
python | doocs__leetcode | solution/0500-0599/0594.Longest Harmonious Subsequence/Solution.py | {
"start": 0,
"end": 177
} | class ____:
def findLHS(self, nums: List[int]) -> int:
cnt = Counter(nums)
return max((c + cnt[x + 1] for x, c in cnt.items() if cnt[x + 1]), default=0)
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/owners.py | {
"start": 443,
"end": 1035
} | class ____(graphene.Union):
class Meta:
types = (
GrapheneUserDefinitionOwner,
GrapheneTeamDefinitionOwner,
)
name = "DefinitionOwner"
def definition_owner_from_owner_str(
owner_str: str,
) -> Union[GrapheneUserDefinitionOwner, GrapheneTeamDefinitionOwner]:
if is_valid_email(owner_str):
return GrapheneUserDefinitionOwner(email=owner_str)
else:
invariant(owner_str.startswith("team:"))
return GrapheneTeamDefinitionOwner(team=owner_str[5:])
# legacy classes for backcompatibility
| GrapheneDefinitionOwner |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_returned.py | {
"start": 424,
"end": 524
} | class ____(type):
def __len__(cls):
return 1
@six.add_metaclass(LenMetaclass)
| LenMetaclass |
python | pytorch__pytorch | test/test_cpp_extensions_aot.py | {
"start": 7034,
"end": 10719
} | class ____(common.TestCase):
"""Pybind tests for ahead-of-time cpp extensions
These tests verify the types returned from cpp code using custom type
casters. By exercising pybind, we also verify that the type casters work
properly.
For each type caster in `torch/csrc/utils/pybind.h` we create a pybind
function that takes no arguments and returns the type_caster type. The
second argument to `PYBIND11_TYPE_CASTER` should be the type we expect to
receive in python, in these tests we verify this at run-time.
"""
@staticmethod
def expected_return_type(func):
"""
Our Pybind functions have a signature of the form `() -> return_type`.
"""
# Imports needed for the `eval` below.
from typing import List, Tuple # noqa: F401, UP035
return eval(re.search("-> (.*)\n", func.__doc__).group(1))
def check(self, func):
val = func()
expected = self.expected_return_type(func)
origin = get_origin(expected)
if origin is list:
self.check_list(val, expected)
elif origin is tuple:
self.check_tuple(val, expected)
else:
self.assertIsInstance(val, expected)
def check_list(self, vals, expected):
self.assertIsInstance(vals, list)
list_type = get_args(expected)[0]
for val in vals:
self.assertIsInstance(val, list_type)
def check_tuple(self, vals, expected):
self.assertIsInstance(vals, tuple)
tuple_types = get_args(expected)
if tuple_types[1] is ...:
tuple_types = repeat(tuple_types[0])
for val, tuple_type in zip(vals, tuple_types):
self.assertIsInstance(val, tuple_type)
def check_union(self, funcs):
"""Special handling for Union type casters.
A single cpp type can sometimes be cast to different types in python.
In these cases we expect to get exactly one function per python type.
"""
# Verify that all functions have the same return type.
union_type = {self.expected_return_type(f) for f in funcs}
assert len(union_type) == 1
union_type = union_type.pop()
self.assertIs(Union, get_origin(union_type))
# SymInt is inconvenient to test, so don't require it
expected_types = set(get_args(union_type)) - {torch.SymInt}
for func in funcs:
val = func()
for tp in expected_types:
if isinstance(val, tp):
expected_types.remove(tp)
break
else:
raise AssertionError(f"{val} is not an instance of {expected_types}")
self.assertFalse(
expected_types, f"Missing functions for types {expected_types}"
)
def test_pybind_return_types(self):
functions = [
cpp_extension.get_complex,
cpp_extension.get_device,
cpp_extension.get_generator,
cpp_extension.get_intarrayref,
cpp_extension.get_memory_format,
cpp_extension.get_storage,
cpp_extension.get_symfloat,
cpp_extension.get_symintarrayref,
cpp_extension.get_tensor,
]
union_functions = [
[cpp_extension.get_symint],
]
for func in functions:
with self.subTest(msg=f"check {func.__name__}"):
self.check(func)
for funcs in union_functions:
with self.subTest(msg=f"check {[f.__name__ for f in funcs]}"):
self.check_union(funcs)
@torch.testing._internal.common_utils.markDynamoStrictTest
| TestPybindTypeCasters |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 10392,
"end": 11817
} | class ____(object):
def __init__(self, path: str) -> None:
self.path = path
def evaluate(self, base_cls: type) -> type:
module, type_name = _evaluate_path(self.path, base_cls)
return _import(module, type_name)
def _evaluate_path(relative_path: str, base_cls: type) -> Tuple[str, str]:
base_module = base_cls.__module__
modules = _get_modules(relative_path, base_module)
type_name = modules.pop()
module = ".".join(modules)
if not module:
module = base_module
return module, type_name
def _get_modules(relative_path: str, base_module: str) -> List[str]:
canonical_path = relative_path.lstrip(".")
canonical_modules = canonical_path.split(".")
if not relative_path.startswith("."):
return canonical_modules
parents_amount = len(relative_path) - len(canonical_path)
parent_modules = base_module.split(".")
parents_amount = max(0, parents_amount - 1)
if parents_amount > len(parent_modules):
raise ValueError("Can't evaluate path '{}'".format(relative_path))
return parent_modules[: parents_amount * -1] + canonical_modules
def _import(module_name: str, type_name: str) -> Any:
module = __import__(module_name, fromlist=[type_name])
try:
return getattr(module, type_name)
except AttributeError:
raise ValueError("Can't find type '{}.{}'.".format(module_name, type_name))
| _LazyType |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_thread_item_list.py | {
"start": 2702,
"end": 3538
} | class ____(BaseModel):
id: str
"""Identifier of the thread item."""
created_at: int
"""Unix timestamp (in seconds) for when the item was created."""
object: Literal["chatkit.thread_item"]
"""Type discriminator that is always `chatkit.thread_item`."""
tasks: List[DataChatKitTaskGroupTask]
"""Tasks included in the group."""
thread_id: str
"""Identifier of the parent thread."""
type: Literal["chatkit.task_group"]
"""Type discriminator that is always `chatkit.task_group`."""
Data: TypeAlias = Annotated[
Union[
ChatKitThreadUserMessageItem,
ChatKitThreadAssistantMessageItem,
ChatKitWidgetItem,
DataChatKitClientToolCall,
DataChatKitTask,
DataChatKitTaskGroup,
],
PropertyInfo(discriminator="type"),
]
| DataChatKitTaskGroup |
python | scikit-learn__scikit-learn | sklearn/metrics/_plot/det_curve.py | {
"start": 281,
"end": 13298
} | class ____(_BinaryClassifierCurveDisplayMixin):
"""Detection Error Tradeoff (DET) curve visualization.
It is recommended to use :func:`~sklearn.metrics.DetCurveDisplay.from_estimator`
or :func:`~sklearn.metrics.DetCurveDisplay.from_predictions` to create a
visualizer. All parameters are stored as attributes.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Model Evaluation Guide <det_curve>`.
.. versionadded:: 0.24
Parameters
----------
fpr : ndarray
False positive rate.
fnr : ndarray
False negative rate.
estimator_name : str, default=None
Name of estimator. If None, the estimator name is not shown.
pos_label : int, float, bool or str, default=None
The label of the positive class. If not `None`, this value is displayed in
the x- and y-axes labels.
Attributes
----------
line_ : matplotlib Artist
DET Curve.
ax_ : matplotlib Axes
Axes with DET Curve.
figure_ : matplotlib Figure
Figure containing the curve.
See Also
--------
det_curve : Compute error rates for different probability thresholds.
DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
some data.
DetCurveDisplay.from_predictions : Plot DET curve given the true and
predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import det_curve, DetCurveDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(n_samples=1000, random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.4, random_state=0)
>>> clf = SVC(random_state=0).fit(X_train, y_train)
>>> y_score = clf.decision_function(X_test)
>>> fpr, fnr, _ = det_curve(y_test, y_score)
>>> display = DetCurveDisplay(
... fpr=fpr, fnr=fnr, estimator_name="SVC"
... )
>>> display.plot()
<...>
>>> plt.show()
"""
def __init__(self, *, fpr, fnr, estimator_name=None, pos_label=None):
self.fpr = fpr
self.fnr = fnr
self.estimator_name = estimator_name
self.pos_label = pos_label
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
sample_weight=None,
drop_intermediate=True,
response_method="auto",
pos_label=None,
name=None,
ax=None,
**kwargs,
):
"""Plot DET curve given an estimator and data.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Model Evaluation Guide <det_curve>`.
.. versionadded:: 1.0
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : bool, default=True
Whether to drop thresholds where true positives (tp) do not change
from the previous or subsequent threshold. All points with the same
tp value have the same `fnr` and thus same y coordinate.
.. versionadded:: 1.7
response_method : {'predict_proba', 'decision_function', 'auto'} \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the predicted target response. If set
to 'auto', :term:`predict_proba` is tried first and if it does not
exist :term:`decision_function` is tried next.
pos_label : int, float, bool or str, default=None
The label of the positive class. By default, `estimators.classes_[1]`
is considered as the positive class.
name : str, default=None
Name of DET curve for labeling. If `None`, use the name of the
estimator.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
**kwargs : dict
Additional keywords arguments passed to matplotlib `plot` function.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
det_curve : Compute error rates for different probability thresholds.
DetCurveDisplay.from_predictions : Plot DET curve given the true and
predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import DetCurveDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(n_samples=1000, random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.4, random_state=0)
>>> clf = SVC(random_state=0).fit(X_train, y_train)
>>> DetCurveDisplay.from_estimator(
... clf, X_test, y_test)
<...>
>>> plt.show()
"""
y_score, pos_label, name = cls._validate_and_get_response_values(
estimator,
X,
y,
response_method=response_method,
pos_label=pos_label,
name=name,
)
return cls.from_predictions(
y_true=y,
y_score=y_score,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
name=name,
ax=ax,
pos_label=pos_label,
**kwargs,
)
@classmethod
def from_predictions(
cls,
y_true,
y_score=None,
*,
sample_weight=None,
drop_intermediate=True,
pos_label=None,
name=None,
ax=None,
y_pred="deprecated",
**kwargs,
):
"""Plot the DET curve given the true and predicted labels.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Model Evaluation Guide <det_curve>`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_score : array-like of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by `decision_function` on some classifiers).
.. versionadded:: 1.8
`y_pred` has been renamed to `y_score`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : bool, default=True
Whether to drop thresholds where true positives (tp) do not change
from the previous or subsequent threshold. All points with the same
tp value have the same `fnr` and thus same y coordinate.
.. versionadded:: 1.7
pos_label : int, float, bool or str, default=None
The label of the positive class. When `pos_label=None`, if `y_true`
is in {-1, 1} or {0, 1}, `pos_label` is set to 1, otherwise an
error will be raised.
name : str, default=None
Name of DET curve for labeling. If `None`, name will be set to
`"Classifier"`.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
y_pred : array-like of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by “decision_function” on some classifiers).
.. deprecated:: 1.8
`y_pred` is deprecated and will be removed in 1.10. Use
`y_score` instead.
**kwargs : dict
Additional keywords arguments passed to matplotlib `plot` function.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
See Also
--------
det_curve : Compute error rates for different probability thresholds.
DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
some data.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.metrics import DetCurveDisplay
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.svm import SVC
>>> X, y = make_classification(n_samples=1000, random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.4, random_state=0)
>>> clf = SVC(random_state=0).fit(X_train, y_train)
>>> y_score = clf.decision_function(X_test)
>>> DetCurveDisplay.from_predictions(
... y_test, y_score)
<...>
>>> plt.show()
"""
y_score = _deprecate_y_pred_parameter(y_score, y_pred, "1.8")
pos_label_validated, name = cls._validate_from_predictions_params(
y_true, y_score, sample_weight=sample_weight, pos_label=pos_label, name=name
)
fpr, fnr, _ = det_curve(
y_true,
y_score,
pos_label=pos_label,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
)
viz = cls(
fpr=fpr,
fnr=fnr,
estimator_name=name,
pos_label=pos_label_validated,
)
return viz.plot(ax=ax, name=name, **kwargs)
def plot(self, ax=None, *, name=None, **kwargs):
"""Plot visualization.
Parameters
----------
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
name : str, default=None
Name of DET curve for labeling. If `None`, use `estimator_name` if
it is not `None`, otherwise no labeling is shown.
**kwargs : dict
Additional keywords arguments passed to matplotlib `plot` function.
Returns
-------
display : :class:`~sklearn.metrics.DetCurveDisplay`
Object that stores computed values.
"""
self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)
line_kwargs = {} if name is None else {"label": name}
line_kwargs.update(**kwargs)
# We have the following bounds:
# sp.stats.norm.ppf(0.0) = -np.inf
# sp.stats.norm.ppf(1.0) = np.inf
# We therefore clip to eps and 1 - eps to not provide infinity to matplotlib.
eps = np.finfo(self.fpr.dtype).eps
self.fpr = self.fpr.clip(eps, 1 - eps)
self.fnr = self.fnr.clip(eps, 1 - eps)
(self.line_,) = self.ax_.plot(
sp.stats.norm.ppf(self.fpr),
sp.stats.norm.ppf(self.fnr),
**line_kwargs,
)
info_pos_label = (
f" (Positive label: {self.pos_label})" if self.pos_label is not None else ""
)
xlabel = "False Positive Rate" + info_pos_label
ylabel = "False Negative Rate" + info_pos_label
self.ax_.set(xlabel=xlabel, ylabel=ylabel)
if "label" in line_kwargs:
self.ax_.legend(loc="lower right")
ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
tick_locations = sp.stats.norm.ppf(ticks)
tick_labels = [
"{:.0%}".format(s) if (100 * s).is_integer() else "{:.1%}".format(s)
for s in ticks
]
self.ax_.set_xticks(tick_locations)
self.ax_.set_xticklabels(tick_labels)
self.ax_.set_xlim(-3, 3)
self.ax_.set_yticks(tick_locations)
self.ax_.set_yticklabels(tick_labels)
self.ax_.set_ylim(-3, 3)
return self
| DetCurveDisplay |
python | SmileyChris__easy-thumbnails | easy_thumbnails/alias.py | {
"start": 44,
"end": 4000
} | class ____:
"""
A container which stores and retrieves named easy-thumbnail options
dictionaries.
"""
def __init__(self, populate_from_settings=True):
"""
Initialize the Aliases object.
:param populate_from_settings: If ``True`` (default) then populate the
initial aliases from settings. See :meth:`populate_from_settings`.
"""
self._aliases = {}
if populate_from_settings:
self.populate_from_settings()
def populate_from_settings(self):
"""
Populate the aliases from the ``THUMBNAIL_ALIASES`` setting.
"""
settings_aliases = settings.THUMBNAIL_ALIASES
if settings_aliases:
for target, aliases in settings_aliases.items():
target_aliases = self._aliases.setdefault(target, {})
target_aliases.update(aliases)
def set(self, alias, options, target=None):
"""
Add an alias.
:param alias: The name of the alias to add.
:param options: The easy-thumbnails options dictonary for this alias
(should include ``size``).
:param target: A field, model, or app to limit this alias to
(optional).
"""
target = self._coerce_target(target) or ''
target_aliases = self._aliases.setdefault(target, {})
target_aliases[alias] = options
def get(self, alias, target=None):
"""
Get a dictionary of aliased options.
:param alias: The name of the aliased options.
:param target: Get alias for this specific target (optional).
If no matching alias is found, returns ``None``.
"""
for target_part in reversed(list(self._get_targets(target))):
options = self._get(target_part, alias)
if options:
return options
def all(self, target=None, include_global=True):
"""
Get a dictionary of all aliases and their options.
:param target: Include aliases for this specific field, model or app
(optional).
:param include_global: Include all non target-specific aliases
(default ``True``).
For example::
>>> aliases.all(target='my_app.MyModel')
{'small': {'size': (100, 100)}, 'large': {'size': (400, 400)}}
"""
aliases = {}
for target_part in self._get_targets(target, include_global):
aliases.update(self._aliases.get(target_part, {}))
return aliases
def _get(self, target, alias):
"""
Internal method to get a specific alias.
"""
if target not in self._aliases:
return
return self._aliases[target].get(alias)
def _get_targets(self, target, include_global=True):
"""
Internal iterator to split up a complete target into the possible parts
it may match.
For example::
>>> list(aliases._get_targets('my_app.MyModel.somefield'))
['', 'my_app', 'my_app.MyModel', 'my_app.MyModel.somefield']
"""
target = self._coerce_target(target)
if include_global:
yield ''
if not target:
return
target_bits = target.split('.')
for i in range(len(target_bits)):
yield '.'.join(target_bits[:i + 1])
def _coerce_target(self, target):
"""
Internal method to coerce a target to a string.
The assumption is that if it is not ``None`` and not a string, it is
a Django ``FieldFile`` object.
"""
if not target or isinstance(target, str):
return target
if not hasattr(target, 'instance'):
return None
model = target.instance.__class__
return '%s.%s.%s' % (
model._meta.app_label,
model.__name__,
target.field.name,
)
aliases = Aliases()
| Aliases |
python | openai__openai-python | src/openai/_extras/_common.py | {
"start": 312,
"end": 364
} | class ____(OpenAIError):
pass
| MissingDependencyError |
python | astropy__astropy | astropy/visualization/lupton_rgb.py | {
"start": 11805,
"end": 14582
} | class ____(BaseStretch):
r"""
A modified asinh stretch, with some changes to the constants
relative to `~astropy.visualization.AsinhStretch`.
The stretch is given by:
.. math::
& y = {\rm asinh}\left(\frac{Q * x}{stretch}\right) *
\frac{frac}{{\rm asinh}(frac * Q)} \\
& frac = 0.1
Parameters
----------
stretch : float, optional
Linear stretch of the image. ``stretch`` must be greater than 0.
Default is 5.
Q : float, optional
The asinh softening parameter. ``Q`` must be greater than 0.
Default is 8.
Notes
-----
Based on the asinh stretch presented in Lupton et al. 2004
(https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L).
Examples
--------
.. plot::
:show-source-link:
import numpy as np
from astropy.visualization import LuptonAsinhStretch
from matplotlib import pyplot as plt
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(8, 8),
layout='constrained')
ax = ax.ravel()
x = np.linspace(0, 1, 100)
stretches = (0.05, 0.1, 0.2, 0.5, 1, 5, 10)
Qs = (2, 5, 8, 10)
for i, Q in enumerate(Qs):
for st in stretches:
stretch = LuptonAsinhStretch(stretch=st, Q=Q)
label = f'{st=}'
ax[i].plot(x, stretch(x, clip=True), label=label)
ax[i].axis('equal')
ax[i].plot(x, x, ls='dotted', color='k', alpha=0.3)
ax[i].set_xlim(0, 1)
ax[i].set_ylim(0, 1)
ax[i].set_xlabel('Input Value')
ax[i].set_ylabel('Output Value')
ax[i].set_title(f'{stretch.__class__.__name__}, {Q=}')
ax[i].legend(loc='lower right', fontsize=8)
"""
def __init__(self, stretch=5, Q=8):
super().__init__()
if stretch < 0:
raise ValueError(f"Stretch must be non-negative! {stretch=}")
if Q < 0:
raise ValueError(f"Q must be non-negative! {Q=}")
# 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit
epsilon = 1.0 / 2**23
if abs(Q) < epsilon:
Q = 0.1
else:
Qmax = 1e10
if Q > Qmax:
Q = Qmax
self.stretch = stretch
self.Q = Q
frac = 0.1
self._slope = frac / np.arcsinh(frac * Q)
self._soften = Q / float(stretch)
def __call__(self, values, clip=False, out=None):
values = _stretch_prepare(values, clip=clip, out=out)
np.multiply(values, self._soften, out=values)
np.arcsinh(values, out=values)
np.multiply(values, self._slope, out=values)
return values
| LuptonAsinhStretch |
python | huggingface__transformers | src/transformers/models/idefics3/image_processing_idefics3_fast.py | {
"start": 5690,
"end": 22643
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.LANCZOS
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"longest_edge": 4 * 364}
max_image_size = {"longest_edge": 364}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_image_splitting = True
do_pad = True
return_row_col_info = False
valid_kwargs = Idefics3ImageProcessorKwargs
def _prepare_images_structure(self, images: ImageInput, expected_ndims: int = 3) -> ImageInput:
"""
Prepare a nested images structure for processing.
"""
return make_nested_list_of_images(images, expected_ndims=expected_ndims)
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Resize an image. The longest edge of the image is resized to size.longest_edge, with the shortest edge
resized to keep the input aspect ratio. Can also be used with size.height and size.width.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
antialias (`bool`, *optional*, defaults to `True`):
Whether to use antialiasing when resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if interpolation == F.InterpolationMode.LANCZOS:
logger.warning_once(
"You have used fast image processor with LANCZOS resample which not yet supported for torch.Tensor. "
"BICUBIC resample will be used as an alternative. Please fall back to slow image processor if you "
"want full consistency with the original model."
)
interpolation = F.InterpolationMode.BICUBIC
if size.longest_edge:
size = get_resize_output_image_size(image, resolution_max_side=size.longest_edge)
elif size.height and size.width:
size = (size.height, size.width)
else:
raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.")
return F.resize(image, size, interpolation=interpolation, antialias=antialias)
def split_images(
self,
images: torch.Tensor,
max_image_size: dict[str, int],
interpolation: Optional["F.InterpolationMode"] = None,
):
"""
Split an image into squares of side max_image_size and the original image resized to max_image_size.
That means that a single image becomes a sequence of images.
This is a "trick" to spend more compute on each image with no changes in the vision encoder.
1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio.
2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)`
sub-images of the same size each (image_size, image_size). Typically, 364x364.
3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width.
Args:
images (`torch.Tensor`):
Images to split.
max_image_size (`Dict[str, int]`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
"""
batch_size, num_channels, height, width = images.size()
height_dim, width_dim = 2, 3
max_height = max_width = max_image_size["longest_edge"]
frames = []
if height > max_height or width > max_width:
# Calculate the number of splits
num_splits_h = math.ceil(height / max_height)
num_splits_w = math.ceil(width / max_width)
# Split the images by height, then by width
frames = (
images.unfold(height_dim, size=max_height, step=max_height)
.unfold(width_dim, size=max_width, step=max_width)
.contiguous()
.view(batch_size, num_channels, -1, max_height, max_width)
.permute(0, 2, 1, 3, 4)
) # batch_size x n_frames x num_channels x height x width
# For the global image at the end, we resize it to match the max_image_size, for cpu memory efficiency
global_image_height, global_image_width = max_height, max_width
images = self.resize(
images, SizeDict(height=global_image_height, width=global_image_width), interpolation=interpolation
)
frames = torch.cat((frames, images.unsqueeze(1)), dim=1)
else:
num_splits_h, num_splits_w = 0, 0
frames = images.unsqueeze(1)
num_splits_h = [num_splits_h] * batch_size
num_splits_w = [num_splits_w] * batch_size
return frames, num_splits_h, num_splits_w
def resize_for_vision_encoder(
self,
image: torch.Tensor,
vision_encoder_max_size: int,
interpolation: Optional["F.InterpolationMode"] = None,
):
"""
Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.
Args:
image (`torch.Tensor`):
Images to resize.
vision_encoder_max_size (`int`):
Maximum size of the output image. If the image is larger than this size, it will be split into
patches of this size, and the original image will be concatenated with the patches, resized to max_size.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
"""
height, width = image.size()[-2:]
aspect_ratio = width / height
if width >= height:
width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
height = int(width / aspect_ratio)
height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
elif height > width:
height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size
width = int(height * aspect_ratio)
width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size
new_size = SizeDict(height=height, width=width)
return self.resize(image, size=new_size, interpolation=interpolation)
def pad(
self,
image: torch.Tensor,
padded_size: tuple[int, int],
fill: int = 0,
return_pixel_mask: bool = True,
):
original_size = image.shape[-2:]
padding_bottom = padded_size[0] - original_size[0]
padding_right = padded_size[1] - original_size[1]
if padding_bottom < 0 or padding_right < 0:
raise ValueError(
f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
f"original size. Got padded size: {padded_size}, original size: {original_size}."
)
# Only pad if necessary
if original_size != padded_size:
padding = (0, 0, padding_right, padding_bottom)
image = F.pad(image, padding, fill=fill, padding_mode="constant")
# Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
pixel_mask = None
if return_pixel_mask:
pixel_mask = torch.zeros_like(image[..., 0, :, :], dtype=torch.int64)
pixel_mask[: original_size[0], : original_size[1]] = 1
return image, pixel_mask
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Idefics3ImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list[list["torch.Tensor"]],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_pad: Optional[bool],
do_image_splitting: Optional[bool],
max_image_size: Optional[dict[str, int]],
return_row_col_info: Optional[bool],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
"""
Process a batch of images for the model.
"""
grouped_images, grouped_images_index = group_images_by_shape(
images, is_nested=True, disable_grouping=disable_grouping
)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(stacked_images, size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index, is_nested=True)
grouped_images, grouped_images_index = group_images_by_shape(
resized_images, is_nested=True, disable_grouping=disable_grouping
)
split_images_grouped = {}
if do_image_splitting:
rows_grouped = {}
cols_grouped = {}
for shape, stacked_images in grouped_images.items():
stacked_images = self.resize_for_vision_encoder(
stacked_images, max_image_size["longest_edge"], interpolation=interpolation
)
stacked_images, rows, cols = self.split_images(
stacked_images, max_image_size=max_image_size, interpolation=interpolation
)
split_images_grouped[shape] = stacked_images
rows_grouped[shape] = rows
cols_grouped[shape] = cols
processed_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True)
rows = reorder_images(rows_grouped, grouped_images_index, is_nested=True)
cols = reorder_images(cols_grouped, grouped_images_index, is_nested=True)
# flattenened the doubly nested list to a nested list
for i, group_images in enumerate(processed_images):
processed_images[i] = [image for sublist in group_images for image in sublist]
else:
for shape, stacked_images in grouped_images.items():
# We square the images to max_image_size
stacked_images = self.resize(
image=stacked_images,
size=SizeDict(height=max_image_size["longest_edge"], width=max_image_size["longest_edge"]),
interpolation=interpolation,
)
split_images_grouped[shape] = stacked_images
processed_images = reorder_images(split_images_grouped, grouped_images_index, is_nested=True)
rows = [[0] * len(images) for images in processed_images]
cols = [[0] * len(images) for images in processed_images]
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(
processed_images, is_nested=True, disable_grouping=disable_grouping
)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=True)
if do_pad:
# Get max images per batch
max_num_images = max(len(images_) for images_ in processed_images)
max_height, max_width = get_max_height_width(processed_images)
processed_images_padded = torch.zeros(
len(processed_images),
max_num_images,
*(processed_images[0][0].shape[0], max_height, max_width),
device=processed_images[0][0].device,
)
pixel_attention_masks = torch.zeros(
len(processed_images),
max_num_images,
*(max_height, max_width),
device=processed_images[0][0].device,
)
for i, images in enumerate(processed_images):
for j, image in enumerate(images):
processed_images_padded[i, j], pixel_attention_masks[i, j] = self.pad(
image, (max_height, max_width)
)
processed_images = processed_images_padded
if do_pad:
data = {"pixel_values": processed_images, "pixel_attention_mask": pixel_attention_masks}
elif return_tensors == "pt":
data = {"pixel_values": torch.stack([torch.stack(images) for images in processed_images])}
else:
data = {"pixel_values": processed_images}
# This is needed for generating correct text inputs in the processor - we don't pad to the max number of images
encoding = BatchFeature(data=data, tensor_type=return_tensors)
if return_row_col_info:
encoding["rows"] = rows
encoding["cols"] = cols
return encoding
def to_dict(self):
encoder_dict = super().to_dict()
encoder_dict.pop("_valid_processor_keys", None)
encoder_dict.pop("return_row_col_info", None)
return encoder_dict
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
do_image_splitting = images_kwargs.get("do_image_splitting", self.do_image_splitting)
max_image_size = images_kwargs.get("max_image_size", self.max_image_size)
size = images_kwargs.get("size", self.size)
num_patches = num_rows = num_cols = 1
if do_image_splitting:
height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size["longest_edge"])
height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE)
aspect_ratio = width / height
if width >= height:
resized_width = math.ceil(width / max_image_size["longest_edge"]) * max_image_size["longest_edge"]
resized_height = int(width / aspect_ratio)
resized_height = math.ceil(height / max_image_size["longest_edge"]) * max_image_size["longest_edge"]
elif height > width:
resized_height = math.ceil(height / max_image_size["longest_edge"]) * max_image_size["longest_edge"]
resized_width = int(height * aspect_ratio)
resized_width = math.ceil(width / max_image_size["longest_edge"]) * max_image_size["longest_edge"]
max_height = max_width = max_image_size["longest_edge"]
if resized_height > max_height or resized_width > max_width:
# Calculate the number of splits
num_rows = math.ceil(resized_height / max_height)
num_cols = math.ceil(resized_width / max_width)
num_patches = num_rows * num_cols + 1
return num_patches, num_rows, num_cols
__all__ = ["Idefics3ImageProcessorFast"]
| Idefics3ImageProcessorFast |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classGetItem1.py | {
"start": 456,
"end": 754
} | class ____(Generic[_T, _S]):
# Even though this class has a __class_getitem__ method,
# it will be assumed to follow normal generic class semantics.
def __class_getitem__(cls, args: tuple[int, ...]) -> None: ...
reveal_type(ClassB[int, str], expected_text="type[ClassB[int, str]]")
| ClassB |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_curve.py | {
"start": 11136,
"end": 19043
} | class ____:
@pytest.fixture(autouse=True)
def _tendon_force_length_inverse_arguments_fixture(self):
self.fl_T = Symbol('fl_T')
self.c0 = Symbol('c_0')
self.c1 = Symbol('c_1')
self.c2 = Symbol('c_2')
self.c3 = Symbol('c_3')
self.constants = (self.c0, self.c1, self.c2, self.c3)
@staticmethod
def test_class():
assert issubclass(TendonForceLengthInverseDeGroote2016, Function)
assert issubclass(TendonForceLengthInverseDeGroote2016, CharacteristicCurveFunction)
assert TendonForceLengthInverseDeGroote2016.__name__ == 'TendonForceLengthInverseDeGroote2016'
def test_instance(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
assert isinstance(fl_T_inv, TendonForceLengthInverseDeGroote2016)
assert str(fl_T_inv) == 'TendonForceLengthInverseDeGroote2016(fl_T, c_0, c_1, c_2, c_3)'
def test_doit(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants).doit()
assert fl_T_inv == log((self.fl_T + self.c2)/self.c0)/self.c3 + self.c1
def test_doit_evaluate_false(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants).doit(evaluate=False)
assert fl_T_inv == log(UnevaluatedExpr((self.fl_T + self.c2)/self.c0))/self.c3 + self.c1
def test_with_defaults(self):
constants = (
Float('0.2'),
Float('0.995'),
Float('0.25'),
Float('33.93669377311689'),
)
fl_T_inv_manual = TendonForceLengthInverseDeGroote2016(self.fl_T, *constants)
fl_T_inv_constants = TendonForceLengthInverseDeGroote2016.with_defaults(self.fl_T)
assert fl_T_inv_manual == fl_T_inv_constants
def test_differentiate_wrt_fl_T(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = 1/(self.c3*(self.fl_T + self.c2))
assert fl_T_inv.diff(self.fl_T) == expected
def test_differentiate_wrt_c0(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = -1/(self.c0*self.c3)
assert fl_T_inv.diff(self.c0) == expected
def test_differentiate_wrt_c1(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = Integer(1)
assert fl_T_inv.diff(self.c1) == expected
def test_differentiate_wrt_c2(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = 1/(self.c3*(self.fl_T + self.c2))
assert fl_T_inv.diff(self.c2) == expected
def test_differentiate_wrt_c3(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = -log(UnevaluatedExpr((self.fl_T + self.c2)/self.c0))/self.c3**2
assert fl_T_inv.diff(self.c3) == expected
def test_inverse(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
assert fl_T_inv.inverse() is TendonForceLengthDeGroote2016
def test_function_print_latex(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = r'\left( \operatorname{fl}^T \right)^{-1} \left( fl_{T} \right)'
assert LatexPrinter().doprint(fl_T_inv) == expected
def test_expression_print_latex(self):
fl_T = TendonForceLengthInverseDeGroote2016(self.fl_T, *self.constants)
expected = r'c_{1} + \frac{\log{\left(\frac{c_{2} + fl_{T}}{c_{0}} \right)}}{c_{3}}'
assert LatexPrinter().doprint(fl_T.doit()) == expected
@pytest.mark.parametrize(
'code_printer, expected',
[
(
C89CodePrinter,
'(0.995 + 0.029466630034306838*log(5.0*fl_T + 1.25))',
),
(
C99CodePrinter,
'(0.995 + 0.029466630034306838*log(5.0*fl_T + 1.25))',
),
(
C11CodePrinter,
'(0.995 + 0.029466630034306838*log(5.0*fl_T + 1.25))',
),
(
CXX98CodePrinter,
'(0.995 + 0.029466630034306838*log(5.0*fl_T + 1.25))',
),
(
CXX11CodePrinter,
'(0.995 + 0.029466630034306838*std::log(5.0*fl_T + 1.25))',
),
(
CXX17CodePrinter,
'(0.995 + 0.029466630034306838*std::log(5.0*fl_T + 1.25))',
),
(
FCodePrinter,
' (0.995d0 + 0.02946663003430684d0*log(5.0d0*fl_T + 1.25d0))',
),
(
OctaveCodePrinter,
'(0.995 + 0.02946663003430684*log(5.0*fl_T + 1.25))',
),
(
PythonCodePrinter,
'(0.995 + 0.02946663003430684*math.log(5.0*fl_T + 1.25))',
),
(
NumPyPrinter,
'(0.995 + 0.02946663003430684*numpy.log(5.0*fl_T + 1.25))',
),
(
SciPyPrinter,
'(0.995 + 0.02946663003430684*numpy.log(5.0*fl_T + 1.25))',
),
(
CuPyPrinter,
'(0.995 + 0.02946663003430684*cupy.log(5.0*fl_T + 1.25))',
),
(
JaxPrinter,
'(0.995 + 0.02946663003430684*jax.numpy.log(5.0*fl_T + 1.25))',
),
(
MpmathPrinter,
'(mpmath.mpf((0, 8962163258467287, -53, 53))'
' + mpmath.mpf((0, 33972711434846347, -60, 55))'
'*mpmath.log(mpmath.mpf((0, 5, 0, 3))*fl_T + mpmath.mpf((0, 5, -2, 3))))',
),
(
LambdaPrinter,
'(0.995 + 0.02946663003430684*math.log(5.0*fl_T + 1.25))',
),
]
)
def test_print_code(self, code_printer, expected):
fl_T_inv = TendonForceLengthInverseDeGroote2016.with_defaults(self.fl_T)
assert code_printer().doprint(fl_T_inv) == expected
def test_derivative_print_code(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016.with_defaults(self.fl_T)
dfl_T_inv_dfl_T = fl_T_inv.diff(self.fl_T)
expected = '1/(33.93669377311689*fl_T + 8.484173443279222)'
assert PythonCodePrinter().doprint(dfl_T_inv_dfl_T) == expected
def test_lambdify(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016.with_defaults(self.fl_T)
fl_T_inv_callable = lambdify(self.fl_T, fl_T_inv)
assert fl_T_inv_callable(0.0) == pytest.approx(1.0015752885)
@pytest.mark.skipif(numpy is None, reason='NumPy not installed')
def test_lambdify_numpy(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016.with_defaults(self.fl_T)
fl_T_inv_callable = lambdify(self.fl_T, fl_T_inv, 'numpy')
fl_T = numpy.array([-0.2, -0.01, 0.0, 1.01, 1.02, 1.05])
expected = numpy.array([
0.9541505769,
1.0003724019,
1.0015752885,
1.0492347951,
1.0494677341,
1.0501557022,
])
numpy.testing.assert_allclose(fl_T_inv_callable(fl_T), expected)
@pytest.mark.skipif(jax is None, reason='JAX not installed')
def test_lambdify_jax(self):
fl_T_inv = TendonForceLengthInverseDeGroote2016.with_defaults(self.fl_T)
fl_T_inv_callable = jax.jit(lambdify(self.fl_T, fl_T_inv, 'jax'))
fl_T = jax.numpy.array([-0.2, -0.01, 0.0, 1.01, 1.02, 1.05])
expected = jax.numpy.array([
0.9541505769,
1.0003724019,
1.0015752885,
1.0492347951,
1.0494677341,
1.0501557022,
])
numpy.testing.assert_allclose(fl_T_inv_callable(fl_T), expected)
| TestTendonForceLengthInverseDeGroote2016 |
python | pypa__pip | src/pip/_vendor/rich/progress.py | {
"start": 31745,
"end": 32176
} | class ____(ProgressColumn):
"""Renders human readable transfer speed."""
def render(self, task: "Task") -> Text:
"""Show data transfer speed."""
speed = task.finished_speed or task.speed
if speed is None:
return Text("?", style="progress.data.speed")
data_speed = filesize.decimal(int(speed))
return Text(f"{data_speed}/s", style="progress.data.speed")
| TransferSpeedColumn |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/rtf/base.py | {
"start": 216,
"end": 1093
} | class ____(BaseReader):
"""RTF (Rich Text Format) Reader. Reads rtf file and convert to Document."""
def load_data(
self,
input_file: Union[Path, str],
extra_info: Optional[Dict[str, Any]] = None,
**load_kwargs: Any,
) -> List[Document]:
"""
Load data from RTF file.
Args:
input_file (Path | str): Path for the RTF file.
extra_info (Dict[str, Any]): Path for the RTF file.
Returns:
List[Document]: List of documents.
"""
try:
from striprtf.striprtf import rtf_to_text
except ImportError:
raise ImportError("striprtf is required to read RTF files.")
with open(str(input_file)) as f:
text = rtf_to_text(f.read())
return [Document(text=text.strip(), metadata=extra_info or {})]
| RTFReader |
python | getsentry__sentry | src/sentry/interfaces/contexts.py | {
"start": 6978,
"end": 7081
} | class ____(ContextType):
type = "trace"
context_to_tag_mapping = {}
@contexttype
| TraceContextType |
python | huggingface__transformers | tests/models/dinov2/test_modeling_dinov2.py | {
"start": 10819,
"end": 11907
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/dinov2-base") if is_vision_available() else None
@slow
def test_inference_no_head(self):
model = Dinov2Model.from_pretrained("facebook/dinov2-base").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the last hidden states
expected_shape = torch.Size((1, 257, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-2.2005, -0.4495, 1.0964], [-3.3959, -0.8942, -1.0315], [-2.9355, 1.1564, -0.7656]],
device=torch_device,
)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
@require_torch
| Dinov2ModelIntegrationTest |
python | lazyprogrammer__machine_learning_examples | svm_class/fake_neural_net.py | {
"start": 808,
"end": 4151
} | class ____:
def __init__(self, gamma=1.0, n_components=100, method='random'):
self.M = n_components
self.gamma = gamma
assert(method in ('normal', 'random', 'kmeans', 'gmm'))
self.method = method
def _subsample_data(self, X, Y, n=10000):
if Y is not None:
X, Y = shuffle(X, Y)
return X[:n], Y[:n]
else:
X = shuffle(X)
return X[:n]
def fit(self, X, Y=None):
if self.method == 'random':
N = len(X)
idx = np.random.randint(N, size=self.M)
self.samples = X[idx]
elif self.method == 'normal':
# just sample from N(0,1)
D = X.shape[1]
self.samples = np.random.randn(self.M, D) / np.sqrt(D)
elif self.method == 'kmeans':
X, Y = self._subsample_data(X, Y)
print("Fitting kmeans...")
t0 = datetime.now()
kmeans = KMeans(n_clusters=len(set(Y)))
kmeans.fit(X)
print("Finished fitting kmeans, duration:", datetime.now() - t0)
# calculate the most ambiguous points
# we will do this by finding the distance between each point
# and all cluster centers
# and return which points have the smallest variance
dists = kmeans.transform(X) # returns an N x K matrix
variances = dists.var(axis=1)
idx = np.argsort(variances) # smallest to largest
idx = idx[:self.M]
self.samples = X[idx]
elif self.method == 'gmm':
X, Y = self._subsample_data(X, Y)
print("Fitting GMM")
t0 = datetime.now()
gmm = GaussianMixture(
n_components=len(set(Y)),
covariance_type='spherical',
reg_covar=1e-6)
gmm.fit(X)
print("Finished fitting GMM, duration:", datetime.now() - t0)
# calculate the most ambiguous points
probs = gmm.predict_proba(X)
ent = stats.entropy(probs.T) # N-length vector of entropies
idx = np.argsort(-ent) # negate since we want biggest first
idx = idx[:self.M]
self.samples = X[idx]
return self
def transform(self, X):
Z = X.dot(self.samples.T) # (Ntest x D) x (D x Nsamples) -> (Ntest x Nsamples)
return np.tanh(self.gamma * Z)
# return self.gamma * Z * (Z > 0)
def fit_transform(self, X, Y=None):
return self.fit(X, Y).transform(X)
# get the data: https://www.kaggle.com/c/digit-recognizer
Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
# with SGD
pipeline = Pipeline([
('scaler', StandardScaler()),
('sigmoid', SigmoidFeaturizer(gamma=0.05, n_components=2000, method='normal')),
# ('linear', SGDClassifier(max_iter=1e6, tol=1e-5))
('linear', LogisticRegression()) # takes longer
])
# with Linear SVC
# n_components = 3000
# pipeline = Pipeline([
# ('scaler', StandardScaler()),
# ('sigmoid', SigmoidFeaturizer(n_components=n_components)),
# ('linear', LinearSVC())
# ])
# let's do some cross-validation instead, why not
X = np.vstack((Xtrain, Xtest))
Y = np.concatenate((Ytrain, Ytest))
scores = cross_val_score(pipeline, X, Y, cv=5)
print(scores)
print("avg:", np.mean(scores))
# t0 = datetime.now()
# pipeline.fit(Xtrain, Ytrain)
# print("train duration:", datetime.now() - t0)
# t0 = datetime.now()
# print("train score:", pipeline.score(Xtrain, Ytrain), "duration:", datetime.now() - t0)
# t0 = datetime.now()
# print("test score:", pipeline.score(Xtest, Ytest), "duration:", datetime.now() - t0)
| SigmoidFeaturizer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py | {
"start": 7703,
"end": 18107
} | class ____(BaseReductionTest):
def _tf_reduce(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
def _np_reduce(self, x, reduction_axes, keepdims):
if isinstance(reduction_axes, list) or isinstance(reduction_axes,
np.ndarray):
reduction_axes = tuple(reduction_axes)
return np.sum(x, axis=reduction_axes, keepdims=keepdims)
def testAxesType(self):
for dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
v = math_ops.reduce_sum([0, 0], constant_op.constant(0, dtype=dtype))
tf_v = self.evaluate(v)
self.assertAllEqual(tf_v, 0)
@test_util.run_deprecated_v1
def testInfinity(self):
for dtype in [np.float32, np.float64]:
for special_value_x in [-np.inf, np.inf]:
for special_value_y in [-np.inf, np.inf]:
np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
self._compareAll(np_arr, None)
@test_util.run_deprecated_v1
def testInt32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testFloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float16)
self._compareAllAxes(np_arr)
# test that mean doesn't overflow
# only on GPU, since it has the more accurate implementation
if not test.is_gpu_available():
return
arr = np.ones([68000], dtype=np.float16)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
self.evaluate(variables.global_variables_initializer())
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@test_util.run_deprecated_v1
def testBfloat16(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.bfloat16)
self._compareAllAxes(np_arr, rtol=1e-3, atol=5.)
@test_util.run_deprecated_v1
def testFloat32(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2**np.random.uniform(0, 15))
size_y = int(2**np.random.uniform(0, 15))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=np.float32)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
for size_x in [1, 3, 16, 33]:
for size_y in [1, 3, 16, 33]:
for size_z in [1, 3, 16, 33]:
arr = np.ones([size_x, size_y, size_z], dtype=np.float32)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat32BFloat16(self):
for dtype in [dtypes.float32, dtypes.bfloat16]:
dtype_np = np.float32 if dtype == dtypes.float32 else dtype.as_numpy_dtype
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtype)
self._compareAllAxes(np_arr)
for _ in range(10):
size_x = int(2 ** np.random.uniform(0, 7))
size_y = int(2 ** np.random.uniform(0, 7))
if size_x * size_y > 1e7:
size_y = int(1e7 / size_x)
arr = np.ones([size_x, size_y], dtype=dtype_np)
col_sum = np.sum(arr, axis=0)
row_sum = np.sum(arr, axis=1)
tf_row_sum = self._tf_reduce(arr, 1, False)
tf_col_sum = self._tf_reduce(arr, 0, False)
tf_out_row, tf_out_col = self.evaluate([tf_row_sum, tf_col_sum])
self.assertAllCloseAccordingToType(col_sum, tf_out_col)
self.assertAllCloseAccordingToType(row_sum, tf_out_row)
for size_x in [1, 3, 16]:
for size_y in [1, 3, 16]:
for size_z in [1, 3, 16]:
arr = np.ones([size_x, size_y, size_z], dtype=dtype_np)
sum_y = np.sum(arr, axis=1)
sum_xz = np.sum(arr, axis=(0, 2))
tf_sum_xz = self._tf_reduce(arr, [0, 2], False)
tf_sum_y = self._tf_reduce(arr, 1, False)
tf_out_sum_xz, tf_out_sum_y = self.evaluate([tf_sum_xz, tf_sum_y])
self.assertAllCloseAccordingToType(sum_y, tf_out_sum_y)
self.assertAllCloseAccordingToType(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex64(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testComplex128(self):
for rank in range(1, _MAX_RANK + 1):
np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
self._compareAllAxes(np_arr)
@test_util.run_deprecated_v1
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
math_ops.reduce_sum(input_tensor, [0, 2])
@test_util.run_deprecated_v1
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = array_ops.placeholder(dtypes.float32)
s_unknown = math_ops.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = array_ops.placeholder(dtypes.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(rank=3))
s_known_rank = math_ops.reduce_sum(
c_known_rank, reduction_axes, keepdims=True)
self.assertEqual(3, s_known_rank.get_shape().rank)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = array_ops.placeholder(dtypes.int32)
c_unknown_indices = constant_op.constant([[10.0], [20.0]])
s_unknown_indices = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = math_ops.reduce_sum(
c_unknown_indices, unknown_indices, keepdims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().rank)
@test_util.run_deprecated_v1
def testWrongShapeForReductionIndices(self):
reduction_axes = [[1], [2]]
c_unknown = array_ops.placeholder(dtypes.float32)
with self.assertRaisesWithPredicateMatch(ValueError,
".*must be at most rank 1.*"):
math_ops.reduce_sum(c_unknown, reduction_axes)
def testInvalidRepeatedReductionIndices(self):
reduction_axes = constant_op.constant([0, 0])
c = constant_op.constant([1.0, 2.0])
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
".*Axes contains duplicate dimension: 0.*"):
self.evaluate(math_ops.reduce_sum(c, reduction_axes))
# Int64??
@test_util.run_deprecated_v1
def testGradient(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]:
x = self._makeIncremental([2, 3, 4, 2], dtype)
self._compareGradientAxes(x)
@test_util.run_deprecated_v1
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
@test_util.run_deprecated_v1
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
@test_util.run_deprecated_v1
def testEmptyGradients(self):
with self.session():
x = array_ops.zeros([0, 3])
y = math_ops.reduce_sum(x, [1])
error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testDegenerate(self):
with self.session():
for dtype in (dtypes.bfloat16, dtypes.float16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y, np.zeros(9938))
| SumReductionTest |
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable_py30.py | {
"start": 1718,
"end": 1799
} | class ____(metaclass=ABCMet): # [undefined-variable]
""" Notice the typo """
| Bad |
python | tiangolo__fastapi | scripts/sponsors.py | {
"start": 1460,
"end": 1528
} | class ____(BaseModel):
data: SponsorsResponseData
| SponsorsResponse |
python | google__pytype | pytype/errors/error_printer.py | {
"start": 9435,
"end": 10024
} | class ____:
"""Pretty printer for attribute errors."""
def __init__(self, pp: pretty_printer_base.PrettyPrinterBase):
self._pp = pp
def print_receiver(self, obj: types.BaseValue, attr_name: str):
if attr_name in slots.SYMBOL_MAPPING:
obj_repr = self._pp.print_type(obj)
return BadAttr(obj_repr, BadAttrType.SYMBOL)
elif isinstance(obj, types.Module):
return BadAttr(obj.name, BadAttrType.MODULE) # pytype: disable=attribute-error
else:
obj_repr = self._pp.print_type(obj)
return BadAttr(obj_repr, BadAttrType.OBJECT)
| AttributeErrorPrinter |
python | huggingface__transformers | tests/models/qwen2_audio/test_modeling_qwen2_audio.py | {
"start": 8037,
"end": 16271
} | class ____(unittest.TestCase):
def setUp(self):
cleanup(torch_device, gc_collect=True)
self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_small_model_integration_test_single(self):
# Let' s make sure we test the preprocessing to replace what is used
model = Qwen2AudioForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-Audio-7B-Instruct", device_map=torch_device, dtype=torch.float16
)
url = "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3"
messages = [
{
"role": "user",
"content": [
{"type": "audio", "audio_url": url},
{"type": "text", "text": "What's that sound?"},
],
}
]
raw_audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate)
formatted_prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = self.processor(text=formatted_prompt, audio=[raw_audio], return_tensors="pt", padding=True).to(
torch_device
)
torch.manual_seed(42)
output = model.generate(**inputs, max_new_tokens=32)
# fmt: off
EXPECTED_INPUT_IDS = torch.tensor(
[[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 14755, 220, 16, 25, 220, 151647, *[151646] * 101 , 151648, 198, 3838, 594, 429, 5112, 30, 151645, 198, 151644, 77091, 198]],
device=torch_device
)
# fmt: on
torch.testing.assert_close(inputs["input_ids"], EXPECTED_INPUT_IDS)
# fmt: off
EXPECTED_DECODED_TEXT = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nAudio 1: <|audio_bos|>" + "<|AUDIO|>" * 101 + "<|audio_eos|>\nWhat's that sound?<|im_end|>\n<|im_start|>assistant\nIt is the sound of glass breaking.<|im_end|>"
# fmt: on
self.assertEqual(
self.processor.decode(output[0], skip_special_tokens=False),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_batch(self):
# Let' s make sure we test the preprocessing to replace what is used
model = Qwen2AudioForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-Audio-7B-Instruct", device_map=torch_device, dtype=torch.float16
)
conversation1 = [
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3",
},
{"type": "text", "text": "What's that sound?"},
],
},
{"role": "assistant", "content": "It is the sound of glass shattering."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav",
},
{"type": "text", "text": "What can you hear?"},
],
},
]
conversation2 = [
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/1272-128104-0000.flac",
},
{"type": "text", "text": "What does the person say?"},
],
},
]
conversations = [conversation1, conversation2]
text = [
self.processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
for conversation in conversations
]
audios = []
for conversation in conversations:
for message in conversation:
if isinstance(message["content"], list):
for ele in message["content"]:
if ele["type"] == "audio":
audios.append(
librosa.load(
BytesIO(urlopen(ele["audio_url"]).read()),
sr=self.processor.feature_extractor.sampling_rate,
)[0]
)
inputs = self.processor(text=text, audio=audios, return_tensors="pt", padding=True).to(torch_device)
torch.manual_seed(42)
output = model.generate(**inputs, max_new_tokens=32)
EXPECTED_DECODED_TEXT = [
"system\nYou are a helpful assistant.\nuser\nAudio 1: \nWhat's that sound?\nassistant\nIt is the sound of glass shattering.\nuser\nAudio 2: \nWhat can you hear?\nassistant\ncough and throat clearing.",
"system\nYou are a helpful assistant.\nuser\nAudio 1: \nWhat does the person say?\nassistant\nThe original content of this audio is: 'Mister Quiller is the apostle of the middle classes and we are glad to welcome his gospel.'",
]
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
@slow
def test_small_model_integration_test_multiurn(self):
# Let' s make sure we test the preprocessing to replace what is used
model = Qwen2AudioForConditionalGeneration.from_pretrained(
"Qwen/Qwen2-Audio-7B-Instruct", device_map=torch_device, dtype=torch.float16
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3",
},
{"type": "text", "text": "What's that sound?"},
],
},
{"role": "assistant", "content": "It is the sound of glass shattering."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav",
},
{"type": "text", "text": "How about this one?"},
],
},
]
formatted_prompt = self.processor.apply_chat_template(messages, add_generation_prompt=True)
audios = []
for message in messages:
if isinstance(message["content"], list):
for ele in message["content"]:
if ele["type"] == "audio":
audios.append(
librosa.load(
BytesIO(urlopen(ele["audio_url"]).read()),
sr=self.processor.feature_extractor.sampling_rate,
)[0]
)
inputs = self.processor(text=formatted_prompt, audio=audios, return_tensors="pt", padding=True).to(
torch_device
)
torch.manual_seed(42)
output = model.generate(**inputs, max_new_tokens=32, top_k=1)
EXPECTED_DECODED_TEXT = [
"system\nYou are a helpful assistant.\nuser\nAudio 1: \nWhat's that sound?\nassistant\nIt is the sound of glass shattering.\nuser\nAudio 2: \nHow about this one?\nassistant\nThroat clearing."
]
self.assertEqual(
self.processor.batch_decode(output, skip_special_tokens=True),
EXPECTED_DECODED_TEXT,
)
| Qwen2AudioForConditionalGenerationIntegrationTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table12.py | {
"start": 315,
"end": 999
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
data = [
["Foo", 1234, 2000],
["Bar", 1256, 4000],
["Baz", 2234, 3000],
]
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C2:F6", {"data": data})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scipy__scipy | scipy/sparse/linalg/tests/test_matfuncs.py | {
"start": 2496,
"end": 21072
} | class ____:
def test_zero_ndarray(self):
a = array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_zero_sparse(self):
a = csc_array([[0.,0],[0,0]])
assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]])
def test_zero_matrix(self):
a = matrix([[0.,0],[0,0]])
assert_array_almost_equal(expm(a),[[1,0],[0,1]])
def test_misc_types(self):
A = expm(np.array([[1]]))
assert_allclose(expm(((1,),)), A)
assert_allclose(expm([[1]]), A)
assert_allclose(expm(matrix([[1]])), A)
assert_allclose(expm(np.array([[1]])), A)
assert_allclose(expm(csc_array([[1]])).toarray(), A)
B = expm(np.array([[1j]]))
assert_allclose(expm(((1j,),)), B)
assert_allclose(expm([[1j]]), B)
assert_allclose(expm(matrix([[1j]])), B)
assert_allclose(expm(csc_array([[1j]])).toarray(), B)
def test_bidiagonal_sparse(self):
A = csc_array([
[1, 3, 0],
[0, 1, 5],
[0, 0, 2]], dtype=float)
e1 = math.exp(1)
e2 = math.exp(2)
expected = np.array([
[e1, 3*e1, 15*(e2 - 2*e1)],
[0, e1, 5*(e2 - e1)],
[0, 0, e2]], dtype=float)
observed = expm(A).toarray()
assert_array_almost_equal(observed, expected)
def test_padecases_dtype_float(self):
for dtype in [np.float32, np.float64]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_complex(self):
for dtype in [np.complex64, np.complex128]:
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
A = scale * eye(3, dtype=dtype)
observed = expm(A)
expected = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
assert_array_almost_equal_nulp(observed, expected, nulp=100)
def test_padecases_dtype_sparse_float(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.float64
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * eye_array(3, 3, dtype=dtype, format='csc')
e = exp(scale, dtype=dtype) * eye(3, dtype=dtype)
with warnings.catch_warnings():
msg = "Changing the sparsity structure"
warnings.filterwarnings("ignore", msg, SparseEfficiencyWarning)
exact_onenorm = _expm(a, use_exact_onenorm=True).toarray()
inexact_onenorm = _expm(a, use_exact_onenorm=False).toarray()
assert_array_almost_equal_nulp(exact_onenorm, e, nulp=100)
assert_array_almost_equal_nulp(inexact_onenorm, e, nulp=100)
def test_padecases_dtype_sparse_complex(self):
# float32 and complex64 lead to errors in spsolve/UMFpack
dtype = np.complex128
for scale in [1e-2, 1e-1, 5e-1, 1, 10]:
a = scale * eye_array(3, 3, dtype=dtype, format='csc')
e = exp(scale) * eye(3, dtype=dtype)
with warnings.catch_warnings():
msg = "Changing the sparsity structure"
warnings.filterwarnings("ignore", msg, SparseEfficiencyWarning)
assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100)
def test_logm_consistency(self):
random.seed(1234)
for dtype in [np.float64, np.complex128]:
for n in range(1, 10):
for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
# make logm(A) be of a given scale
A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
if np.iscomplexobj(A):
A = A + 1j * random.rand(n, n) * scale
assert_array_almost_equal(expm(logm(A)), A)
def test_integer_matrix(self):
Q = np.array([
[-3, 1, 1, 1],
[1, -3, 1, 1],
[1, 1, -3, 1],
[1, 1, 1, -3]])
assert_allclose(expm(Q), expm(1.0 * Q))
def test_integer_matrix_2(self):
# Check for integer overflows
Q = np.array([[-500, 500, 0, 0],
[0, -550, 360, 190],
[0, 630, -630, 0],
[0, 0, 0, 0]], dtype=np.int16)
assert_allclose(expm(Q), expm(1.0 * Q))
Q = csc_array(Q)
assert_allclose(expm(Q).toarray(), expm(1.0 * Q).toarray())
def test_triangularity_perturbation(self):
# Experiment (1) of
# Awad H. Al-Mohy and Nicholas J. Higham (2012)
# Improved Inverse Scaling and Squaring Algorithms
# for the Matrix Logarithm.
A = np.array([
[3.2346e-1, 3e4, 3e4, 3e4],
[0, 3.0089e-1, 3e4, 3e4],
[0, 0, 3.221e-1, 3e4],
[0, 0, 0, 3.0744e-1]],
dtype=float)
A_logm = np.array([
[-1.12867982029050462e+00, 9.61418377142025565e+04,
-4.52485573953179264e+09, 2.92496941103871812e+14],
[0.00000000000000000e+00, -1.20101052953082288e+00,
9.63469687211303099e+04, -4.68104828911105442e+09],
[0.00000000000000000e+00, 0.00000000000000000e+00,
-1.13289322264498393e+00, 9.53249183094775653e+04],
[0.00000000000000000e+00, 0.00000000000000000e+00,
0.00000000000000000e+00, -1.17947533272554850e+00]],
dtype=float)
assert_allclose(expm(A_logm), A, rtol=1e-4)
# Perturb the upper triangular matrix by tiny amounts,
# so that it becomes technically not upper triangular.
random.seed(1234)
tiny = 1e-17
A_logm_perturbed = A_logm.copy()
A_logm_perturbed[1, 0] = tiny
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Ill-conditioned.*", RuntimeWarning)
warnings.filterwarnings("ignore", "An ill-conditioned.*", RuntimeWarning)
A_expm_logm_perturbed = expm(A_logm_perturbed)
rtol = 1e-4
atol = 100 * tiny
assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol))
def test_burkardt_1(self):
# This matrix is diagonal.
# The calculation of the matrix exponential is simple.
#
# This is the first of a series of matrix exponential tests
# collected by John Burkardt from the following sources.
#
# Alan Laub,
# Review of "Linear System Theory" by Joao Hespanha,
# SIAM Review,
# Volume 52, Number 4, December 2010, pages 779--781.
#
# Cleve Moler and Charles Van Loan,
# Nineteen Dubious Ways to Compute the Exponential of a Matrix,
# Twenty-Five Years Later,
# SIAM Review,
# Volume 45, Number 1, March 2003, pages 3--49.
#
# Cleve Moler,
# Cleve's Corner: A Balancing Act for the Matrix Exponential,
# 23 July 2012.
#
# Robert Ward,
# Numerical computation of the matrix exponential
# with accuracy estimate,
# SIAM Journal on Numerical Analysis,
# Volume 14, Number 4, September 1977, pages 600--610.
exp1 = np.exp(1)
exp2 = np.exp(2)
A = np.array([
[1, 0],
[0, 2],
], dtype=float)
desired = np.array([
[exp1, 0],
[0, exp2],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_2(self):
# This matrix is symmetric.
# The calculation of the matrix exponential is straightforward.
A = np.array([
[1, 3],
[3, 2],
], dtype=float)
desired = np.array([
[39.322809708033859, 46.166301438885753],
[46.166301438885768, 54.711576854329110],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_3(self):
# This example is due to Laub.
# This matrix is ill-suited for the Taylor series approach.
# As powers of A are computed, the entries blow up too quickly.
exp1 = np.exp(1)
exp39 = np.exp(39)
A = np.array([
[0, 1],
[-39, -40],
], dtype=float)
desired = np.array([
[
39/(38*exp1) - 1/(38*exp39),
-np.expm1(-38) / (38*exp1)],
[
39*np.expm1(-38) / (38*exp1),
-1/(38*exp1) + 39/(38*exp39)],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_4(self):
# This example is due to Moler and Van Loan.
# The example will cause problems for the series summation approach,
# as well as for diagonal Pade approximations.
A = np.array([
[-49, 24],
[-64, 31],
], dtype=float)
U = np.array([[3, 1], [4, 2]], dtype=float)
V = np.array([[1, -1/2], [-2, 3/2]], dtype=float)
w = np.array([-17, -1], dtype=float)
desired = np.dot(U * np.exp(w), V)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_5(self):
# This example is due to Moler and Van Loan.
# This matrix is strictly upper triangular
# All powers of A are zero beyond some (low) limit.
# This example will cause problems for Pade approximations.
A = np.array([
[0, 6, 0, 0],
[0, 0, 6, 0],
[0, 0, 0, 6],
[0, 0, 0, 0],
], dtype=float)
desired = np.array([
[1, 6, 18, 36],
[0, 1, 6, 18],
[0, 0, 1, 6],
[0, 0, 0, 1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_6(self):
# This example is due to Moler and Van Loan.
# This matrix does not have a complete set of eigenvectors.
# That means the eigenvector approach will fail.
exp1 = np.exp(1)
A = np.array([
[1, 1],
[0, 1],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_7(self):
# This example is due to Moler and Van Loan.
# This matrix is very close to example 5.
# Mathematically, it has a complete set of eigenvectors.
# Numerically, however, the calculation will be suspect.
exp1 = np.exp(1)
eps = np.spacing(1)
A = np.array([
[1 + eps, 1],
[0, 1 - eps],
], dtype=float)
desired = np.array([
[exp1, exp1],
[0, exp1],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_8(self):
# This matrix was an example in Wikipedia.
exp4 = np.exp(4)
exp16 = np.exp(16)
A = np.array([
[21, 17, 6],
[-5, -1, -6],
[4, 4, 16],
], dtype=float)
desired = np.array([
[13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4],
[-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4],
[16*exp16, 16*exp16, 4*exp16],
], dtype=float) * 0.25
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_9(self):
# This matrix is due to the NAG Library.
# It is an example for function F01ECF.
A = np.array([
[1, 2, 2, 2],
[3, 1, 1, 2],
[3, 2, 1, 2],
[3, 3, 3, 1],
], dtype=float)
desired = np.array([
[740.7038, 610.8500, 542.2743, 549.1753],
[731.2510, 603.5524, 535.0884, 542.2743],
[823.7630, 679.4257, 603.5524, 610.8500],
[998.4355, 823.7630, 731.2510, 740.7038],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_10(self):
# This is Ward's example #1.
# It is defective and nonderogatory.
A = np.array([
[4, 2, 0],
[1, 4, 1],
[1, 1, 4],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6))
desired = np.array([
[147.8666224463699, 183.7651386463682, 71.79703239999647],
[127.7810855231823, 183.7651386463682, 91.88256932318415],
[127.7810855231824, 163.6796017231806, 111.9681062463718],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_11(self):
# This is Ward's example #2.
# It is a symmetric matrix.
A = np.array([
[29.87942128909879, 0.7815750847907159, -2.289519314033932],
[0.7815750847907159, 25.72656945571064, 8.680737820540137],
[-2.289519314033932, 8.680737820540137, 34.39400925519054],
], dtype=float)
assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40))
desired = np.array([
[
5.496313853692378E+15,
-1.823188097200898E+16,
-3.047577080858001E+16],
[
-1.823188097200899E+16,
6.060522870222108E+16,
1.012918429302482E+17],
[
-3.047577080858001E+16,
1.012918429302482E+17,
1.692944112408493E+17],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_12(self):
# This is Ward's example #3.
# Ward's algorithm has difficulty estimating the accuracy
# of its results.
A = np.array([
[-131, 19, 18],
[-390, 56, 54],
[-387, 57, 52],
], dtype=float)
assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1))
desired = np.array([
[-1.509644158793135, 0.3678794391096522, 0.1353352811751005],
[-5.632570799891469, 1.471517758499875, 0.4060058435250609],
[-4.934938326088363, 1.103638317328798, 0.5413411267617766],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_burkardt_13(self):
# This is Ward's example #4.
# This is a version of the Forsythe matrix.
# The eigenvector problem is badly conditioned.
# Ward's algorithm has difficulty estimating the accuracy
# of its results for this problem.
#
# Check the construction of one instance of this family of matrices.
A4_actual = _burkardt_13_power(4, 1)
A4_desired = [[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1e-4, 0, 0, 0]]
assert_allclose(A4_actual, A4_desired)
# Check the expm for a few instances.
for n in (2, 3, 4, 10):
# Approximate expm using Taylor series.
# This works well for this matrix family
# because each matrix in the summation,
# even before dividing by the factorial,
# is entrywise positive with max entry 10**(-floor(p/n)*n).
k = max(1, int(np.ceil(16/n)))
desired = np.zeros((n, n), dtype=float)
for p in range(n*k):
Ap = _burkardt_13_power(n, p)
assert_equal(np.min(Ap), 0)
assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n))
desired += Ap / factorial(p)
actual = expm(_burkardt_13_power(n, 1))
assert_allclose(actual, desired)
def test_burkardt_14(self):
# This is Moler's example.
# This badly scaled matrix caused problems for MATLAB's expm().
A = np.array([
[0, 1e-8, 0],
[-(2e10 + 4e8/6.), -3, 2e10],
[200./3., 0, -200./3.],
], dtype=float)
desired = np.array([
[0.446849468283175, 1.54044157383952e-09, 0.462811453558774],
[-5743067.77947947, -0.0152830038686819, -4526542.71278401],
[0.447722977849494, 1.54270484519591e-09, 0.463480648837651],
], dtype=float)
actual = expm(A)
assert_allclose(actual, desired)
def test_pascal(self):
# Test pascal triangle.
# Nilpotent exponential, used to trigger a failure (gh-8029)
for scale in [1.0, 1e-3, 1e-6]:
for n in range(0, 80, 3):
sc = scale ** np.arange(n, -1, -1)
if np.any(sc < 1e-300):
break
A = np.diag(np.arange(1, n + 1), -1) * scale
B = expm(A)
got = B
expected = binom(np.arange(n + 1)[:,None],
np.arange(n + 1)[None,:]) * sc[None,:] / sc[:,None]
atol = 1e-13 * abs(expected).max()
assert_allclose(got, expected, atol=atol)
def test_matrix_input(self):
# Large np.matrix inputs should work, gh-5546
A = np.zeros((200, 200))
A[-1,0] = 1
B0 = expm(A)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "the matrix subclass.*", DeprecationWarning)
warnings.filterwarnings(
"ignore", "the matrix subclass.*", PendingDeprecationWarning)
B = expm(np.matrix(A))
assert_allclose(B, B0)
def test_exp_sinch_overflow(self):
# Check overflow in intermediate steps is fixed (gh-11839)
L = np.array([[1.0, -0.5, -0.5, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, -0.5, -0.5, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, -0.5, -0.5],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
E0 = expm(-L)
E1 = expm(-2**11 * L)
E2 = E0
for j in range(11):
E2 = E2 @ E2
assert_allclose(E1, E2)
| TestExpM |
python | getsentry__sentry | src/sentry/integrations/slack/utils/rule_status.py | {
"start": 310,
"end": 1904
} | class ____:
def __init__(self, uuid: str | None = None) -> None:
self._uuid = uuid or self._generate_uuid()
cluster_id = getattr(settings, "SENTRY_RULE_TASK_REDIS_CLUSTER", "default")
self.client = redis_clusters.get(cluster_id)
self._set_initial_value()
@property
def uuid(self) -> str:
return self._uuid
def set_value(
self,
status: str,
rule_id: int | None = None,
error_message: str | None = None,
) -> None:
value = self._format_value(status, rule_id, error_message)
self.client.set(self._get_redis_key(), f"{value}", ex=60 * 60)
def get_value(self) -> Any:
key = self._get_redis_key()
value = self.client.get(key)
return orjson.loads(cast(Union[str, bytes], value))
def _generate_uuid(self) -> str:
return uuid4().hex
def _set_initial_value(self) -> None:
value = orjson.dumps({"status": "pending"}).decode()
self.client.set(self._get_redis_key(), f"{value}", ex=60 * 60, nx=True)
def _get_redis_key(self) -> str:
return f"slack-channel-task:1:{self.uuid}"
def _format_value(
self,
status: str,
rule_id: int | None,
error_message: str | None,
) -> str:
value = {"status": status}
if rule_id:
value["rule_id"] = str(rule_id)
if error_message:
value["error"] = error_message
elif status == "failed":
value["error"] = SLACK_FAILED_MESSAGE
return orjson.dumps(value).decode()
| RedisRuleStatus |
python | pypa__pip | src/pip/_vendor/pygments/scanner.py | {
"start": 535,
"end": 669
} | class ____(RuntimeError):
"""
Raise if end of text is reached and the user
tried to call a match function.
"""
| EndOfText |
python | jazzband__django-simple-history | simple_history/models.py | {
"start": 2365,
"end": 32624
} | class ____:
DEFAULT_MODEL_NAME_PREFIX = "Historical"
thread = context = LocalContext() # retain thread for backwards compatibility
m2m_models = {}
def __init__(
self,
verbose_name=None,
verbose_name_plural=None,
bases=(models.Model,),
user_related_name="+",
table_name=None,
inherit=False,
excluded_fields=None,
history_id_field=None,
history_change_reason_field=None,
user_model=None,
get_user=_default_get_user,
cascade_delete_history=False,
custom_model_name=None,
app=None,
history_user_id_field=None,
history_user_getter=_history_user_getter,
history_user_setter=_history_user_setter,
related_name=None,
use_base_model_db=False,
user_db_constraint=True,
no_db_index=list(),
excluded_field_kwargs=None,
history_manager=HistoryManager,
historical_queryset=HistoricalQuerySet,
m2m_fields=(),
m2m_fields_model_field_name="_history_m2m_fields",
m2m_bases=(models.Model,),
):
self.user_set_verbose_name = verbose_name
self.user_set_verbose_name_plural = verbose_name_plural
self.user_related_name = user_related_name
self.user_db_constraint = user_db_constraint
self.table_name = table_name
self.inherit = inherit
self.history_id_field = history_id_field
self.history_change_reason_field = history_change_reason_field
self.user_model = user_model
self.get_user = get_user
self.cascade_delete_history = cascade_delete_history
self.custom_model_name = custom_model_name
self.app = app
self.user_id_field = history_user_id_field
self.user_getter = history_user_getter
self.user_setter = history_user_setter
self.related_name = related_name
self.use_base_model_db = use_base_model_db
self.history_manager = history_manager
self.historical_queryset = historical_queryset
self.m2m_fields = m2m_fields
self.m2m_fields_model_field_name = m2m_fields_model_field_name
if isinstance(no_db_index, str):
no_db_index = [no_db_index]
self.no_db_index = no_db_index
if excluded_fields is None:
excluded_fields = []
self.excluded_fields = excluded_fields
if excluded_field_kwargs is None:
excluded_field_kwargs = {}
self.excluded_field_kwargs = excluded_field_kwargs
try:
if isinstance(bases, str):
raise TypeError
self.bases = (HistoricalChanges,) + tuple(bases)
except TypeError:
raise TypeError("The `bases` option must be a list or a tuple.")
try:
if isinstance(m2m_bases, str):
raise TypeError
self.m2m_bases = (HistoricalChanges,) + tuple(m2m_bases)
except TypeError:
raise TypeError("The `m2m_bases` option must be a list or a tuple.")
def contribute_to_class(self, cls, name):
self.manager_name = name
self.module = cls.__module__
self.cls = cls
models.signals.class_prepared.connect(self.finalize, weak=False)
self.add_extra_methods(cls)
if cls._meta.abstract and not self.inherit:
msg = (
"HistoricalRecords added to abstract model ({}) without "
"inherit=True".format(self.cls.__name__)
)
warnings.warn(msg, UserWarning)
def add_extra_methods(self, cls):
def save_without_historical_record(self, *args, **kwargs):
"""
Save the model instance without creating a historical record.
Make sure you know what you're doing before using this method.
"""
self.skip_history_when_saving = True
try:
ret = self.save(*args, **kwargs)
finally:
del self.skip_history_when_saving
return ret
setattr(cls, "save_without_historical_record", save_without_historical_record)
def finalize(self, sender, **kwargs):
inherited = False
if self.cls is not sender: # set in concrete
inherited = self.inherit and issubclass(sender, self.cls)
if not inherited:
return # set in abstract
if hasattr(sender._meta, "simple_history_manager_attribute"):
raise exceptions.MultipleRegistrationsError(
"{}.{} registered multiple times for history tracking.".format(
sender._meta.app_label, sender._meta.object_name
)
)
history_model = self.create_history_model(sender, inherited)
if inherited:
# Make sure history model is in same module as concrete model
module = importlib.import_module(history_model.__module__)
else:
module = importlib.import_module(self.module)
setattr(module, history_model.__name__, history_model)
# The HistoricalRecords object will be discarded,
# so the signal handlers can't use weak references.
models.signals.post_save.connect(self.post_save, sender=sender, weak=False)
models.signals.post_delete.connect(self.post_delete, sender=sender, weak=False)
models.signals.pre_delete.connect(self.pre_delete, sender=sender, weak=False)
m2m_fields = self.get_m2m_fields_from_model(sender)
for field in m2m_fields:
m2m_changed.connect(
partial(self.m2m_changed, attr=field.name),
sender=field.remote_field.through,
weak=False,
)
descriptor = HistoryDescriptor(
history_model,
manager=self.history_manager,
queryset=self.historical_queryset,
)
setattr(sender, self.manager_name, descriptor)
sender._meta.simple_history_manager_attribute = self.manager_name
for field in m2m_fields:
m2m_model = self.create_history_m2m_model(
history_model, field.remote_field.through
)
self.m2m_models[field] = m2m_model
setattr(module, m2m_model.__name__, m2m_model)
m2m_descriptor = HistoryDescriptor(m2m_model)
setattr(history_model, field.name, m2m_descriptor)
def get_history_model_name(self, model):
if not self.custom_model_name:
return f"{self.DEFAULT_MODEL_NAME_PREFIX}{model._meta.object_name}"
# Must be trying to use a custom history model name
if callable(self.custom_model_name):
name = self.custom_model_name(model._meta.object_name)
else:
# simple string
name = self.custom_model_name
# Desired class name cannot be same as the model it is tracking
if not (
name.lower() == model._meta.object_name.lower()
and model.__module__ == self.module
):
return name
raise ValueError(
"The 'custom_model_name' option '{}' evaluates to a name that is the same "
"as the model it is tracking. This is not permitted.".format(
self.custom_model_name
)
)
def create_history_m2m_model(self, model, through_model):
attrs = {}
fields = self.copy_fields(through_model)
attrs.update(fields)
attrs.update(self.get_extra_fields_m2m(model, through_model, fields))
name = self.get_history_model_name(through_model)
registered_models[through_model._meta.db_table] = through_model
attrs.update(Meta=type("Meta", (), self.get_meta_options_m2m(through_model)))
m2m_history_model = type(str(name), self.m2m_bases, attrs)
return m2m_history_model
def create_history_model(self, model, inherited):
"""
Creates a historical model to associate with the model provided.
"""
attrs = {
"__module__": self.module,
"_history_excluded_fields": self.excluded_fields,
"_history_m2m_fields": self.get_m2m_fields_from_model(model),
"tracked_fields": self.fields_included(model),
}
app_module = "%s.models" % model._meta.app_label
if inherited:
# inherited use models module
attrs["__module__"] = model.__module__
elif model.__module__ != self.module:
# registered under different app
attrs["__module__"] = self.module
elif app_module != self.module:
# Abuse an internal API because the app registry is loading.
app = apps.app_configs[model._meta.app_label]
models_module = app.name
attrs["__module__"] = models_module
fields = self.copy_fields(model)
attrs.update(fields)
attrs.update(self.get_extra_fields(model, fields))
# type in python2 wants str as a first argument
attrs.update(Meta=type("Meta", (), self.get_meta_options(model)))
if not inherited and self.table_name is not None:
attrs["Meta"].db_table = self.table_name
# Set as the default then check for overrides
name = self.get_history_model_name(model)
registered_models[model._meta.db_table] = model
history_model = type(str(name), self.bases, attrs)
return history_model
def fields_included(self, model):
fields = []
for field in model._meta.fields:
if field.name not in self.excluded_fields:
fields.append(field)
return fields
def field_excluded_kwargs(self, field):
"""
Find the excluded kwargs for a given field.
"""
return self.excluded_field_kwargs.get(field.name, set())
def copy_fields(self, model):
"""
Creates copies of the model's original fields, returning
a dictionary mapping field name to copied field object.
"""
fields = {}
for field in self.fields_included(model):
field = copy.copy(field)
field.remote_field = copy.copy(field.remote_field)
if isinstance(field, OrderWrt):
# OrderWrt is a proxy field, switch to a plain IntegerField
field.__class__ = models.IntegerField
if isinstance(field, models.ForeignKey):
old_field = field
old_swappable = old_field.swappable
old_field.swappable = False
try:
_name, _path, args, field_args = old_field.deconstruct()
finally:
old_field.swappable = old_swappable
if getattr(old_field, "one_to_one", False) or isinstance(
old_field, models.OneToOneField
):
FieldType = models.ForeignKey
else:
FieldType = type(old_field)
# Remove any excluded kwargs for the field.
# This is useful when a custom OneToOneField is being used that
# has a different set of arguments than ForeignKey
for exclude_arg in self.field_excluded_kwargs(old_field):
field_args.pop(exclude_arg, None)
# If field_args['to'] is 'self' then we have a case where the object
# has a foreign key to itself. If we pass the historical record's
# field to = 'self', the foreign key will point to an historical
# record rather than the base record. We can use old_field.model here.
if field_args.get("to", None) == "self":
field_args["to"] = old_field.model
# Override certain arguments passed when creating the field
# so that they work for the historical field.
field_args.update(
db_constraint=False,
related_name="+",
null=True,
blank=True,
primary_key=False,
db_index=True,
serialize=True,
unique=False,
on_delete=models.DO_NOTHING,
)
field = FieldType(*args, **field_args)
field.name = old_field.name
else:
transform_field(field)
# drop db index
if field.name in self.no_db_index:
field.db_index = False
fields[field.name] = field
return fields
def _get_history_change_reason_field(self):
if self.history_change_reason_field:
# User specific field from init
history_change_reason_field = self.history_change_reason_field
elif getattr(
settings, "SIMPLE_HISTORY_HISTORY_CHANGE_REASON_USE_TEXT_FIELD", False
):
# Use text field with no max length, not enforced by DB anyways
history_change_reason_field = models.TextField(null=True)
else:
# Current default, with max length
history_change_reason_field = models.CharField(max_length=100, null=True)
return history_change_reason_field
def _get_history_id_field(self):
if self.history_id_field:
history_id_field = self.history_id_field.clone()
history_id_field.primary_key = True
history_id_field.editable = False
elif getattr(settings, "SIMPLE_HISTORY_HISTORY_ID_USE_UUID", False):
history_id_field = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False
)
else:
history_id_field = models.AutoField(primary_key=True)
return history_id_field
def _get_history_user_fields(self):
if self.user_id_field is not None:
# Tracking user using explicit id rather than Django ForeignKey
history_user_fields = {
"history_user": property(self.user_getter, self.user_setter),
"history_user_id": self.user_id_field,
}
else:
user_model = self.user_model or getattr(
settings, "AUTH_USER_MODEL", "auth.User"
)
history_user_fields = {
"history_user": models.ForeignKey(
user_model,
null=True,
related_name=self.user_related_name,
on_delete=models.SET_NULL,
db_constraint=self.user_db_constraint,
)
}
return history_user_fields
def _get_history_related_field(self, model):
if self.related_name:
if self.manager_name == self.related_name:
raise exceptions.RelatedNameConflictError(
"The related name must not be called like the history manager."
)
return {
"history_relation": models.ForeignKey(
model,
on_delete=models.DO_NOTHING,
related_name=self.related_name,
db_constraint=False,
)
}
else:
return {}
def get_extra_fields_m2m(self, model, through_model, fields):
"""Return dict of extra fields added to the m2m historical record model"""
extra_fields = {
"__module__": model.__module__,
"__str__": lambda self: "{} as of {}".format(
self._meta.verbose_name, self.history.history_date
),
"history": models.ForeignKey(
model,
db_constraint=False,
on_delete=models.DO_NOTHING,
),
"instance_type": through_model,
"m2m_history_id": self._get_history_id_field(),
}
return extra_fields
def get_extra_fields(self, model, fields):
"""Return dict of extra fields added to the historical record model"""
def revert_url(self):
"""URL for this change in the default admin site."""
opts = model._meta
app_label, model_name = opts.app_label, opts.model_name
return reverse(
f"{admin.site.name}:{app_label}_{model_name}_simple_history",
args=[getattr(self, opts.pk.attname), self.history_id],
)
def get_instance(self):
attrs = {
field.attname: getattr(self, field.attname) for field in fields.values()
}
if self._history_excluded_fields:
# We don't add ManyToManyFields to this list because they may cause
# the subsequent `.get()` call to fail. See #706 for context.
excluded_attnames = [
model._meta.get_field(field).attname
for field in self._history_excluded_fields
if not isinstance(model._meta.get_field(field), ManyToManyField)
]
try:
values = (
model.objects.filter(pk=getattr(self, model._meta.pk.attname))
.values(*excluded_attnames)
.get()
)
except ObjectDoesNotExist:
pass
else:
attrs.update(values)
result = model(**attrs)
# this is the only way external code could know an instance is historical
setattr(result, SIMPLE_HISTORY_REVERSE_ATTR_NAME, self)
return result
def get_next_record(self):
"""
Get the next history record for the instance. `None` if last.
"""
history = utils.get_history_manager_from_history(self)
return (
history.filter(history_date__gt=self.history_date)
.order_by("history_date")
.first()
)
def get_prev_record(self):
"""
Get the previous history record for the instance. `None` if first.
"""
history = utils.get_history_manager_from_history(self)
return (
history.filter(history_date__lt=self.history_date)
.order_by("history_date")
.last()
)
def get_default_history_user(instance):
"""
Returns the user specified by `get_user` method for manually creating
historical objects
"""
return self.get_history_user(instance)
extra_fields = {
"history_id": self._get_history_id_field(),
"history_date": models.DateTimeField(db_index=self._date_indexing is True),
"history_change_reason": self._get_history_change_reason_field(),
"history_type": models.CharField(
max_length=1,
choices=(("+", _("Created")), ("~", _("Changed")), ("-", _("Deleted"))),
),
"history_object": HistoricalObjectDescriptor(
model, self.fields_included(model)
),
"instance": property(get_instance),
"instance_type": model,
"next_record": property(get_next_record),
"prev_record": property(get_prev_record),
"revert_url": revert_url,
"__str__": lambda self: "{} as of {}".format(
self.history_object, self.history_date
),
"get_default_history_user": staticmethod(get_default_history_user),
}
extra_fields.update(self._get_history_related_field(model))
extra_fields.update(self._get_history_user_fields())
return extra_fields
@property
def _date_indexing(self):
"""False, True, or 'composite'; default is True"""
result = getattr(settings, "SIMPLE_HISTORY_DATE_INDEX", True)
valid = True
if isinstance(result, str):
result = result.lower()
if result not in ("composite",):
valid = False
elif not isinstance(result, bool):
valid = False
if not valid:
raise ImproperlyConfigured(
"SIMPLE_HISTORY_DATE_INDEX must be one of (False, True, 'Composite')"
)
return result
def get_meta_options_m2m(self, through_model):
"""
Returns a dictionary of fields that will be added to
the Meta inner class of the m2m historical record model.
"""
name = self.get_history_model_name(through_model)
meta_fields = {"verbose_name": name}
if self.app:
meta_fields["app_label"] = self.app
return meta_fields
def get_meta_options(self, model):
"""
Returns a dictionary of fields that will be added to
the Meta inner class of the historical record model.
"""
meta_fields = {
"ordering": ("-history_date", "-history_id"),
"get_latest_by": ("history_date", "history_id"),
}
if self.user_set_verbose_name:
name = self.user_set_verbose_name
else:
name = format_lazy("historical {}", smart_str(model._meta.verbose_name))
if self.user_set_verbose_name_plural:
plural_name = self.user_set_verbose_name_plural
else:
plural_name = format_lazy(
"historical {}", smart_str(model._meta.verbose_name_plural)
)
meta_fields["verbose_name"] = name
meta_fields["verbose_name_plural"] = plural_name
if self.app:
meta_fields["app_label"] = self.app
if self._date_indexing == "composite":
meta_fields["indexes"] = (
models.Index(fields=("history_date", model._meta.pk.attname)),
)
return meta_fields
def post_save(self, instance, created, using=None, **kwargs):
if not getattr(settings, "SIMPLE_HISTORY_ENABLED", True):
return
if hasattr(instance, "skip_history_when_saving"):
return
if not kwargs.get("raw", False):
self.create_historical_record(instance, created and "+" or "~", using=using)
def post_delete(self, instance, using=None, **kwargs):
if not getattr(settings, "SIMPLE_HISTORY_ENABLED", True):
return
if self.cascade_delete_history:
manager = getattr(instance, self.manager_name)
manager.using(using).all().delete()
else:
self.create_historical_record(instance, "-", using=using)
def pre_delete(self, instance, **kwargs):
    """
    pre_delete method to ensure all deferred fields are loaded on the model.

    The subsequent "-" historical row needs every tracked field value, so
    any tracked field that is still deferred is fetched from the database
    before deletion.
    """
    # First check that history is enabled (on model and globally)
    if not getattr(settings, "SIMPLE_HISTORY_ENABLED", True):
        return
    if not hasattr(instance._meta, "simple_history_manager_attribute"):
        return
    tracked_attrs = {f.attname for f in self.fields_included(instance)}
    pending = tracked_attrs.intersection(instance.get_deferred_fields())
    if pending:
        instance.refresh_from_db(fields=pending)
def get_change_reason_for_object(self, instance, history_type, using):
    """
    Get change reason for object.

    Customize this method to automatically fill change reason from context.

    :param instance: tracked model instance being recorded
    :param history_type: "+", "~" or "-" for create/update/delete (unused here)
    :param using: database alias the record will be written to (unused here)
    """
    # Default behaviour: read the reason off the instance itself.
    return utils.get_change_reason_from_object(instance)
def m2m_changed(self, instance, action, attr, pk_set, reverse, **_):
    """Signal receiver for m2m updates: snapshot the instance as "~"."""
    if not getattr(settings, "SIMPLE_HISTORY_ENABLED", True):
        return
    if hasattr(instance, "skip_history_when_saving"):
        return
    # Only the post_* phases matter here.
    if action not in ("post_add", "post_remove", "post_clear"):
        return
    # It should be safe to ~ this since the row must exist to modify m2m on it
    self.create_historical_record(instance, "~")
def create_historical_record_m2ms(self, history_instance, instance):
    """Snapshot every tracked m2m relation of ``instance`` onto
    ``history_instance``.

    For each tracked m2m field, copies all current through-table rows
    into the corresponding m2m history model, firing the pre/post
    historical m2m record signals around the bulk insert.
    """
    for field in history_instance._history_m2m_fields:
        m2m_history_model = self.m2m_models[field]
        original_instance = history_instance.instance
        through_model = getattr(original_instance, field.name).through
        through_model_field_names = [f.name for f in through_model._meta.fields]
        through_model_fk_field_names = [
            f.name for f in through_model._meta.fields if isinstance(f, ForeignKey)
        ]
        insert_rows = []
        through_field_name = utils.get_m2m_field_name(field)
        rows = through_model.objects.filter(**{through_field_name: instance})
        # Pre-fetch FK targets so the copy loop below doesn't issue one
        # query per row.
        rows = rows.select_related(*through_model_fk_field_names)
        for row in rows:
            insert_row = {"history": history_instance}
            # Copy every through-table column verbatim onto the history row.
            for field_name in through_model_field_names:
                insert_row[field_name] = getattr(row, field_name)
            insert_rows.append(m2m_history_model(**insert_row))
        pre_create_historical_m2m_records.send(
            sender=m2m_history_model,
            rows=insert_rows,
            history_instance=history_instance,
            instance=instance,
            field=field,
        )
        created_rows = m2m_history_model.objects.bulk_create(insert_rows)
        post_create_historical_m2m_records.send(
            sender=m2m_history_model,
            created_rows=created_rows,
            history_instance=history_instance,
            instance=instance,
            field=field,
        )
def create_historical_record(self, instance, history_type, using=None):
    """Write one historical row (plus m2m snapshots) for ``instance``.

    :param instance: tracked model instance to snapshot
    :param history_type: "+", "~" or "-" for create/update/delete
    :param using: database alias; honored only when ``use_base_model_db``
        is set, otherwise default routing applies
    """
    using = using if self.use_base_model_db else None
    # An explicit ``_history_date`` on the instance overrides "now".
    history_date = getattr(instance, "_history_date", timezone.now())
    history_user = self.get_history_user(instance)
    history_change_reason = self.get_change_reason_for_object(
        instance, history_type, using
    )
    manager = getattr(instance, self.manager_name)
    attrs = {}
    for field in self.fields_included(instance):
        attrs[field.attname] = getattr(instance, field.attname)
    relation_field = getattr(manager.model, "history_relation", None)
    if relation_field is not None:
        attrs["history_relation"] = instance
    history_instance = manager.model(
        history_date=history_date,
        history_type=history_type,
        history_user=history_user,
        history_change_reason=history_change_reason,
        **attrs,
    )
    # Signal order is part of the public contract: pre -> save -> m2m -> post.
    pre_create_historical_record.send(
        sender=manager.model,
        instance=instance,
        history_date=history_date,
        history_user=history_user,
        history_change_reason=history_change_reason,
        history_instance=history_instance,
        using=using,
    )
    history_instance.save(using=using)
    self.create_historical_record_m2ms(history_instance, instance)
    post_create_historical_record.send(
        sender=manager.model,
        instance=instance,
        history_instance=history_instance,
        history_date=history_date,
        history_user=history_user,
        history_change_reason=history_change_reason,
        using=using,
    )
def get_history_user(self, instance):
    """Get the modifying user from instance or middleware."""
    _missing = object()
    # An explicit ``_history_user`` (even None) wins outright.
    user = getattr(instance, "_history_user", _missing)
    if user is not _missing:
        return user
    request = None
    try:
        if self.context.request.user.is_authenticated:
            request = self.context.request
    except AttributeError:
        pass
    return self.get_user(instance=instance, request=request)
def get_m2m_fields_from_model(self, model):
    """Resolve the m2m fields to track on ``model``.

    Merges the fields configured on this ``HistoricalRecords`` with any
    declared on the model via the configured attribute name; entries may
    be field objects or plain field names.
    """
    candidates = set(self.m2m_fields)
    declared = getattr(model, self.m2m_fields_model_field_name, None)
    if declared is not None:
        candidates.update(declared)
    names = [c if isinstance(c, str) else c.name for c in candidates]
    return [getattr(model, n).field for n in names]
def transform_field(field):
    """Customize field appropriately for use in historical model.

    Mutates ``field`` in place: auto-PK fields become plain integer
    columns, file fields store only the path, timestamp auto-updates are
    disabled, and primary-key/unique constraints are downgraded to a
    plain index (a history table stores the same original row many times).
    """
    field.name = field.attname
    # Order matters: BigAutoField subclasses AutoField, so check it first.
    if isinstance(field, models.BigAutoField):
        field.__class__ = models.BigIntegerField
    elif isinstance(field, models.AutoField):
        field.__class__ = models.IntegerField
    elif isinstance(field, models.FileField):
        # Don't copy file, just path.
        if getattr(settings, "SIMPLE_HISTORY_FILEFIELD_TO_CHARFIELD", False):
            field.__class__ = models.CharField
        else:
            field.__class__ = models.TextField
    # Historical instance shouldn't change create/update timestamps
    field.auto_now = False
    field.auto_now_add = False
    # Just setting db_collation explicitly since we're not using
    # field.deconstruct() here
    field.db_collation = None
    if field.primary_key or field.unique:
        # Unique fields can no longer be guaranteed unique,
        # but they should still be indexed for faster lookups.
        field.primary_key = False
        # DEV: Remove this check (but keep the contents) when the minimum required
        # Django version is 5.1
        if django.VERSION >= (5, 1):
            field.unique = False
        # (Django < 5.1) Can't set `unique` as it's a property, so set the backing field
        # (Django >= 5.1) Set the backing field in addition to the cached property
        # above, to cover all bases
        field._unique = False
        field.db_index = True
        field.serialize = True
| HistoricalRecords |
python | apache__airflow | airflow-core/tests/unit/serialization/test_dag_serialization.py | {
"start": 17093,
"end": 155758
} | class ____:
"""Unit tests for stringified DAGs."""
@pytest.fixture(autouse=True)
def setup_test_cases(self):
    """Patch ``BaseHook.get_connection`` for the duration of each test.

    Bug fix: the fixture previously returned immediately, so the
    ``mock.patch`` context manager was torn down before any test body
    ran and the stub connection was never visible. Yielding inside the
    ``with`` block keeps the patch active for the whole test.
    """
    with mock.patch.object(BaseHook, "get_connection") as m:
        m.return_value = Connection(
            extra=(
                "{"
                '"project_id": "mock", '
                '"location": "mock", '
                '"instance": "mock", '
                '"database_type": "postgres", '
                '"use_proxy": "False", '
                '"use_ssl": "False"'
                "}"
            )
        )
        # Keep the patch in place while the test runs; teardown happens
        # when the generator resumes after the test.
        yield
# Skip that test if latest botocore is used - it reads all example dags and in case latest botocore
# is upgraded to latest, usually aiobotocore can't be installed and some of the system tests will fail with
# import errors. Also skip if not running on main branch - some of the example dags might fail due to
# outdated imports in past branches
@pytest.mark.skipif(
    os.environ.get("UPGRADE_BOTO", "") == "true",
    reason="This test is skipped when latest botocore is installed",
)
@skip_if_force_lowest_dependencies_marker
@skip_if_not_on_main
@pytest.mark.db_test
def test_serialization(self):
    """Serialization and deserialization should work for every DAG and Operator."""
    with warnings.catch_warnings():
        dags, import_errors = collect_dags()
        serialized_dags = {}
        for v in dags.values():
            dag = SerializedDAG.to_dict(v)
            # Every serialized DAG must pass the published JSON schema.
            SerializedDAG.validate_schema(dag)
            serialized_dags[v.dag_id] = dag

    # Ignore some errors.
    import_errors = {
        file: error
        for file, error in import_errors.items()
        # Don't worry about warnings, we only care about errors here -- otherwise
        # AirflowProviderDeprecationWarning etc show up in import_errors, and being aware of all of those is
        # not relevant to this test; we only care about actual errors
        if "airflow.exceptions.AirflowProviderDeprecationWarning" not in error
        # TODO: TaskSDK
        if "`use_airflow_context=True` is not yet implemented" not in error
        # This "looks" like a problem, but is just a quirk of the parse-all-dags-in-one-process we do
        # in this test
        if "AirflowDagDuplicatedIdException: Ignoring DAG example_sagemaker" not in error
    }
    # Let's not be exact about this, but if everything fails to parse we should fail this test too
    assert import_errors == {}
    assert len(dags) > 100

    # Compares with the ground truth of JSON string.
    actual, expected = self.prepare_ser_dags_for_comparison(
        actual=serialized_dags["simple_dag"],
        expected=serialized_simple_dag_ground_truth,
    )
    assert actual == expected
@pytest.mark.db_test
@pytest.mark.parametrize(
    ("timetable", "serialized_timetable"),
    [
        (
            cron_timetable("0 0 * * *"),
            {
                "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                "__var": {"expression": "0 0 * * *", "timezone": "UTC"},
            },
        ),
        (
            CustomSerializationTimetable("foo"),
            CUSTOM_TIMETABLE_SERIALIZED,
        ),
    ],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
    """Verify a timetable-backed DAG is serialized correctly."""
    dag = get_timetable_based_simple_dag(timetable)
    serialized_dag = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(serialized_dag)

    # Only the timetable entry should differ from the ground truth.
    expected = copy.deepcopy(serialized_simple_dag_ground_truth)
    expected["dag"]["timetable"] = serialized_timetable

    # these tasks are not mapped / in mapped task group
    for task in expected["dag"]["tasks"]:
        task["__var"]["_needs_expansion"] = False

    actual, expected = self.prepare_ser_dags_for_comparison(
        actual=serialized_dag,
        expected=expected,
    )
    assert actual == expected
@pytest.mark.db_test
def test_dag_serialization_preserves_empty_access_roles(self):
    """An explicitly empty ``access_control`` dict must survive serialization."""
    dag = make_simple_dag()
    dag.access_control = {}

    ser = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(ser)

    expected = {"__type": "dict", "__var": {}}
    assert ser["dag"]["access_control"] == expected
@pytest.mark.db_test
def test_dag_serialization_unregistered_custom_timetable(self):
    """Serializing must fail when the custom timetable class is unregistered."""
    dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
    expected_message = (
        "Failed to serialize DAG 'simple_dag': Timetable class "
        "'tests_common.test_utils.timetables.CustomSerializationTimetable' "
        "is not registered or "
        "you have a top level database access that disrupted the session. "
        "Please check the airflow best practices documentation."
    )
    with pytest.raises(SerializationError) as ctx:
        SerializedDAG.to_dict(dag)
    assert str(ctx.value) == expected_message
def prepare_ser_dags_for_comparison(self, actual, expected):
    """Normalize two serialized DAG dicts so they can be compared.

    Clears ``fileloc`` (machine dependent), sorts the task list and
    access-control permissions (their order is irrelevant), and
    round-trips both sides through JSON so diffs show plain values.
    """
    assert actual["dag"]["fileloc"].split("/")[-1] == "test_dag_serialization.py"
    actual["dag"]["fileloc"] = None

    def normalize(dag_dict: dict):
        # Sort tasks by task_id and each task's keys alphabetically so
        # ordering cannot cause spurious mismatches.
        ordered_tasks = []
        for task in sorted(dag_dict["dag"]["tasks"], key=lambda t: t["__var"]["task_id"]):
            task["__var"] = dict(sorted(task["__var"].items(), key=lambda kv: kv[0]))
            ordered_tasks.append(task)
        dag_dict["dag"]["tasks"] = ordered_tasks

        if "access_control" in dag_dict["dag"]:
            dag_dict["dag"]["access_control"]["__var"]["test_role"]["__var"] = sorted(
                dag_dict["dag"]["access_control"]["__var"]["test_role"]["__var"]
            )
        return dag_dict

    expected = copy.deepcopy(expected)
    # by roundtripping to json we get a cleaner diff
    # if not doing this, we get false alarms such as "__var" != VAR
    return (
        json.loads(json.dumps(normalize(actual))),
        json.loads(json.dumps(normalize(expected))),
    )
@pytest.mark.db_test
def test_deserialization_across_process(self):
    """A serialized DAG can be deserialized in another process."""
    # Since we need to parse the dags twice here (once in the subprocess,
    # and once here to get a DAG to compare to) we don't want to load all
    # dags.
    queue = multiprocessing.Queue()
    proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
    proc.daemon = True
    proc.start()

    stringified_dags = {}
    # The subprocess streams one JSON document per DAG, then None as the
    # end-of-stream sentinel.
    while True:
        v = queue.get()
        if v is None:
            break
        dag = SerializedDAG.from_json(v)
        assert isinstance(dag, SerializedDAG)
        stringified_dags[dag.dag_id] = dag

    dags, _ = collect_dags("airflow/example_dags")
    assert set(stringified_dags.keys()) == set(dags.keys())

    # Verify deserialized DAGs.
    for dag_id in stringified_dags:
        self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
@skip_if_force_lowest_dependencies_marker
@pytest.mark.db_test
def test_roundtrip_provider_example_dags(self):
    """Provider example DAGs must survive a JSON serialization round trip."""
    dags, _ = collect_dags(
        [
            "providers/*/src/airflow/providers/*/example_dags",
            "providers/*/src/airflow/providers/*/*/example_dags",
        ]
    )

    for original in dags.values():
        roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(original))
        self.validate_deserialized_dag(roundtripped, original)

    # Let's not be exact about this, but if everything fails to parse we should fail this test too
    assert len(dags) >= 7
@pytest.mark.db_test
@pytest.mark.parametrize(
    "timetable",
    [cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_roundtrip_from_timetable(self, timetable):
    """Verify a timetable-backed serialization can be deserialized."""
    original = get_timetable_based_simple_dag(timetable)
    restored = SerializedDAG.from_json(SerializedDAG.to_json(original))
    self.validate_deserialized_dag(restored, original)
def validate_deserialized_dag(self, serialized_dag: SerializedDAG, dag: DAG):
    """
    Verify that all example DAGs work with DAG Serialization by
    checking fields between Serialized Dags & non-Serialized Dags
    """
    exclusion_list = {
        # Doesn't implement __eq__ properly. Check manually.
        "timetable",
        "timezone",
        # Need to check fields in it, to exclude functions.
        "default_args",
        "task_group",
        "params",
        "_processor_dags_folder",
    }
    fields_to_check = dag.get_serialized_fields() - exclusion_list
    for field in fields_to_check:
        actual = getattr(serialized_dag, field)
        expected = getattr(dag, field, None)
        assert actual == expected, f"{dag.dag_id}.{field} does not match"
    # _processor_dags_folder is only populated at serialization time
    # it's only used when relying on serialized dag to determine a dag's relative path
    assert (
        serialized_dag._processor_dags_folder
        == (AIRFLOW_REPO_ROOT_PATH / "airflow-core" / "tests" / "unit" / "dags").as_posix()
    )
    if dag.default_args:
        for k, v in dag.default_args.items():
            if callable(v):
                # Callables can't round-trip exactly; check we stored _something_.
                assert k in serialized_dag.default_args
            else:
                assert v == serialized_dag.default_args[k], (
                    f"{dag.dag_id}.default_args[{k}] does not match"
                )
    # Timetables/timezones are compared via their serialized form because
    # they don't implement __eq__ (see exclusion_list above).
    assert serialized_dag.timetable.summary == dag.timetable.summary
    assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
    assert serialized_dag.timezone == dag.timezone

    # Recurse into every task.
    for task_id in dag.task_ids:
        self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
def validate_deserialized_task(
    self,
    serialized_task,
    task,
):
    """Verify non-Airflow operators are casted to BaseOperator or MappedOperator."""
    from airflow.models.mappedoperator import MappedOperator as SchedulerMappedOperator
    from airflow.sdk import BaseOperator
    from airflow.sdk.definitions.mappedoperator import MappedOperator

    assert isinstance(task, (BaseOperator, MappedOperator))

    # Every task should have a task_group property -- even if it's the DAG's root task group
    assert serialized_task.task_group

    if isinstance(task, BaseOperator):
        assert isinstance(serialized_task, SerializedBaseOperator)
        fields_to_check = task.get_serialized_fields() - {
            # Checked separately
            "task_type",
            "_operator_name",
            # Type is excluded, so don't check it
            "_log",
            # List vs tuple. Check separately
            "template_ext",
            "template_fields",
            # We store the string, real dag has the actual code
            "_pre_execute_hook",
            "_post_execute_hook",
            # Checked separately
            "resources",
            "on_failure_fail_dagrun",
            "_needs_expansion",
            "_is_sensor",
        }
    else:  # Promised to be mapped by the assert above.
        assert isinstance(serialized_task, SchedulerMappedOperator)
        fields_to_check = {f.name for f in attrs.fields(MappedOperator)}
        fields_to_check -= {
            "map_index_template",
            # Matching logic in BaseOperator.get_serialized_fields().
            "dag",
            "task_group",
            # List vs tuple. Check separately.
            "operator_extra_links",
            "template_ext",
            "template_fields",
            # Checked separately.
            "operator_class",
            "partial_kwargs",
            "expand_input",
        }

    assert serialized_task.task_type == task.task_type

    # Sets: the serialized side may use tuples where the original used lists.
    assert set(serialized_task.template_ext) == set(task.template_ext)
    assert set(serialized_task.template_fields) == set(task.template_fields)

    assert serialized_task.upstream_task_ids == task.upstream_task_ids
    assert serialized_task.downstream_task_ids == task.downstream_task_ids

    for field in fields_to_check:
        assert getattr(serialized_task, field) == getattr(task, field), (
            f"{task.dag.dag_id}.{task.task_id}.{field} does not match"
        )

    if serialized_task.resources is None:
        assert task.resources is None or task.resources == []
    else:
        assert serialized_task.resources == task.resources

    # `deps` are set in the Scheduler's BaseOperator as that is where we need to evaluate deps
    # so only serialized tasks that are sensors should have the ReadyToRescheduleDep.
    if task._is_sensor:
        assert ReadyToRescheduleDep() in serialized_task.deps
    else:
        assert ReadyToRescheduleDep() not in serialized_task.deps

    # Ugly hack as some operators override params var in their init
    if isinstance(task.params, ParamsDict) and isinstance(serialized_task.params, ParamsDict):
        assert serialized_task.params.dump() == task.params.dump()

    if isinstance(task, MappedOperator):
        # MappedOperator.operator_class now stores only minimal type information
        # for memory efficiency (task_type and _operator_name).
        # NOTE(review): the two comparisons below are no-op expression
        # statements — they look like they were meant to be ``assert``s.
        # Also ``operator_class`` is a dict here (per the comment above),
        # so ``isinstance(..., DecoratedOperator)`` can never be true —
        # confirm intent before changing either line.
        serialized_task.operator_class["task_type"] == type(task).__name__
        if isinstance(serialized_task.operator_class, DecoratedOperator):
            serialized_task.operator_class["_operator_name"] == task._operator_name

        # Serialization cleans up default values in partial_kwargs, this
        # adds them back to both sides.
        default_partial_kwargs = (
            BaseOperator.partial(task_id="_")._expand(EXPAND_INPUT_EMPTY, strict=False).partial_kwargs
        )
        # These are added in `_TaskDecorator` e.g. when @setup or @teardown task is passed
        default_decorator_partial_kwargs = {
            "is_setup": False,
            "is_teardown": False,
            "on_failure_fail_dagrun": False,
        }
        serialized_partial_kwargs = {
            **default_partial_kwargs,
            **default_decorator_partial_kwargs,
            **serialized_task.partial_kwargs,
        }
        original_partial_kwargs = {
            **default_partial_kwargs,
            **default_decorator_partial_kwargs,
            **task.partial_kwargs,
        }
        assert serialized_partial_kwargs == original_partial_kwargs

        # ExpandInputs have different classes between scheduler and definition
        assert attrs.asdict(serialized_task._get_specified_expand_input()) == attrs.asdict(
            task._get_specified_expand_input()
        )
@pytest.mark.parametrize(
    ("dag_start_date", "task_start_date", "expected_task_start_date"),
    [
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            None,
            datetime(2019, 8, 1, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 8, 2, tzinfo=timezone.utc),
            datetime(2019, 8, 2, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 7, 30, tzinfo=timezone.utc),
            datetime(2019, 8, 1, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=dt_timezone(timedelta(hours=1))),
            datetime(2019, 7, 30, tzinfo=dt_timezone(timedelta(hours=1))),
            datetime(2019, 8, 1, tzinfo=dt_timezone(timedelta(hours=1))),
        ),
        (
            pendulum.datetime(2019, 8, 1, tz="UTC"),
            None,
            pendulum.datetime(2019, 8, 1, tz="UTC"),
        ),
    ],
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
    """A task's start_date resolves to max(dag, task); redundant values are dropped."""
    dag = DAG(dag_id="simple_dag", schedule=None, start_date=dag_start_date)
    BaseOperator(task_id="simple_task", dag=dag, start_date=task_start_date)

    serialized_dag = SerializedDAG.to_dict(dag)
    if not task_start_date or dag_start_date >= task_start_date:
        # If dag.start_date > task.start_date -> task.start_date=dag.start_date
        # because of the logic in dag.add_task()
        assert "start_date" not in serialized_dag["dag"]["tasks"][0]["__var"]
    else:
        assert "start_date" in serialized_dag["dag"]["tasks"][0]["__var"]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
    """Serializing while the ``with DAG(...)`` context is still open must work."""
    start = datetime(2019, 8, 1, tzinfo=timezone.utc)
    with DAG(
        dag_id="simple_dag",
        schedule=None,
        start_date=start,
    ) as dag:
        BaseOperator(task_id="simple_task")
        # should not raise RuntimeError: dictionary changed size during iteration
        SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
    ("dag_end_date", "task_end_date", "expected_task_end_date"),
    [
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            None,
            datetime(2019, 8, 1, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 8, 2, tzinfo=timezone.utc),
            datetime(2019, 8, 1, tzinfo=timezone.utc),
        ),
        (
            datetime(2019, 8, 1, tzinfo=timezone.utc),
            datetime(2019, 7, 30, tzinfo=timezone.utc),
            datetime(2019, 7, 30, tzinfo=timezone.utc),
        ),
    ],
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
    """A task's end_date resolves to min(dag, task); redundant values are dropped."""
    dag = DAG(
        dag_id="simple_dag",
        schedule=None,
        start_date=datetime(2019, 8, 1),
        end_date=dag_end_date,
    )
    BaseOperator(task_id="simple_task", dag=dag, end_date=task_end_date)

    serialized_dag = SerializedDAG.to_dict(dag)
    if not task_end_date or dag_end_date <= task_end_date:
        # If dag.end_date < task.end_date -> task.end_date=dag.end_date
        # because of the logic in dag.add_task()
        assert "end_date" not in serialized_dag["dag"]["tasks"][0]["__var"]
    else:
        assert "end_date" in serialized_dag["dag"]["tasks"][0]["__var"]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert simple_task.end_date == expected_task_end_date
@pytest.mark.parametrize(
    ("serialized_timetable", "expected_timetable"),
    [
        (
            {"__type": "airflow.timetables.simple.NullTimetable", "__var": {}},
            NullTimetable(),
        ),
        (
            {
                "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                "__var": {"expression": "@weekly", "timezone": "UTC"},
            },
            cron_timetable("0 0 * * 0"),
        ),
        (
            {"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}},
            OnceTimetable(),
        ),
        (
            {
                "__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
                "__var": {"delta": 86400.0},
            },
            delta_timetable(timedelta(days=1)),
        ),
        (CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
    ],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_deserialization_timetable(
    self,
    serialized_timetable,
    expected_timetable,
):
    """Each serialized timetable form deserializes to the expected timetable object."""
    # Minimal hand-built payload: only the fields the schema requires.
    serialized = {
        "__version": 3,
        "dag": {
            "default_args": {"__type": "dict", "__var": {}},
            "dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "timetable": serialized_timetable,
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)
    assert dag.timetable == expected_timetable
@pytest.mark.parametrize(
    ("serialized_timetable", "expected_timetable_summary"),
    [
        (
            {"__type": "airflow.timetables.simple.NullTimetable", "__var": {}},
            "None",
        ),
        (
            {
                "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                "__var": {"expression": "@weekly", "timezone": "UTC"},
            },
            "0 0 * * 0",
        ),
        (
            {"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}},
            "@once",
        ),
        (
            {
                "__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
                "__var": {"delta": 86400.0},
            },
            "1 day, 0:00:00",
        ),
        (CUSTOM_TIMETABLE_SERIALIZED, "CustomSerializationTimetable('foo')"),
    ],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_deserialization_timetable_summary(
    self,
    serialized_timetable,
    expected_timetable_summary,
):
    """The human-readable timetable summary is reconstructed after deserialization."""
    # Minimal hand-built payload: only the fields the schema requires.
    serialized = {
        "__version": 3,
        "dag": {
            "default_args": {"__type": "dict", "__var": {}},
            "dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "timetable": serialized_timetable,
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)
    assert dag.timetable_summary == expected_timetable_summary
def test_deserialization_timetable_unregistered(self):
    """Deserializing an unregistered timetable class must raise ValueError."""
    payload = {
        "__version": 3,
        "dag": {
            "default_args": {"__type": "dict", "__var": {}},
            "dag_id": "simple_dag",
            "fileloc": __file__,
            "tasks": [],
            "timezone": "UTC",
            "timetable": CUSTOM_TIMETABLE_SERIALIZED,
        },
    }
    # The payload itself is schema-valid; only deserialization fails.
    SerializedDAG.validate_schema(payload)

    message = (
        "Timetable class "
        "'tests_common.test_utils.timetables.CustomSerializationTimetable' "
        "is not registered or "
        "you have a top level database access that disrupted the session. "
        "Please check the airflow best practices documentation."
    )
    with pytest.raises(ValueError, match=message):
        SerializedDAG.from_dict(payload)
@pytest.mark.parametrize(
    ("val", "expected"),
    [
        (
            relativedelta(days=-1),
            {"__type": "relativedelta", "__var": {"days": -1}},
        ),
        (
            relativedelta(month=1, days=-1),
            {"__type": "relativedelta", "__var": {"month": 1, "days": -1}},
        ),
        # Every friday
        (
            relativedelta(weekday=FR),
            {"__type": "relativedelta", "__var": {"weekday": [4]}},
        ),
        # Every second friday
        (
            relativedelta(weekday=FR(2)),
            {"__type": "relativedelta", "__var": {"weekday": [4, 2]}},
        ),
    ],
)
def test_roundtrip_relativedelta(self, val, expected):
    """relativedelta values serialize to the tagged dict form and round-trip intact."""
    serialized = SerializedDAG.serialize(val)
    assert serialized == expected

    round_tripped = SerializedDAG.deserialize(serialized)
    assert val == round_tripped
@pytest.mark.parametrize(
    ("val", "expected_val"),
    [
        (None, {}),
        ({"param_1": "value_1"}, {"param_1": "value_1"}),
        # A set is not JSON-serializable, so param validation must reject it.
        ({"param_1": {1, 2, 3}}, ParamValidationError),
    ],
)
def test_dag_params_roundtrip(self, val, expected_val):
    """
    Test that params work both on Serialized DAGs & Tasks
    """
    if expected_val == ParamValidationError:
        with pytest.raises(ParamValidationError):
            dag = DAG(dag_id="simple_dag", schedule=None, params=val)
        # further tests not relevant
        return
    dag = DAG(dag_id="simple_dag", schedule=None, params=val)
    BaseOperator(task_id="simple_task", dag=dag, start_date=datetime(2019, 8, 1))
    serialized_dag_json = SerializedDAG.to_json(dag)
    serialized_dag = json.loads(serialized_dag_json)
    assert "params" in serialized_dag["dag"]
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
    # DAG-level params propagate down to the task after deserialization.
    assert expected_val == deserialized_dag.params.dump()
    assert expected_val == deserialized_simple_task.params.dump()
def test_invalid_params(self):
    """
    Test to make sure that only native Param objects are being passed as dag or task params
    """

    class S3Param(Param):
        # A Param subclass cannot round-trip: serialization only knows
        # how to rebuild plain Param, so it must be rejected.
        def __init__(self, path: str):
            schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
            super().__init__(default=path, schema=schema)

    dag = DAG(
        dag_id="simple_dag",
        schedule=None,
        params={"path": S3Param("s3://my_bucket/my_path")},
    )

    with pytest.raises(SerializationError):
        SerializedDAG.to_dict(dag)

    # NOTE(review): this second half constructs a task with a Param
    # subclass but never serializes nor asserts anything — presumably a
    # ``with pytest.raises(SerializationError): SerializedDAG.to_dict(dag)``
    # is missing here; confirm intent.
    dag = DAG(dag_id="simple_dag", schedule=None)
    BaseOperator(
        task_id="simple_task",
        dag=dag,
        start_date=datetime(2019, 8, 1),
        params={"path": S3Param("s3://my_bucket/my_path")},
    )
@pytest.mark.parametrize(
    "param",
    [
        Param("my value", description="hello", schema={"type": "string"}),
        Param("my value", description="hello"),
        Param(None, description=None),
        Param([True], type="array", items={"type": "boolean"}),
        Param(),
    ],
)
def test_full_param_roundtrip(self, param: Param):
    """
    A Param round-trips through serialization keeping its value,
    schema and description intact.
    """
    sdk_dag = DAG(dag_id="simple_dag", schedule=None, params={"my_param": param})
    serialized_json = SerializedDAG.to_json(sdk_dag)
    serialized = json.loads(serialized_json)
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)

    assert dag.params.get_param("my_param").value == param.value
    observed_param = dag.params.get_param("my_param")
    assert isinstance(observed_param, SerializedParam)
    assert observed_param.description == param.description
    assert observed_param.schema == param.schema
    # NOTSET values dump as None on the serialized side.
    assert observed_param.dump() == {
        "value": None if param.value is NOTSET else param.value,
        "schema": param.schema,
        "description": param.description,
        "source": None,
    }
@pytest.mark.parametrize(
    ("val", "expected_val"),
    [
        (None, {}),
        ({"param_1": "value_1"}, {"param_1": "value_1"}),
        # A set is not JSON-serializable, so param validation must reject it.
        ({"param_1": {1, 2, 3}}, ParamValidationError),
    ],
)
def test_task_params_roundtrip(self, val, expected_val):
    """
    Test that params work both on Serialized DAGs & Tasks
    """
    dag = DAG(dag_id="simple_dag", schedule=None)
    if expected_val == ParamValidationError:
        with pytest.raises(ParamValidationError):
            BaseOperator(
                task_id="simple_task",
                dag=dag,
                params=val,
                start_date=datetime(2019, 8, 1),
            )
        # further tests not relevant
        return
    BaseOperator(
        task_id="simple_task",
        dag=dag,
        params=val,
        start_date=datetime(2019, 8, 1),
    )
    serialized_dag = SerializedDAG.to_dict(dag)
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    # Empty params are omitted from the serialized task entirely.
    if val:
        assert "params" in serialized_dag["dag"]["tasks"][0]["__var"]
    else:
        assert "params" not in serialized_dag["dag"]["tasks"][0]["__var"]
    deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
    assert expected_val == deserialized_simple_task.params.dump()
@pytest.mark.db_test
@pytest.mark.parametrize(
    ("bash_command", "serialized_links", "links"),
    [
        pytest.param(
            "true",
            {"Google Custom": "_link_CustomOpLink"},
            {"Google Custom": "http://google.com/custom_base_link?search=true"},
            id="non-indexed-link",
        ),
        pytest.param(
            ["echo", "true"],
            {"BigQuery Console #1": "bigquery_1", "BigQuery Console #2": "bigquery_2"},
            {
                "BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=echo",
                "BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=true",
            },
            id="multiple-indexed-links",
        ),
    ],
)
def test_extra_serialized_field_and_operator_links(
    self, bash_command, serialized_links, links, dag_maker
):
    """
    Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

    This tests also depends on GoogleLink() registered as a plugin
    in tests/plugins/test_plugin.py

    The function tests that if extra operator links are registered in plugin
    in ``operator_extra_links`` and the same is also defined in
    the Operator in ``BaseOperator.operator_extra_links``, it has the correct
    extra link.

    If CustomOperator is called with a string argument for bash_command it
    has a single link, if called with an array it has one link per element.
    We use this to test the serialization of link data.
    """
    test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)

    with dag_maker(dag_id="simple_dag", start_date=test_date) as dag:
        CustomOperator(task_id="simple_task", bash_command=bash_command)

    serialized_dag = SerializedDAG.to_dict(dag)
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]["__var"]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert getattr(simple_task, "bash_command") == bash_command

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    assert serialized_dag["dag"]["tasks"][0]["__var"]["_operator_extra_links"] == serialized_links

    # Test all the extra_links are set
    assert simple_task.extra_links == sorted({*links, "airflow", "github", "google"})

    dr = dag_maker.create_dagrun(logical_date=test_date)
    (ti,) = dr.task_instances
    XComModel.set(
        key="search_query",
        value=bash_command,
        task_id=simple_task.task_id,
        dag_id=simple_task.dag_id,
        run_id=dr.run_id,
    )

    # Test Deserialized inbuilt link
    for i, (name, expected) in enumerate(links.items()):
        # staging the part where a task at runtime pushes xcom for extra links
        XComModel.set(
            key=simple_task.operator_extra_links[i].xcom_key,
            value=expected,
            task_id=simple_task.task_id,
            dag_id=simple_task.dag_id,
            run_id=dr.run_id,
        )
        link = simple_task.get_extra_links(ti, name)
        assert link == expected

    current_python_version = sys.version_info[:2]
    # BUGFIX: the guard was inverted (`>= (3, 13)`), which ran the plugin
    # link check only on the very versions the TODO says it must be
    # skipped on (ray isn't supported there yet).
    if current_python_version < (3, 13):
        # TODO(potiuk) We should bring it back when ray is supported on Python 3.13
        # Test Deserialized link registered via Airflow Plugin
        from tests_common.test_utils.mock_operators import GoogleLink

        link = simple_task.get_extra_links(ti, GoogleLink.name)
        assert link == "https://www.google.com"
class ClassWithCustomAttributes:
    """
    Class for testing purpose: allows to create objects with custom attributes in one single statement.
    """

    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return f"{self.__class__.__name__}({str(self.__dict__)})"

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __hash__(self):
        # BUGFIX: the previous implementation was ``hash(self.__dict__)``, which always
        # raises TypeError because dicts are unhashable. Hash an order-independent,
        # hashable projection of the attributes instead; ``repr`` is used for values so
        # unhashable attribute values (e.g. lists in ``template_fields``) are supported.
        # Equal objects (per ``__eq__``) produce equal hashes.
        return hash(tuple(sorted((key, repr(value)) for key, value in self.__dict__.items())))

    def __ne__(self, other):
        return not self.__eq__(other)
@pytest.mark.parametrize(
    ("templated_field", "expected_field"),
    [
        (None, None),
        ([], []),
        ({}, {}),
        ("{{ task.task_id }}", "{{ task.task_id }}"),
        # BUGFIX: this entry was a bare two-element list — not a (templated, expected)
        # pair — so pytest unpacked its two strings as the two parameters, silently
        # duplicating the plain-string case above and losing list coverage entirely.
        # Restore the intended pair: a list templated field round-trips as a list.
        (
            ["{{ task.task_id }}", "{{ task.task_id }}"],
            ["{{ task.task_id }}", "{{ task.task_id }}"],
        ),
        ({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
        (
            {"foo": {"bar": "{{ task.task_id }}"}},
            {"foo": {"bar": "{{ task.task_id }}"}},
        ),
        (
            [
                {"foo1": {"bar": "{{ task.task_id }}"}},
                {"foo2": {"bar": "{{ task.task_id }}"}},
            ],
            [
                {"foo1": {"bar": "{{ task.task_id }}"}},
                {"foo2": {"bar": "{{ task.task_id }}"}},
            ],
        ),
        (
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
        ),
        (
            ClassWithCustomAttributes(
                att1="{{ task.task_id }}",
                att2="{{ task.task_id }}",
                template_fields=["att1"],
            ),
            "ClassWithCustomAttributes("
            "{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
        ),
        (
            ClassWithCustomAttributes(
                nested1=ClassWithCustomAttributes(
                    att1="{{ task.task_id }}",
                    att2="{{ task.task_id }}",
                    template_fields=["att1"],
                ),
                nested2=ClassWithCustomAttributes(
                    att3="{{ task.task_id }}",
                    att4="{{ task.task_id }}",
                    template_fields=["att3"],
                ),
                template_fields=["nested1"],
            ),
            "ClassWithCustomAttributes("
            "{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
            "'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
            "'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
            "'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
        ),
    ],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
    """
    Test that templated_fields exists for all Operators in Serialized DAG.

    Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
    we want check that non-"basic" objects are turned in to strings after deserializing.
    """
    dag = DAG(
        "test_serialized_template_fields",
        schedule=None,
        start_date=datetime(2019, 8, 1),
    )
    with dag:
        BashOperator(task_id="test", bash_command=templated_field)

    serialized_dag = SerializedDAG.to_dict(dag)
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    deserialized_test_task = deserialized_dag.task_dict["test"]
    # JSON-representable values survive unchanged; arbitrary objects come back as str().
    assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
    """
    Additional Properties are disabled on DAGs. This test verifies that all the
    keys in DAG.get_serialized_fields are listed in Schema definition.
    """
    dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]

    # The parameters we add manually in Serialization need to be ignored
    ignored_keys: set = {
        "_processor_dags_folder",
        "tasks",
        "has_on_success_callback",
        "has_on_failure_callback",
        "dag_dependencies",
        "params",
    }

    # Keys kept in the schema only so that older serialized DAGs still validate.
    keys_for_backwards_compat: set = {
        "_concurrency",
    }
    dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
    # Schema properties (minus the exceptions above) must match the serialized fields exactly.
    assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
    """A subclass that overrides a BaseOperator default (here ``do_xcom_push``)
    must keep that override through a serialize/deserialize round-trip."""
    assert BaseOperator(task_id="dummy").do_xcom_push is True, (
        "Precondition check! If this fails the test won't make sense"
    )

    class MyOperator(BaseOperator):
        def __init__(self, do_xcom_push=False, **kwargs):
            super().__init__(**kwargs)
            self.do_xcom_push = do_xcom_push

    op = MyOperator(task_id="dummy")
    assert op.do_xcom_push is False

    blob = SerializedBaseOperator.serialize_operator(op)
    serialized_op = SerializedBaseOperator.deserialize_operator(blob)

    # The non-default value must not be dropped in favour of the BaseOperator default.
    assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
    """
    This test verifies that there are no new fields added to BaseOperator. And reminds that
    tests should be added for it.
    """
    from airflow.task.trigger_rule import TriggerRule

    base_operator = BaseOperator(task_id="10")
    # Return the name of any annotated class property, or anything explicitly listed in serialized fields
    field_names = {
        fld.name
        for fld in dataclasses.fields(BaseOperator)
        if fld.name in BaseOperator.get_serialized_fields()
    } | BaseOperator.get_serialized_fields()
    fields = {k: getattr(base_operator, k) for k in field_names}
    # Exhaustive snapshot of every serialized field's default value; any new field must
    # be added here AND to the serialization schema (see the failure message below).
    assert fields == {
        "_logger_name": None,
        "_needs_expansion": None,
        "_post_execute_hook": None,
        "_pre_execute_hook": None,
        "_task_display_name": None,
        "allow_nested_operators": True,
        "depends_on_past": False,
        "do_xcom_push": True,
        "doc": None,
        "doc_json": None,
        "doc_md": None,
        "doc_rst": None,
        "doc_yaml": None,
        "downstream_task_ids": set(),
        "end_date": None,
        "email": None,
        "email_on_failure": True,
        "email_on_retry": True,
        "execution_timeout": None,
        "executor": None,
        "executor_config": {},
        "has_on_execute_callback": False,
        "has_on_failure_callback": False,
        "has_on_retry_callback": False,
        "has_on_skipped_callback": False,
        "has_on_success_callback": False,
        "ignore_first_depends_on_past": False,
        "is_setup": False,
        "is_teardown": False,
        "inlets": [],
        "map_index_template": None,
        "max_active_tis_per_dag": None,
        "max_active_tis_per_dagrun": None,
        "max_retry_delay": None,
        "on_failure_fail_dagrun": False,
        "outlets": [],
        "owner": "airflow",
        "params": {},
        "pool": "default_pool",
        "pool_slots": 1,
        "priority_weight": 1,
        "queue": "default",
        "resources": None,
        "retries": 0,
        "retry_delay": timedelta(0, 300),
        "retry_exponential_backoff": 0,
        "run_as_user": None,
        "start_date": None,
        "start_from_trigger": False,
        "start_trigger_args": None,
        "task_id": "10",
        "task_type": "BaseOperator",
        "template_ext": (),
        "template_fields": (),
        "template_fields_renderers": {},
        "trigger_rule": TriggerRule.ALL_SUCCESS,
        "ui_color": "#fff",
        "ui_fgcolor": "#000",
        "wait_for_downstream": False,
        "wait_for_past_depends_before_skipping": False,
        "weight_rule": _DownstreamPriorityWeightStrategy(),
        "multiple_outputs": False,
    }, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY

Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.

Note that we do not support versioning yet so you should only add optional fields to BaseOperator.

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_operator_deserialize_old_names(self):
    """A blob using the legacy ``_downstream_task_ids`` key must still validate
    against the operator schema and deserialize into ``downstream_task_ids``."""
    blob = {
        "task_id": "custom_task",
        "_downstream_task_ids": ["foo"],  # legacy (underscore-prefixed) field name
        "template_ext": [],
        "template_fields": ["bash_command"],
        "template_fields_renderers": {},
        "task_type": "CustomOperator",
        "_task_module": "tests_common.test_utils.mock_operators",
        "pool": "default_pool",
        "ui_color": "#fff",
        "ui_fgcolor": "#000",
    }

    SerializedDAG._json_schema.validate(blob, _schema=load_dag_schema_dict()["definitions"]["operator"])
    serialized_op = SerializedBaseOperator.deserialize_operator(blob)
    # Old list-typed value is normalized to the current set representation.
    assert serialized_op.downstream_task_ids == {"foo"}
def test_task_resources(self):
    """
    Test task resources serialization/deserialization.
    """
    from airflow.providers.standard.operators.empty import EmptyOperator

    logical_date = datetime(2020, 1, 1)
    task_id = "task1"
    with DAG("test_task_resources", schedule=None, start_date=logical_date) as dag:
        task = EmptyOperator(task_id=task_id, resources={"cpus": 0.1, "ram": 2048})

    SerializedDAG.validate_schema(SerializedDAG.to_dict(dag))

    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    deserialized_task = json_dag.get_task(task_id)
    # The plain dict passed in must come back as an equal, typed Resources object.
    assert deserialized_task.resources == task.resources
    assert isinstance(deserialized_task.resources, Resources)
def test_task_group_serialization(self):
    """
    Test TaskGroup serialization/deserialization.
    """
    from airflow.providers.standard.operators.empty import EmptyOperator

    logical_date = datetime(2020, 1, 1)
    with DAG("test_task_group_serialization", schedule=None, start_date=logical_date) as dag:
        task1 = EmptyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = EmptyOperator(task_id="task2")
            with TaskGroup("group34") as group34:
                _ = EmptyOperator(task_id="task3")
                _ = EmptyOperator(task_id="task4")
        task5 = EmptyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    assert serialized_dag.task_group.children
    assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()

    def check_task_group(node):
        # Every node of the deserialized tree must point back at the deserialized DAG.
        assert node.dag is serialized_dag
        try:
            children = node.children.values()
        except AttributeError:
            # Leaf (an operator, not a group): round-trip serialization and check the result
            expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
            expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
            expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
            assert node
            assert SerializedBaseOperator.serialize_operator(node) == expected_dict
            return

        for child in children:
            check_task_group(child)

    check_task_group(serialized_dag.task_group)
@staticmethod
def assert_taskgroup_children(se_task_group, dag_task_group, expected_children):
assert se_task_group.children.keys() == dag_task_group.children.keys() == expected_children
@staticmethod
def assert_task_is_setup_teardown(task, is_setup: bool = False, is_teardown: bool = False):
assert task.is_setup == is_setup
assert task.is_teardown == is_teardown
def test_setup_teardown_tasks(self):
    """
    Test setup and teardown task serialization/deserialization.
    """
    from airflow.providers.standard.operators.empty import EmptyOperator

    logical_date = datetime(2020, 1, 1)
    with DAG(
        "test_task_group_setup_teardown_tasks",
        schedule=None,
        start_date=logical_date,
    ) as dag:
        EmptyOperator(task_id="setup").as_setup()
        EmptyOperator(task_id="teardown").as_teardown()

        with TaskGroup("group1"):
            EmptyOperator(task_id="setup1").as_setup()
            EmptyOperator(task_id="task1")
            EmptyOperator(task_id="teardown1").as_teardown()

            with TaskGroup("group2"):
                EmptyOperator(task_id="setup2").as_setup()
                EmptyOperator(task_id="task2")
                EmptyOperator(task_id="teardown2").as_teardown()

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    # Top level: two plain tasks plus the nested group.
    self.assert_taskgroup_children(
        serialized_dag.task_group, dag.task_group, {"setup", "teardown", "group1"}
    )
    self.assert_task_is_setup_teardown(serialized_dag.task_group.children["setup"], is_setup=True)
    self.assert_task_is_setup_teardown(serialized_dag.task_group.children["teardown"], is_teardown=True)

    se_first_group = serialized_dag.task_group.children["group1"]
    dag_first_group = dag.task_group.children["group1"]
    self.assert_taskgroup_children(
        se_first_group,
        dag_first_group,
        {"group1.setup1", "group1.task1", "group1.group2", "group1.teardown1"},
    )
    self.assert_task_is_setup_teardown(se_first_group.children["group1.setup1"], is_setup=True)
    self.assert_task_is_setup_teardown(se_first_group.children["group1.task1"])
    self.assert_task_is_setup_teardown(se_first_group.children["group1.teardown1"], is_teardown=True)

    # Flags must also survive for the group nested one level deeper.
    se_second_group = se_first_group.children["group1.group2"]
    dag_second_group = dag_first_group.children["group1.group2"]
    self.assert_taskgroup_children(
        se_second_group,
        dag_second_group,
        {"group1.group2.setup2", "group1.group2.task2", "group1.group2.teardown2"},
    )
    self.assert_task_is_setup_teardown(se_second_group.children["group1.group2.setup2"], is_setup=True)
    self.assert_task_is_setup_teardown(se_second_group.children["group1.group2.task2"])
    self.assert_task_is_setup_teardown(
        se_second_group.children["group1.group2.teardown2"], is_teardown=True
    )
@pytest.mark.db_test
def test_teardown_task_on_failure_fail_dagrun_serialization(self, dag_maker):
    """A @teardown task's ``on_failure_fail_dagrun`` flag survives serialization."""
    with dag_maker() as dag:

        @teardown(on_failure_fail_dagrun=True)
        def mytask():
            print(1)

        mytask()

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
    task = serialized_dag.task_group.children["mytask"]
    assert task.is_teardown is True
    assert task.on_failure_fail_dagrun is True
@pytest.mark.db_test
def test_basic_mapped_dag(self, dag_maker):
    """The dynamic-task-mapping example DAG serializes cleanly and validates against the schema."""
    # NOTE(review): ``dag_maker`` appears unused directly; presumably required for DB
    # session/fixture setup — confirm before removing.
    dagbag = DagBag(
        "airflow-core/src/airflow/example_dags/example_dynamic_task_mapping.py", include_examples=False
    )
    assert not dagbag.import_errors
    dag = dagbag.dags["example_dynamic_task_mapping"]
    ser_dag = SerializedDAG.to_dict(dag)
    # We should not include `_is_sensor` most of the time (as it would be wasteful). Check we don't
    assert "_is_sensor" not in ser_dag["dag"]["tasks"][0]["__var"]
    SerializedDAG.validate_schema(ser_dag)
@pytest.mark.db_test
def test_teardown_mapped_serialization(self, dag_maker):
    """For a *mapped* @teardown task, the setup/teardown flags live in ``partial_kwargs``
    and must survive serialization there."""
    with dag_maker() as dag:

        @teardown(on_failure_fail_dagrun=True)
        def mytask(val=None):
            print(1)

        mytask.expand(val=[1, 2, 3])

    task = dag.task_group.children["mytask"]
    assert task.partial_kwargs["is_teardown"] is True
    assert task.partial_kwargs["on_failure_fail_dagrun"] is True

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
    task = serialized_dag.task_group.children["mytask"]
    assert task.partial_kwargs["is_teardown"] is True
    assert task.partial_kwargs["on_failure_fail_dagrun"] is True
def test_serialize_mapped_outlets(self):
    """Empty inlets/outlets are omitted from a mapped operator's serialized blob
    (to keep it small) and restored as empty lists on deserialization."""
    with DAG(dag_id="d", schedule=None, start_date=datetime.now()):
        op = MockOperator.partial(task_id="x").expand(arg1=[1, 2])

    assert op.inlets == []
    assert op.outlets == []

    serialized = SerializedBaseOperator.serialize_mapped_operator(op)
    assert "inlets" not in serialized
    assert "outlets" not in serialized

    round_tripped = SerializedBaseOperator.deserialize_operator(serialized)
    assert isinstance(round_tripped, MappedOperator)
    assert round_tripped.inlets == []
    assert round_tripped.outlets == []
@pytest.mark.db_test
@pytest.mark.parametrize("mapped", [False, True])
def test_derived_dag_deps_sensor(self, mapped):
    """
    Tests DAG dependency detection for sensors, including derived classes
    """
    from airflow.providers.standard.operators.empty import EmptyOperator
    from airflow.providers.standard.sensors.external_task import ExternalTaskSensor

    class DerivedSensor(ExternalTaskSensor):
        pass

    logical_date = datetime(2020, 1, 1)
    # Dependency detection must work for the sensor class itself and any subclass,
    # in both classic and mapped form.
    for class_ in [ExternalTaskSensor, DerivedSensor]:
        with DAG(dag_id="test_derived_dag_deps_sensor", schedule=None, start_date=logical_date) as dag:
            if mapped:
                task1 = class_.partial(
                    task_id="task1",
                    external_dag_id="external_dag_id",
                    mode="reschedule",
                ).expand(external_task_id=["some_task", "some_other_task"])
            else:
                task1 = class_(
                    task_id="task1",
                    external_dag_id="external_dag_id",
                    mode="reschedule",
                )
            task2 = EmptyOperator(task_id="task2")
            task1 >> task2

        dag = SerializedDAG.to_dict(dag)
        assert dag["dag"]["dag_dependencies"] == [
            {
                "source": "external_dag_id",
                "target": "test_derived_dag_deps_sensor",
                "label": "task1",
                "dependency_type": "sensor",
                "dependency_id": "task1",
            }
        ]
@pytest.mark.db_test
def test_dag_deps_assets_with_duplicate_asset(self, testing_assets):
    """
    Check that dag_dependencies node is populated correctly for a DAG with duplicate assets.
    """
    from airflow.providers.standard.sensors.external_task import ExternalTaskSensor

    logical_date = datetime(2020, 1, 1)
    # The same asset appears 5x in the schedule and another appears 3x as an outlet;
    # each occurrence is expected to produce its own dependency entry.
    with DAG(dag_id="test", start_date=logical_date, schedule=[testing_assets[0]] * 5) as dag:
        ExternalTaskSensor(
            task_id="task1",
            external_dag_id="external_dag_id",
            mode="reschedule",
        )
        BashOperator(
            task_id="asset_writer",
            bash_command="echo hello",
            outlets=[testing_assets[1]] * 3 + testing_assets[2:3],
        )

        @dag.task(outlets=[testing_assets[3]])
        def other_asset_writer(x):
            pass

        other_asset_writer.expand(x=[1, 2])

    testing_asset_key_strs = [AssetUniqueKey.from_asset(asset).to_str() for asset in testing_assets]

    dag = SerializedDAG.to_dict(dag)
    # Sort both sides the same way since dependency ordering is not guaranteed.
    actual = sorted(dag["dag"]["dag_dependencies"], key=lambda x: tuple(x.values()))
    expected = sorted(
        [
            {
                "source": "test",
                "target": "asset",
                "label": "asset4",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[3],
            },
            {
                "source": "external_dag_id",
                "target": "test",
                "label": "task1",
                "dependency_type": "sensor",
                "dependency_id": "task1",
            },
            {
                "source": "test",
                "target": "asset",
                "label": "asset3",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[2],
            },
            {
                "source": "test",
                "target": "asset",
                "label": "asset2",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[1],
            },
            {
                "source": "asset",
                "target": "test",
                "label": "asset1",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[0],
            },
            {
                "source": "asset",
                "target": "test",
                "label": "asset1",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[0],
            },
            {
                "source": "asset",
                "target": "test",
                "label": "asset1",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[0],
            },
            {
                "source": "asset",
                "target": "test",
                "label": "asset1",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[0],
            },
            {
                "source": "asset",
                "target": "test",
                "label": "asset1",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[0],
            },
        ],
        key=lambda x: tuple(x.values()),
    )
    assert actual == expected
@pytest.mark.db_test
def test_dag_deps_assets(self, testing_assets):
    """
    Check that dag_dependencies node is populated correctly for a DAG with assets.

    Note that asset id will not be stored at this stage and will be later evaluated when
    calling SerializedDagModel.get_dag_dependencies.
    """
    from airflow.providers.standard.sensors.external_task import ExternalTaskSensor

    logical_date = datetime(2020, 1, 1)
    with DAG(dag_id="test", start_date=logical_date, schedule=testing_assets[0:1]) as dag:
        ExternalTaskSensor(
            task_id="task1",
            external_dag_id="external_dag_id",
            mode="reschedule",
        )
        BashOperator(task_id="asset_writer", bash_command="echo hello", outlets=testing_assets[1:3])

        @dag.task(outlets=testing_assets[3:])
        def other_asset_writer(x):
            pass

        other_asset_writer.expand(x=[1, 2])

    testing_asset_key_strs = [AssetUniqueKey.from_asset(asset).to_str() for asset in testing_assets]

    dag = SerializedDAG.to_dict(dag)
    # Sort both sides the same way since dependency ordering is not guaranteed.
    actual = sorted(dag["dag"]["dag_dependencies"], key=lambda x: tuple(x.values()))
    expected = sorted(
        [
            {
                "source": "test",
                "target": "asset",
                "label": "asset4",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[3],
            },
            {
                "source": "external_dag_id",
                "target": "test",
                "label": "task1",
                "dependency_type": "sensor",
                "dependency_id": "task1",
            },
            {
                "source": "test",
                "target": "asset",
                "label": "asset3",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[2],
            },
            {
                "source": "test",
                "target": "asset",
                "label": "asset2",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[1],
            },
            {
                "source": "asset",
                "target": "test",
                "label": "asset1",
                "dependency_type": "asset",
                "dependency_id": testing_asset_key_strs[0],
            },
        ],
        key=lambda x: tuple(x.values()),
    )
    assert actual == expected
@pytest.mark.parametrize("mapped", [False, True])
def test_derived_dag_deps_operator(self, mapped):
    """
    Tests DAG dependency detection for operators, including derived classes
    """
    from airflow.providers.standard.operators.empty import EmptyOperator
    from airflow.providers.standard.operators.trigger_dagrun import (
        TriggerDagRunOperator,
    )

    class DerivedOperator(TriggerDagRunOperator):
        pass

    logical_date = datetime(2020, 1, 1)
    # Dependency detection must work for the operator class itself and any subclass,
    # in both classic and mapped form.
    for class_ in [TriggerDagRunOperator, DerivedOperator]:
        with DAG(
            dag_id="test_derived_dag_deps_trigger",
            schedule=None,
            start_date=logical_date,
        ) as dag:
            task1 = EmptyOperator(task_id="task1")
            if mapped:
                task2 = class_.partial(
                    task_id="task2",
                    trigger_dag_id="trigger_dag_id",
                ).expand(trigger_run_id=["one", "two"])
            else:
                task2 = class_(
                    task_id="task2",
                    trigger_dag_id="trigger_dag_id",
                )
            task1 >> task2

        dag = SerializedDAG.to_dict(dag)
        assert dag["dag"]["dag_dependencies"] == [
            {
                "source": "test_derived_dag_deps_trigger",
                "target": "trigger_dag_id",
                "label": "task2",
                "dependency_type": "trigger",
                "dependency_id": "task2",
            }
        ]
def test_task_group_sorted(self):
    """
    Tests serialize_task_group, make sure the list is in order
    """
    from airflow.providers.standard.operators.empty import EmptyOperator
    from airflow.serialization.serialized_objects import TaskGroupSerialization

    """
                    start
                   ╱      ╲
                  ╱        ╲
    task_group_up1          task_group_up2
        (task_up1)          (task_up2)
                  ╲         ╱
              task_group_middle
                (task_middle)
                  ╱         ╲
    task_group_down1        task_group_down2
       (task_down1)         (task_down2)
                  ╲         ╱
                   ╲       ╱
                      end
    """
    logical_date = datetime(2020, 1, 1)
    with DAG(dag_id="test_task_group_sorted", schedule=None, start_date=logical_date) as dag:
        start = EmptyOperator(task_id="start")
        with TaskGroup("task_group_up1") as task_group_up1:
            _ = EmptyOperator(task_id="task_up1")
        with TaskGroup("task_group_up2") as task_group_up2:
            _ = EmptyOperator(task_id="task_up2")
        with TaskGroup("task_group_middle") as task_group_middle:
            _ = EmptyOperator(task_id="task_middle")
        with TaskGroup("task_group_down1") as task_group_down1:
            _ = EmptyOperator(task_id="task_down1")
        with TaskGroup("task_group_down2") as task_group_down2:
            _ = EmptyOperator(task_id="task_down2")
        end = EmptyOperator(task_id="end")

        start >> task_group_up1
        start >> task_group_up2
        task_group_up1 >> task_group_middle
        task_group_up2 >> task_group_middle
        task_group_middle >> task_group_down1
        task_group_middle >> task_group_down2
        task_group_down1 >> end
        task_group_down2 >> end

    # Upstream/downstream id lists must be serialized in sorted, deterministic order.
    task_group_middle_dict = TaskGroupSerialization.serialize_task_group(
        dag.task_group.children["task_group_middle"]
    )
    upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
    assert upstream_group_ids == ["task_group_up1", "task_group_up2"]

    upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
    assert upstream_task_ids == [
        "task_group_up1.task_up1",
        "task_group_up2.task_up2",
    ]

    downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
    assert downstream_group_ids == ["task_group_down1", "task_group_down2"]

    task_group_down1_dict = TaskGroupSerialization.serialize_task_group(
        dag.task_group.children["task_group_down1"]
    )
    downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
    assert downstream_task_ids == ["end"]
def test_edge_info_serialization(self):
    """
    Tests edge_info serialization/deserialization.
    """
    from airflow.providers.standard.operators.empty import EmptyOperator
    from airflow.sdk import Label

    with DAG(
        "test_edge_info_serialization",
        schedule=None,
        start_date=datetime(2020, 1, 1),
    ) as dag:
        task1 = EmptyOperator(task_id="task1")
        task2 = EmptyOperator(task_id="task2")
        # The Label between the tasks is what populates dag.edge_info.
        task1 >> Label("test label") >> task2

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    assert serialized_dag.edge_info == dag.edge_info
@pytest.mark.db_test
@pytest.mark.parametrize("mode", ["poke", "reschedule"])
def test_serialize_sensor(self, mode):
    """A sensor serializes with the ``_is_sensor`` marker; its mode round-trips and
    the deserialized operator carries ReadyToRescheduleDep among its deps."""
    from airflow.sdk.bases.sensor import BaseSensorOperator

    class DummySensor(BaseSensorOperator):
        def poke(self, context: Context):
            return False

    op = DummySensor(task_id="dummy", mode=mode, poke_interval=23)

    blob = SerializedBaseOperator.serialize_operator(op)
    assert "_is_sensor" in blob

    serialized_op = SerializedBaseOperator.deserialize_operator(blob)
    # ``reschedule`` is True only when the sensor was built in reschedule mode.
    assert serialized_op.reschedule == (mode == "reschedule")
    assert ReadyToRescheduleDep in [type(d) for d in serialized_op.deps]
@pytest.mark.parametrize("mode", ["poke", "reschedule"])
def test_serialize_mapped_sensor_has_reschedule_dep(self, mode):
    """A *mapped* sensor's blob carries both ``_is_sensor`` and ``_is_mapped`` markers,
    and the deserialized operator still has ReadyToRescheduleDep."""
    from airflow.sdk.bases.sensor import BaseSensorOperator
    from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep

    class DummySensor(BaseSensorOperator):
        def poke(self, context: Context):
            return False

    op = DummySensor.partial(task_id="dummy", mode=mode).expand(poke_interval=[23])

    blob = SerializedBaseOperator.serialize_mapped_operator(op)
    assert "_is_sensor" in blob
    assert "_is_mapped" in blob

    serialized_op = SerializedBaseOperator.deserialize_operator(blob)
    assert ReadyToRescheduleDep in [type(d) for d in serialized_op.deps]
@pytest.mark.parametrize(
    ("passed_success_callback", "expected_value"),
    [
        ({"on_success_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
    """
    Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.

    When the callback is not set, has_on_success_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(
        dag_id="test_dag_on_success_callback_roundtrip",
        schedule=None,
        **passed_success_callback,
    )
    BaseOperator(task_id="simple_task", dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # The key is only present when a callback was actually set (keeps the blob small).
    if expected_value:
        assert "has_on_success_callback" in serialized_dag["dag"]
    else:
        assert "has_on_success_callback" not in serialized_dag["dag"]

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)

    assert deserialized_dag.has_on_success_callback is expected_value
@pytest.mark.parametrize(
    ("passed_failure_callback", "expected_value"),
    [
        ({"on_failure_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
    """
    Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.

    When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(
        dag_id="test_dag_on_failure_callback_roundtrip",
        schedule=None,
        **passed_failure_callback,
    )
    BaseOperator(task_id="simple_task", dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # The key is only present when a callback was actually set (keeps the blob small).
    if expected_value:
        assert "has_on_failure_callback" in serialized_dag["dag"]
    else:
        assert "has_on_failure_callback" not in serialized_dag["dag"]

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)

    assert deserialized_dag.has_on_failure_callback is expected_value
@pytest.mark.parametrize(
    ("dag_arg", "conf_arg", "expected"),
    [
        (True, "True", True),
        (True, "False", True),
        (False, "True", False),
        (False, "False", False),
        (None, "True", True),
        (None, "False", False),
    ],
)
def test_dag_disable_bundle_versioning_roundtrip(self, dag_arg, conf_arg, expected):
    """
    An explicit ``disable_bundle_versioning`` argument on the DAG takes precedence over
    the ``[dag_processor] disable_bundle_versioning`` config option; when the argument
    is omitted (None) the config value applies. The effective value must survive a
    serialization round-trip.
    """
    with conf_vars({("dag_processor", "disable_bundle_versioning"): conf_arg}):
        kwargs = {}
        if dag_arg is not None:
            kwargs["disable_bundle_versioning"] = dag_arg
        dag = DAG(
            dag_id="test_dag_disable_bundle_versioning_roundtrip",
            schedule=None,
            **kwargs,
        )
        BaseOperator(task_id="simple_task", dag=dag, start_date=datetime(2019, 8, 1))
        serialized_dag = SerializedDAG.to_dict(dag)
        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        assert deserialized_dag.disable_bundle_versioning is expected
@pytest.mark.parametrize(
    ("object_to_serialized", "expected_output"),
    [
        (
            ["task_1", "task_5", "task_2", "task_4"],
            ["task_1", "task_5", "task_2", "task_4"],
        ),
        (
            {"task_1", "task_5", "task_2", "task_4"},
            ["task_1", "task_2", "task_4", "task_5"],
        ),
        (
            ("task_1", "task_5", "task_2", "task_4"),
            ["task_1", "task_5", "task_2", "task_4"],
        ),
        (
            {
                "staging_schema": [
                    {"key:": "foo", "value": "bar"},
                    {"key:": "this", "value": "that"},
                    "test_conf",
                ]
            },
            {
                "staging_schema": [
                    {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
                    {
                        "__type": "dict",
                        "__var": {"key:": "this", "value": "that"},
                    },
                    "test_conf",
                ]
            },
        ),
        (
            {"task3": "test3", "task2": "test2", "task1": "test1"},
            {"task1": "test1", "task2": "test2", "task3": "test3"},
        ),
        (
            ("task_1", "task_5", "task_2", 3, ["x", "y"]),
            ["task_1", "task_5", "task_2", 3, ["x", "y"]],
        ),
    ],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
    """Test Serialized Sets are sorted while list and tuple preserve order"""
    serialized_obj = SerializedDAG.serialize(object_to_serialized)
    # Unwrap the {"__type": ..., "__var": ...} envelope when present.
    if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
        serialized_obj = serialized_obj["__var"]
    assert serialized_obj == expected_output
def test_params_upgrade(self):
    """When pre-2.2.0 param (i.e. primitive) is deserialized we convert to Param"""
    serialized = {
        "__version": 3,
        "dag": {
            "dag_id": "simple_dag",
            "fileloc": "/path/to/file.py",
            "tasks": [],
            "timezone": "UTC",
            # Old-style: raw primitives instead of serialized Param objects.
            "params": {"none": None, "str": "str", "dict": {"a": "b"}},
        },
    }
    dag = SerializedDAG.from_dict(serialized)

    assert dag.params["none"] is None
    # After decoupling, server-side deserialization uses SerializedParam
    assert isinstance(dag.params.get_param("none"), SerializedParam)
    assert dag.params["str"] == "str"
def test_params_serialization_from_dict_upgrade(self):
    """
    In <=2.9.2 params were serialized as a JSON object instead of a list of key-value pairs.
    This test asserts that the params are still deserialized properly.
    """
    serialized = {
        "__version": 3,
        "dag": {
            "dag_id": "simple_dag",
            "fileloc": "/path/to/file.py",
            "tasks": [],
            "timezone": "UTC",
            # Old dict-shaped params (mapping name -> serialized Param).
            "params": {
                "my_param": {
                    "__class": "airflow.models.param.Param",
                    "default": "str",
                }
            },
        },
    }
    dag = SerializedDAG.from_dict(serialized)

    param = dag.params.get_param("my_param")
    # After decoupling, server-side deserialization uses SerializedParam
    assert isinstance(param, SerializedParam)
    assert param.value == "str"
def test_params_serialize_default_2_2_0(self):
    """
    In 2.0.0, param ``default`` was assumed to be json-serializable objects and were not run though
    the standard serializer function. In 2.2.2 we serialize param ``default``. We keep this
    test only to ensure that params stored in 2.2.0 can still be parsed correctly.
    """
    serialized = {
        "__version": 3,
        "dag": {
            "dag_id": "simple_dag",
            "fileloc": "/path/to/file.py",
            "tasks": [],
            "timezone": "UTC",
            # 2.2.0-era list-of-pairs shape with a raw (unserialized) default.
            "params": [["str", {"__class": "airflow.models.param.Param", "default": "str"}]],
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)

    # After decoupling, server-side deserialization uses SerializedParam
    assert isinstance(dag.params.get_param("str"), SerializedParam)
    assert dag.params["str"] == "str"
def test_params_serialize_default(self):
    """A fully-serialized Param (default, description, schema) deserializes with
    all attributes intact."""
    serialized = {
        "__version": 3,
        "dag": {
            "dag_id": "simple_dag",
            "fileloc": "/path/to/file.py",
            "tasks": [],
            "timezone": "UTC",
            "params": [
                [
                    "my_param",
                    {
                        "default": "a string value",
                        "description": "hello",
                        "schema": {"__var": {"type": "string"}, "__type": "dict"},
                        "__class": "airflow.models.param.Param",
                    },
                ]
            ],
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)

    assert dag.params["my_param"] == "a string value"
    param = dag.params.get_param("my_param")
    # After decoupling, server-side deserialization uses SerializedParam
    assert isinstance(param, SerializedParam)
    assert param.description == "hello"
    # The schema's {"__var": ..., "__type": ...} envelope is unwrapped on deserialization.
    assert param.schema == {"type": "string"}
@pytest.mark.db_test
def test_not_templateable_fields_in_serialized_dag(self):
    """
    Serializing a DAG whose operator lists a non-templateable attribute
    (``execution_timeout``) in ``template_fields`` must raise AirflowException.
    """

    class TestOperator(BaseOperator):
        # Deliberately mixes one non-templateable and one templateable field.
        template_fields = (
            "execution_timeout",  # not templateable
            "run_as_user",  # templateable
        )

        def execute(self, context: Context):
            pass

    dag_under_test = DAG(dag_id="test_dag", schedule=None, start_date=datetime(2023, 11, 9))
    with dag_under_test:
        op = TestOperator(
            task_id="test_task",
            run_as_user="{{ test_run_as_user }}",
            execution_timeout=timedelta(seconds=10),
        )
        # Rendering the legitimately-templateable field still works.
        op.render_template_fields(context={"test_run_as_user": "foo"})
        assert op.run_as_user == "foo"
        expected_error = dedent(
            """Failed to serialize DAG 'test_dag': Cannot template BaseOperator field:
'execution_timeout' op.__class__.__name__='TestOperator' op.template_fields=('execution_timeout', 'run_as_user')"""
        )
        with pytest.raises(AirflowException, match=re.escape(expected_error)):
            SerializedDAG.to_dict(dag_under_test)
@pytest.mark.db_test
def test_start_trigger_args_in_serialized_dag(self):
    """
    Test that when we provide start_trigger_args, the DAG can be correctly serialized.

    Covers both a class-level default overridden per-instance in ``__init__``
    (TestOperator) and a purely class-level declaration (Test2Operator).
    """

    class TestOperator(BaseOperator):
        # Class-level defaults; __init__ below mutates them per instance, and
        # the serialized output must reflect the instance-level values.
        start_trigger_args = StartTriggerArgs(
            trigger_cls="airflow.providers.standard.triggers.temporal.TimeDeltaTrigger",
            trigger_kwargs={"delta": timedelta(seconds=1)},
            next_method="execute_complete",
            next_kwargs=None,
            timeout=None,
        )
        start_from_trigger = False

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Override the class-level delta (1s -> 2s) and flip the flag.
            self.start_trigger_args.trigger_kwargs = {"delta": timedelta(seconds=2)}
            self.start_from_trigger = True

        def execute_complete(self):
            pass

    class Test2Operator(BaseOperator):
        # Class-level declaration only; no per-instance mutation.
        start_trigger_args = StartTriggerArgs(
            trigger_cls="airflow.triggers.testing.SuccessTrigger",
            trigger_kwargs={},
            next_method="execute_complete",
            next_kwargs=None,
            timeout=None,
        )
        start_from_trigger = True

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def execute_complete(self):
            pass

    dag = DAG(dag_id="test_dag", schedule=None, start_date=datetime(2023, 11, 9))
    with dag:
        TestOperator(task_id="test_task_1")
        Test2Operator(task_id="test_task_2")

    serialized_obj = SerializedDAG.to_dict(dag)
    tasks = serialized_obj["dag"]["tasks"]
    # The instance-level override (2.0s delta) is what gets serialized.
    assert tasks[0]["__var"]["start_trigger_args"] == {
        "__type": "START_TRIGGER_ARGS",
        "trigger_cls": "airflow.providers.standard.triggers.temporal.TimeDeltaTrigger",
        # "trigger_kwargs": {"__type": "dict", "__var": {"delta": {"__type": "timedelta", "__var": 2.0}}},
        "trigger_kwargs": {
            "__type": "dict",
            "__var": {"delta": {"__type": "timedelta", "__var": 2.0}},
        },
        "next_method": "execute_complete",
        "next_kwargs": None,
        "timeout": None,
    }
    assert tasks[0]["__var"]["start_from_trigger"] is True
    assert tasks[1]["__var"]["start_trigger_args"] == {
        "__type": "START_TRIGGER_ARGS",
        "trigger_cls": "airflow.triggers.testing.SuccessTrigger",
        "trigger_kwargs": {"__type": "dict", "__var": {}},
        "next_method": "execute_complete",
        "next_kwargs": None,
        "timeout": None,
    }
    assert tasks[1]["__var"]["start_from_trigger"] is True
def test_kubernetes_optional():
    """Test that serialization module loads without kubernetes, but deserialization of PODs requires it"""

    def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
        # Fail any absolute (level == 0) import of the top-level ``kubernetes``
        # package; delegate everything else to the real import machinery.
        if level == 0 and name.partition(".")[0] == "kubernetes":
            raise ImportError("No module named 'kubernetes'")
        return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)

    with mock.patch("builtins.__import__", side_effect=mock__import__) as import_mock:
        # load module from scratch, this does not replace any already imported
        # airflow.serialization.serialized_objects module in sys.modules
        spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        # if we got this far, the module did not try to load kubernetes, but
        # did it try to access airflow.providers.cncf.kubernetes.*?
        imported_airflow = {
            c.args[0].split(".", 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
        }
        assert "kubernetes" not in imported_airflow

        # pod loading is not supported when kubernetes is not available
        pod_override = {
            "__type": "k8s.V1Pod",
            "__var": PodGenerator.serialize_pod(executor_config_pod),
        }

        # we should error if attempting to deserialize POD without kubernetes installed
        with pytest.raises(RuntimeError, match="Cannot deserialize POD objects without kubernetes"):
            module.BaseSerialization.from_dict(pod_override)

        # basic serialization should succeed
        module.SerializedDAG.to_dict(make_simple_dag())
def test_operator_expand_serde():
    """Round-trip a mapped BashOperator and pin its exact serialized wire format."""
    literal = [1, 2, {"a": "b"}]
    real_op = BashOperator.partial(task_id="a", executor_config={"dict": {"sub": "value"}}).expand(
        bash_command=literal
    )
    serialized = BaseSerialization.serialize(real_op)
    # Exact wire format: nested dicts are wrapped in {"__type": "dict", "__var": ...}.
    assert serialized["__var"] == {
        "_is_mapped": True,
        "_task_module": "airflow.providers.standard.operators.bash",
        "task_type": "BashOperator",
        "expand_input": {
            "type": "dict-of-lists",
            "value": {
                "__type": "dict",
                "__var": {"bash_command": [1, 2, {"__type": "dict", "__var": {"a": "b"}}]},
            },
        },
        "partial_kwargs": {
            "executor_config": {
                "__type": "dict",
                "__var": {"dict": {"__type": "dict", "__var": {"sub": "value"}}},
            },
            "retry_delay": {"__type": "timedelta", "__var": 300.0},
        },
        "task_id": "a",
        "template_fields": ["bash_command", "env", "cwd"],
        "template_ext": [".sh", ".bash"],
        "template_fields_renderers": {"bash_command": "bash", "env": "json"},
        "ui_color": "#f0ede4",
        "_disallow_kwargs_override": False,
        "_expand_input_attr": "expand_input",
    }
    op = BaseSerialization.deserialize(serialized)
    assert isinstance(op, MappedOperator)
    # operator_class now stores only minimal type information for memory efficiency
    assert op.operator_class == {
        "task_type": "BashOperator",
        "_operator_name": "BashOperator",
    }
    # Expand input and partial kwargs are restored to their plain Python values.
    assert op.expand_input.value["bash_command"] == literal
    assert op.partial_kwargs["executor_config"] == {"dict": {"sub": "value"}}
def test_operator_expand_xcomarg_serde():
    """Mapped operator expanding over an XComArg: serialized form plus two-stage deserialization."""
    from airflow.models.xcom_arg import SchedulerPlainXComArg
    from airflow.sdk.definitions.xcom_arg import XComArg
    from airflow.serialization.serialized_objects import _XComRef

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:
        task1 = BaseOperator(task_id="op1")
        mapped = MockOperator.partial(task_id="task_2").expand(arg2=XComArg(task1))

    serialized = BaseSerialization.serialize(mapped)
    # The XComArg is encoded as an "xcomref" placeholder on the wire.
    assert serialized["__var"] == {
        "_is_mapped": True,
        "_task_module": "tests_common.test_utils.mock_operators",
        "task_type": "MockOperator",
        "expand_input": {
            "type": "dict-of-lists",
            "value": {
                "__type": "dict",
                "__var": {
                    "arg2": {
                        "__type": "xcomref",
                        "__var": {"task_id": "op1", "key": "return_value"},
                    }
                },
            },
        },
        "partial_kwargs": {
            "retry_delay": {"__type": "timedelta", "__var": 300.0},
        },
        "task_id": "task_2",
        "template_fields": ["arg1", "arg2"],
        "_disallow_kwargs_override": False,
        "_expand_input_attr": "expand_input",
    }

    op = BaseSerialization.deserialize(serialized)
    # The XComArg can't be deserialized before the DAG is.
    xcom_ref = op.expand_input.value["arg2"]
    assert xcom_ref == _XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY})

    # Once the whole DAG is deserialized, the placeholder resolves to a real
    # scheduler-side XComArg bound to the upstream task object.
    serialized_dag: DAG = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
    xcom_arg = serialized_dag.task_dict["task_2"].expand_input.value["arg2"]
    assert isinstance(xcom_arg, SchedulerPlainXComArg)
    assert xcom_arg.operator is serialized_dag.task_dict["op1"]
@pytest.mark.parametrize("strict", [True, False])
def test_operator_expand_kwargs_literal_serde(strict):
    """expand_kwargs over a literal list (containing an XComArg) serializes as "list-of-dicts"."""
    from airflow.sdk.definitions.xcom_arg import XComArg
    from airflow.serialization.serialized_objects import DEFAULT_OPERATOR_DEPS, _XComRef

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:
        task1 = BaseOperator(task_id="op1")
        mapped = MockOperator.partial(task_id="task_2").expand_kwargs(
            [{"a": "x"}, {"a": XComArg(task1)}],
            strict=strict,
        )

    serialized = BaseSerialization.serialize(mapped)
    # ``strict`` is carried through as _disallow_kwargs_override.
    assert serialized["__var"] == {
        "_is_mapped": True,
        "_task_module": "tests_common.test_utils.mock_operators",
        "task_type": "MockOperator",
        "expand_input": {
            "type": "list-of-dicts",
            "value": [
                {"__type": "dict", "__var": {"a": "x"}},
                {
                    "__type": "dict",
                    "__var": {
                        "a": {
                            "__type": "xcomref",
                            "__var": {"task_id": "op1", "key": "return_value"},
                        }
                    },
                },
            ],
        },
        "partial_kwargs": {
            "retry_delay": {"__type": "timedelta", "__var": 300.0},
        },
        "task_id": "task_2",
        "template_fields": ["arg1", "arg2"],
        "_disallow_kwargs_override": strict,
        "_expand_input_attr": "expand_input",
    }

    op = BaseSerialization.deserialize(serialized)
    assert op.deps == DEFAULT_OPERATOR_DEPS
    assert op._disallow_kwargs_override == strict

    # The XComArg can't be deserialized before the DAG is.
    expand_value = op.expand_input.value
    assert expand_value == [
        {"a": "x"},
        {"a": _XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY})},
    ]

    # NOTE: even after the full DAG round-trip the literal list still carries
    # the _XComRef placeholder (resolution happens at a later stage).
    serialized_dag: DAG = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
    resolved_expand_value = serialized_dag.task_dict["task_2"].expand_input.value
    assert resolved_expand_value == [
        {"a": "x"},
        {"a": _XComRef({"task_id": "op1", "key": "return_value"})},
    ]
@pytest.mark.parametrize("strict", [True, False])
def test_operator_expand_kwargs_xcomarg_serde(strict):
    """expand_kwargs over a single XComArg serializes the whole value as one "xcomref"."""
    from airflow.models.xcom_arg import SchedulerPlainXComArg
    from airflow.sdk.definitions.xcom_arg import XComArg
    from airflow.serialization.serialized_objects import _XComRef

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:
        task1 = BaseOperator(task_id="op1")
        mapped = MockOperator.partial(task_id="task_2").expand_kwargs(XComArg(task1), strict=strict)

    serialized = SerializedBaseOperator.serialize(mapped)
    assert serialized["__var"] == {
        "_is_mapped": True,
        "_task_module": "tests_common.test_utils.mock_operators",
        "task_type": "MockOperator",
        "expand_input": {
            "type": "list-of-dicts",
            "value": {
                "__type": "xcomref",
                "__var": {"task_id": "op1", "key": "return_value"},
            },
        },
        "partial_kwargs": {
            "retry_delay": {"__type": "timedelta", "__var": 300.0},
        },
        "task_id": "task_2",
        "template_fields": ["arg1", "arg2"],
        "_disallow_kwargs_override": strict,
        "_expand_input_attr": "expand_input",
    }

    op = BaseSerialization.deserialize(serialized)
    assert op._disallow_kwargs_override == strict

    # The XComArg can't be deserialized before the DAG is.
    xcom_ref = op.expand_input.value
    assert xcom_ref == _XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY})

    # After the full DAG round-trip the placeholder resolves to a real
    # scheduler-side XComArg bound to the upstream task object.
    serialized_dag: DAG = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
    xcom_arg = serialized_dag.task_dict["task_2"].expand_input.value
    assert isinstance(xcom_arg, SchedulerPlainXComArg)
    assert xcom_arg.operator is serialized_dag.task_dict["op1"]
def test_task_resources_serde():
    """Resources declared on a task are serialized with full metadata (name/qty/units)."""
    from airflow.providers.standard.operators.empty import EmptyOperator

    with DAG("test_task_resources", schedule=None, start_date=datetime(2020, 1, 1)) as _:
        op = EmptyOperator(task_id="task1", resources={"cpus": 0.1, "ram": 2048})

    expected_resources = {
        "cpus": {"name": "CPU", "qty": 0.1, "units_str": "core(s)"},
        "disk": {"name": "Disk", "qty": 512, "units_str": "MB"},
        "gpus": {"name": "GPU", "qty": 0, "units_str": "gpu(s)"},
        "ram": {"name": "RAM", "qty": 2048, "units_str": "MB"},
    }
    encoded = BaseSerialization.serialize(op)
    # Unspecified resources (disk, gpus) are filled in with their defaults.
    assert encoded["__var"]["resources"] == expected_resources
@pytest.mark.parametrize("execution_timeout", [None, timedelta(hours=1)])
def test_task_execution_timeout_serde(execution_timeout):
    """``execution_timeout`` round-trips through serialization; it is only emitted when set."""
    from airflow.providers.standard.operators.empty import EmptyOperator

    with DAG("test_task_execution_timeout", schedule=None, start_date=datetime(2020, 1, 1)) as _:
        op = EmptyOperator(task_id="task1", execution_timeout=execution_timeout)

    encoded = BaseSerialization.serialize(op)
    if execution_timeout:
        # A truthy timeout must appear in the serialized payload.
        assert "execution_timeout" in encoded["__var"]
    decoded = BaseSerialization.deserialize(encoded)
    assert decoded.execution_timeout == op.execution_timeout
def test_taskflow_expand_serde():
    """
    A @task-decorated function used with ``partial(...).expand(...)`` serializes to
    the documented wire format, deserializes into a MappedOperator, and the
    deserialized operator survives pickling unchanged.
    """
    from airflow.models.xcom_arg import XComArg
    from airflow.sdk import task
    from airflow.serialization.serialized_objects import _ExpandInputRef, _XComRef

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:
        op1 = BaseOperator(task_id="op1")

        @task(retry_delay=30)
        def x(arg1, arg2, arg3):
            print(arg1, arg2, arg3)

        x.partial(arg1=[1, 2, {"a": "b"}]).expand(arg2={"a": 1, "b": 2}, arg3=XComArg(op1))

    original = dag.get_task("x")
    serialized = BaseSerialization.serialize(original)
    # Exact wire format; note the taskflow-specific "op_kwargs_expand_input"
    # attribute and the "@task" operator name.
    assert serialized["__var"] == {
        "_is_mapped": True,
        "_task_module": "airflow.providers.standard.decorators.python",
        "task_type": "_PythonDecoratedOperator",
        "_operator_name": "@task",
        "partial_kwargs": {
            "op_args": [],
            "op_kwargs": {
                "__type": "dict",
                "__var": {"arg1": [1, 2, {"__type": "dict", "__var": {"a": "b"}}]},
            },
            "retry_delay": {"__type": "timedelta", "__var": 30.0},
        },
        "op_kwargs_expand_input": {
            "type": "dict-of-lists",
            "value": {
                "__type": "dict",
                "__var": {
                    "arg2": {"__type": "dict", "__var": {"a": 1, "b": 2}},
                    "arg3": {
                        "__type": "xcomref",
                        "__var": {"task_id": "op1", "key": "return_value"},
                    },
                },
            },
        },
        "ui_color": "#ffefeb",
        "task_id": "x",
        "template_fields": ["templates_dict", "op_args", "op_kwargs"],
        "template_fields_renderers": {
            "templates_dict": "json",
            "op_args": "py",
            "op_kwargs": "py",
        },
        "_disallow_kwargs_override": False,
        "_expand_input_attr": "op_kwargs_expand_input",
        "python_callable_name": qualname(x),
    }

    deserialized = BaseSerialization.deserialize(serialized)
    assert isinstance(deserialized, MappedOperator)
    assert deserialized.upstream_task_ids == set()
    assert deserialized.downstream_task_ids == set()
    # The XComArg stays an _XComRef placeholder until the DAG is deserialized.
    assert deserialized.op_kwargs_expand_input == _ExpandInputRef(
        key="dict-of-lists",
        value={
            "arg2": {"a": 1, "b": 2},
            "arg3": _XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY}),
        },
    )
    assert deserialized.partial_kwargs == {
        "op_args": [],
        "op_kwargs": {"arg1": [1, 2, {"a": "b"}]},
        "retry_delay": timedelta(seconds=30),
    }

    # this dag is not pickleable in this context, so we have to simply
    # set it to None
    deserialized.dag = None

    # Ensure the serialized operator can also be correctly pickled, to ensure
    # correct interaction between DAG pickling and serialization. This is done
    # here so we don't need to duplicate tests between pickled and non-pickled
    # DAGs everywhere else.
    pickled = pickle.loads(pickle.dumps(deserialized))
    assert pickled.op_kwargs_expand_input == _ExpandInputRef(
        key="dict-of-lists",
        value={
            "arg2": {"a": 1, "b": 2},
            "arg3": _XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY}),
        },
    )
    assert pickled.partial_kwargs == {
        "op_args": [],
        "op_kwargs": {"arg1": [1, 2, {"a": "b"}]},
        "retry_delay": timedelta(seconds=30),
    }
@pytest.mark.parametrize("strict", [True, False])
def test_taskflow_expand_kwargs_serde(strict):
    """A @task with ``expand_kwargs(XComArg, strict=...)`` serializes, deserializes and pickles correctly."""
    from airflow.models.xcom_arg import XComArg
    from airflow.sdk import task
    from airflow.serialization.serialized_objects import _ExpandInputRef, _XComRef

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:
        op1 = BaseOperator(task_id="op1")

        @task(retry_delay=30)
        def x(arg1, arg2, arg3):
            print(arg1, arg2, arg3)

        x.partial(arg1=[1, 2, {"a": "b"}]).expand_kwargs(XComArg(op1), strict=strict)

    original = dag.get_task("x")
    serialized = BaseSerialization.serialize(original)
    # ``strict`` surfaces as _disallow_kwargs_override in the wire format.
    assert serialized["__var"] == {
        "_is_mapped": True,
        "_task_module": "airflow.providers.standard.decorators.python",
        "task_type": "_PythonDecoratedOperator",
        "_operator_name": "@task",
        "python_callable_name": qualname(x),
        "partial_kwargs": {
            "op_args": [],
            "op_kwargs": {
                "__type": "dict",
                "__var": {"arg1": [1, 2, {"__type": "dict", "__var": {"a": "b"}}]},
            },
            "retry_delay": {"__type": "timedelta", "__var": 30.0},
        },
        "op_kwargs_expand_input": {
            "type": "list-of-dicts",
            "value": {
                "__type": "xcomref",
                "__var": {"task_id": "op1", "key": "return_value"},
            },
        },
        "ui_color": "#ffefeb",
        "task_id": "x",
        "template_fields": ["templates_dict", "op_args", "op_kwargs"],
        "template_fields_renderers": {
            "templates_dict": "json",
            "op_args": "py",
            "op_kwargs": "py",
        },
        "_disallow_kwargs_override": strict,
        "_expand_input_attr": "op_kwargs_expand_input",
    }

    deserialized = BaseSerialization.deserialize(serialized)
    assert isinstance(deserialized, MappedOperator)
    assert deserialized._disallow_kwargs_override == strict
    assert deserialized.upstream_task_ids == set()
    assert deserialized.downstream_task_ids == set()
    # The XComArg stays an _XComRef placeholder until the DAG is deserialized.
    assert deserialized.op_kwargs_expand_input == _ExpandInputRef(
        key="list-of-dicts",
        value=_XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY}),
    )
    assert deserialized.partial_kwargs == {
        "op_args": [],
        "op_kwargs": {"arg1": [1, 2, {"a": "b"}]},
        "retry_delay": timedelta(seconds=30),
    }

    # this dag is not pickleable in this context, so we have to simply
    # set it to None
    deserialized.dag = None

    # Ensure the serialized operator can also be correctly pickled, to ensure
    # correct interaction between DAG pickling and serialization. This is done
    # here so we don't need to duplicate tests between pickled and non-pickled
    # DAGs everywhere else.
    pickled = pickle.loads(pickle.dumps(deserialized))
    assert pickled.op_kwargs_expand_input == _ExpandInputRef(
        "list-of-dicts",
        _XComRef({"task_id": "op1", "key": XCOM_RETURN_KEY}),
    )
    assert pickled.partial_kwargs == {
        "op_args": [],
        "op_kwargs": {"arg1": [1, 2, {"a": "b"}]},
        "retry_delay": timedelta(seconds=30),
    }
def test_mapped_task_group_serde():
    """A mapped @task_group serializes with its expand_input and round-trips to a SerializedTaskGroup."""
    from airflow.models.expandinput import SchedulerDictOfListsExpandInput
    from airflow.sdk.definitions.decorators.task_group import task_group
    from airflow.serialization.definitions.taskgroup import SerializedTaskGroup

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:

        @task_group
        def tg(a: str) -> None:
            BaseOperator(task_id="op1")
            # Expanding an operator *inside* an expanded task group is not
            # supported yet; assert the exact error message.
            with pytest.raises(NotImplementedError) as ctx:
                BashOperator.partial(task_id="op2").expand(bash_command=["ls", a])
            assert str(ctx.value) == "operator expansion in an expanded task group is not yet supported"

        tg.expand(a=[".", ".."])

    ser_dag = SerializedBaseOperator.serialize(dag)
    assert ser_dag[Encoding.VAR]["task_group"]["children"]["tg"] == (
        "taskgroup",
        {
            "_group_id": "tg",
            "children": {
                "tg.op1": ("operator", "tg.op1"),
                # "tg.op2": ("operator", "tg.op2"),
            },
            "downstream_group_ids": [],
            "downstream_task_ids": [],
            "expand_input": {
                "type": "dict-of-lists",
                "value": {"__type": "dict", "__var": {"a": [".", ".."]}},
            },
            "group_display_name": "",
            "is_mapped": True,
            "prefix_group_id": True,
            "tooltip": "",
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "upstream_group_ids": [],
            "upstream_task_ids": [],
        },
    )

    # Deserializing the DAG restores the group as a SerializedTaskGroup with a
    # scheduler-side expand input holding the original literal mapping.
    serde_dag = SerializedDAG.deserialize_dag(ser_dag[Encoding.VAR])
    serde_tg = serde_dag.task_group.children["tg"]
    assert isinstance(serde_tg, SerializedTaskGroup)
    assert serde_tg._expand_input == SchedulerDictOfListsExpandInput({"a": [".", ".."]})
@pytest.mark.db_test
def test_mapped_task_with_operator_extra_links_property():
    """Extra links declared via an operator *property* serialize as XCom-backed links on a mapped task."""

    class _DummyOperator(BaseOperator):
        def __init__(self, inputs, **kwargs):
            super().__init__(**kwargs)
            self.inputs = inputs

        @property
        def operator_extra_links(self):
            # Property (not class attribute) on purpose — this is what the
            # serializer must handle.
            return (AirflowLink2(),)

    with DAG("test-dag", schedule=None, start_date=datetime(2020, 1, 1)) as dag:
        _DummyOperator.partial(task_id="task").expand(inputs=[1, 2, 3])

    serialized_dag = SerializedBaseOperator.serialize(dag)
    # The link is serialized by name -> XCom key, not by class path.
    assert serialized_dag[Encoding.VAR]["tasks"][0]["__var"] == {
        "task_id": "task",
        "expand_input": {
            "type": "dict-of-lists",
            "value": {"__type": "dict", "__var": {"inputs": [1, 2, 3]}},
        },
        "partial_kwargs": {
            "retry_delay": {"__type": "timedelta", "__var": 300.0},
        },
        "_disallow_kwargs_override": False,
        "_expand_input_attr": "expand_input",
        "_operator_extra_links": {"airflow": "_link_AirflowLink2"},
        "template_fields": [],
        "task_type": "_DummyOperator",
        "_task_module": "unit.serialization.test_dag_serialization",
        "_is_mapped": True,
    }

    deserialized_dag = SerializedDAG.deserialize_dag(serialized_dag[Encoding.VAR])
    # operator defined links have to be instances of XComOperatorLink
    assert deserialized_dag.task_dict["task"].operator_extra_links == [
        XComOperatorLink(name="airflow", xcom_key="_link_AirflowLink2")
    ]
    mapped_task = deserialized_dag.task_dict["task"]
    assert mapped_task.operator_extra_link_dict == {
        "airflow": XComOperatorLink(name="airflow", xcom_key="_link_AirflowLink2")
    }
    # Globally-registered links remain available alongside the operator's own.
    assert mapped_task.global_operator_extra_link_dict == {"airflow": AirflowLink(), "github": GithubLink()}
    assert mapped_task.extra_links == sorted({"airflow", "github"})
def empty_function(*args, **kwargs):
    """No-op callable fixture: accepts any arguments, does nothing, returns None."""
def test_python_callable_in_partial_kwargs():
    """``python_callable`` in partial kwargs is replaced by its qualified name during serialization."""
    from airflow.providers.standard.operators.python import PythonOperator

    mapped_op = PythonOperator.partial(
        task_id="task",
        python_callable=empty_function,
    ).expand(op_kwargs=[{"x": 1}])
    expected_name = qualname(empty_function)

    encoded = SerializedBaseOperator.serialize_mapped_operator(mapped_op)
    # The callable object itself must never land in the payload — only its name.
    assert "python_callable" not in encoded["partial_kwargs"]
    assert encoded["partial_kwargs"]["python_callable_name"] == expected_name

    decoded = SerializedBaseOperator.deserialize_operator(encoded)
    assert "python_callable" not in decoded.partial_kwargs
    assert decoded.partial_kwargs["python_callable_name"] == expected_name
def test_handle_v1_serdag():
    """
    A v1 serialized DAG upgraded via ``conversion_v1_to_v2`` then
    ``conversion_v2_to_v3`` must deserialize to the same DAG as the v3 ground
    truth, including dataset->asset renames in dag_dependencies.
    """
    # Hand-written v1 payload (pre-asset terminology: "dataset", "_task_type",
    # "_dag_id", "schedule_interval", "sla", "_task_group", ...).
    v1 = {
        "__version": 1,
        "dag": {
            "default_args": {
                "__type": "dict",
                "__var": {
                    "depends_on_past": False,
                    "retries": 1,
                    "retry_delay": {"__type": "timedelta", "__var": 240.0},
                    "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                    "sla": {"__type": "timedelta", "__var": 100.0},
                },
            },
            "start_date": 1564617600.0,
            "_task_group": {
                "_group_id": None,
                "prefix_group_id": True,
                "children": {
                    "bash_task": ("operator", "bash_task"),
                    "custom_task": ("operator", "custom_task"),
                },
                "tooltip": "",
                "ui_color": "CornflowerBlue",
                "ui_fgcolor": "#000",
                "upstream_group_ids": [],
                "downstream_group_ids": [],
                "upstream_task_ids": [],
                "downstream_task_ids": [],
            },
            "is_paused_upon_creation": False,
            "max_active_runs": 16,
            "max_active_tasks": 16,
            "max_consecutive_failed_dag_runs": 0,
            "_dag_id": "simple_dag",
            "deadline": None,
            "doc_md": "### DAG Tutorial Documentation",
            "fileloc": None,
            "_processor_dags_folder": (
                AIRFLOW_REPO_ROOT_PATH / "airflow-core" / "tests" / "unit" / "dags"
            ).as_posix(),
            "tasks": [
                {
                    "__type": "operator",
                    "__var": {
                        "task_id": "bash_task",
                        "retries": 1,
                        "retry_delay": 240.0,
                        "max_retry_delay": 600.0,
                        "sla": 100.0,
                        "downstream_task_ids": [],
                        "ui_color": "#f0ede4",
                        "ui_fgcolor": "#000",
                        "template_ext": [".sh", ".bash"],
                        "template_fields": ["bash_command", "env", "cwd"],
                        "template_fields_renderers": {"bash_command": "bash", "env": "json"},
                        "bash_command": "echo {{ task.task_id }}",
                        "_task_type": "BashOperator",
                        # Slightly difference from v2-10-stable here, we manually changed this path
                        "_task_module": "airflow.providers.standard.operators.bash",
                        "owner": "airflow1",
                        "pool": "pool1",
                        "task_display_name": "my_bash_task",
                        "is_setup": False,
                        "is_teardown": False,
                        "on_failure_fail_dagrun": False,
                        "executor_config": {
                            "__type": "dict",
                            "__var": {
                                "pod_override": {
                                    "__type": "k8s.V1Pod",
                                    "__var": PodGenerator.serialize_pod(executor_config_pod),
                                }
                            },
                        },
                        "doc_md": "### Task Tutorial Documentation",
                        "_log_config_logger_name": "airflow.task.operators",
                        "_needs_expansion": False,
                        "weight_rule": "downstream",
                        "start_trigger_args": None,
                        "start_from_trigger": False,
                        "inlets": [
                            {
                                "__type": "dataset",
                                "__var": {
                                    "extra": {},
                                    "uri": "asset-1",
                                },
                            },
                            {
                                "__type": "dataset_alias",
                                "__var": {"name": "alias-name"},
                            },
                        ],
                        "outlets": [
                            {
                                "__type": "dataset",
                                "__var": {
                                    "extra": {},
                                    "uri": "asset-2",
                                },
                            },
                        ],
                    },
                },
                {
                    "__type": "operator",
                    "__var": {
                        "task_id": "custom_task",
                        "retries": 1,
                        "retry_delay": 240.0,
                        "max_retry_delay": 600.0,
                        "sla": 100.0,
                        "downstream_task_ids": [],
                        "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                        "ui_color": "#fff",
                        "ui_fgcolor": "#000",
                        "template_ext": [],
                        "template_fields": ["bash_command"],
                        "template_fields_renderers": {},
                        "_task_type": "CustomOperator",
                        "_operator_name": "@custom",
                        # Slightly difference from v2-10-stable here, we manually changed this path
                        "_task_module": "tests_common.test_utils.mock_operators",
                        "pool": "default_pool",
                        "is_setup": False,
                        "is_teardown": False,
                        "on_failure_fail_dagrun": False,
                        "_log_config_logger_name": "airflow.task.operators",
                        "_needs_expansion": False,
                        "weight_rule": "downstream",
                        "start_trigger_args": None,
                        "start_from_trigger": False,
                    },
                },
            ],
            "schedule_interval": {"__type": "timedelta", "__var": 86400.0},
            "timezone": "UTC",
            "_access_control": {
                "__type": "dict",
                "__var": {
                    "test_role": {
                        "__type": "dict",
                        "__var": {
                            "DAGs": {
                                "__type": "set",
                                "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                            }
                        },
                    }
                },
            },
            "edge_info": {},
            "dag_dependencies": [
                # dataset as schedule (source)
                {
                    "source": "dataset",
                    "target": "dag1",
                    "dependency_type": "dataset",
                    "dependency_id": "dataset_uri_1",
                },
                # dataset alias (resolved) as schedule (source)
                {
                    "source": "dataset",
                    "target": "dataset-alias:alias_name_1",
                    "dependency_type": "dataset",
                    "dependency_id": "dataset_uri_2",
                },
                {
                    "source": "dataset:alias_name_1",
                    "target": "dag2",
                    "dependency_type": "dataset-alias",
                    "dependency_id": "alias_name_1",
                },
                # dataset alias (not resolved) as schedule (source)
                {
                    "source": "dataset-alias",
                    "target": "dag2",
                    "dependency_type": "dataset-alias",
                    "dependency_id": "alias_name_2",
                },
                # dataset as outlets (target)
                {
                    "source": "dag10",
                    "target": "dataset",
                    "dependency_type": "dataset",
                    "dependency_id": "dataset_uri_10",
                },
                # dataset alias (resolved) as outlets (target)
                {
                    "source": "dag20",
                    "target": "dataset-alias:alias_name_10",
                    "dependency_type": "dataset",
                    "dependency_id": "dataset_uri_20",
                },
                {
                    "source": "dataset:dataset_uri_20",
                    "target": "dataset-alias",
                    "dependency_type": "dataset-alias",
                    "dependency_id": "alias_name_10",
                },
                # dataset alias (not resolved) as outlets (target)
                {
                    "source": "dag2",
                    "target": "dataset-alias",
                    "dependency_type": "dataset-alias",
                    "dependency_id": "alias_name_2",
                },
            ],
            "params": [],
        },
    }
    # After conversion every "dataset" term must be renamed to "asset" and a
    # "label" field added; entries stay in the same order as above.
    expected_dag_dependencies = [
        # asset as schedule (source)
        {
            "dependency_id": "dataset_uri_1",
            "dependency_type": "asset",
            "label": "dataset_uri_1",
            "source": "asset",
            "target": "dag1",
        },
        # asset alias (resolved) as schedule (source)
        {
            "dependency_id": "dataset_uri_2",
            "dependency_type": "asset",
            "label": "dataset_uri_2",
            "source": "asset",
            "target": "asset-alias:alias_name_1",
        },
        {
            "dependency_id": "alias_name_1",
            "dependency_type": "asset-alias",
            "label": "alias_name_1",
            "source": "asset:alias_name_1",
            "target": "dag2",
        },
        # asset alias (not resolved) as schedule (source)
        {
            "dependency_id": "alias_name_2",
            "dependency_type": "asset-alias",
            "label": "alias_name_2",
            "source": "asset-alias",
            "target": "dag2",
        },
        # asset as outlets (target)
        {
            "dependency_id": "dataset_uri_10",
            "dependency_type": "asset",
            "label": "dataset_uri_10",
            "source": "dag10",
            "target": "asset",
        },
        # asset alias (resolved) as outlets (target)
        {
            "dependency_id": "dataset_uri_20",
            "dependency_type": "asset",
            "label": "dataset_uri_20",
            "source": "dag20",
            "target": "asset-alias:alias_name_10",
        },
        {
            "dependency_id": "alias_name_10",
            "dependency_type": "asset-alias",
            "label": "alias_name_10",
            "source": "asset:dataset_uri_20",
            "target": "asset-alias",
        },
        # asset alias (not resolved) as outlets (target)
        {
            "dependency_id": "alias_name_2",
            "dependency_type": "asset-alias",
            "label": "alias_name_2",
            "source": "dag2",
            "target": "asset-alias",
        },
    ]
    # Upgrade in place: v1 -> v2 -> v3, then deserialize as current format.
    SerializedDAG.conversion_v1_to_v2(v1)
    SerializedDAG.conversion_v2_to_v3(v1)
    dag = SerializedDAG.from_dict(v1)

    expected_sdag = copy.deepcopy(serialized_simple_dag_ground_truth)
    expected = SerializedDAG.from_dict(expected_sdag)
    # Compare field-by-field for readable failure messages.
    fields_to_verify = set(vars(expected).keys()) - {
        "task_group",  # Tested separately
        "dag_dependencies",  # Tested separately
        "last_loaded",  # Dynamically set to utcnow
    }
    for f in fields_to_verify:
        dag_value = getattr(dag, f)
        expected_value = getattr(expected, f)
        assert dag_value == expected_value, (
            f"V2 DAG field '{f}' differs from V3: V2={dag_value!r} != V3={expected_value!r}"
        )
    for f in set(vars(expected.task_group).keys()) - {"dag"}:
        dag_tg_value = getattr(dag.task_group, f)
        expected_tg_value = getattr(expected.task_group, f)
        assert dag_tg_value == expected_tg_value, (
            f"V2 task_group field '{f}' differs: V2={dag_tg_value!r} != V3={expected_tg_value!r}"
        )
    assert getattr(dag, "dag_dependencies") == expected_dag_dependencies
def test_handle_v2_serdag():
    """Test that v2 serialized DAGs can be deserialized properly."""
    # Hand-written v2 payload: already uses asset terminology and "task_type",
    # but predates the v3 schema-defaults optimization.
    v2 = {
        "__version": 2,
        "dag": {
            "default_args": {
                "__type": "dict",
                "__var": {
                    "depends_on_past": False,
                    "retries": 1,
                    "retry_delay": {"__type": "timedelta", "__var": 240.0},
                    "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                },
            },
            "start_date": 1564617600.0,
            "timetable": {
                "__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
                "__var": {
                    "delta": 86400.0,
                },
            },
            "task_group": {
                "_group_id": None,
                "group_display_name": "",
                "prefix_group_id": True,
                "children": {
                    "bash_task": ("operator", "bash_task"),
                    "custom_task": ("operator", "custom_task"),
                },
                "tooltip": "",
                "ui_color": "CornflowerBlue",
                "ui_fgcolor": "#000",
                "upstream_group_ids": [],
                "downstream_group_ids": [],
                "upstream_task_ids": [],
                "downstream_task_ids": [],
            },
            "is_paused_upon_creation": False,
            "dag_id": "simple_dag",
            "catchup": False,
            "disable_bundle_versioning": False,
            "doc_md": "### DAG Tutorial Documentation",
            "fileloc": None,
            "_processor_dags_folder": (
                AIRFLOW_REPO_ROOT_PATH / "airflow-core" / "tests" / "unit" / "dags"
            ).as_posix(),
            "tasks": [
                {
                    "__type": "operator",
                    "__var": {
                        "task_id": "bash_task",
                        "retries": 1,
                        "retry_delay": 240.0,
                        "max_retry_delay": 600.0,
                        "downstream_task_ids": [],
                        "ui_color": "#f0ede4",
                        "ui_fgcolor": "#000",
                        "template_ext": [".sh", ".bash"],
                        "template_fields": ["bash_command", "env", "cwd"],
                        "template_fields_renderers": {
                            "bash_command": "bash",
                            "env": "json",
                        },
                        "bash_command": "echo {{ task.task_id }}",
                        "task_type": "BashOperator",
                        "_task_module": "airflow.providers.standard.operators.bash",
                        "owner": "airflow1",
                        "pool": "pool1",
                        "is_setup": False,
                        "is_teardown": False,
                        "on_failure_fail_dagrun": False,
                        "executor_config": {
                            "__type": "dict",
                            "__var": {
                                "pod_override": {
                                    "__type": "k8s.V1Pod",
                                    "__var": PodGenerator.serialize_pod(executor_config_pod),
                                }
                            },
                        },
                        "doc_md": "### Task Tutorial Documentation",
                        "_needs_expansion": False,
                        "weight_rule": "downstream",
                        "start_trigger_args": None,
                        "start_from_trigger": False,
                        "inlets": [
                            {
                                "__type": "asset",
                                "__var": {
                                    "extra": {},
                                    "group": "asset",
                                    "name": "asset-1",
                                    "uri": "asset-1",
                                },
                            },
                            {
                                "__type": "asset_alias",
                                "__var": {"group": "asset", "name": "alias-name"},
                            },
                        ],
                        "outlets": [
                            {
                                "__type": "asset",
                                "__var": {
                                    "extra": {},
                                    "group": "asset",
                                    "name": "asset-2",
                                    "uri": "asset-2",
                                },
                            },
                        ],
                    },
                },
                {
                    "__type": "operator",
                    "__var": {
                        "task_id": "custom_task",
                        "retries": 1,
                        "retry_delay": 240.0,
                        "max_retry_delay": 600.0,
                        "downstream_task_ids": [],
                        "_operator_extra_links": {"Google Custom": "_link_CustomOpLink"},
                        "ui_color": "#fff",
                        "ui_fgcolor": "#000",
                        "template_ext": [],
                        "template_fields": ["bash_command"],
                        "template_fields_renderers": {},
                        "task_type": "CustomOperator",
                        "_operator_name": "@custom",
                        "_task_module": "tests_common.test_utils.mock_operators",
                        "pool": "default_pool",
                        "is_setup": False,
                        "is_teardown": False,
                        "on_failure_fail_dagrun": False,
                        "_needs_expansion": False,
                        "weight_rule": "downstream",
                        "start_trigger_args": None,
                        "start_from_trigger": False,
                    },
                },
            ],
            "timezone": "UTC",
            "access_control": {
                "__type": "dict",
                "__var": {
                    "test_role": {
                        "__type": "dict",
                        "__var": {
                            "DAGs": {
                                "__type": "set",
                                "__var": [
                                    permissions.ACTION_CAN_READ,
                                    permissions.ACTION_CAN_EDIT,
                                ],
                            }
                        },
                    }
                },
            },
            "edge_info": {},
            "dag_dependencies": [
                {
                    "dependency_id": '{"name": "asset-2", "uri": "asset-2"}',
                    "dependency_type": "asset",
                    "label": "asset-2",
                    "source": "simple_dag",
                    "target": "asset",
                },
            ],
            "params": [],
            "tags": [],
        },
    }
    # Test that v2 DAGs can be deserialized without conversion
    dag = SerializedDAG.from_dict(v2)

    expected_sdag = copy.deepcopy(serialized_simple_dag_ground_truth)
    expected = SerializedDAG.from_dict(expected_sdag)
    # Compare field-by-field for readable failure messages.
    fields_to_verify = set(vars(expected).keys()) - {
        "task_group",  # Tested separately
        "last_loaded",  # Dynamically set to utcnow
    }
    for f in fields_to_verify:
        dag_value = getattr(dag, f)
        expected_value = getattr(expected, f)
        assert dag_value == expected_value, (
            f"V2 DAG field '{f}' differs from V3: V2={dag_value!r} != V3={expected_value!r}"
        )
    for f in set(vars(expected.task_group).keys()) - {"dag"}:
        dag_tg_value = getattr(dag.task_group, f)
        expected_tg_value = getattr(expected.task_group, f)
        assert dag_tg_value == expected_tg_value, (
            f"V2 task_group field '{f}' differs: V2={dag_tg_value!r} != V3={expected_tg_value!r}"
        )
def test_dag_schema_defaults_optimization():
"""Test that DAG fields matching schema defaults are excluded from serialization."""
# Create DAG with all schema default values
dag_with_defaults = DAG(
dag_id="test_defaults_dag",
start_date=datetime(2023, 1, 1),
# These should match schema defaults and be excluded
catchup=False,
fail_fast=False,
max_active_runs=16,
max_active_tasks=16,
max_consecutive_failed_dag_runs=0,
render_template_as_native_obj=False,
disable_bundle_versioning=False,
# These should be excluded as None
description=None,
doc_md=None,
)
# Serialize and check exclusions
serialized = SerializedDAG.to_dict(dag_with_defaults)
dag_data = serialized["dag"]
# Schema default fields should be excluded
for field in SerializedDAG.get_schema_defaults("dag").keys():
assert field not in dag_data, f"Schema default field '{field}' should be excluded"
# None fields should also be excluded
none_fields = ["description", "doc_md"]
for field in none_fields:
assert field not in dag_data, f"None field '{field}' should be excluded"
# Test deserialization restores defaults correctly
deserialized_dag = SerializedDAG.from_dict(serialized)
# Verify schema defaults are restored
assert deserialized_dag.catchup is False
assert deserialized_dag.fail_fast is False
assert deserialized_dag.max_active_runs == 16
assert deserialized_dag.max_active_tasks == 16
assert deserialized_dag.max_consecutive_failed_dag_runs == 0
assert deserialized_dag.render_template_as_native_obj is False
assert deserialized_dag.disable_bundle_versioning is False
# Test with non-default values (should be included)
dag_non_defaults = DAG(
dag_id="test_non_defaults_dag",
start_date=datetime(2023, 1, 1),
catchup=True, # Non-default
max_active_runs=32, # Non-default
description="Test description", # Non-None
)
serialized_non_defaults = SerializedDAG.to_dict(dag_non_defaults)
dag_non_defaults_data = serialized_non_defaults["dag"]
# Non-default values should be included
assert "catchup" in dag_non_defaults_data
assert dag_non_defaults_data["catchup"] is True
assert "max_active_runs" in dag_non_defaults_data
assert dag_non_defaults_data["max_active_runs"] == 32
assert "description" in dag_non_defaults_data
assert dag_non_defaults_data["description"] == "Test description"
def test_email_optimization_removes_email_attrs_when_email_empty():
"""Test that email_on_failure and email_on_retry are removed when email is empty."""
with DAG(dag_id="test_email_optimization") as dag:
BashOperator(
task_id="test_task",
bash_command="echo test",
email=None, # Empty email
email_on_failure=True, # This should be removed during serialization
email_on_retry=True, # This should be removed during serialization
)
serialized_dag = SerializedDAG.to_dict(dag)
task_serialized = serialized_dag["dag"]["tasks"][0]["__var"]
assert task_serialized is not None
assert "email_on_failure" not in task_serialized
assert "email_on_retry" not in task_serialized
# But they should be present when email is not empty
with DAG(dag_id="test_email_with_attrs") as dag_with_email:
BashOperator(
task_id="test_task_with_email",
bash_command="echo test",
email="test@example.com", # Non-empty email
email_on_failure=True,
email_on_retry=True,
)
serialized_dag_with_email = SerializedDAG.to_dict(dag_with_email)
task_with_email_serialized = serialized_dag_with_email["dag"]["tasks"][0]["__var"]
assert task_with_email_serialized is not None
# email_on_failure and email_on_retry SHOULD be in the serialized task
# since email is not empty
assert "email" in task_with_email_serialized
assert task_with_email_serialized["email"] == "test@example.com"
def dummy_callback():
pass
@pytest.mark.parametrize(
("callback_config", "expected_flags", "is_mapped"),
[
# Regular operator tests
(
{
"on_failure_callback": dummy_callback,
"on_retry_callback": [dummy_callback, dummy_callback],
"on_success_callback": dummy_callback,
},
{"has_on_failure_callback": True, "has_on_retry_callback": True, "has_on_success_callback": True},
False,
),
(
{}, # No callbacks
{
"has_on_failure_callback": False,
"has_on_retry_callback": False,
"has_on_success_callback": False,
},
False,
),
(
{"on_failure_callback": [], "on_success_callback": None}, # Empty callbacks
{"has_on_failure_callback": False, "has_on_success_callback": False},
False,
),
# Mapped operator tests
(
{"on_failure_callback": dummy_callback, "on_success_callback": [dummy_callback, dummy_callback]},
{"has_on_failure_callback": True, "has_on_success_callback": True},
True,
),
(
{}, # Mapped operator without callbacks
{"has_on_failure_callback": False, "has_on_success_callback": False},
True,
),
],
)
def test_task_callback_boolean_optimization(callback_config, expected_flags, is_mapped):
"""Test that task callbacks are optimized using has_on_*_callback boolean flags."""
dag = DAG(dag_id="test_callback_dag")
if is_mapped:
# Create mapped operator
task = BashOperator.partial(task_id="test_task", dag=dag, **callback_config).expand(
bash_command=["echo 1", "echo 2"]
)
serialized = BaseSerialization.serialize(task)
deserialized = BaseSerialization.deserialize(serialized)
# For mapped operators, check partial_kwargs
serialized_data = serialized.get("__var", {}).get("partial_kwargs", {})
# Test serialization
for flag, expected in expected_flags.items():
if expected:
assert flag in serialized_data
assert serialized_data[flag] is True
else:
assert serialized_data.get(flag, False) is False
# Test deserialized properties
for flag, expected in expected_flags.items():
assert getattr(deserialized, flag) is expected
else:
# Create regular operator
task = BashOperator(task_id="test_task", bash_command="echo test", dag=dag, **callback_config)
serialized = BaseSerialization.serialize(task)
deserialized = BaseSerialization.deserialize(serialized)
# For regular operators, check top-level
serialized_data = serialized.get("__var", {})
# Test serialization (only True values are stored)
for flag, expected in expected_flags.items():
if expected:
assert serialized_data.get(flag, False) is True
else:
assert serialized_data.get(flag, False) is False
# Test deserialized properties
for flag, expected in expected_flags.items():
assert getattr(deserialized, flag) is expected
@pytest.mark.parametrize(
"kwargs",
[
{"inlets": [Asset(uri="file://some.txt")]},
{"outlets": [Asset(uri="file://some.txt")]},
{"on_success_callback": lambda *args, **kwargs: None},
{"on_execute_callback": lambda *args, **kwargs: None},
],
)
def test_is_schedulable_task_empty_operator_evaluates_true(kwargs):
from airflow.providers.standard.operators.empty import EmptyOperator
dag = DAG(dag_id="test_dag")
task = EmptyOperator(task_id="empty_task", dag=dag, **kwargs)
serialized_task = BaseSerialization.deserialize(BaseSerialization.serialize(task))
assert TI.is_task_schedulable(serialized_task)
@pytest.mark.parametrize(
"kwargs",
[
{},
{"on_failure_callback": lambda *args, **kwargs: None},
{"on_skipped_callback": lambda *args, **kwargs: None},
{"on_retry_callback": lambda *args, **kwargs: None},
],
)
def test_is_schedulable_task_empty_operator_evaluates_false(kwargs):
from airflow.providers.standard.operators.empty import EmptyOperator
dag = DAG(dag_id="test_dag")
task = EmptyOperator(task_id="empty_task", dag=dag, **kwargs)
serialized_task = BaseSerialization.deserialize(BaseSerialization.serialize(task))
assert not TI.is_task_schedulable(serialized_task)
def test_is_schedulable_task_non_empty_operator():
dag = DAG(dag_id="test_dag")
regular_task = BashOperator(task_id="regular", bash_command="echo test", dag=dag)
mapped_task = BashOperator.partial(task_id="mapped", dag=dag).expand(bash_command=["echo 1"])
serialized_regular = BaseSerialization.deserialize(BaseSerialization.serialize(regular_task))
serialized_mapped = BaseSerialization.deserialize(BaseSerialization.serialize(mapped_task))
assert TI.is_task_schedulable(serialized_regular)
assert TI.is_task_schedulable(serialized_mapped)
def test_task_callback_properties_exist():
"""Test that all callback boolean properties exist on both regular and mapped operators."""
dag = DAG(dag_id="test_dag")
regular_task = BashOperator(task_id="regular", bash_command="echo test", dag=dag)
mapped_task = BashOperator.partial(task_id="mapped", dag=dag).expand(bash_command=["echo 1"])
callback_properties = [
"has_on_execute_callback",
"has_on_failure_callback",
"has_on_success_callback",
"has_on_retry_callback",
"has_on_skipped_callback",
]
for prop in callback_properties:
assert hasattr(regular_task, prop), f"Regular operator missing {prop}"
assert hasattr(mapped_task, prop), f"Mapped operator missing {prop}"
serialized_regular = BaseSerialization.deserialize(BaseSerialization.serialize(regular_task))
serialized_mapped = BaseSerialization.deserialize(BaseSerialization.serialize(mapped_task))
assert hasattr(serialized_regular, prop), f"Deserialized regular operator missing {prop}"
assert hasattr(serialized_mapped, prop), f"Deserialized mapped operator missing {prop}"
@pytest.mark.parametrize(
("old_callback_name", "new_callback_name"),
[
("on_execute_callback", "has_on_execute_callback"),
("on_failure_callback", "has_on_failure_callback"),
("on_success_callback", "has_on_success_callback"),
("on_retry_callback", "has_on_retry_callback"),
("on_skipped_callback", "has_on_skipped_callback"),
],
)
def test_task_callback_backward_compatibility(old_callback_name, new_callback_name):
"""Test that old serialized DAGs with on_*_callback keys are correctly converted to has_on_*_callback."""
old_serialized_task = {
"is_setup": False,
old_callback_name: [
" def dumm_callback(*args, **kwargs):\n # hello\n pass\n"
],
"is_teardown": False,
"task_type": "BaseOperator",
"pool": "default_pool",
"task_id": "simple_task",
"template_fields": [],
"on_failure_fail_dagrun": False,
"downstream_task_ids": [],
"template_ext": [],
"ui_fgcolor": "#000",
"weight_rule": "downstream",
"ui_color": "#fff",
"template_fields_renderers": {},
"_needs_expansion": False,
"start_from_trigger": False,
"_task_module": "airflow.sdk.bases.operator",
"start_trigger_args": None,
}
# Test deserialization converts old format to new format
deserialized_task = SerializedBaseOperator.deserialize_operator(old_serialized_task)
# Verify the new format is present and correct
assert hasattr(deserialized_task, new_callback_name)
assert getattr(deserialized_task, new_callback_name) is True
assert not hasattr(deserialized_task, old_callback_name)
# Test with empty/None callback (should convert to False)
old_serialized_task[old_callback_name] = None
deserialized_task_empty = SerializedBaseOperator.deserialize_operator(old_serialized_task)
assert getattr(deserialized_task_empty, new_callback_name) is False
def test_weight_rule_absolute_serialization_deserialization():
"""Test that weight_rule can be serialized and deserialized correctly."""
from airflow.sdk import task
with DAG("test_weight_rule_dag") as dag:
@task(weight_rule=WeightRule.ABSOLUTE)
def test_task():
return "test"
test_task()
serialized_dag = SerializedDAG.to_dict(dag)
assert serialized_dag["dag"]["tasks"][0]["__var"]["weight_rule"] == "absolute"
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_task = deserialized_dag.task_dict["test_task"]
assert isinstance(deserialized_task.weight_rule, _AbsolutePriorityWeightStrategy)
| TestStringifiedDAGs |
python | scipy__scipy | scipy/linalg/tests/test_basic.py | {
"start": 7541,
"end": 18532
} | class ____:
def test_01_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 1D array.
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_upper(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_03_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 2D array with shape (3,1).
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1))
def test_01_lower(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_lower(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_float32(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_float32(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_complex(self):
# Solve
# [ 4 -j 2 0] [2-j]
# [ j 4 -j 2] X = [4-j]
# [ 2 j 4 -j] [4+j]
# [ 0 2 j 4] [2+j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
def test_02_complex(self):
# Solve
# [ 4 -j 2 0] [2-j 2+4j]
# [ j 4 -j 2] X = [4-j -1-j]
# [ 2 j 4 -j] [4+j 4+2j]
# [ 0 2 j 4] [2+j j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([[2-1j, 2+4j],
[4.0-1j, -1-1j],
[4.0+1j, 4+2j],
[2+1j, 1j]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_upper(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_03_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 2D array with shape (3,1).
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0]).reshape(-1, 1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1))
def test_tridiag_01_lower(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_lower(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_float32(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_float32(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_complex(self):
# Solve
# [ 4 -j 0] [ -j]
# [ j 4 -j] X = [4-j]
# [ 0 j 4] [4+j]
#
ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
b = array([-1.0j, 4.0-1j, 4+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0])
def test_tridiag_02_complex(self):
# Solve
# [ 4 -j 0] [ -j 4j]
# [ j 4 -j] X = [4-j -1-j]
# [ 0 j 4] [4+j 4 ]
#
ab = array([[-99, -1.0j, -1.0j],
[4.0, 4.0, 4.0]])
b = array([[-1j, 4.0j],
[4.0-1j, -1.0-1j],
[4.0+1j, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_check_finite(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, check_finite=False)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_bad_shapes(self):
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0]])
assert_raises(ValueError, solveh_banded, ab, b)
assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
assert_raises(ValueError, solveh_banded, ab, [1.0])
def test_1x1(self):
x = solveh_banded([[1]], [[1, 2, 3]])
assert_array_equal(x, [[1.0, 2.0, 3.0]])
assert_equal(x.dtype, np.dtype('f8'))
def test_native_list_arguments(self):
# Same as test_01_upper, using python's native list.
ab = [[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]]
b = [1.0, 4.0, 1.0, 2.0]
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
@pytest.mark.parametrize('dt_ab', [int, float, np.float32, complex, np.complex64])
@pytest.mark.parametrize('dt_b', [int, float, np.float32, complex, np.complex64])
def test_empty(self, dt_ab, dt_b):
# ab contains one empty row corresponding to the diagonal
ab = np.array([[]], dtype=dt_ab)
b = np.array([], dtype=dt_b)
x = solveh_banded(ab, b)
assert x.shape == (0,)
assert x.dtype == solve(np.eye(1, dtype=dt_ab), np.ones(1, dtype=dt_b)).dtype
b = np.empty((0, 0), dtype=dt_b)
x = solveh_banded(ab, b)
assert x.shape == (0, 0)
assert x.dtype == solve(np.eye(1, dtype=dt_ab), np.ones(1, dtype=dt_b)).dtype
| TestSolveHBanded |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_gcs.py | {
"start": 2599,
"end": 5114
} | class ____:
def test_parse_gcs_url(self):
"""
Test GCS url parsing
"""
assert gcs._parse_gcs_url("gs://bucket/path/to/blob") == ("bucket", "path/to/blob")
# invalid URI
with pytest.raises(AirflowException):
gcs._parse_gcs_url("gs:/bucket/path/to/blob")
with pytest.raises(AirflowException):
gcs._parse_gcs_url("http://google.com/aaa")
# trailing slash
assert gcs._parse_gcs_url("gs://bucket/path/to/blob/") == ("bucket", "path/to/blob/")
# bucket only
assert gcs._parse_gcs_url("gs://bucket/") == ("bucket", "")
@pytest.mark.parametrize(
("json_value", "parsed_value"),
[
("[1, 2, 3]", [1, 2, 3]),
('"string value"', "string value"),
('{"key1": [1], "key2": {"subkey": 2}}', {"key1": [1], "key2": {"subkey": 2}}),
],
)
@mock.patch(GCS_STRING.format("GCSHook"))
@mock.patch(GCS_STRING.format("NamedTemporaryFile"))
def test_parse_json_from_gcs(self, temp_file, gcs_hook, json_value, parsed_value):
temp_file.return_value.__enter__.return_value.read.return_value = json_value
assert gcs.parse_json_from_gcs(gcp_conn_id=GCP_CONN_ID, file_uri=GCS_FILE_URI) == parsed_value
@mock.patch(GCS_STRING.format("GCSHook"))
def test_parse_json_from_gcs_fail_download(self, gsc_hook):
gsc_hook.return_value.download.return_value.side_effect = GoogleAPICallError
with pytest.raises(AirflowException):
gcs.parse_json_from_gcs(gcp_conn_id=GCP_CONN_ID, file_uri=GCS_FILE_URI)
@mock.patch(GCS_STRING.format("GCSHook"))
@mock.patch(GCS_STRING.format("NamedTemporaryFile"))
def test_parse_json_from_gcs_fail_read_file(self, temp_file, gcs_hook):
for exception_class in (ValueError, OSError, RuntimeError):
temp_file.return_value.__enter__.return_value.read.side_effect = exception_class
with pytest.raises(AirflowException):
gcs.parse_json_from_gcs(gcp_conn_id=GCP_CONN_ID, file_uri=GCS_FILE_URI)
@mock.patch(GCS_STRING.format("GCSHook"))
@mock.patch(GCS_STRING.format("NamedTemporaryFile"))
def test_parse_json_from_gcs_fail_json_loads(self, temp_file, gcs_hook):
temp_file.return_value.__enter__.return_value.read.return_value = "Invalid json"
with pytest.raises(AirflowException):
gcs.parse_json_from_gcs(gcp_conn_id=GCP_CONN_ID, file_uri=GCS_FILE_URI)
| TestGCSHookHelperFunctions |
python | kamyu104__LeetCode-Solutions | Python/maximize-palindrome-length-from-subsequences.py | {
"start": 45,
"end": 751
} | class ____(object):
def longestPalindrome(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
s = word1+word2
dp = [[0]*len(s) for _ in xrange(len(s))]
result = 0
for j in xrange(len(s)):
dp[j][j] = 1
for i in reversed(xrange(j)):
if s[i] == s[j]:
dp[i][j] = 2 if i+1 == j else dp[i+1][j-1] + 2
if i < len(word1) <= j:
result = max(result, dp[i][j])
else:
dp[i][j] = max(dp[i+1][j], dp[i][j-1])
return result
# Time: O((m + n)^2)
# Space: O((m + n)^2)
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/consts.py | {
"start": 82,
"end": 1677
} | class ____(str, Enum):
"""
An enum for the different step ids of the connector test pipeline.
"""
ACCEPTANCE = "acceptance"
INCREMENTAL_ACCEPTANCE = "incremental_acceptance"
BUILD_NORMALIZATION = "build_normalization"
BUILD_TAR = "build_tar"
BUILD = "build"
INTEGRATION = "integration"
PYTHON_CLI_VALIDATION = "python_cli_validation"
QA_CHECKS = "qa_checks"
UNIT = "unit"
VERSION_INC_CHECK = "version_inc_check"
TEST_ORCHESTRATOR = "test_orchestrator"
DEPLOY_ORCHESTRATOR = "deploy_orchestrator"
CONNECTOR_LIVE_TESTS = "connector_live_tests"
REGRESSION_TEST = "common.regression_test"
ADD_CHANGELOG_ENTRY = "bump_version.changelog"
SET_CONNECTOR_VERSION = "bump_version.set"
CHECK_UPDATE_CANDIDATE = "up_to_date.check"
UPDATE_POETRY = "up_to_date.poetry"
UPDATE_PULL_REQUEST = "up_to_date.pull"
LLM_RELATIONSHIPS = "llm_relationships"
DBML_FILE = "dbml_file"
PUBLISH_ERD = "publish_erd"
PULL_REQUEST_CREATE = "pull_request.create"
PULL_REQUEST_UPDATE = "pull_request.update"
MANIFEST_ONLY_CHECK = "migrate_to_manifest_only.check"
MANIFEST_ONLY_STRIP = "migrate_to_manifest_only.strip"
MANIFEST_ONLY_UPDATE = "migrate_to_manifest_only.update"
INLINE_CANDIDATE = "migration_to_inline_schemas.candidate"
INLINE_MIGRATION = "migration_to_inline_schemas.migration"
INLINE_CLEANUP = "migration_to_inline_schemas.cleanup"
LOAD_IMAGE_TO_LOCAL_DOCKER_HOST = "load_image_to_local_docker_host"
def __str__(self) -> str:
return self.value
| CONNECTOR_TEST_STEP_ID |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 199840,
"end": 203011
} | class ____(TestCase):
def test_basic(self):
zen = [
'Beautiful is better than ugly',
'Explicit is better than implicit',
'Simple is better than complex',
'Complex is better than complicated',
'Flat is better than nested',
'Sparse is better than dense',
'Readability counts',
]
for size, expected in (
(
34,
[
(zen[0],),
(zen[1],),
(zen[2],),
(zen[3],),
(zen[4],),
(zen[5],),
(zen[6],),
],
),
(
61,
[
(zen[0], zen[1]),
(zen[2],),
(zen[3], zen[4]),
(zen[5], zen[6]),
],
),
(
90,
[
(zen[0], zen[1], zen[2]),
(zen[3], zen[4], zen[5]),
(zen[6],),
],
),
(
124,
[(zen[0], zen[1], zen[2], zen[3]), (zen[4], zen[5], zen[6])],
),
(
150,
[(zen[0], zen[1], zen[2], zen[3], zen[4]), (zen[5], zen[6])],
),
(
177,
[(zen[0], zen[1], zen[2], zen[3], zen[4], zen[5]), (zen[6],)],
),
):
with self.subTest(size=size):
actual = list(mi.constrained_batches(iter(zen), size))
self.assertEqual(actual, expected)
def test_max_count(self):
iterable = ['1', '1', '12345678', '12345', '12345']
max_size = 10
max_count = 2
actual = list(mi.constrained_batches(iterable, max_size, max_count))
expected = [('1', '1'), ('12345678',), ('12345', '12345')]
self.assertEqual(actual, expected)
def test_strict(self):
iterable = ['1', '123456789', '1']
size = 8
with self.assertRaises(ValueError):
list(mi.constrained_batches(iterable, size))
actual = list(mi.constrained_batches(iterable, size, strict=False))
expected = [('1',), ('123456789',), ('1',)]
self.assertEqual(actual, expected)
def test_get_len(self):
class Record(tuple):
def total_size(self):
return sum(len(x) for x in self)
record_3 = Record(('1', '23'))
record_5 = Record(('1234', '1'))
record_10 = Record(('1', '12345678', '1'))
record_2 = Record(('1', '1'))
iterable = [record_3, record_5, record_10, record_2]
self.assertEqual(
list(
mi.constrained_batches(
iterable, 10, get_len=lambda x: x.total_size()
)
),
[(record_3, record_5), (record_10,), (record_2,)],
)
def test_bad_max(self):
with self.assertRaises(ValueError):
list(mi.constrained_batches([], 0))
| ConstrainedBatchesTests |
python | facebookresearch__faiss | tests/test_fast_scan.py | {
"start": 19353,
"end": 21417
} | class ____(unittest.TestCase):
def subtest_accuracy(self, paq):
"""
Compare IndexPAQFastScan with IndexPAQ (qint8)
"""
d = 16
ds = datasets.SyntheticDataset(d, 1000, 1000, 500)
gt = ds.get_groundtruth(k=1)
index = faiss.index_factory(d, f'{paq}2x3x4_Nqint8')
index.train(ds.get_train())
index.add(ds.get_database())
Dref, Iref = index.search(ds.get_queries(), 1)
indexfs = faiss.index_factory(d, f'{paq}2x3x4fs_Nlsq2x4')
indexfs.train(ds.get_train())
indexfs.add(ds.get_database())
Da, Ia = indexfs.search(ds.get_queries(), 1)
nq = Iref.shape[0]
recall_ref = (Iref == gt).sum() / nq
recall = (Ia == gt).sum() / nq
assert abs(recall_ref - recall) < 0.05
def test_accuracy_PLSQ(self):
self.subtest_accuracy("PLSQ")
def test_accuracy_PRQ(self):
self.subtest_accuracy("PRQ")
def subtest_factory(self, paq):
index = faiss.index_factory(16, f'{paq}2x3x4fs_Nlsq2x4')
q = faiss.downcast_Quantizer(index.aq)
self.assertEqual(q.nsplits, 2)
self.assertEqual(q.subquantizer(0).M, 3)
def test_factory(self):
self.subtest_factory('PRQ')
self.subtest_factory('PLSQ')
def subtest_io(self, factory_str):
d = 8
ds = datasets.SyntheticDataset(d, 1000, 500, 100)
index = faiss.index_factory(d, factory_str)
index.train(ds.get_train())
index.add(ds.get_database())
D1, I1 = index.search(ds.get_queries(), 1)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index, fname)
index2 = faiss.read_index(fname)
D2, I2 = index2.search(ds.get_queries(), 1)
np.testing.assert_array_equal(I1, I2)
finally:
if os.path.exists(fname):
os.unlink(fname)
def test_io(self):
self.subtest_io('PLSQ2x3x4fs_Nlsq2x4')
self.subtest_io('PRQ2x3x4fs_Nrq2x4')
| TestPAQFastScan |
python | openai__openai-python | src/openai/_types.py | {
"start": 6252,
"end": 7364
} | class ____(TypedDict, total=False):
auth: httpx.Auth
follow_redirects: bool
_T_co = TypeVar("_T_co", covariant=True)
if TYPE_CHECKING:
# This works because str.__contains__ does not accept object (either in typeshed or at runtime)
# https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
class SequenceNotStr(Protocol[_T_co]):
@overload
def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@overload
def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
def __contains__(self, value: object, /) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T_co]: ...
def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
def count(self, value: Any, /) -> int: ...
def __reversed__(self) -> Iterator[_T_co]: ...
else:
# just point this to a normal `Sequence` at runtime to avoid having to special case
# deserializing our custom sequence type
SequenceNotStr = Sequence
| HttpxSendArgs |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.