sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
paperless-ngx/paperless-ngx:src/paperless_remote/parsers.py | from pathlib import Path
from django.conf import settings
from paperless_tesseract.parsers import RasterisedDocumentParser
class RemoteEngineConfig:
def __init__(
self,
engine: str,
api_key: str | None = None,
endpoint: str | None = None,
):
self.engine = engine
self.api_key = api_key
self.endpoint = endpoint
def engine_is_valid(self):
valid = self.engine in ["azureai"] and self.api_key is not None
if self.engine == "azureai":
valid = valid and self.endpoint is not None
return valid
class RemoteDocumentParser(RasterisedDocumentParser):
    """
    This parser uses a remote OCR engine to parse documents. Currently, it supports Azure AI Vision
    as this is the only service that provides a remote OCR API with text-embedded PDF output.
    """

    logging_name = "paperless.parsing.remote"

    def get_settings(self) -> RemoteEngineConfig:
        """
        Returns the configuration for the remote OCR engine, loaded from Django settings.
        """
        return RemoteEngineConfig(
            engine=settings.REMOTE_OCR_ENGINE,
            api_key=settings.REMOTE_OCR_API_KEY,
            endpoint=settings.REMOTE_OCR_ENDPOINT,
        )

    def supported_mime_types(self):
        # Advertise mime types only when the remote engine is fully configured;
        # an empty dict effectively opts this parser out of consumption.
        # NOTE(review): self.settings is presumably populated from get_settings()
        # by the base parser class — confirm in RasterisedDocumentParser.
        if self.settings.engine_is_valid():
            return {
                "application/pdf": ".pdf",
                "image/png": ".png",
                "image/jpeg": ".jpg",
                "image/tiff": ".tiff",
                "image/bmp": ".bmp",
                "image/gif": ".gif",
                "image/webp": ".webp",
            }
        else:
            return {}

    def azure_ai_vision_parse(
        self,
        file: Path,
    ) -> str | None:
        """
        Uses Azure AI Vision to parse the document and return the text content.
        It requests a searchable PDF output with embedded text.
        The PDF is saved to the archive_path attribute.
        Returns the text content extracted from the document.
        If the parsing fails, it returns None.
        """
        # Imported lazily so the Azure SDK is only required when this engine is used.
        from azure.ai.documentintelligence import DocumentIntelligenceClient
        from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
        from azure.ai.documentintelligence.models import AnalyzeOutputOption
        from azure.ai.documentintelligence.models import DocumentContentFormat
        from azure.core.credentials import AzureKeyCredential

        client = DocumentIntelligenceClient(
            endpoint=self.settings.endpoint,
            credential=AzureKeyCredential(self.settings.api_key),
        )
        try:
            # The file content is read eagerly, so the handle is only needed
            # while building the analyze request.
            with file.open("rb") as f:
                analyze_request = AnalyzeDocumentRequest(bytes_source=f.read())
                poller = client.begin_analyze_document(
                    model_id="prebuilt-read",
                    body=analyze_request,
                    output_content_format=DocumentContentFormat.TEXT,
                    output=[AnalyzeOutputOption.PDF],  # request searchable PDF output
                    content_type="application/json",
                )
            poller.wait()
            # The operation id is required to fetch the generated PDF afterwards.
            result_id = poller.details["operation_id"]
            result = poller.result()
            # Download the PDF with embedded text
            self.archive_path = self.tempdir / "archive.pdf"
            with self.archive_path.open("wb") as f:
                for chunk in client.get_analyze_result_pdf(
                    model_id="prebuilt-read",
                    result_id=result_id,
                ):
                    f.write(chunk)
            return result.content
        except Exception as e:
            self.log.error(f"Azure AI Vision parsing failed: {e}")
        finally:
            client.close()
        # Reached only on failure: the success path returns inside the try block.
        return None

    def parse(self, document_path: Path, mime_type, file_name=None):
        # Entry point called by the consumer; stores extracted text on self.text.
        if not self.settings.engine_is_valid():
            self.log.warning(
                "No valid remote parser engine is configured, content will be empty.",
            )
            self.text = ""
        elif self.settings.engine == "azureai":
            self.text = self.azure_ai_vision_parse(document_path)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_remote/parsers.py",
"license": "GNU General Public License v3.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/paperless_remote/signals.py | def get_parser(*args, **kwargs):
    # Factory used by the consumer declaration; imported lazily to avoid
    # pulling in the parser (and its Django settings access) at import time.
    from paperless_remote.parsers import RemoteDocumentParser

    return RemoteDocumentParser(*args, **kwargs)
def get_supported_mime_types():
    """Return the mime-type map advertised by the remote parser."""
    from paperless_remote.parsers import RemoteDocumentParser

    parser = RemoteDocumentParser(None)
    return parser.supported_mime_types()
def remote_consumer_declaration(sender, **kwargs):
    """Consumer-declaration hook wiring the remote parser into paperless."""
    declaration = {
        "parser": get_parser,
        "weight": 5,
        "mime_types": get_supported_mime_types(),
    }
    return declaration
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_remote/signals.py",
"license": "GNU General Public License v3.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless_remote/tests/test_checks.py | from unittest import TestCase
from django.test import override_settings
from paperless_remote import check_remote_parser_configured
class TestChecks(TestCase):
    @override_settings(REMOTE_OCR_ENGINE=None)
    def test_no_engine(self) -> None:
        """No check messages are produced when no remote engine is configured."""
        messages = check_remote_parser_configured(None)
        self.assertEqual(len(messages), 0)

    @override_settings(REMOTE_OCR_ENGINE="azureai")
    @override_settings(REMOTE_OCR_API_KEY="somekey")
    @override_settings(REMOTE_OCR_ENDPOINT=None)
    def test_azure_no_endpoint(self) -> None:
        """A single check message is raised when the Azure endpoint is missing."""
        messages = check_remote_parser_configured(None)
        self.assertEqual(len(messages), 1)
        expected_prefix = (
            "Azure AI remote parser requires endpoint and API key to be configured."
        )
        self.assertTrue(messages[0].msg.startswith(expected_prefix))
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_remote/tests/test_checks.py",
"license": "GNU General Public License v3.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_remote/tests/test_parser.py | import uuid
from pathlib import Path
from unittest import mock
from django.test import TestCase
from django.test import override_settings
from documents.tests.utils import DirectoriesMixin
from documents.tests.utils import FileSystemAssertsMixin
from paperless_remote.parsers import RemoteDocumentParser
from paperless_remote.signals import get_parser
class TestParser(DirectoriesMixin, FileSystemAssertsMixin, TestCase):
    # Directory with sample documents shipped alongside the tests.
    SAMPLE_FILES = Path(__file__).resolve().parent / "samples"

    def assertContainsStrings(self, content: str, strings: list[str]) -> None:
        # Asserts that all strings appear in content, in the given order.
        indices = []
        for s in strings:
            if s in content:
                indices.append(content.index(s))
            else:
                self.fail(f"'{s}' is not in '{content}'")
        self.assertListEqual(indices, sorted(indices))

    @mock.patch("paperless_tesseract.parsers.run_subprocess")
    @mock.patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
    def test_get_text_with_azure(self, mock_client_cls, mock_subprocess) -> None:
        # Arrange mock Azure client
        mock_client = mock.Mock()
        mock_client_cls.return_value = mock_client
        # Simulate poller result and its `.details`
        mock_poller = mock.Mock()
        mock_poller.wait.return_value = None
        mock_poller.details = {"operation_id": "fake-op-id"}
        mock_client.begin_analyze_document.return_value = mock_poller
        mock_poller.result.return_value.content = "This is a test document."
        # Return dummy PDF bytes
        mock_client.get_analyze_result_pdf.return_value = [
            b"%PDF-",
            b"1.7 ",
            b"FAKEPDF",
        ]

        # Simulate pdftotext by writing dummy text to sidecar file
        def fake_run(cmd, *args, **kwargs) -> None:
            with Path(cmd[-1]).open("w", encoding="utf-8") as f:
                f.write("This is a test document.")

        mock_subprocess.side_effect = fake_run
        with override_settings(
            REMOTE_OCR_ENGINE="azureai",
            REMOTE_OCR_API_KEY="somekey",
            REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com",
        ):
            parser = get_parser(uuid.uuid4())
            parser.parse(
                self.SAMPLE_FILES / "simple-digital.pdf",
                "application/pdf",
            )
            # The extracted text should round-trip through the mocked pipeline.
            self.assertContainsStrings(
                parser.text.strip(),
                ["This is a test document."],
            )

    @mock.patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
    def test_get_text_with_azure_error_logged_and_returns_none(
        self,
        mock_client_cls,
    ) -> None:
        # A failure inside the SDK call must be logged and swallowed, the
        # client closed, and parser.text left as None.
        mock_client = mock.Mock()
        mock_client.begin_analyze_document.side_effect = RuntimeError("fail")
        mock_client_cls.return_value = mock_client
        with override_settings(
            REMOTE_OCR_ENGINE="azureai",
            REMOTE_OCR_API_KEY="somekey",
            REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com",
        ):
            parser = get_parser(uuid.uuid4())
            with mock.patch.object(parser.log, "error") as mock_log_error:
                parser.parse(
                    self.SAMPLE_FILES / "simple-digital.pdf",
                    "application/pdf",
                )
                self.assertIsNone(parser.text)
                mock_client.begin_analyze_document.assert_called_once()
                mock_client.close.assert_called_once()
                mock_log_error.assert_called_once()
                self.assertIn(
                    "Azure AI Vision parsing failed",
                    mock_log_error.call_args[0][0],
                )

    @override_settings(
        REMOTE_OCR_ENGINE="azureai",
        REMOTE_OCR_API_KEY="key",
        REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com",
    )
    def test_supported_mime_types_valid_config(self) -> None:
        # With a complete config the full mime-type map is advertised.
        parser = RemoteDocumentParser(uuid.uuid4())
        expected_types = {
            "application/pdf": ".pdf",
            "image/png": ".png",
            "image/jpeg": ".jpg",
            "image/tiff": ".tiff",
            "image/bmp": ".bmp",
            "image/gif": ".gif",
            "image/webp": ".webp",
        }
        self.assertEqual(parser.supported_mime_types(), expected_types)

    def test_supported_mime_types_invalid_config(self) -> None:
        # Without configuration the parser opts out by advertising nothing.
        parser = get_parser(uuid.uuid4())
        self.assertEqual(parser.supported_mime_types(), {})

    @override_settings(
        REMOTE_OCR_ENGINE=None,
        REMOTE_OCR_API_KEY=None,
        REMOTE_OCR_ENDPOINT=None,
    )
    def test_parse_with_invalid_config(self) -> None:
        # parse() must not fail with an invalid config; it yields empty text.
        parser = get_parser(uuid.uuid4())
        parser.parse(self.SAMPLE_FILES / "simple-digital.pdf", "application/pdf")
        self.assertEqual(parser.text, "")
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_remote/tests/test_parser.py",
"license": "GNU General Public License v3.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/regex.py | from __future__ import annotations
import logging
import textwrap
import regex
from django.conf import settings
logger = logging.getLogger("paperless.regex")

# Upper bound in seconds for a single regex search; overridable via the
# MATCH_REGEX_TIMEOUT_SECONDS setting, defaulting to 100 ms.
REGEX_TIMEOUT_SECONDS: float = getattr(settings, "MATCH_REGEX_TIMEOUT_SECONDS", 0.1)
def validate_regex_pattern(pattern: str) -> None:
    """
    Check that *pattern* compiles as a regular expression.

    Raises ValueError (carrying the compiler's message) when it does not.
    """
    try:
        regex.compile(pattern)
    except regex.error as compile_error:
        raise ValueError(compile_error.msg) from compile_error
def safe_regex_search(pattern: str, text: str, *, flags: int = 0):
    """
    Search *text* with a user-supplied pattern, bounded by a timeout.

    Returns the match object, or None when the pattern is invalid or the
    search times out; both failure modes are logged rather than raised.
    """
    # Pattern may be user input of arbitrary length; keep log lines short.
    display_pattern = textwrap.shorten(pattern, width=80, placeholder="…")
    try:
        validate_regex_pattern(pattern)
        compiled = regex.compile(pattern, flags=flags)
    except (regex.error, ValueError) as exc:
        logger.error(
            "Error while processing regular expression %s: %s",
            display_pattern,
            exc,
        )
        return None
    try:
        return compiled.search(text, timeout=REGEX_TIMEOUT_SECONDS)
    except TimeoutError:
        logger.warning(
            "Regular expression matching timed out for pattern %s",
            display_pattern,
        )
        return None
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/regex.py",
"license": "GNU General Public License v3.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/documents/workflows/actions.py | import logging
import re
import uuid
from pathlib import Path
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from documents.data_models import ConsumableDocument
from documents.data_models import DocumentMetadataOverrides
from documents.mail import EmailAttachment
from documents.mail import send_email
from documents.models import Correspondent
from documents.models import Document
from documents.models import DocumentType
from documents.models import WorkflowAction
from documents.models import WorkflowTrigger
from documents.plugins.base import StopConsumeTaskError
from documents.signals import document_consumption_finished
from documents.templating.workflows import parse_w_workflow_placeholders
from documents.workflows.webhooks import send_webhook
# Module-level logger for workflow action execution.
logger = logging.getLogger("paperless.workflows.actions")
def build_workflow_action_context(
    document: Document | ConsumableDocument,
    overrides: DocumentMetadataOverrides | None,
) -> dict:
    """
    Build context dictionary for workflow action placeholder parsing.

    When *overrides* is None the document is an existing Document and values
    come straight from the model; otherwise the document is still being
    consumed and values are resolved from the override ids.
    """
    use_overrides = overrides is not None
    if not use_overrides:
        return {
            "title": document.title,
            "doc_url": f"{settings.PAPERLESS_URL}{settings.BASE_URL}documents/{document.pk}/",
            "correspondent": document.correspondent.name
            if document.correspondent
            else "",
            "document_type": document.document_type.name
            if document.document_type
            else "",
            "owner_username": document.owner.username if document.owner else "",
            "filename": document.original_filename or "",
            "current_filename": document.filename or "",
            "added": timezone.localtime(document.added),
            "created": document.created,
            "id": document.pk,
        }
    # Resolve referenced objects from the override ids; missing ids yield None.
    correspondent_obj = (
        Correspondent.objects.filter(pk=overrides.correspondent_id).first()
        if overrides and overrides.correspondent_id
        else None
    )
    document_type_obj = (
        DocumentType.objects.filter(pk=overrides.document_type_id).first()
        if overrides and overrides.document_type_id
        else None
    )
    owner_obj = (
        User.objects.filter(pk=overrides.owner_id).first()
        if overrides and overrides.owner_id
        else None
    )
    # NOTE(review): original_file appears to be a Path here, used verbatim for
    # both filename fields — confirm callers expect the full path, not .name.
    filename = document.original_file if document.original_file else ""
    return {
        "title": overrides.title
        if overrides and overrides.title
        else str(document.original_file),
        "doc_url": "",
        "correspondent": correspondent_obj.name if correspondent_obj else "",
        "document_type": document_type_obj.name if document_type_obj else "",
        "owner_username": owner_obj.username if owner_obj else "",
        "filename": filename,
        "current_filename": filename,
        "added": timezone.localtime(timezone.now()),
        "created": overrides.created if overrides else None,
        "id": "",
    }
def execute_email_action(
    action: WorkflowAction,
    document: Document | ConsumableDocument,
    context: dict,
    logging_group,
    original_file: Path,
    trigger_type: WorkflowTrigger.WorkflowTriggerType,
) -> None:
    """
    Execute an email action for a workflow.

    Renders subject/body from workflow placeholders, optionally attaches the
    document file, and sends the message.  All send failures are logged, not
    raised.
    """
    if not settings.EMAIL_ENABLED:
        logger.error(
            "Email backend has not been configured, cannot send email notifications",
            extra={"group": logging_group},
        )
        return
    # Substitute workflow placeholders into subject and body (empty when unset).
    subject = (
        parse_w_workflow_placeholders(
            action.email.subject,
            context["correspondent"],
            context["document_type"],
            context["owner_username"],
            context["added"],
            context["filename"],
            context["current_filename"],
            context["created"],
            context["title"],
            context["doc_url"],
            context["id"],
        )
        if action.email.subject
        else ""
    )
    body = (
        parse_w_workflow_placeholders(
            action.email.body,
            context["correspondent"],
            context["document_type"],
            context["owner_username"],
            context["added"],
            context["filename"],
            context["current_filename"],
            context["created"],
            context["title"],
            context["doc_url"],
            context["id"],
        )
        if action.email.body
        else ""
    )
    try:
        attachments: list[EmailAttachment] = []
        if action.email.include_document:
            attachment: EmailAttachment | None = None
            # For updated/scheduled triggers on an existing Document, attach
            # the stored source file; otherwise fall back to the original file
            # from consumption.
            if trigger_type in [
                WorkflowTrigger.WorkflowTriggerType.DOCUMENT_UPDATED,
                WorkflowTrigger.WorkflowTriggerType.SCHEDULED,
            ] and isinstance(document, Document):
                friendly_name = (
                    Path(context["current_filename"]).name
                    if context["current_filename"]
                    else document.source_path.name
                )
                attachment = EmailAttachment(
                    path=document.source_path,
                    mime_type=document.mime_type,
                    friendly_name=friendly_name,
                )
            elif original_file:
                friendly_name = (
                    Path(context["current_filename"]).name
                    if context["current_filename"]
                    else original_file.name
                )
                attachment = EmailAttachment(
                    path=original_file,
                    mime_type=document.mime_type,
                    friendly_name=friendly_name,
                )
            if attachment:
                attachments = [attachment]
        n_messages = send_email(
            subject=subject,
            body=body,
            to=action.email.to.split(","),
            attachments=attachments,
        )
        logger.debug(
            f"Sent {n_messages} notification email(s) to {action.email.to}",
            extra={"group": logging_group},
        )
    except Exception as e:
        logger.exception(
            f"Error occurred sending notification email: {e}",
            extra={"group": logging_group},
        )
def execute_webhook_action(
    action: WorkflowAction,
    document: Document | ConsumableDocument,
    context: dict,
    logging_group,
    original_file: Path,
):
    """
    Execute a webhook action for a workflow.

    Builds the payload (key/value params or a rendered body), headers and an
    optional file attachment, then queues delivery via the send_webhook task.
    Failures are logged, never raised.
    """
    try:
        data = {}
        if action.webhook.use_params:
            # Each param value may contain workflow placeholders.
            if action.webhook.params:
                try:
                    for key, value in action.webhook.params.items():
                        data[key] = parse_w_workflow_placeholders(
                            value,
                            context["correspondent"],
                            context["document_type"],
                            context["owner_username"],
                            context["added"],
                            context["filename"],
                            context["current_filename"],
                            context["created"],
                            context["title"],
                            context["doc_url"],
                            context["id"],
                        )
                except Exception as e:
                    logger.error(
                        f"Error occurred parsing webhook params: {e}",
                        extra={"group": logging_group},
                    )
        elif action.webhook.body:
            # Raw body mode: the whole body string is placeholder-rendered.
            data = parse_w_workflow_placeholders(
                action.webhook.body,
                context["correspondent"],
                context["document_type"],
                context["owner_username"],
                context["added"],
                context["filename"],
                context["current_filename"],
                context["created"],
                context["title"],
                context["doc_url"],
                context["id"],
            )
        headers = {}
        if action.webhook.headers:
            try:
                headers = {str(k): str(v) for k, v in action.webhook.headers.items()}
            except Exception as e:
                logger.error(
                    f"Error occurred parsing webhook headers: {e}",
                    extra={"group": logging_group},
                )
        files = None
        if action.webhook.include_document:
            # File content is read eagerly so the task payload is self-contained.
            with original_file.open("rb") as f:
                files = {
                    "file": (
                        str(context["filename"])
                        if context["filename"]
                        else original_file.name,
                        f.read(),
                        document.mime_type,
                    ),
                }
        send_webhook.delay(
            url=action.webhook.url,
            data=data,
            headers=headers,
            files=files,
            as_json=action.webhook.as_json,
        )
        logger.debug(
            f"Webhook to {action.webhook.url} queued",
            extra={"group": logging_group},
        )
    except Exception as e:
        logger.exception(
            f"Error occurred sending webhook: {e}",
            extra={"group": logging_group},
        )
def execute_password_removal_action(
    action: WorkflowAction,
    document: Document | ConsumableDocument,
    logging_group,
) -> None:
    """
    Try to remove a password from a document using the configured list.

    For an existing Document, each configured password is tried in order until
    one succeeds.  For a document still in consumption, the attempt is deferred
    until consumption finishes by hooking the consumption-finished signal.
    """
    passwords = action.passwords
    if not passwords:
        logger.warning(
            "Password removal action %s has no passwords configured",
            action.pk,
            extra={"group": logging_group},
        )
        return
    # Passwords are configured as a comma- or newline-separated list.
    passwords = [
        password.strip()
        for password in re.split(r"[,\n]", passwords)
        if password.strip()
    ]
    if isinstance(document, ConsumableDocument):
        # hook the consumption-finished signal to attempt password removal later
        def handler(sender, **kwargs):
            try:
                consumed_document: Document = kwargs.get("document")
                if consumed_document is not None:
                    execute_password_removal_action(
                        action,
                        consumed_document,
                        logging_group,
                    )
            finally:
                # One-shot handler: always detach.  Without the finally, a
                # signal delivered with no "document" kwarg would leave the
                # handler connected indefinitely (weak=False keeps it alive).
                document_consumption_finished.disconnect(handler)

        document_consumption_finished.connect(handler, weak=False)
        return
    # import here to avoid circular dependency
    from documents.bulk_edit import remove_password

    for password in passwords:
        try:
            remove_password(
                [document.id],
                password=password,
                update_document=True,
                user=document.owner,
            )
            logger.info(
                "Removed password from document %s using workflow action %s",
                document.pk,
                action.pk,
                extra={"group": logging_group},
            )
            return
        except ValueError as e:
            logger.warning(
                "Password removal failed for document %s with supplied password: %s",
                document.pk,
                e,
                extra={"group": logging_group},
            )
    logger.error(
        "Password removal failed for document %s after trying all provided passwords",
        document.pk,
        extra={"group": logging_group},
    )
def execute_move_to_trash_action(
    action: WorkflowAction,
    document: Document | ConsumableDocument,
    logging_group: uuid.UUID | None,
) -> None:
    """
    Move a document to trash as part of a workflow.

    An existing Document is soft-deleted.  A document still in consumption has
    its source file removed and the consume task is aborted via
    StopConsumeTaskError.
    """
    if not isinstance(document, Document):
        # Still being consumed: remove the source file and abort the task.
        if document.original_file.exists():
            document.original_file.unlink()
        logger.info(
            f"Workflow move to trash action triggered during consumption, "
            f"deleting file {document.original_file}",
            extra={"group": logging_group},
        )
        raise StopConsumeTaskError(
            "Document deleted by workflow action during consumption",
        )
    document.delete()
    logger.debug(
        f"Moved document {document} to trash",
        extra={"group": logging_group},
    )
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/workflows/actions.py",
"license": "GNU General Public License v3.0",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/documents/workflows/mutations.py | import logging
from django.utils import timezone
from guardian.shortcuts import remove_perm
from documents.data_models import DocumentMetadataOverrides
from documents.models import CustomFieldInstance
from documents.models import Document
from documents.models import WorkflowAction
from documents.permissions import set_permissions_for_object
from documents.templating.workflows import parse_w_workflow_placeholders
# Module-level logger for workflow assignment/removal mutations.
logger = logging.getLogger("paperless.workflows.mutations")
def apply_assignment_to_document(
    action: WorkflowAction,
    document: Document,
    doc_tag_ids: list[int],
    logging_group,
):
    """
    Apply assignment actions to a Document instance.
    action: WorkflowAction, annotated with 'has_assign_*' boolean fields

    doc_tag_ids is mutated in place so the caller sees the updated tag set.
    """
    if action.has_assign_tags:
        tag_ids_to_add: set[int] = set()
        for tag in action.assign_tags.all():
            tag_ids_to_add.add(tag.pk)
            # Assigning a tag also assigns all of its ancestors.
            tag_ids_to_add.update(int(pk) for pk in tag.get_ancestors_pks())
        # Slice-assign so the caller's list object itself is updated.
        doc_tag_ids[:] = list(set(doc_tag_ids) | tag_ids_to_add)
    if action.assign_correspondent:
        document.correspondent = action.assign_correspondent
    if action.assign_document_type:
        document.document_type = action.assign_document_type
    if action.assign_storage_path:
        document.storage_path = action.assign_storage_path
    if action.assign_owner:
        document.owner = action.assign_owner
    if action.assign_title:
        try:
            document.title = parse_w_workflow_placeholders(
                action.assign_title,
                document.correspondent.name if document.correspondent else "",
                document.document_type.name if document.document_type else "",
                document.owner.username if document.owner else "",
                timezone.localtime(document.added),
                document.original_filename or "",
                document.filename or "",
                document.created,
                "",  # dont pass the title to avoid recursion
                "",  # no urls in titles
                document.pk,
            )
        except Exception:  # pragma: no cover
            logger.exception(
                f"Error occurred parsing title assignment '{action.assign_title}', falling back to original",
                extra={"group": logging_group},
            )
    if any(
        [
            action.has_assign_view_users,
            action.has_assign_view_groups,
            action.has_assign_change_users,
            action.has_assign_change_groups,
        ],
    ):
        permissions = {
            "view": {
                "users": action.assign_view_users.values_list("id", flat=True),
                "groups": action.assign_view_groups.values_list("id", flat=True),
            },
            "change": {
                "users": action.assign_change_users.values_list("id", flat=True),
                "groups": action.assign_change_groups.values_list("id", flat=True),
            },
        }
        # merge=True: assigned permissions are added to any existing ones.
        set_permissions_for_object(
            permissions=permissions,
            object=document,
            merge=True,
        )
    if action.has_assign_custom_fields:
        for field in action.assign_custom_fields.all():
            # Custom field values live in a type-specific column.
            value_field_name = CustomFieldInstance.get_value_field_name(
                data_type=field.data_type,
            )
            args = {
                value_field_name: action.assign_custom_fields_values.get(
                    str(field.pk),
                    None,
                ),
            }
            # for some reason update_or_create doesn't work here
            instance = CustomFieldInstance.objects.filter(
                field=field,
                document=document,
            ).first()
            if instance and args[value_field_name] is not None:
                # Existing instance with a new value: update in place.
                setattr(instance, value_field_name, args[value_field_name])
                instance.save()
            elif not instance:
                # No instance yet: create one (possibly with a None value).
                CustomFieldInstance.objects.create(
                    **args,
                    field=field,
                    document=document,
                )
def apply_assignment_to_overrides(
    action: WorkflowAction,
    overrides: DocumentMetadataOverrides,
):
    """
    Apply assignment actions to DocumentMetadataOverrides.
    action: WorkflowAction, annotated with 'has_assign_*' boolean fields

    Used while a document is still being consumed: instead of mutating a
    Document, the assignments are recorded on the overrides object.
    """
    if action.has_assign_tags:
        if overrides.tag_ids is None:
            overrides.tag_ids = []
        tag_ids_to_add: set[int] = set()
        for tag in action.assign_tags.all():
            tag_ids_to_add.add(tag.pk)
            # Assigning a tag also assigns all of its ancestors.
            tag_ids_to_add.update(int(pk) for pk in tag.get_ancestors_pks())
        overrides.tag_ids = list(set(overrides.tag_ids) | tag_ids_to_add)
    if action.assign_correspondent:
        overrides.correspondent_id = action.assign_correspondent.pk
    if action.assign_document_type:
        overrides.document_type_id = action.assign_document_type.pk
    if action.assign_storage_path:
        overrides.storage_path_id = action.assign_storage_path.pk
    if action.assign_owner:
        overrides.owner_id = action.assign_owner.pk
    if action.assign_title:
        # Placeholder rendering happens later, once the Document exists.
        overrides.title = action.assign_title
    if any(
        [
            action.has_assign_view_users,
            action.has_assign_view_groups,
            action.has_assign_change_users,
            action.has_assign_change_groups,
        ],
    ):
        # Union the assigned users/groups with any already present.
        overrides.view_users = list(
            set(
                (overrides.view_users or [])
                + list(action.assign_view_users.values_list("id", flat=True)),
            ),
        )
        overrides.view_groups = list(
            set(
                (overrides.view_groups or [])
                + list(action.assign_view_groups.values_list("id", flat=True)),
            ),
        )
        overrides.change_users = list(
            set(
                (overrides.change_users or [])
                + list(action.assign_change_users.values_list("id", flat=True)),
            ),
        )
        overrides.change_groups = list(
            set(
                (overrides.change_groups or [])
                + list(action.assign_change_groups.values_list("id", flat=True)),
            ),
        )
    if action.has_assign_custom_fields:
        if overrides.custom_fields is None:
            overrides.custom_fields = {}
        overrides.custom_fields.update(
            {
                field.pk: action.assign_custom_fields_values.get(
                    str(field.pk),
                    None,
                )
                for field in action.assign_custom_fields.all()
            },
        )
def apply_removal_to_document(
    action: WorkflowAction,
    document: Document,
    doc_tag_ids: list[int],
):
    """
    Apply removal actions to a Document instance.
    action: WorkflowAction, annotated with 'has_remove_*' boolean fields

    doc_tag_ids is mutated in place so the caller sees the updated tag set.
    """
    if action.remove_all_tags:
        doc_tag_ids.clear()
    else:
        tag_ids_to_remove: set[int] = set()
        for tag in action.remove_tags.all():
            tag_ids_to_remove.add(tag.pk)
            # Removing a tag also removes all of its descendants.
            tag_ids_to_remove.update(int(pk) for pk in tag.get_descendants_pks())
        doc_tag_ids[:] = [t for t in doc_tag_ids if t not in tag_ids_to_remove]
    # For each metadata kind: clear it when "remove all" is set, or when the
    # current value is explicitly listed for removal.
    if action.remove_all_correspondents or (
        document.correspondent
        and action.remove_correspondents.filter(pk=document.correspondent.pk).exists()
    ):
        document.correspondent = None
    if action.remove_all_document_types or (
        document.document_type
        and action.remove_document_types.filter(pk=document.document_type.pk).exists()
    ):
        document.document_type = None
    if action.remove_all_storage_paths or (
        document.storage_path
        and action.remove_storage_paths.filter(pk=document.storage_path.pk).exists()
    ):
        document.storage_path = None
    if action.remove_all_owners or (
        document.owner and action.remove_owners.filter(pk=document.owner.pk).exists()
    ):
        document.owner = None
    if action.remove_all_permissions:
        # merge=False replaces the object's permissions with the empty set.
        permissions = {
            "view": {"users": [], "groups": []},
            "change": {"users": [], "groups": []},
        }
        set_permissions_for_object(
            permissions=permissions,
            object=document,
            merge=False,
        )
    if any(
        [
            action.has_remove_view_users,
            action.has_remove_view_groups,
            action.has_remove_change_users,
            action.has_remove_change_groups,
        ],
    ):
        for user in action.remove_view_users.all():
            remove_perm("view_document", user, document)
        for user in action.remove_change_users.all():
            remove_perm("change_document", user, document)
        for group in action.remove_view_groups.all():
            remove_perm("view_document", group, document)
        for group in action.remove_change_groups.all():
            remove_perm("change_document", group, document)
    if action.remove_all_custom_fields:
        # hard_delete bypasses any soft-delete behavior on the instances.
        CustomFieldInstance.objects.filter(document=document).hard_delete()
    elif action.has_remove_custom_fields:
        CustomFieldInstance.objects.filter(
            field__in=action.remove_custom_fields.all(),
            document=document,
        ).hard_delete()
def apply_removal_to_overrides(
    action: WorkflowAction,
    overrides: DocumentMetadataOverrides,
):
    """
    Apply removal actions to DocumentMetadataOverrides.
    action: WorkflowAction, annotated with 'has_remove_*' boolean fields

    Mirror of apply_removal_to_document for documents still in consumption.
    """
    if action.remove_all_tags:
        overrides.tag_ids = None
    elif overrides.tag_ids:
        tag_ids_to_remove: set[int] = set()
        for tag in action.remove_tags.all():
            tag_ids_to_remove.add(tag.pk)
            # Removing a tag also removes all of its descendants.
            tag_ids_to_remove.update(int(pk) for pk in tag.get_descendants_pks())
        overrides.tag_ids = [t for t in overrides.tag_ids if t not in tag_ids_to_remove]
    # For each metadata kind: clear the override when "remove all" is set, or
    # when the overridden value is explicitly listed for removal.
    if action.remove_all_correspondents or (
        overrides.correspondent_id
        and action.remove_correspondents.filter(pk=overrides.correspondent_id).exists()
    ):
        overrides.correspondent_id = None
    if action.remove_all_document_types or (
        overrides.document_type_id
        and action.remove_document_types.filter(pk=overrides.document_type_id).exists()
    ):
        overrides.document_type_id = None
    if action.remove_all_storage_paths or (
        overrides.storage_path_id
        and action.remove_storage_paths.filter(pk=overrides.storage_path_id).exists()
    ):
        overrides.storage_path_id = None
    if action.remove_all_owners or (
        overrides.owner_id
        and action.remove_owners.filter(pk=overrides.owner_id).exists()
    ):
        overrides.owner_id = None
    if action.remove_all_permissions:
        overrides.view_users = None
        overrides.view_groups = None
        overrides.change_users = None
        overrides.change_groups = None
    elif any(
        [
            action.has_remove_view_users,
            action.has_remove_view_groups,
            action.has_remove_change_users,
            action.has_remove_change_groups,
        ],
    ):
        # Drop only the users/groups explicitly listed by the action.
        if overrides.view_users:
            for user in action.remove_view_users.filter(pk__in=overrides.view_users):
                overrides.view_users.remove(user.pk)
        if overrides.change_users:
            for user in action.remove_change_users.filter(
                pk__in=overrides.change_users,
            ):
                overrides.change_users.remove(user.pk)
        if overrides.view_groups:
            for group in action.remove_view_groups.filter(pk__in=overrides.view_groups):
                overrides.view_groups.remove(group.pk)
        if overrides.change_groups:
            for group in action.remove_change_groups.filter(
                pk__in=overrides.change_groups,
            ):
                overrides.change_groups.remove(group.pk)
    if action.remove_all_custom_fields:
        overrides.custom_fields = None
    elif action.has_remove_custom_fields and overrides.custom_fields:
        for field in action.remove_custom_fields.filter(
            pk__in=overrides.custom_fields.keys(),
        ):
            overrides.custom_fields.pop(field.pk, None)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/workflows/mutations.py",
"license": "GNU General Public License v3.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/documents/workflows/utils.py | import logging
from django.db.models import Exists
from django.db.models import OuterRef
from django.db.models import Prefetch
from documents.models import Workflow
from documents.models import WorkflowAction
from documents.models import WorkflowTrigger
# Module-level logger for workflow lookup helpers.
logger = logging.getLogger("paperless.workflows")
def get_workflows_for_trigger(
trigger_type: WorkflowTrigger.WorkflowTriggerType,
workflow_to_run: Workflow | None = None,
):
"""
Return workflows relevant to a trigger. If a specific workflow is given,
wrap it in a list; otherwise fetch enabled workflows for the trigger with
the prefetches used by the runner.
"""
annotated_actions = (
WorkflowAction.objects.select_related(
"assign_correspondent",
"assign_document_type",
"assign_storage_path",
"assign_owner",
"email",
"webhook",
)
.prefetch_related(
"assign_tags",
"assign_view_users",
"assign_view_groups",
"assign_change_users",
"assign_change_groups",
"assign_custom_fields",
"remove_tags",
"remove_correspondents",
"remove_document_types",
"remove_storage_paths",
"remove_custom_fields",
"remove_owners",
)
.annotate(
has_assign_tags=Exists(
WorkflowAction.assign_tags.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_assign_view_users=Exists(
WorkflowAction.assign_view_users.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_assign_view_groups=Exists(
WorkflowAction.assign_view_groups.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_assign_change_users=Exists(
WorkflowAction.assign_change_users.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_assign_change_groups=Exists(
WorkflowAction.assign_change_groups.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_assign_custom_fields=Exists(
WorkflowAction.assign_custom_fields.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_remove_view_users=Exists(
WorkflowAction.remove_view_users.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_remove_view_groups=Exists(
WorkflowAction.remove_view_groups.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_remove_change_users=Exists(
WorkflowAction.remove_change_users.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_remove_change_groups=Exists(
WorkflowAction.remove_change_groups.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
has_remove_custom_fields=Exists(
WorkflowAction.remove_custom_fields.through.objects.filter(
workflowaction_id=OuterRef("pk"),
),
),
)
)
action_prefetch = Prefetch(
"actions",
queryset=annotated_actions.order_by("order", "pk"),
)
if workflow_to_run is not None:
return (
Workflow.objects.filter(pk=workflow_to_run.pk)
.prefetch_related(
action_prefetch,
"triggers",
)
.distinct()
)
return (
Workflow.objects.filter(enabled=True, triggers__type=trigger_type)
.prefetch_related(
action_prefetch,
"triggers",
)
.order_by("order")
.distinct()
)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/workflows/utils.py",
"license": "GNU General Public License v3.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/documents/workflows/webhooks.py | import ipaddress
import logging
import socket
from urllib.parse import urlparse
import httpx
from celery import shared_task
from django.conf import settings
logger = logging.getLogger("paperless.workflows.webhooks")
class WebhookTransport(httpx.HTTPTransport):
"""
Transport that resolves/validates hostnames and rewrites to a vetted IP
while keeping Host/SNI as the original hostname.
"""
def __init__(
self,
hostname: str,
*args,
allow_internal: bool = False,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.hostname = hostname
self.allow_internal = allow_internal
def handle_request(self, request: httpx.Request) -> httpx.Response:
hostname = request.url.host
if not hostname:
raise httpx.ConnectError("No hostname in request URL")
try:
addr_info = socket.getaddrinfo(hostname, None)
except socket.gaierror as e:
raise httpx.ConnectError(f"Could not resolve hostname: {hostname}") from e
ips = [info[4][0] for info in addr_info if info and info[4]]
if not ips:
raise httpx.ConnectError(f"Could not resolve hostname: {hostname}")
if not self.allow_internal:
for ip_str in ips:
if not WebhookTransport.is_public_ip(ip_str):
raise httpx.ConnectError(
f"Connection blocked: {hostname} resolves to a non-public address",
)
ip_str = ips[0]
formatted_ip = self._format_ip_for_url(ip_str)
new_headers = httpx.Headers(request.headers)
if "host" in new_headers:
del new_headers["host"]
new_headers["Host"] = hostname
new_url = request.url.copy_with(host=formatted_ip)
request = httpx.Request(
method=request.method,
url=new_url,
headers=new_headers,
content=request.stream,
extensions=request.extensions,
)
request.extensions["sni_hostname"] = hostname
return super().handle_request(request)
def _format_ip_for_url(self, ip: str) -> str:
"""
Format IP address for use in URL (wrap IPv6 in brackets)
"""
try:
ip_obj = ipaddress.ip_address(ip)
if ip_obj.version == 6:
return f"[{ip}]"
return ip
except ValueError:
return ip
@staticmethod
def is_public_ip(ip: str | int) -> bool:
try:
obj = ipaddress.ip_address(ip)
return not (
obj.is_private
or obj.is_loopback
or obj.is_link_local
or obj.is_multicast
or obj.is_unspecified
)
except ValueError: # pragma: no cover
return False
@staticmethod
def resolve_first_ip(host: str) -> str | None:
try:
info = socket.getaddrinfo(host, None)
return info[0][4][0] if info else None
except Exception: # pragma: no cover
return None
@shared_task(
retry_backoff=True,
autoretry_for=(httpx.HTTPStatusError,),
max_retries=3,
throws=(httpx.HTTPError,),
)
def send_webhook(
url: str,
data: str | dict,
headers: dict,
files: dict,
*,
as_json: bool = False,
):
p = urlparse(url)
if p.scheme.lower() not in settings.WEBHOOKS_ALLOWED_SCHEMES or not p.hostname:
logger.warning("Webhook blocked: invalid scheme/hostname")
raise ValueError("Invalid URL scheme or hostname.")
port = p.port or (443 if p.scheme == "https" else 80)
if (
len(settings.WEBHOOKS_ALLOWED_PORTS) > 0
and port not in settings.WEBHOOKS_ALLOWED_PORTS
):
logger.warning("Webhook blocked: port not permitted")
raise ValueError("Destination port not permitted.")
transport = WebhookTransport(
hostname=p.hostname,
allow_internal=settings.WEBHOOKS_ALLOW_INTERNAL_REQUESTS,
)
try:
post_args = {
"url": url,
"headers": {
k: v for k, v in (headers or {}).items() if k.lower() != "host"
},
"files": files or None,
}
if as_json:
post_args["json"] = data
elif isinstance(data, dict):
post_args["data"] = data
else:
post_args["content"] = data
with httpx.Client(
transport=transport,
timeout=5.0,
follow_redirects=False,
) as client:
client.post(
**post_args,
).raise_for_status()
logger.info(
f"Webhook sent to {url}",
)
except Exception as e:
logger.error(
f"Failed attempt sending webhook to {url}: {e}",
)
raise e
finally:
transport.close()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/workflows/webhooks.py",
"license": "GNU General Public License v3.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:docker/rootfs/usr/local/bin/deduplicate.py | #!/usr/bin/env python3
"""
File deduplication script that replaces identical files with symlinks.
Uses SHA256 hashing to identify duplicate files.
"""
import hashlib
from collections import defaultdict
from pathlib import Path
import click
import humanize
def calculate_sha256(filepath: Path) -> str | None:
sha256_hash = hashlib.sha256()
try:
with filepath.open("rb") as f:
# Read file in chunks to handle large files efficiently
while chunk := f.read(65536): # 64KB chunks
sha256_hash.update(chunk)
return sha256_hash.hexdigest()
except OSError as e:
click.echo(f"Error reading {filepath}: {e}", err=True)
return None
def find_duplicate_files(directory: Path) -> dict[str, list[Path]]:
"""
Recursively scan directory and group files by their SHA256 hash.
Returns a dictionary mapping hash -> list of file paths.
"""
hash_to_files: dict[str, list[Path]] = defaultdict(list)
for filepath in directory.rglob("*"):
# Skip symlinks
if filepath.is_symlink():
continue
# Skip if not a regular file
if not filepath.is_file():
continue
file_hash = calculate_sha256(filepath)
if file_hash:
hash_to_files[file_hash].append(filepath)
# Filter to only return hashes with duplicates
return {h: files for h, files in hash_to_files.items() if len(files) > 1}
def replace_with_symlinks(
duplicate_groups: dict[str, list[Path]],
*,
dry_run: bool = False,
) -> tuple[int, int]:
"""
Replace duplicate files with symlinks to the first occurrence.
Returns (number_of_files_replaced, space_saved_in_bytes).
"""
total_duplicates = 0
space_saved = 0
for file_hash, file_list in duplicate_groups.items():
# Keep the first file as the original, replace others with symlinks
original_file = file_list[0]
duplicates = file_list[1:]
click.echo(f"Found {len(duplicates)} duplicate(s) of: {original_file}")
for duplicate in duplicates:
try:
# Get file size before deletion
file_size = duplicate.stat().st_size
if dry_run:
click.echo(f" [DRY RUN] Would replace: {duplicate}")
else:
# Remove the duplicate file
duplicate.unlink()
# Create relative symlink if possible, otherwise absolute
try:
# Try to create a relative symlink
rel_path = original_file.relative_to(duplicate.parent)
duplicate.symlink_to(rel_path)
click.echo(f" Replaced: {duplicate} -> {rel_path}")
except ValueError:
# Fall back to absolute path
duplicate.symlink_to(original_file.resolve())
click.echo(f" Replaced: {duplicate} -> {original_file}")
space_saved += file_size
total_duplicates += 1
except OSError as e:
click.echo(f" Error replacing {duplicate}: {e}", err=True)
return total_duplicates, space_saved
@click.command()
@click.argument(
"directory",
type=click.Path(
exists=True,
file_okay=False,
dir_okay=True,
readable=True,
path_type=Path,
),
)
@click.option(
"--dry-run",
is_flag=True,
help="Show what would be done without making changes",
)
@click.option("--verbose", "-v", is_flag=True, help="Show verbose output")
def deduplicate(directory: Path, *, dry_run: bool, verbose: bool) -> None:
"""
Recursively search DIRECTORY for identical files and replace them with symlinks.
Uses SHA256 hashing to identify duplicate files. The first occurrence of each
unique file is kept, and all duplicates are replaced with symlinks pointing to it.
"""
directory = directory.resolve()
click.echo(f"Scanning directory: {directory}")
if dry_run:
click.echo("Running in DRY RUN mode - no changes will be made")
# Find all duplicate files
click.echo("Calculating file hashes...")
duplicate_groups = find_duplicate_files(directory)
if not duplicate_groups:
click.echo("No duplicate files found!")
return
total_files = sum(len(files) - 1 for files in duplicate_groups.values())
click.echo(
f"Found {len(duplicate_groups)} group(s) of duplicates "
f"({total_files} files to deduplicate)",
)
if verbose:
for file_hash, files in duplicate_groups.items():
click.echo(f"Hash: {file_hash}")
for f in files:
click.echo(f" - {f}")
# Replace duplicates with symlinks
click.echo("Processing duplicates...")
num_replaced, space_saved = replace_with_symlinks(duplicate_groups, dry_run=dry_run)
# Summary
click.echo(
f"{'Would replace' if dry_run else 'Replaced'} "
f"{num_replaced} duplicate file(s)",
)
if not dry_run:
click.echo(f"Space saved: {humanize.naturalsize(space_saved, binary=True)}")
if __name__ == "__main__":
deduplicate()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "docker/rootfs/usr/local/bin/deduplicate.py",
"license": "GNU General Public License v3.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/documents/tests/test_api_email.py | import json
import shutil
from unittest import mock
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
from django.core import mail
from django.test import override_settings
from rest_framework import status
from rest_framework.test import APITestCase
from documents.models import Document
from documents.tests.utils import DirectoriesMixin
from documents.tests.utils import SampleDirMixin
class TestEmail(DirectoriesMixin, SampleDirMixin, APITestCase):
ENDPOINT = "/api/documents/email/"
def setUp(self) -> None:
super().setUp()
self.user = User.objects.create_superuser(username="temp_admin")
self.client.force_authenticate(user=self.user)
self.doc1 = Document.objects.create(
title="test1",
mime_type="application/pdf",
content="this is document 1",
checksum="1",
filename="test1.pdf",
archive_checksum="A1",
archive_filename="archive1.pdf",
)
self.doc2 = Document.objects.create(
title="test2",
mime_type="application/pdf",
content="this is document 2",
checksum="2",
filename="test2.pdf",
)
# Copy sample files to document paths (using different files to distinguish versions)
shutil.copy(
self.SAMPLE_DIR / "documents" / "originals" / "0000001.pdf",
self.doc1.archive_path,
)
shutil.copy(
self.SAMPLE_DIR / "documents" / "originals" / "0000002.pdf",
self.doc1.source_path,
)
shutil.copy(
self.SAMPLE_DIR / "documents" / "originals" / "0000003.pdf",
self.doc2.source_path,
)
@override_settings(
EMAIL_ENABLED=True,
EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend",
)
def test_email_success(self) -> None:
"""
GIVEN:
- Multiple existing documents (doc1 with archive, doc2 without)
WHEN:
- API request is made to bulk email documents
THEN:
- Email is sent with all documents attached
- Archive version used by default for doc1
- Original version used for doc2 (no archive available)
"""
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk, self.doc2.pk],
"addresses": "hello@paperless-ngx.com,test@example.com",
"subject": "Bulk email test",
"message": "Here are your documents",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"], "Email sent")
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to, ["hello@paperless-ngx.com", "test@example.com"])
self.assertEqual(email.subject, "Bulk email test")
self.assertEqual(email.body, "Here are your documents")
self.assertEqual(len(email.attachments), 2)
attachment_names = [att[0] for att in email.attachments]
self.assertEqual(len(attachment_names), 2)
self.assertIn(f"{self.doc1!s}.pdf", attachment_names)
self.assertIn(f"{self.doc2!s}.pdf", attachment_names)
doc1_attachment = next(
att for att in email.attachments if att[0] == f"{self.doc1!s}.pdf"
)
archive_size = self.doc1.archive_path.stat().st_size
self.assertEqual(len(doc1_attachment[1]), archive_size)
doc2_attachment = next(
att for att in email.attachments if att[0] == f"{self.doc2!s}.pdf"
)
original_size = self.doc2.source_path.stat().st_size
self.assertEqual(len(doc2_attachment[1]), original_size)
@override_settings(
EMAIL_ENABLED=True,
EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend",
)
def test_email_use_original_version(self) -> None:
"""
GIVEN:
- Documents with archive versions
WHEN:
- API request is made to bulk email with use_archive_version=False
THEN:
- Original files are attached instead of archive versions
"""
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
"use_archive_version": False,
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(mail.outbox), 1)
attachment = mail.outbox[0].attachments[0]
self.assertEqual(attachment[0], f"{self.doc1!s}.pdf")
original_size = self.doc1.source_path.stat().st_size
self.assertEqual(len(attachment[1]), original_size)
def test_email_missing_required_fields(self) -> None:
"""
GIVEN:
- Request with missing required fields
WHEN:
- API request is made to bulk email endpoint
THEN:
- Bad request response is returned
"""
# Missing addresses
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Missing subject
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "test@example.com",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Missing message
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "test@example.com",
"subject": "Test",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Missing documents
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_empty_document_list(self) -> None:
"""
GIVEN:
- Request with empty document list
WHEN:
- API request is made to bulk email endpoint
THEN:
- Bad request response is returned
"""
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_invalid_document_id(self) -> None:
"""
GIVEN:
- Request with non-existent document ID
WHEN:
- API request is made to bulk email endpoint
THEN:
- Bad request response is returned
"""
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [999],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_invalid_email_address(self) -> None:
"""
GIVEN:
- Request with invalid email address
WHEN:
- API request is made to bulk email endpoint
THEN:
- Bad request response is returned
"""
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "invalid-email",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# Test multiple addresses with one invalid
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "valid@example.com,invalid-email",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_email_insufficient_permissions(self) -> None:
"""
GIVEN:
- User without permissions to view document
WHEN:
- API request is made to bulk email documents
THEN:
- Forbidden response is returned
"""
user1 = User.objects.create_user(username="test1")
user1.user_permissions.add(*Permission.objects.filter(codename="view_document"))
doc_owned = Document.objects.create(
title="owned_doc",
mime_type="application/pdf",
checksum="owned",
owner=self.user,
)
self.client.force_authenticate(user1)
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk, doc_owned.pk],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_email_only_requires_view_permission(self) -> None:
"""
GIVEN:
- User having only view documents permission
WHEN:
- API request is made to bulk email documents
THEN:
- Request succeeds
"""
user1 = User.objects.create_user(username="test1")
user1.user_permissions.add(*Permission.objects.filter(codename="view_document"))
self.client.force_authenticate(user1)
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@override_settings(
EMAIL_ENABLED=True,
EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend",
)
def test_email_duplicate_filenames(self) -> None:
"""
GIVEN:
- Multiple documents with the same title
WHEN:
- API request is made to bulk email documents
THEN:
- Filenames are made unique with counters
"""
doc3 = Document.objects.create(
title="test1",
mime_type="application/pdf",
content="this is document 3",
checksum="3",
filename="test3.pdf",
)
shutil.copy(self.SAMPLE_DIR / "simple.pdf", doc3.source_path)
doc4 = Document.objects.create(
title="test1",
mime_type="application/pdf",
content="this is document 4",
checksum="4",
filename="test4.pdf",
)
shutil.copy(self.SAMPLE_DIR / "simple.pdf", doc4.source_path)
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk, doc3.pk, doc4.pk],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(mail.outbox), 1)
attachment_names = [att[0] for att in mail.outbox[0].attachments]
self.assertEqual(len(attachment_names), 3)
self.assertIn(f"{self.doc1!s}.pdf", attachment_names)
self.assertIn(f"{doc3!s}_01.pdf", attachment_names)
self.assertIn(f"{doc3!s}_02.pdf", attachment_names)
@mock.patch(
"django.core.mail.message.EmailMessage.send",
side_effect=Exception("Email error"),
)
def test_email_send_error(self, mocked_send) -> None:
"""
GIVEN:
- Existing documents
WHEN:
- API request is made to bulk email and error occurs during email send
THEN:
- Server error response is returned
"""
response = self.client.post(
self.ENDPOINT,
json.dumps(
{
"documents": [self.doc1.pk],
"addresses": "test@example.com",
"subject": "Test",
"message": "Test message",
},
),
content_type="application/json",
)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
self.assertIn("Error emailing documents", response.content.decode())
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_api_email.py",
"license": "GNU General Public License v3.0",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless_mail/filters.py | from django_filters import FilterSet
from paperless_mail.models import ProcessedMail
class ProcessedMailFilterSet(FilterSet):
class Meta:
model = ProcessedMail
fields = {
"rule": ["exact"],
"status": ["exact"],
}
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless_mail/filters.py",
"license": "GNU General Public License v3.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/documents/tests/test_tag_hierarchy.py | from unittest import mock
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from documents import bulk_edit
from documents.models import Document
from documents.models import Tag
from documents.models import Workflow
from documents.models import WorkflowAction
from documents.models import WorkflowTrigger
from documents.serialisers import TagSerializer
from documents.signals.handlers import run_workflows
class TestTagHierarchy(APITestCase):
def setUp(self) -> None:
self.user = User.objects.create_superuser(username="admin")
self.client.force_authenticate(user=self.user)
self.parent = Tag.objects.create(name="Parent")
self.child = Tag.objects.create(name="Child", tn_parent=self.parent)
patcher = mock.patch("documents.bulk_edit.bulk_update_documents.delay")
self.async_task = patcher.start()
self.addCleanup(patcher.stop)
self.document = Document.objects.create(
title="doc",
content="",
checksum="1",
mime_type="application/pdf",
)
def test_document_api_add_child_adds_parent(self) -> None:
self.client.patch(
f"/api/documents/{self.document.pk}/",
{"tags": [self.child.pk]},
format="json",
)
self.document.refresh_from_db()
tags = set(self.document.tags.values_list("pk", flat=True))
assert tags == {self.parent.pk, self.child.pk}
def test_document_api_remove_parent_removes_children(self) -> None:
self.document.add_nested_tags([self.parent, self.child])
self.client.patch(
f"/api/documents/{self.document.pk}/",
{"tags": [self.child.pk]},
format="json",
)
self.document.refresh_from_db()
assert self.document.tags.count() == 0
def test_document_api_remove_parent_removes_child(self) -> None:
self.document.add_nested_tags([self.child])
self.client.patch(
f"/api/documents/{self.document.pk}/",
{"tags": []},
format="json",
)
self.document.refresh_from_db()
assert self.document.tags.count() == 0
def test_bulk_edit_respects_hierarchy(self) -> None:
bulk_edit.add_tag([self.document.pk], self.child.pk)
self.document.refresh_from_db()
tags = set(self.document.tags.values_list("pk", flat=True))
assert tags == {self.parent.pk, self.child.pk}
bulk_edit.remove_tag([self.document.pk], self.parent.pk)
self.document.refresh_from_db()
assert self.document.tags.count() == 0
bulk_edit.modify_tags([self.document.pk], [self.child.pk], [])
self.document.refresh_from_db()
tags = set(self.document.tags.values_list("pk", flat=True))
assert tags == {self.parent.pk, self.child.pk}
bulk_edit.modify_tags([self.document.pk], [], [self.parent.pk])
self.document.refresh_from_db()
assert self.document.tags.count() == 0
def test_workflow_actions(self) -> None:
workflow = Workflow.objects.create(name="wf", order=0)
trigger = WorkflowTrigger.objects.create(
type=WorkflowTrigger.WorkflowTriggerType.DOCUMENT_ADDED,
)
assign_action = WorkflowAction.objects.create()
assign_action.assign_tags.add(self.child)
workflow.triggers.add(trigger)
workflow.actions.add(assign_action)
run_workflows(trigger.type, self.document)
self.document.refresh_from_db()
tags = set(self.document.tags.values_list("pk", flat=True))
assert tags == {self.parent.pk, self.child.pk}
# removal
removal_action = WorkflowAction.objects.create(
type=WorkflowAction.WorkflowActionType.REMOVAL,
)
removal_action.remove_tags.add(self.parent)
workflow.actions.clear()
workflow.actions.add(removal_action)
run_workflows(trigger.type, self.document)
self.document.refresh_from_db()
assert self.document.tags.count() == 0
def test_tag_view_parent_update_adds_parent_to_docs(self) -> None:
orphan = Tag.objects.create(name="Orphan")
self.document.tags.add(orphan)
self.client.patch(
f"/api/tags/{orphan.pk}/",
{"parent": self.parent.pk},
format="json",
)
self.document.refresh_from_db()
tags = set(self.document.tags.values_list("pk", flat=True))
assert tags == {self.parent.pk, orphan.pk}
def test_child_document_count_included_when_parent_paginated(self) -> None:
self.document.tags.add(self.child)
response = self.client.get(
"/api/tags/",
{"page_size": 1, "ordering": "-name"},
)
assert response.status_code == 200
assert response.data["results"][0]["id"] == self.parent.pk
children = response.data["results"][0]["children"]
assert len(children) == 1
child_entry = children[0]
assert child_entry["id"] == self.child.pk
assert child_entry["document_count"] == 1
def test_tag_serializer_populates_document_filter_context(self) -> None:
context = {}
serializer = TagSerializer(self.parent, context=context)
assert serializer.data # triggers serialization
assert "document_count_filter" in context
def test_cannot_set_parent_to_self(self) -> None:
tag = Tag.objects.create(name="Selfie")
resp = self.client.patch(
f"/api/tags/{tag.pk}/",
{"parent": tag.pk},
format="json",
)
assert resp.status_code == 400
assert "Cannot set itself as parent" in str(resp.data["parent"])
def test_cannot_set_parent_to_descendant(self) -> None:
a = Tag.objects.create(name="A")
b = Tag.objects.create(name="B", tn_parent=a)
c = Tag.objects.create(name="C", tn_parent=b)
# Attempt to set A's parent to C (descendant) should fail
resp = self.client.patch(
f"/api/tags/{a.pk}/",
{"parent": c.pk},
format="json",
)
assert resp.status_code == 400
assert "Cannot set parent to a descendant" in str(resp.data["parent"])
def test_max_depth_on_create(self) -> None:
a = Tag.objects.create(name="A1")
b = Tag.objects.create(name="B1", tn_parent=a)
c = Tag.objects.create(name="C1", tn_parent=b)
d = Tag.objects.create(name="D1", tn_parent=c)
# Creating E under D yields depth 5: allowed
resp_ok = self.client.post(
"/api/tags/",
{"name": "E1", "parent": d.pk},
format="json",
)
assert resp_ok.status_code in (200, 201)
e_id = (
resp_ok.data["id"] if resp_ok.status_code == 201 else resp_ok.data.get("id")
)
assert e_id is not None
# Creating F under E would yield depth 6: rejected
resp_fail = self.client.post(
"/api/tags/",
{"name": "F1", "parent": e_id},
format="json",
)
assert resp_fail.status_code == 400
assert "parent" in resp_fail.data
assert "Maximum nesting depth exceeded" in str(resp_fail.data["parent"])
def test_max_depth_on_move_subtree(self) -> None:
a = Tag.objects.create(name="A2")
b = Tag.objects.create(name="B2", tn_parent=a)
c = Tag.objects.create(name="C2", tn_parent=b)
d = Tag.objects.create(name="D2", tn_parent=c)
x = Tag.objects.create(name="X2")
y = Tag.objects.create(name="Y2", tn_parent=x)
assert y.parent_pk == x.pk
# Moving X under D would make deepest node Y exceed depth 5 -> reject
resp_fail = self.client.patch(
f"/api/tags/{x.pk}/",
{"parent": d.pk},
format="json",
)
assert resp_fail.status_code == 400
assert "Maximum nesting depth exceeded" in str(
resp_fail.data["parent"],
)
# Moving X under C (depth 3) should be allowed (deepest becomes 5)
resp_ok = self.client.patch(
f"/api/tags/{x.pk}/",
{"parent": c.pk},
format="json",
)
assert resp_ok.status_code in (200, 202)
x.refresh_from_db()
assert x.parent_pk == c.id
def test_is_root_filter_returns_only_root_tags(self) -> None:
other_root = Tag.objects.create(name="Other parent")
response = self.client.get(
"/api/tags/",
{"is_root": "true"},
)
assert response.status_code == 200
assert response.data["count"] == 2
returned_ids = {row["id"] for row in response.data["results"]}
assert self.child.pk not in returned_ids
assert self.parent.pk in returned_ids
assert other_root.pk in returned_ids
parent_entry = next(
row for row in response.data["results"] if row["id"] == self.parent.pk
)
assert any(child["id"] == self.child.pk for child in parent_entry["children"])
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_tag_hierarchy.py",
"license": "GNU General Public License v3.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/templating/environment.py | from jinja2.sandbox import SandboxedEnvironment
class JinjaEnvironment(SandboxedEnvironment):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.undefined_tracker = None
def is_safe_callable(self, obj):
# Block access to .save() and .delete() methods
if callable(obj) and getattr(obj, "__name__", None) in (
"save",
"delete",
"update",
):
return False
# Call the parent method for other cases
return super().is_safe_callable(obj)
_template_environment = JinjaEnvironment(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=False,
autoescape=False,
extensions=["jinja2.ext.loopcontrols"],
)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/templating/environment.py",
"license": "GNU General Public License v3.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/documents/templating/filters.py | from datetime import date
from datetime import datetime
from babel import Locale
from babel import dates
from django.utils.dateparse import parse_date
from django.utils.dateparse import parse_datetime
def localize_date(value: date | datetime | str, format: str, locale: str) -> str:
"""
Format a date, datetime or str object into a localized string using Babel.
Args:
value (date | datetime | str): The date or datetime to format. If a datetime
is provided, it should be timezone-aware (e.g., UTC from a Django DB object).
if str is provided is is parsed as date.
format (str): The format to use. Can be one of Babel's preset formats
('short', 'medium', 'long', 'full') or a custom pattern string.
locale (str): The locale code (e.g., 'en_US', 'fr_FR') to use for
localization.
Returns:
str: The localized, formatted date string.
Raises:
TypeError: If `value` is not a date, datetime or str instance.
"""
if isinstance(value, str):
value = parse_datetime(value)
try:
Locale.parse(locale)
except Exception as e:
raise ValueError(f"Invalid locale identifier: {locale}") from e
if isinstance(value, datetime):
return dates.format_datetime(value, format=format, locale=locale)
elif isinstance(value, date):
return dates.format_date(value, format=format, locale=locale)
else:
raise TypeError(f"Unsupported type {type(value)} for localize_date")
def format_datetime(value: str | datetime, format: str) -> str:
if isinstance(value, str):
value = parse_date(value)
return value.strftime(format=format)
def get_cf_value(
custom_field_data: dict[str, dict[str, str]],
name: str,
default: str | None = None,
) -> str | None:
if name in custom_field_data and custom_field_data[name]["value"] is not None:
return custom_field_data[name]["value"]
elif default is not None:
return default
return None
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/templating/filters.py",
"license": "GNU General Public License v3.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/documents/tests/test_filters.py | import datetime
from typing import Any
from typing import Literal
import pytest
from documents.templating.filters import localize_date
class TestDateLocalization:
"""
Groups all tests related to the `localize_date` function.
"""
TEST_DATE = datetime.date(2023, 10, 26)
TEST_DATETIME = datetime.datetime(
2023,
10,
26,
14,
30,
5,
tzinfo=datetime.timezone.utc,
)
TEST_DATETIME_STRING: str = "2023-10-26T14:30:05+00:00"
TEST_DATE_STRING: str = "2023-10-26"
@pytest.mark.parametrize(
"value, format_style, locale_str, expected_output",
[
pytest.param(
TEST_DATE,
"EEEE, MMM d, yyyy",
"en_US",
"Thursday, Oct 26, 2023",
id="date-en_US-custom",
),
pytest.param(
TEST_DATE,
"dd.MM.yyyy",
"de_DE",
"26.10.2023",
id="date-de_DE-custom",
),
# German weekday and month name translation
pytest.param(
TEST_DATE,
"EEEE",
"de_DE",
"Donnerstag",
id="weekday-de_DE",
),
pytest.param(
TEST_DATE,
"MMMM",
"de_DE",
"Oktober",
id="month-de_DE",
),
# French weekday and month name translation
pytest.param(
TEST_DATE,
"EEEE",
"fr_FR",
"jeudi",
id="weekday-fr_FR",
),
pytest.param(
TEST_DATE,
"MMMM",
"fr_FR",
"octobre",
id="month-fr_FR",
),
],
)
def test_localize_date_with_date_objects(
self,
value: datetime.date,
format_style: str,
locale_str: str,
expected_output: str,
):
"""
Tests `localize_date` with `date` objects across different locales and formats.
"""
assert localize_date(value, format_style, locale_str) == expected_output
@pytest.mark.parametrize(
"value, format_style, locale_str, expected_output",
[
pytest.param(
TEST_DATETIME,
"yyyy.MM.dd G 'at' HH:mm:ss zzz",
"en_US",
"2023.10.26 AD at 14:30:05 UTC",
id="datetime-en_US-custom",
),
pytest.param(
TEST_DATETIME,
"dd.MM.yyyy",
"fr_FR",
"26.10.2023",
id="date-fr_FR-custom",
),
# Spanish weekday and month translation
pytest.param(
TEST_DATETIME,
"EEEE",
"es_ES",
"jueves",
id="weekday-es_ES",
),
pytest.param(
TEST_DATETIME,
"MMMM",
"es_ES",
"octubre",
id="month-es_ES",
),
# Italian weekday and month translation
pytest.param(
TEST_DATETIME,
"EEEE",
"it_IT",
"giovedì",
id="weekday-it_IT",
),
pytest.param(
TEST_DATETIME,
"MMMM",
"it_IT",
"ottobre",
id="month-it_IT",
),
],
)
def test_localize_date_with_datetime_objects(
self,
value: datetime.datetime,
format_style: str,
locale_str: str,
expected_output: str,
):
# To handle the non-breaking space in French and other locales
result = localize_date(value, format_style, locale_str)
assert result.replace("\u202f", " ") == expected_output.replace("\u202f", " ")
@pytest.mark.parametrize(
"invalid_value",
[
1698330605,
None,
[],
{},
],
)
def test_localize_date_raises_type_error_for_invalid_input(
self,
invalid_value: None | list[object] | dict[Any, Any] | Literal[1698330605],
):
with pytest.raises(TypeError) as excinfo:
localize_date(invalid_value, "medium", "en_US")
assert f"Unsupported type {type(invalid_value)}" in str(excinfo.value)
def test_localize_date_raises_error_for_invalid_locale(self) -> None:
with pytest.raises(ValueError) as excinfo:
localize_date(self.TEST_DATE, "medium", "invalid_locale_code")
assert "Invalid locale identifier" in str(excinfo.value)
@pytest.mark.parametrize(
"value, format_style, locale_str, expected_output",
[
pytest.param(
TEST_DATETIME_STRING,
"EEEE, MMM d, yyyy",
"en_US",
"Thursday, Oct 26, 2023",
id="date-en_US-custom",
),
pytest.param(
TEST_DATETIME_STRING,
"dd.MM.yyyy",
"de_DE",
"26.10.2023",
id="date-de_DE-custom",
),
# German weekday and month name translation
pytest.param(
TEST_DATETIME_STRING,
"EEEE",
"de_DE",
"Donnerstag",
id="weekday-de_DE",
),
pytest.param(
TEST_DATETIME_STRING,
"MMMM",
"de_DE",
"Oktober",
id="month-de_DE",
),
# French weekday and month name translation
pytest.param(
TEST_DATETIME_STRING,
"EEEE",
"fr_FR",
"jeudi",
id="weekday-fr_FR",
),
pytest.param(
TEST_DATETIME_STRING,
"MMMM",
"fr_FR",
"octobre",
id="month-fr_FR",
),
],
)
def test_localize_date_with_datetime_string(
self,
value: str,
format_style: str,
locale_str: str,
expected_output: str,
):
"""
Tests `localize_date` with `date` string across different locales and formats.
"""
assert localize_date(value, format_style, locale_str) == expected_output
@pytest.mark.parametrize(
"value, format_style, locale_str, expected_output",
[
pytest.param(
TEST_DATE_STRING,
"EEEE, MMM d, yyyy",
"en_US",
"Thursday, Oct 26, 2023",
id="date-en_US-custom",
),
pytest.param(
TEST_DATE_STRING,
"dd.MM.yyyy",
"de_DE",
"26.10.2023",
id="date-de_DE-custom",
),
# German weekday and month name translation
pytest.param(
TEST_DATE_STRING,
"EEEE",
"de_DE",
"Donnerstag",
id="weekday-de_DE",
),
pytest.param(
TEST_DATE_STRING,
"MMMM",
"de_DE",
"Oktober",
id="month-de_DE",
),
# French weekday and month name translation
pytest.param(
TEST_DATE_STRING,
"EEEE",
"fr_FR",
"jeudi",
id="weekday-fr_FR",
),
pytest.param(
TEST_DATE_STRING,
"MMMM",
"fr_FR",
"octobre",
id="month-fr_FR",
),
],
)
def test_localize_date_with_date_string(
self,
value: str,
format_style: str,
locale_str: str,
expected_output: str,
):
"""
Tests `localize_date` with `date` string across different locales and formats.
"""
assert localize_date(value, format_style, locale_str) == expected_output
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_filters.py",
"license": "GNU General Public License v3.0",
"lines": 280,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless/tests/test_utils.py | import logging
import pytest
from paperless import utils
from paperless.utils import ocr_to_dateparser_languages
@pytest.mark.parametrize(
("ocr_language", "expected"),
[
# One language
("eng", ["en"]),
# Multiple languages
("fra+ita+lao", ["fr", "it", "lo"]),
# Languages that don't have a two-letter equivalent
("fil", ["fil"]),
# Languages with a script part supported by dateparser
("aze_cyrl+srp_latn", ["az-Cyrl", "sr-Latn"]),
# Languages with a script part not supported by dateparser
# In this case, default to the language without script
("deu_frak", ["de"]),
# Traditional and simplified chinese don't have the same name in dateparser,
# so they're converted to the general chinese language
("chi_tra+chi_sim", ["zh"]),
# If a language is not supported by dateparser, fallback to the supported ones
("eng+unsupported_language+por", ["en", "pt"]),
# If no language is supported, fallback to default
("unsupported1+unsupported2", []),
# Duplicate languages, should not duplicate in result
("eng+eng", ["en"]),
# Language with script, but script is not mapped
("ita_unknownscript", ["it"]),
],
)
def test_ocr_to_dateparser_languages(ocr_language, expected):
assert sorted(ocr_to_dateparser_languages(ocr_language)) == sorted(expected)
def test_ocr_to_dateparser_languages_exception(monkeypatch, caplog):
# Patch LocaleDataLoader.get_locale_map to raise an exception
class DummyLoader:
def get_locale_map(self, locales=None):
raise RuntimeError("Simulated error")
with caplog.at_level(logging.WARNING):
monkeypatch.setattr(utils, "LocaleDataLoader", lambda: DummyLoader())
result = utils.ocr_to_dateparser_languages("eng+fra")
assert result == []
assert (
"Set PAPERLESS_DATE_PARSER_LANGUAGES parameter to avoid this" in caplog.text
)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/tests/test_utils.py",
"license": "GNU General Public License v3.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/documents/tests/test_caching.py | import pickle
from documents.caching import StoredLRUCache
def test_lru_cache_entries() -> None:
CACHE_TTL = 1
# LRU cache with a capacity of 2 elements
cache = StoredLRUCache("test_lru_cache_key", 2, backend_ttl=CACHE_TTL)
cache.set(1, 1)
cache.set(2, 2)
assert cache.get(2) == 2
assert cache.get(1) == 1
# The oldest entry (2) should be removed
cache.set(3, 3)
assert cache.get(3) == 3
assert not cache.get(2)
assert cache.get(1) == 1
# Save the cache, restore it and check it overwrites the current cache in memory
cache.save()
cache.set(4, 4)
assert not cache.get(3)
cache.load()
assert not cache.get(4)
assert cache.get(3) == 3
assert cache.get(1) == 1
def test_stored_lru_cache_key_ttl(mocker) -> None:
mock_backend = mocker.Mock()
cache = StoredLRUCache("test_key", backend=mock_backend, backend_ttl=321)
# Simulate storing values
cache.set("x", "X")
cache.set("y", "Y")
cache.save()
# Assert backend.set was called with pickled data, key and TTL
mock_backend.set.assert_called_once()
key, data, timeout = mock_backend.set.call_args[0]
assert key == "test_key"
assert timeout == 321
assert pickle.loads(data) == {"x": "X", "y": "Y"}
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/documents/tests/test_caching.py",
"license": "GNU General Public License v3.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless/db_cache.py | from cachalot.api import invalidate as cachalot_invalidate
from cachalot.utils import get_query_cache_key
from cachalot.utils import get_table_cache_key
PREFIX = "pngx_cachalot_"
def custom_get_query_cache_key(compiler):
return PREFIX + get_query_cache_key(compiler)
def custom_get_table_cache_key(db_alias, table):
return PREFIX + get_table_cache_key(db_alias, table)
def invalidate_db_cache() -> None:
return cachalot_invalidate(cache_alias="read-cache")
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/db_cache.py",
"license": "GNU General Public License v3.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
paperless-ngx/paperless-ngx:src/paperless/tests/test_db_cache.py | import os
import time
from unittest.mock import patch
import pytest
from cachalot.settings import cachalot_settings
from django.conf import settings
from django.db import connection
from django.test import override_settings
from django.test.utils import CaptureQueriesContext
from documents.models import Tag
from paperless.db_cache import invalidate_db_cache
from paperless.settings import _parse_cachalot_settings
from paperless.settings import _parse_caches
def test_all_redis_caches_have_same_custom_prefix(monkeypatch) -> None:
"""
Check that when setting a custom Redis prefix,
it is set for both the Django default cache and the read cache.
"""
from paperless import settings
monkeypatch.setattr(settings, "_REDIS_KEY_PREFIX", "test_a_custom_key_prefix")
caches = _parse_caches()
assert caches["read-cache"]["KEY_PREFIX"] == "test_a_custom_key_prefix"
assert caches["default"]["KEY_PREFIX"] == "test_a_custom_key_prefix"
class TestDbCacheSettings:
def test_cachalot_default_settings(self) -> None:
# Cachalot must be installed even if disabled,
# so the cache can be invalidated anytime
assert "cachalot" not in settings.INSTALLED_APPS
cachalot_settings = _parse_cachalot_settings()
caches = _parse_caches()
# Default settings
assert not cachalot_settings["CACHALOT_ENABLED"]
assert cachalot_settings["CACHALOT_TIMEOUT"] == 3600
assert caches["read-cache"]["KEY_PREFIX"] == ""
assert caches["read-cache"]["LOCATION"] == "redis://localhost:6379"
# Fixed settings
assert cachalot_settings["CACHALOT_CACHE"] == "read-cache"
assert (
cachalot_settings["CACHALOT_QUERY_KEYGEN"]
== "paperless.db_cache.custom_get_query_cache_key"
)
assert (
cachalot_settings["CACHALOT_TABLE_KEYGEN"]
== "paperless.db_cache.custom_get_table_cache_key"
)
assert cachalot_settings["CACHALOT_FINAL_SQL_CHECK"] is True
@patch.dict(
os.environ,
{
"PAPERLESS_DB_READ_CACHE_ENABLED": "true",
"PAPERLESS_READ_CACHE_REDIS_URL": "redis://localhost:6380/7",
"PAPERLESS_READ_CACHE_TTL": "7200",
},
)
def test_cachalot_custom_settings(self) -> None:
settings = _parse_cachalot_settings()
assert settings["CACHALOT_ENABLED"]
assert settings["CACHALOT_TIMEOUT"] == 7200
assert settings["CACHALOT_CACHE"] == "read-cache"
assert (
settings["CACHALOT_QUERY_KEYGEN"]
== "paperless.db_cache.custom_get_query_cache_key"
)
assert (
settings["CACHALOT_TABLE_KEYGEN"]
== "paperless.db_cache.custom_get_table_cache_key"
)
assert settings["CACHALOT_FINAL_SQL_CHECK"] is True
@pytest.mark.parametrize(
("env_var_ttl", "expected_cachalot_timeout"),
[
# 0 or less will be ignored, and the default TTL will be set
("0", 3600),
("-1", 3600),
("-500000", 3600),
# Any positive value will be set, for a maximum of one year
("1", 1),
("7524", 7524),
("99999999999999", 31536000),
],
)
def test_cachalot_ttl_parsing(
self,
env_var_ttl: int,
expected_cachalot_timeout: int,
) -> None:
with patch.dict(os.environ, {"PAPERLESS_READ_CACHE_TTL": f"{env_var_ttl}"}):
cachalot_timeout = _parse_cachalot_settings()["CACHALOT_TIMEOUT"]
assert cachalot_timeout == expected_cachalot_timeout
@override_settings(
CACHALOT_ENABLED=True,
CACHALOT_TIMEOUT=1,
)
@pytest.mark.django_db(transaction=True)
def test_cache_hit_when_enabled() -> None:
cachalot_settings.reload()
assert cachalot_settings.CACHALOT_ENABLED
assert cachalot_settings.CACHALOT_TIMEOUT == 1
assert settings.CACHALOT_TIMEOUT == 1
# Read a table to populate the cache
list(list(Tag.objects.values_list("id", flat=True)))
# Invalidate the cache then read the database, there should be DB hit
invalidate_db_cache()
with CaptureQueriesContext(connection) as ctx:
list(list(Tag.objects.values_list("id", flat=True)))
assert len(ctx)
# Doing the same request again should hit the cache, not the DB
with CaptureQueriesContext(connection) as ctx:
list(list(Tag.objects.values_list("id", flat=True)))
assert not len(ctx)
# Wait the end of TTL
# Redis expire accuracy should be between 0 and 1 ms
time.sleep(1.002)
# Read the DB again. The DB should be hit because the cache has expired
with CaptureQueriesContext(connection) as ctx:
list(list(Tag.objects.values_list("id", flat=True)))
assert len(ctx)
# Invalidate the cache at the end of test
invalidate_db_cache()
@pytest.mark.django_db(transaction=True)
def test_cache_is_disabled_by_default() -> None:
cachalot_settings.reload()
# Invalidate the cache just in case
invalidate_db_cache()
# Read the table multiple times: the DB should always be hit without cache
for _ in range(3):
with CaptureQueriesContext(connection) as ctx:
list(list(Tag.objects.values_list("id", flat=True)))
assert len(ctx)
# Invalidate the cache at the end of test
invalidate_db_cache()
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/tests/test_db_cache.py",
"license": "GNU General Public License v3.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
paperless-ngx/paperless-ngx:src/paperless/utils.py | import logging
from dateparser.languages.loader import LocaleDataLoader
logger = logging.getLogger("paperless.utils")
OCR_TO_DATEPARSER_LANGUAGES = {
"""
Translation map from languages supported by Tesseract OCR
to languages supported by dateparser.
To add a language, make sure it is supported by both libraries.
The ISO 639-2 will help you link a 3-char to 2-char language code.
Links:
- Tesseract languages: https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html
- Python dateparser languages: https://dateparser.readthedocs.io/en/latest/supported_locales.html
- ISO 639-2: https://www.loc.gov/standards/iso639-2/php/code_list.php
"""
# TODO check these Dateparser languages as they are not referenced on the ISO639-2 standard,
# so we didn't find the equivalent in Tesseract:
# agq, asa, bez, brx, cgg, ckb, dav, dje, dyo, ebu, guz, jgo, jmc, kde, kea, khq, kln,
# ksb, ksf, ksh, lag, lkt, lrc, luy, mer, mfe, mgh, mgo, mua, mzn, naq, nmg, nnh, nus,
# rof, rwk, saq, sbp, she, ses, shi, teo, twq, tzm, vun, wae, xog, yav, yue
"afr": "af",
"amh": "am",
"ara": "ar",
"asm": "as",
"ast": "ast",
"aze": "az",
"bel": "be",
"bul": "bg",
"ben": "bn",
"bod": "bo",
"bre": "br",
"bos": "bs",
"cat": "ca",
"cher": "chr",
"ces": "cs",
"cym": "cy",
"dan": "da",
"deu": "de",
"dzo": "dz",
"ell": "el",
"eng": "en",
"epo": "eo",
"spa": "es",
"est": "et",
"eus": "eu",
"fas": "fa",
"fin": "fi",
"fil": "fil",
"fao": "fo", # codespell:ignore
"fra": "fr",
"fry": "fy",
"gle": "ga",
"gla": "gd",
"glg": "gl",
"guj": "gu",
"heb": "he",
"hin": "hi",
"hrv": "hr",
"hun": "hu",
"hye": "hy",
"ind": "id",
"isl": "is",
"ita": "it",
"jpn": "ja",
"kat": "ka",
"kaz": "kk",
"khm": "km",
"knda": "kn",
"kor": "ko",
"kir": "ky",
"ltz": "lb",
"lao": "lo",
"lit": "lt",
"lav": "lv",
"mal": "ml",
"mon": "mn",
"mar": "mr",
"msa": "ms",
"mlt": "mt",
"mya": "my",
"nep": "ne",
"nld": "nl",
"ori": "or",
"pan": "pa",
"pol": "pl",
"pus": "ps",
"por": "pt",
"que": "qu",
"ron": "ro",
"rus": "ru",
"sin": "si",
"slk": "sk",
"slv": "sl",
"sqi": "sq",
"srp": "sr",
"swe": "sv",
"swa": "sw",
"tam": "ta",
"tel": "te", # codespell:ignore
"tha": "th", # codespell:ignore
"tir": "ti",
"tgl": "tl",
"ton": "to",
"tur": "tr",
"uig": "ug",
"ukr": "uk",
"urd": "ur",
"uzb": "uz",
"via": "vi",
"yid": "yi",
"yor": "yo",
"chi": "zh",
}
def ocr_to_dateparser_languages(ocr_languages: str) -> list[str]:
"""
Convert Tesseract OCR_LANGUAGE codes (ISO 639-2, e.g. "eng+fra", with optional scripts like "aze_Cyrl")
into a list of locales compatible with the `dateparser` library.
- If a script is provided (e.g., "aze_Cyrl"), attempts to use the full locale (e.g., "az-Cyrl").
Falls back to the base language (e.g., "az") if needed.
- If a language cannot be mapped or validated, it is skipped with a warning.
- Returns a list of valid locales, or an empty list if none could be converted.
"""
loader = LocaleDataLoader()
result = []
try:
for ocr_language in ocr_languages.split("+"):
# Split into language and optional script
ocr_lang_part, *script = ocr_language.split("_")
ocr_script_part = script[0] if script else None
language_part = OCR_TO_DATEPARSER_LANGUAGES.get(ocr_lang_part)
if language_part is None:
logger.debug(
f'Unable to map OCR language "{ocr_lang_part}" to dateparser locale. ',
)
continue
# Ensure base language is supported by dateparser
loader.get_locale_map(locales=[language_part])
# Try to add the script part if it's supported by dateparser
if ocr_script_part:
dateparser_language = f"{language_part}-{ocr_script_part.title()}"
try:
loader.get_locale_map(locales=[dateparser_language])
except Exception:
logger.info(
f"Language variant '{dateparser_language}' not supported by dateparser; falling back to base language '{language_part}'. You can manually set PAPERLESS_DATE_PARSER_LANGUAGES if needed.",
)
dateparser_language = language_part
else:
dateparser_language = language_part
if dateparser_language not in result:
result.append(dateparser_language)
except Exception as e:
logger.warning(
f"Error auto-configuring dateparser languages. Set PAPERLESS_DATE_PARSER_LANGUAGES parameter to avoid this. Detail: {e}",
)
return []
if not result:
logger.info(
"Unable to automatically determine dateparser languages from OCR_LANGUAGE, falling back to multi-language support.",
)
return result
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/utils.py",
"license": "GNU General Public License v3.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/paperless/validators.py | from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import UploadedFile
from lxml import etree
ALLOWED_SVG_TAGS: set[str] = {
# Basic shapes
"svg", # Root SVG element
"g", # Group elements together
"path", # Draw complex shapes with commands
"rect", # Rectangle
"circle", # Circle
"ellipse", # Ellipse/oval
"line", # Straight line
"polyline", # Connected lines (open path)
"polygon", # Connected lines (closed path)
# Text
"text", # Text container
"tspan", # Text span within text
"textpath", # Text along a path
"style", # Embedded CSS
# Definitions and reusable content
"defs", # Container for reusable elements
"symbol", # Reusable graphic template
"use", # Reference/instantiate reusable elements
"marker", # Arrowheads and path markers
"pattern", # Repeating pattern fills
"mask", # Masking effects
# Gradients
"lineargradient", # Linear gradient fill
"radialgradient", # Radial gradient fill
"stop", # Gradient color stop
# Clipping
"clippath", # Clipping path definition
# Metadata
"title", # Accessible title
"desc", # Accessible description
"metadata", # Document metadata
}
ALLOWED_SVG_ATTRIBUTES: set[str] = {
# Core attributes
"id", # Unique identifier
"class", # CSS class names
"style", # Inline CSS styles (validate content separately!)
# Positioning and sizing
"x", # X coordinate
"y", # Y coordinate
"cx", # Center X coordinate (circle/ellipse)
"cy", # Center Y coordinate (circle/ellipse)
"r", # Radius (circle)
"rx", # X radius (ellipse, rounded corners)
"ry", # Y radius (ellipse, rounded corners)
"width", # Width
"height", # Height
"x1", # Start X (line, gradient)
"y1", # Start Y (line, gradient)
"x2", # End X (line, gradient)
"y2", # End Y (line, gradient)
"dx", # X offset (text)
"dy", # Y offset (text)
"points", # Point list for polyline/polygon
# Path data
"d", # Path commands and coordinates
# Fill properties
"fill", # Fill color or none
"fill-opacity", # Fill transparency
"fill-rule", # Fill algorithm (nonzero/evenodd)
"color", # Current color
# Stroke properties
"stroke", # Stroke color or none
"stroke-width", # Stroke thickness
"stroke-opacity", # Stroke transparency
"stroke-linecap", # Line ending style (butt/round/square)
"stroke-linejoin", # Corner style (miter/round/bevel)
"stroke-miterlimit", # Miter join limit
"stroke-dasharray", # Dash pattern
"stroke-dashoffset", # Dash pattern offset
"vector-effect", # Non-scaling stroke, etc.
"clip-rule", # Rule for clipping paths
# Transforms and positioning
"overflow", # Overflow behavior
"transform", # Transformations (translate/rotate/scale)
"viewbox", # Coordinate system and viewport
"preserveaspectratio", # Scaling behavior
# Opacity
"opacity", # Overall element opacity
# Gradient attributes
"gradienttransform", # Transform applied to gradient
"gradientunits", # Gradient coordinate system
"spreadmethod", # Gradient spread method
"fx", # Radial gradient focal point X
"fy", # Radial gradient focal point Y
"fr", # Radial gradient focal radius
"offset", # Position of gradient stop
"stop-color", # Color at gradient stop
"stop-opacity", # Opacity at gradient stop
# Clipping and masking
"clip-path", # Reference to clipping path
"mask", # Reference to mask
# Markers
"marker-start", # Marker at path start
"marker-mid", # Marker at path vertices
"marker-end", # Marker at path end
"markerunits", # Marker coordinate system
"markerwidth", # Marker viewport width
"markerheight", # Marker viewport height
"refx", # Marker reference point X
"refy", # Marker reference point Y
"orient", # Marker orientation
# Text attributes
"font-family", # Font name
"font-size", # Font size
"font-weight", # Font weight (normal/bold)
"font-style", # Font style (normal/italic)
"text-anchor", # Text alignment (start/middle/end)
"text-decoration", # Text decoration (underline/etc)
"letter-spacing", # Space between letters
"word-spacing", # Space between words
"text-rendering", # Text rendering hint
"shape-rendering", # Shape rendering hint
"image-rendering", # Image rendering hint
"startoffset", # TextPath start offset
"method", # TextPath method
"spacing", # TextPath spacing
# Links and references
"href", # Link or reference (validate for javascript:!)
"xlink:href", # Legacy link reference (validate for javascript:!)
"xlink:title", # Accessible title for links
# Pattern attributes
"patternunits", # Pattern coordinate system
"patterntransform", # Transform applied to pattern
"patterncontentunits", # Pattern content coordinate system
# Mask attributes
"maskunits", # Mask coordinate system
"maskcontentunits", # Mask content coordinate system
# SVG namespace declarations
"xmlns", # XML namespace (usually http://www.w3.org/2000/svg)
"xmlns:xlink", # XLink namespace
"version", # SVG version
"type",
# Accessibility
"aria-label",
"aria-hidden",
"role",
"focusable",
}
# Dangerous patterns in style attributes that can execute code
DANGEROUS_STYLE_PATTERNS: set[str] = {
"javascript:", # javascript: URLs in url() functions
"data:text/html", # HTML data URIs can contain scripts
"expression(", # IE's CSS expressions (legacy but dangerous)
"import", # CSS @import can load external resources
"@import", # CSS @import directive
"-moz-binding:", # Firefox XBL bindings (can execute code)
"behaviour:", # IE behavior property
"behavior:", # IE behavior property (US spelling)
"vbscript:", # VBScript URLs
"data:application/", # Data URIs for arbitrary application payloads
}
XLINK_NS: set[str] = {
"http://www.w3.org/1999/xlink",
"https://www.w3.org/1999/xlink",
}
# Dangerous URI schemes
DANGEROUS_SCHEMES: set[str] = {
"javascript:",
"data:text/html",
"vbscript:",
"file:",
"data:application/", # Can contain scripts
}
SAFE_PREFIXES: set[str] = {"#", "/", "./", "../", "data:image/"}
def reject_dangerous_svg(file: UploadedFile) -> None:
"""
Rejects SVG files that contain dangerous tags or attributes.
Raises ValidationError if unsafe content is found.
See GHSA-6p53-hqqw-8j62
"""
try:
parser = etree.XMLParser(resolve_entities=False)
file.seek(0)
tree = etree.parse(file, parser)
root = tree.getroot()
except etree.XMLSyntaxError:
raise ValidationError("Invalid SVG file.")
for element in root.iter():
tag: str = etree.QName(element.tag).localname.lower()
if tag not in ALLOWED_SVG_TAGS:
raise ValidationError(f"Disallowed SVG tag: <{tag}>")
if tag == "style":
# Combine all text (including CDATA) to scan for dangerous patterns
style_text: str = "".join(element.itertext()).lower()
for pattern in DANGEROUS_STYLE_PATTERNS:
if pattern in style_text:
raise ValidationError(
f"Disallowed pattern in <style> content: {pattern}",
)
attr_name: str
attr_value: str
for attr_name, attr_value in element.attrib.items():
# lxml expands namespaces to {url}name. We must convert the standard
# XLink namespace back to 'xlink:' so it matches our allowlist.
if attr_name.startswith("{"):
qname = etree.QName(attr_name)
if qname.namespace in XLINK_NS:
attr_name_check = f"xlink:{qname.localname}"
else:
# Unknown namespace: keep raw name (will fail allowlist)
attr_name_check = attr_name
else:
attr_name_check = attr_name
attr_name_lower = attr_name_check.lower().strip()
if attr_name_lower not in ALLOWED_SVG_ATTRIBUTES:
raise ValidationError(f"Disallowed SVG attribute: {attr_name}")
if attr_name_lower == "style":
style_lower: str = attr_value.lower()
# Check if any dangerous pattern is a substring of the style
for pattern in DANGEROUS_STYLE_PATTERNS:
if pattern in style_lower:
raise ValidationError(
f"Disallowed pattern in style attribute: {pattern}",
)
# Validate URI attributes (href, xlink:href)
if attr_name_lower in {"href", "xlink:href"}:
value_stripped: str = attr_value.strip().lower()
# Check if value starts with any dangerous scheme
for scheme in DANGEROUS_SCHEMES:
if value_stripped.startswith(scheme):
raise ValidationError(
f"Disallowed URI scheme in {attr_name}: {scheme}",
)
# Allow safe schemes for logos: #anchor, relative paths, data:image/*
# No external resources (http/https) needed for logos
if value_stripped and not any(
value_stripped.startswith(prefix) for prefix in SAFE_PREFIXES
):
raise ValidationError(
f"URI scheme not allowed in {attr_name}: must be #anchor, relative path, or data:image/*",
)
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/validators.py",
"license": "GNU General Public License v3.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
paperless-ngx/paperless-ngx:src/paperless/tests/test_views.py | import tempfile
from pathlib import Path
from django.test import override_settings
def test_favicon_view(client):
with tempfile.TemporaryDirectory() as tmpdir:
static_dir = Path(tmpdir)
favicon_path = static_dir / "paperless" / "img" / "favicon.ico"
favicon_path.parent.mkdir(parents=True, exist_ok=True)
favicon_path.write_bytes(b"FAKE ICON DATA")
with override_settings(STATIC_ROOT=static_dir):
response = client.get("/favicon.ico")
assert response.status_code == 200
assert response["Content-Type"] == "image/x-icon"
assert b"".join(response.streaming_content) == b"FAKE ICON DATA"
def test_favicon_view_missing_file(client):
with override_settings(STATIC_ROOT=Path(tempfile.mkdtemp())):
response = client.get("/favicon.ico")
assert response.status_code == 404
| {
"repo_id": "paperless-ngx/paperless-ngx",
"file_path": "src/paperless/tests/test_views.py",
"license": "GNU General Public License v3.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/dropdown/test_dropdown_debounce.py | from dash import Dash, Input, Output, dcc, html
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
def test_ddde001_dropdown_debounce(dash_duo):
    """With debounce=True on a multi-select dropdown, the menu's own UI updates
    immediately on (de)select, but the `value` callback fires only once the
    menu is closed."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Dropdown(
                id="dropdown",
                options=[
                    {"label": "New York City", "value": "NYC"},
                    {"label": "Montreal", "value": "MTL"},
                    {"label": "San Francisco", "value": "SF"},
                ],
                value=["MTL", "SF"],
                multi=True,
                closeOnSelect=False,  # keep the menu open while toggling options
                debounce=True,  # defer `value` updates until the menu closes
            ),
            html.Div(
                id="dropdown-value-out", style={"height": "10px", "width": "10px"}
            ),
        ]
    )

    @app.callback(
        Output("dropdown-value-out", "children"),
        Input("dropdown", "value"),
    )
    def update_value(val):
        # Mirror the current dropdown value as comma-separated text.
        return ", ".join(val)

    dash_duo.start_server(app)
    assert dash_duo.find_element("#dropdown-value-out").text == "MTL, SF"

    dash_duo.find_element("#dropdown").click()

    # deselect first item
    selected = dash_duo.find_elements(".dash-dropdown-options input[checked]")
    selected[0].click()

    # UI should update immediately (local state updated)
    assert dash_duo.find_element("#dropdown-value").text == "San Francisco"

    # Callback output should not change while dropdown is still open
    assert dash_duo.find_element("#dropdown-value-out").text == "MTL, SF"

    # Close the dropdown (ESC simulates user dismiss)
    actions = ActionChains(dash_duo.driver)
    actions.send_keys(Keys.ESCAPE).perform()
    time.sleep(0.1)

    # After closing, the callback output should be updated
    assert dash_duo.find_element("#dropdown-value-out").text == "SF"

    assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/dropdown/test_dropdown_debounce.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/button/test_button.py | from dash import Dash, Input, Output, dcc, html
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def test_btev001_clicks_and_blur(dash_dcc):
    """Test that n_clicks, n_clicks_timestamp, n_blur, and n_blur_timestamp work correctly"""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Button(
                "Click Me",
                id="test-button",
                className="custom-button-class",
            ),
            html.Div(id="click-output"),
            html.Div(id="blur-output"),
            html.Div(id="click-timestamp-output"),
            html.Div(id="blur-timestamp-output"),
            # Add a second element to blur to
            html.Button("Other Element", id="other-element"),
        ]
    )

    @app.callback(
        Output("click-output", "children"),
        Input("test-button", "n_clicks"),
    )
    def update_clicks(n_clicks):
        # n_clicks is None until the first click.
        if n_clicks is None:
            return "Clicks: 0"
        return f"Clicks: {n_clicks}"

    @app.callback(
        Output("blur-output", "children"),
        Input("test-button", "n_blur"),
    )
    def update_blur(n_blur):
        if n_blur is None:
            return "Blurs: 0"
        return f"Blurs: {n_blur}"

    @app.callback(
        Output("click-timestamp-output", "children"),
        Input("test-button", "n_clicks_timestamp"),
    )
    def update_click_timestamp(n_clicks_timestamp):
        # -1 is treated the same as None, i.e. "never clicked".
        if n_clicks_timestamp is None or n_clicks_timestamp == -1:
            return "Click timestamp: None"
        return f"Click timestamp: {n_clicks_timestamp}"

    @app.callback(
        Output("blur-timestamp-output", "children"),
        Input("test-button", "n_blur_timestamp"),
    )
    def update_blur_timestamp(n_blur_timestamp):
        if n_blur_timestamp is None or n_blur_timestamp == -1:
            return "Blur timestamp: None"
        return f"Blur timestamp: {n_blur_timestamp}"

    dash_dcc.start_server(app)

    # Verify custom class is applied
    button = dash_dcc.find_element(".custom-button-class")
    assert button is not None, "Custom className should be applied"

    # Check initial state
    dash_dcc.wait_for_text_to_equal("#click-output", "Clicks: 0")
    dash_dcc.wait_for_text_to_equal("#blur-output", "Blurs: 0")
    dash_dcc.wait_for_text_to_equal("#click-timestamp-output", "Click timestamp: None")
    dash_dcc.wait_for_text_to_equal("#blur-timestamp-output", "Blur timestamp: None")

    # Click the button
    button.click()
    dash_dcc.wait_for_text_to_equal("#click-output", "Clicks: 1")

    # Verify click timestamp is set
    click_timestamp_text = dash_dcc.find_element("#click-timestamp-output").text
    assert "Click timestamp: " in click_timestamp_text
    assert click_timestamp_text != "Click timestamp: None"

    # Click again
    button.click()
    dash_dcc.wait_for_text_to_equal("#click-output", "Clicks: 2")

    # Blur by clicking on other element
    other_element = dash_dcc.find_element("#other-element")
    other_element.click()

    # Check blur was registered
    dash_dcc.wait_for_text_to_equal("#blur-output", "Blurs: 1")

    # Verify blur timestamp is set
    blur_timestamp_text = dash_dcc.find_element("#blur-timestamp-output").text
    assert "Blur timestamp: " in blur_timestamp_text
    assert blur_timestamp_text != "Blur timestamp: None"

    # Click the button again to focus it
    button.click()
    dash_dcc.wait_for_text_to_equal("#click-output", "Clicks: 3")

    # Blur again by clicking other element
    other_element.click()
    dash_dcc.wait_for_text_to_equal("#blur-output", "Blurs: 2")

    assert dash_dcc.get_logs() == []
def test_btev002_disabled_button(dash_dcc):
    """Test that disabled button doesn't trigger click or blur events"""
    import time

    app = Dash(__name__)

    app.layout = html.Div(
        [
            dcc.Button(
                "Disabled Button",
                id="disabled-button",
                className="disabled-test-button",
                disabled=True,
            ),
            html.Div(id="click-output"),
            html.Div(id="blur-output"),
            html.Button("Other Element", id="other-element"),
        ]
    )

    @app.callback(
        Output("click-output", "children"),
        Input("disabled-button", "n_clicks"),
    )
    def update_clicks(n_clicks):
        return f"Clicks: {n_clicks or 0}"

    @app.callback(
        Output("blur-output", "children"),
        Input("disabled-button", "n_blur"),
    )
    def update_blur(n_blur):
        return f"Blurs: {n_blur or 0}"

    dash_dcc.start_server(app)

    disabled_btn = dash_dcc.find_element(".disabled-test-button")
    neighbor = dash_dcc.find_element("#other-element")

    # The rendered element must actually carry the disabled attribute.
    assert disabled_btn.get_attribute("disabled") is not None

    # Both counters start at zero.
    dash_dcc.wait_for_text_to_equal("#click-output", "Clicks: 0")
    dash_dcc.wait_for_text_to_equal("#blur-output", "Blurs: 0")

    # A click on the disabled button must not bump n_clicks; give the app
    # a moment to (not) react before sampling the output.
    disabled_btn.click()
    time.sleep(0.5)
    assert (
        dash_dcc.find_element("#click-output").text == "Clicks: 0"
    ), "Disabled button should not trigger clicks"

    # Clicking elsewhere must not register a blur on the disabled button.
    neighbor.click()
    time.sleep(0.5)
    assert (
        dash_dcc.find_element("#blur-output").text == "Blurs: 0"
    ), "Disabled button should not trigger blur events"

    assert dash_dcc.get_logs() == []
def test_btev003_button_states_visual(dash_dcc):
    """Visual test for button states: base, hover, and focus in one snapshot"""
    app = Dash(__name__)

    # (section heading, button label, button id, className) per state.
    sections = [
        ("Base State", "Base Button", "base-button", "state-base"),
        ("Hover State", "Hover Button", "hover-button", "state-hover"),
        ("Focus State", "Focus Button", "focus-button", "state-focus"),
    ]
    app.layout = html.Div(
        [
            html.Div(
                [
                    html.H3(heading),
                    dcc.Button(label, id=btn_id, className=cls),
                ],
                style={"marginBottom": "30px"},
            )
            for heading, label, btn_id, cls in sections
        ],
        style={"padding": "40px"},
    )

    dash_dcc.start_server(app)

    # Make sure the buttons have rendered before interacting.
    dash_dcc.wait_for_element(".state-base")

    # Move keyboard focus onto the third button with three TAB presses.
    body = dash_dcc.find_element("body")
    for _ in range(3):
        body.send_keys(Keys.TAB)

    # Park the mouse over the middle button to trigger its hover styling.
    hover_target = dash_dcc.find_element(".state-hover")
    ActionChains(dash_dcc.driver).move_to_element(hover_target).perform()

    # One snapshot capturing all three states at once.
    dash_dcc.percy_snapshot("Button States - Base, Hover, Focus")

    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/button/test_button.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/calendar/locales/test_date_picker_locales.py | from datetime import datetime
from dash import Dash, html, dcc, Input, Output
from selenium.webdriver.common.keys import Keys
def test_dtps030_french_localization_via_cdn(dash_dcc):
    """Test that French locale from CDN is applied to date picker."""
    app = Dash(
        __name__,
    )
    app.layout = html.Div(
        [
            html.P("DatePicker localization - translations in assets folder"),
            dcc.DatePickerSingle(
                id="dps",
                date="2025-01-15",
                initial_visible_month=datetime(2025, 1, 1),
                display_format="MMMM DD, YYYY",
                month_format="MMMM YYYY",
            ),
            html.Div(id="output"),
        ]
    )

    @app.callback(
        Output("output", "children"),
        Input("dps", "date"),
    )
    def update_output(date):
        # The `date` prop stays ISO-formatted regardless of display locale.
        return f"Date: {date}"

    dash_dcc.start_server(app)

    # Wait for date picker to render
    input_element = dash_dcc.wait_for_element("#dps")

    # Check initial callback output shows ISO format date
    dash_dcc.wait_for_text_to_equal("#output", "Date: 2025-01-15")

    # Check that display format uses French month name
    display_value = input_element.get_attribute("value")
    assert (
        "janvier" in display_value.lower()
    ), f"Display format should use French month name 'janvier', but got: {display_value}"

    # Test typing a French month name in the input
    input_element.clear()
    input_element.send_keys("février 20, 2025")
    input_element.send_keys(Keys.TAB)  # Blur to trigger parsing

    # Wait for the date to be parsed and formatted
    # NOTE(review): this waits on the element's *text*; presumably the
    # component mirrors the parsed value there — confirm against the markup.
    dash_dcc.wait_for_text_to_equal("#dps", "février 20, 2025")

    # Verify the input now shows the French formatted date
    display_value = input_element.get_attribute("value")
    assert (
        "février" in display_value.lower()
    ), f"Input should accept and display French month name 'février', but got: {display_value}"

    # Verify the callback received the correct ISO format date (locale-independent)
    dash_dcc.wait_for_text_to_equal("#output", "Date: 2025-02-20")

    # Open the calendar
    input_element.click()
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")

    # Check that days of the week are in French
    # French abbreviated days: Lu, Ma, Me, Je, Ve, Sa, Di
    day_headers = dash_dcc.find_elements(".dash-datepicker-calendar thead th span")
    day_texts = [header.text for header in day_headers]

    # Check for French day abbreviations (2-letter format)
    french_days = ["lu", "ma", "me", "je", "ve", "sa", "di"]
    assert (
        len(day_texts) == 7
    ), f"Should have 7 day headers, but got {len(day_texts)}: {day_texts}"
    for day_text in day_texts:
        assert any(
            french_day in day_text.lower() for french_day in french_days
        ), f"Day header '{day_text}' should be a French day abbreviation, expected one of: {french_days}"

    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/calendar/locales/test_date_picker_locales.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/calendar/test_a11y_date_picker_range.py | from datetime import datetime
from dash import Dash, Input, Output
from dash.dcc import DatePickerRange
from dash.html import Div
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
def send_keys(driver, key):
    """Send keyboard keys to the browser"""
    # Build, populate and fire the action chain in a single expression.
    ActionChains(driver).send_keys(key).perform()
def get_focused_text(driver):
    """Get the text content of the currently focused element"""
    script = "return document.activeElement.textContent;"
    return driver.execute_script(script)
def test_a11y_range_001_keyboard_range_selection_with_highlights(dash_dcc):
    """Test keyboard-based range selection with highlight verification"""
    app = Dash(__name__)
    app.layout = Div(
        [
            DatePickerRange(
                id="date-picker-range",
                initial_visible_month=datetime(2021, 1, 1),
            ),
            Div(id="output-dates"),
        ]
    )

    @app.callback(
        Output("output-dates", "children"),
        Input("date-picker-range", "start_date"),
        Input("date-picker-range", "end_date"),
    )
    def update_output(start_date, end_date):
        # "start to end" once both ends exist, "Start: ..." while only the
        # first date is chosen, and an empty string otherwise.
        if start_date and end_date:
            return f"{start_date} to {end_date}"
        elif start_date:
            return f"Start: {start_date}"
        return ""

    dash_dcc.start_server(app)

    # Find the first input field and open calendar with keyboard
    date_picker_input = dash_dcc.find_element(".dash-datepicker-input")
    date_picker_input.send_keys(Keys.ARROW_DOWN)
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")

    # Wait for focus to be applied
    import time

    time.sleep(0.1)

    # Calendar opens with Jan 1 focused (first day of month since no dates selected)
    # Navigate: Arrow Down (Jan 1 -> 8)
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)

    # Verify focused date is Jan 8
    assert get_focused_text(dash_dcc.driver) == "8"

    # Press Space to select the first date (Jan 8)
    send_keys(dash_dcc.driver, Keys.SPACE)

    # Verify first date was selected (only start_date, no end_date yet)
    dash_dcc.wait_for_text_to_equal("#output-dates", "Start: 2021-01-08")

    # Navigate to another date: Arrow Down (1 week) + Arrow Right (1 day)
    # Jan 8 -> Jan 15 -> Jan 16
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)
    send_keys(dash_dcc.driver, Keys.ARROW_RIGHT)

    # Verify focused date is Jan 16
    assert get_focused_text(dash_dcc.driver) == "16"

    # Verify that days between Jan 8 and Jan 16 are highlighted
    # The highlighted dates should have the class 'dash-datepicker-calendar-date-highlighted'
    highlighted_dates = dash_dcc.driver.find_elements(
        "css selector", ".dash-datepicker-calendar-date-highlighted"
    )
    # Should have 9 highlighted dates (Jan 8 through Jan 16 inclusive)
    assert (
        len(highlighted_dates) == 9
    ), f"Expected 9 highlighted dates, got {len(highlighted_dates)}"

    # Press Enter to select the second date
    send_keys(dash_dcc.driver, Keys.ENTER)

    # Calendar should close
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=0.25)

    # Verify both dates were selected in the output
    dash_dcc.wait_for_text_to_equal("#output-dates", "2021-01-08 to 2021-01-16")

    assert dash_dcc.get_logs() == []
def test_a11y_range_002_keyboard_update_existing_range(dash_dcc):
    """Test keyboard-based updating of an existing date range"""
    app = Dash(__name__)
    app.layout = Div(
        [
            DatePickerRange(
                id="date-picker-range",
                start_date="2021-01-10",
                end_date="2021-01-20",
                initial_visible_month=datetime(2021, 1, 1),
            ),
            Div(id="output-dates"),
        ]
    )

    @app.callback(
        Output("output-dates", "children"),
        Input("date-picker-range", "start_date"),
        Input("date-picker-range", "end_date"),
    )
    def update_output(start_date, end_date):
        if start_date and end_date:
            return f"{start_date} to {end_date}"
        elif start_date:
            return f"Start: {start_date}"
        return ""

    dash_dcc.start_server(app)

    # Verify initial range is displayed
    dash_dcc.wait_for_text_to_equal("#output-dates", "2021-01-10 to 2021-01-20")

    # Find the first input field and open calendar with keyboard
    date_picker_input = dash_dcc.find_element(".dash-datepicker-input")
    date_picker_input.send_keys(Keys.ARROW_DOWN)
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")

    # Wait for focus to be applied
    import time

    time.sleep(0.1)

    # Calendar opens with Jan 10 focused (the current start date)
    # Navigate: Arrow Down (Jan 10 -> 17), then 5x Arrow Left (17 -> 16 -> 15 -> 14 -> 13 -> 12)
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)

    # Verify focused date is Jan 12
    assert get_focused_text(dash_dcc.driver) == "12"

    # Press Space to move the range start to Jan 12. Note the existing
    # end date (Jan 20) is KEPT, as the next assertion shows.
    send_keys(dash_dcc.driver, Keys.SPACE)

    # Verify the start date moved while the end date is unchanged
    dash_dcc.wait_for_text_to_equal("#output-dates", "2021-01-12 to 2021-01-20")

    # Navigate to new end date: Arrow Down + Arrow Right (Jan 12 -> 19 -> 20)
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)
    send_keys(dash_dcc.driver, Keys.ARROW_RIGHT)

    # Verify focused date is Jan 20
    assert get_focused_text(dash_dcc.driver) == "20"

    # Verify that days between Jan 12 and Jan 20 are highlighted
    highlighted_dates = dash_dcc.driver.find_elements(
        "css selector", ".dash-datepicker-calendar-date-highlighted"
    )
    # Should have 9 highlighted dates (Jan 12 through 20 inclusive)
    assert (
        len(highlighted_dates) == 9
    ), f"Expected 9 highlighted dates, got {len(highlighted_dates)}"

    # Press Enter to select the new end date
    send_keys(dash_dcc.driver, Keys.ENTER)

    # Calendar should close
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=0.25)

    # Verify both dates were updated in the output
    dash_dcc.wait_for_text_to_equal("#output-dates", "2021-01-12 to 2021-01-20")

    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/calendar/test_a11y_date_picker_range.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/calendar/test_a11y_date_picker_single.py | from datetime import datetime
from dash import Dash, Input, Output
from dash.dcc import DatePickerSingle
from dash.html import Div, Label, P
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
def send_keys(driver, key):
    """Send keyboard keys to the browser"""
    # Build, populate and fire the action chain in a single expression.
    ActionChains(driver).send_keys(key).perform()
def get_focused_text(driver):
    """Get the text content of the currently focused element"""
    script = "return document.activeElement.textContent;"
    return driver.execute_script(script)
def create_date_picker_app(date_picker_props):
    """Create a Dash app with a DatePickerSingle component and output callback"""
    app = Dash(__name__)

    picker = DatePickerSingle(id="date-picker", **date_picker_props)
    app.layout = Div([picker, Div(id="output-date")])

    @app.callback(Output("output-date", "children"), Input("date-picker", "date"))
    def update_output(date):
        # Render the ISO date string, or nothing while unset.
        if date:
            return date
        return ""

    return app
def open_calendar(dash_dcc, date_picker):
    """Open the calendar and wait for it to be visible"""
    # The visible <input> shares its id with the component itself.
    input_id = date_picker.get_attribute("id")
    field = dash_dcc.find_element("#" + input_id)
    field.send_keys(Keys.ARROW_DOWN)
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")
    # Focus is applied via requestAnimationFrame; give it a beat.
    sleep(0.1)
def close_calendar(dash_dcc, driver):
    """Close the calendar with Escape and wait for it to disappear"""
    ActionChains(driver).send_keys(Keys.ESCAPE).perform()
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container")
def test_a11y001_label_focuses_date_picker(dash_dcc):
    """Clicking a wrapping <label> opens the nested date picker's calendar."""
    app = Dash(__name__)
    app.layout = Label(
        [
            P("Click me", id="label"),
            DatePickerSingle(
                id="date-picker",
                initial_visible_month=datetime(2021, 1, 1),
            ),
        ],
    )

    dash_dcc.start_server(app)
    dash_dcc.wait_for_element("#date-picker")

    # Calendar should be closed initially
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=0.25)

    # Clicking the label text should focus the picker and open the calendar.
    dash_dcc.find_element("#label").click()
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")

    assert dash_dcc.get_logs() == []
def test_a11y002_label_with_htmlFor_can_focus_date_picker(dash_dcc):
    """A <label htmlFor=...> targeting the picker opens its calendar on click."""
    app = Dash(__name__)
    app.layout = Div(
        [
            Label("Click me", htmlFor="date-picker", id="label"),
            DatePickerSingle(
                id="date-picker",
                initial_visible_month=datetime(2021, 1, 1),
            ),
        ],
    )

    dash_dcc.start_server(app)
    dash_dcc.wait_for_element("#date-picker")

    # Calendar should be closed initially
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=0.25)

    dash_dcc.find_element("#label").click()
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")

    assert dash_dcc.get_logs() == []
def test_a11y003_keyboard_navigation_arrows(dash_dcc):
    """Arrow/Page keys move calendar focus by day, week, and month."""
    app = create_date_picker_app(
        {
            "date": "2021-01-15",
            "initial_visible_month": datetime(2021, 1, 1),
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")
    open_calendar(dash_dcc, date_picker)

    # Get the focused date element (should be Jan 15, 2021)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test ArrowRight - should move to Jan 16
    send_keys(dash_dcc.driver, Keys.ARROW_RIGHT)
    assert get_focused_text(dash_dcc.driver) == "16"

    # Test ArrowLeft - should move back to Jan 15
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test ArrowDown - should move to Jan 22 (one week down)
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)
    assert get_focused_text(dash_dcc.driver) == "22"

    # Test ArrowUp - should move back to Jan 15 (one week up)
    send_keys(dash_dcc.driver, Keys.ARROW_UP)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test PageDown - should move to Feb 15 (one month forward)
    send_keys(dash_dcc.driver, Keys.PAGE_DOWN)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test PageUp - should move back to Jan 15 (one month back)
    send_keys(dash_dcc.driver, Keys.PAGE_UP)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test Enter - should select the date and close calendar
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=0.25)

    assert dash_dcc.get_logs() == []
def test_a11y004_keyboard_navigation_home_end(dash_dcc):
    """Home/End jump to the week's first/last day with Sunday-start weeks."""
    app = create_date_picker_app(
        {
            "date": "2021-01-15",  # Friday, Jan 15, 2021
            "initial_visible_month": datetime(2021, 1, 1),
            "first_day_of_week": 0,  # Sunday
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")
    open_calendar(dash_dcc, date_picker)

    # Get the focused date element (should be Jan 15, 2021 - Friday)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test Home key - should move to week start (Sunday, Jan 10)
    send_keys(dash_dcc.driver, Keys.HOME)
    assert get_focused_text(dash_dcc.driver) == "10"

    # Test End key - should move to week end (Saturday, Jan 16)
    send_keys(dash_dcc.driver, Keys.END)
    assert get_focused_text(dash_dcc.driver) == "16"

    # Test Home key again - should move to week start (Sunday, Jan 10)
    send_keys(dash_dcc.driver, Keys.HOME)
    assert get_focused_text(dash_dcc.driver) == "10"

    assert dash_dcc.get_logs() == []
def test_a11y005_keyboard_navigation_home_end_monday_start(dash_dcc):
    """Home/End honor first_day_of_week=1 (Monday-start weeks)."""
    app = create_date_picker_app(
        {
            "date": "2021-01-15",  # Friday, Jan 15, 2021
            "initial_visible_month": datetime(2021, 1, 1),
            "first_day_of_week": 1,  # Monday
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")
    open_calendar(dash_dcc, date_picker)

    # Get the focused date element (should be Jan 15, 2021 - Friday)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Test Home key - should move to week start (Monday, Jan 11)
    send_keys(dash_dcc.driver, Keys.HOME)
    assert get_focused_text(dash_dcc.driver) == "11"

    # Test End key - should move to week end (Sunday, Jan 17)
    send_keys(dash_dcc.driver, Keys.END)
    assert get_focused_text(dash_dcc.driver) == "17"

    assert dash_dcc.get_logs() == []
def test_a11y006_keyboard_navigation_rtl(dash_dcc):
    """With is_RTL=True, horizontal arrow keys are mirrored."""
    app = create_date_picker_app(
        {
            "date": "2021-01-15",
            "initial_visible_month": datetime(2021, 1, 1),
            "is_RTL": True,
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")
    open_calendar(dash_dcc, date_picker)

    assert get_focused_text(dash_dcc.driver) == "15"

    # Moves to Jan 14 (reversed)
    send_keys(dash_dcc.driver, Keys.ARROW_RIGHT)
    assert get_focused_text(dash_dcc.driver) == "14"

    # Moves to Jan 15 (reversed)
    send_keys(dash_dcc.driver, Keys.ARROW_LEFT)
    assert get_focused_text(dash_dcc.driver) == "15"

    # Moves to week start
    send_keys(dash_dcc.driver, Keys.HOME)
    assert get_focused_text(dash_dcc.driver) == "10"

    # Moves to week end
    send_keys(dash_dcc.driver, Keys.END)
    assert get_focused_text(dash_dcc.driver) == "16"

    assert dash_dcc.get_logs() == []
def test_a11y007_all_keyboard_keys_respect_min_max(dash_dcc):
    """Keyboard selection must not land on dates outside min/max bounds."""
    app = create_date_picker_app(
        {
            "date": "2021-02-15",  # Monday
            "min_date_allowed": datetime(2021, 2, 15),  # Monday - same as start date
            "max_date_allowed": datetime(2021, 2, 20),  # Sat
            "initial_visible_month": datetime(2021, 2, 1),
            "first_day_of_week": 0,  # Sunday
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")
    initial_value = "2021-02-15"
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test Arrow Down (would go to Feb 22, beyond max)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test Arrow Up (would go to Feb 8, before min)
    close_calendar(dash_dcc, dash_dcc.driver)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.ARROW_UP)
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test Home (would go to Feb 14 Sunday, before min Feb 15)
    close_calendar(dash_dcc, dash_dcc.driver)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.HOME)
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test End (would go to Feb 20 Saturday, at max - should succeed)
    close_calendar(dash_dcc, dash_dcc.driver)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.END)
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container")
    dash_dcc.wait_for_text_to_equal("#output-date", "2021-02-20")

    # Reset and test PageUp (would go to Jan 20, before min)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.PAGE_UP)
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_text_to_equal("#output-date", "2021-02-20")

    # Test PageDown (would go to Mar 20, after max)
    send_keys(dash_dcc.driver, Keys.PAGE_DOWN)
    send_keys(dash_dcc.driver, Keys.ENTER)
    dash_dcc.wait_for_text_to_equal("#output-date", "2021-02-20")

    assert dash_dcc.get_logs() == []
def test_a11y008_all_keyboard_keys_respect_disabled_days(dash_dcc):
    """Keyboard selection must not land on explicitly disabled days."""
    initial_value = "2021-02-15"
    app = create_date_picker_app(
        {
            "date": initial_value,  # Monday
            "disabled_days": [
                datetime(2021, 2, 14),  # Sunday - week start
                datetime(2021, 2, 16),  # Tuesday - ArrowRight target
                datetime(2021, 2, 20),  # Saturday - week end
                datetime(2021, 2, 22),  # Monday - ArrowDown target
                datetime(2021, 1, 15),  # PageUp target
                datetime(2021, 3, 15),  # PageDown target
            ],
            "initial_visible_month": datetime(2021, 2, 1),
            "first_day_of_week": 0,  # Sunday
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")

    # Wait for initial date to be set in output
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test Arrow Right (would go to Feb 16, disabled)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.ARROW_RIGHT)
    send_keys(dash_dcc.driver, Keys.ENTER)
    # Should remain at Feb 15 since Feb 16 is disabled
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test Arrow Down (would go to Feb 22, disabled)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.ARROW_DOWN)
    send_keys(dash_dcc.driver, Keys.ENTER)
    # Should remain at Feb 15 since Feb 22 is disabled
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test Home (would go to Feb 14 Sunday, disabled)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.HOME)
    send_keys(dash_dcc.driver, Keys.ENTER)
    # Should remain at Feb 15 since Feb 14 is disabled
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test End (would go to Feb 20 Saturday, disabled)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.END)
    send_keys(dash_dcc.driver, Keys.ENTER)
    # Should remain at Feb 15 since Feb 20 is disabled
    dash_dcc.wait_for_text_to_equal("#output-date", initial_value)

    # Test PageUp (navigates to previous month, but not a disabled day within that month)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.PAGE_UP)
    send_keys(dash_dcc.driver, Keys.ENTER)
    output_text = dash_dcc.find_element("#output-date").text
    assert output_text != "2021-01-15", "PageUp: Should not select disabled date"

    # Test PageDown (navigates to next month, but not a disabled day within that month)
    open_calendar(dash_dcc, date_picker)
    send_keys(dash_dcc.driver, Keys.PAGE_DOWN)
    send_keys(dash_dcc.driver, Keys.ENTER)
    output_text = dash_dcc.find_element("#output-date").text
    assert output_text != "2021-03-15", "PageDown: Should not select disabled date"

    assert dash_dcc.get_logs() == []
def test_a11y009_keyboard_space_selects_date(dash_dcc):
    """Space selects the focused date and closes the calendar."""
    app = create_date_picker_app(
        {
            "date": "2021-01-15",
            "initial_visible_month": datetime(2021, 1, 1),
        }
    )
    dash_dcc.start_server(app)
    date_picker = dash_dcc.find_element("#date-picker")
    dash_dcc.wait_for_text_to_equal("#output-date", "2021-01-15")

    open_calendar(dash_dcc, date_picker)
    # Move focus one day right: Jan 15 -> Jan 16.
    send_keys(dash_dcc.driver, Keys.ARROW_RIGHT)
    assert get_focused_text(dash_dcc.driver) == "16"

    # Space commits the focused date and the calendar closes.
    send_keys(dash_dcc.driver, Keys.SPACE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=0.25)
    dash_dcc.wait_for_text_to_equal("#output-date", "2021-01-16")

    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/calendar/test_a11y_date_picker_single.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/calendar/test_multi_month_selection.py | from datetime import datetime
from selenium.webdriver.common.action_chains import ActionChains
from dash import Dash, Input, Output, html, dcc
def test_dtps_multi_month_click_second_month(dash_dcc):
"""Test clicking a date in the second month with number_of_months_shown=2"""
app = Dash(__name__)
app.layout = html.Div(
[
dcc.DatePickerSingle(
id="dps",
initial_visible_month=datetime(2021, 1, 1),
number_of_months_shown=2,
stay_open_on_select=True,
),
html.Div(id="output"),
]
)
@app.callback(Output("output", "children"), Input("dps", "date"))
def update_output(date):
return date or "No date selected"
dash_dcc.start_server(app)
# Click the date picker to open it
date_picker = dash_dcc.find_element("#dps")
date_picker.click()
dash_dcc._wait_until_day_is_clickable()
# Get all visible dates across both months
days = dash_dcc.find_elements(dash_dcc.date_picker_day_locator)
# Find a date in the second month (February 2021)
# We're looking for day "15" in the second month
second_month_days = [
day
for day in days
if day.text == "15"
and "dash-datepicker-calendar-date-outside" not in day.get_attribute("class")
]
# There should be two "15"s visible (Jan 15 and Feb 15)
# We want the second one (Feb 15)
assert len(second_month_days) >= 1, "Should find at least one day 15"
# Click on a date in the second visible month
if len(second_month_days) > 1:
second_month_days[1].click()
expected_date = "2021-02-15"
else:
# Fallback: just click the first one
second_month_days[0].click()
expected_date = "2021-01-15"
# Check the output
output = dash_dcc.find_element("#output")
assert output.text == expected_date, f"Expected {expected_date}, got {output.text}"
def test_dtpr_multi_month_drag_in_second_month(dash_dcc):
    """Test drag selection entirely within the second month with number_of_months_shown=2"""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.DatePickerRange(
                id="dpr",
                initial_visible_month=datetime(2021, 1, 1),
                number_of_months_shown=2,
            ),
            html.Div(id="output-start"),
            html.Div(id="output-end"),
        ]
    )

    @app.callback(
        Output("output-start", "children"),
        Output("output-end", "children"),
        Input("dpr", "start_date"),
        Input("dpr", "end_date"),
    )
    def update_output(start_date, end_date):
        return start_date or "No start", end_date or "No end"

    dash_dcc.start_server(app)

    # Click to open the calendar
    date_picker = dash_dcc.find_element("#dpr")
    date_picker.click()
    dash_dcc._wait_until_day_is_clickable()

    # Get all visible dates
    days = dash_dcc.find_elements(dash_dcc.date_picker_day_locator)

    # Find all day "10"s and "17"s (both should appear in Jan and Feb)
    all_10s = [
        day
        for day in days
        if day.text == "10"
        and "dash-datepicker-calendar-date-outside" not in day.get_attribute("class")
    ]
    all_17s = [
        day
        for day in days
        if day.text == "17"
        and "dash-datepicker-calendar-date-outside" not in day.get_attribute("class")
    ]

    # Use the last occurrence of each (should be February)
    # (with exactly one match both branches pick the same element)
    feb_10 = all_10s[-1] if len(all_10s) > 1 else all_10s[0]
    feb_17 = all_17s[-1] if len(all_17s) > 1 else all_17s[0]

    # Perform drag operation: mouse down on Feb 10, drag to Feb 17, mouse up
    actions = ActionChains(dash_dcc.driver)
    actions.click_and_hold(feb_10).move_to_element(feb_17).release().perform()

    # Wait for the callback to fire
    dash_dcc.wait_for_text_to_equal("#output-start", "2021-02-10", timeout=2)

    # Check the outputs
    output_start = dash_dcc.find_element("#output-start")
    output_end = dash_dcc.find_element("#output-end")

    assert (
        output_start.text == "2021-02-10"
    ), f"Expected 2021-02-10 as start, got {output_start.text}"
    assert (
        output_end.text == "2021-02-17"
    ), f"Expected 2021-02-17 as end, got {output_end.text}"
def test_dtpr_multi_month_click_in_second_month(dash_dcc):
    """Test click selection entirely within the second month with number_of_months_shown=2
    This should produce the same result as the drag test above"""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.DatePickerRange(
                id="dpr",
                initial_visible_month=datetime(2021, 1, 1),
                number_of_months_shown=2,
                stay_open_on_select=True,
            ),
            html.Div(id="output-start"),
            html.Div(id="output-end"),
        ]
    )

    @app.callback(
        Output("output-start", "children"),
        Output("output-end", "children"),
        Input("dpr", "start_date"),
        Input("dpr", "end_date"),
    )
    def update_output(start_date, end_date):
        return start_date or "No start", end_date or "No end"

    dash_dcc.start_server(app)

    # Open the calendar popover.
    dash_dcc.find_element("#dpr").click()
    dash_dcc._wait_until_day_is_clickable()

    # Gather in-month cells for the two day labels we care about.
    matches = {"10": [], "17": []}
    for cell in dash_dcc.find_elements(dash_dcc.date_picker_day_locator):
        label = cell.text
        if label not in matches:
            continue
        if "dash-datepicker-calendar-date-outside" in cell.get_attribute("class"):
            continue
        matches[label].append(cell)

    # The last occurrence of each day number lies in the second month (Feb).
    matches["10"][-1].click()
    matches["17"][-1].click()

    # Both ends of the range should land in February.
    dash_dcc.wait_for_text_to_equal("#output-start", "2021-02-10", timeout=2)
    dash_dcc.wait_for_text_to_equal("#output-end", "2021-02-17", timeout=2)
def test_dtpr_cross_month_drag_selection(dash_dcc):
    """Test drag selection from 15th of first month (Jan) to 15th of second month (Feb)"""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.DatePickerRange(
                id="dpr",
                initial_visible_month=datetime(2021, 1, 1),
                number_of_months_shown=2,
            ),
            html.Div(id="output-start"),
            html.Div(id="output-end"),
        ]
    )

    @app.callback(
        Output("output-start", "children"),
        Output("output-end", "children"),
        Input("dpr", "start_date"),
        Input("dpr", "end_date"),
    )
    def update_output(start_date, end_date):
        return start_date or "No start", end_date or "No end"

    dash_dcc.start_server(app)

    # Open the calendar popover.
    dash_dcc.find_element("#dpr").click()
    dash_dcc._wait_until_day_is_clickable()

    # Every in-month "15" cell: index 0 belongs to Jan, index 1 to Feb.
    fifteenths = [
        cell
        for cell in dash_dcc.find_elements(dash_dcc.date_picker_day_locator)
        if cell.text == "15"
        and "dash-datepicker-calendar-date-outside" not in cell.get_attribute("class")
    ]
    assert len(fifteenths) >= 2, "Should find at least two day 15s (Jan and Feb)"
    jan_15, feb_15 = fifteenths[0], fifteenths[1]

    # Drag across the month boundary: press on Jan 15, release on Feb 15.
    chain = ActionChains(dash_dcc.driver)
    chain.click_and_hold(jan_15).move_to_element(feb_15).release().perform()

    # Wait for the callback, then verify both ends of the range.
    dash_dcc.wait_for_text_to_equal("#output-start", "2021-01-15", timeout=2)
    output_start = dash_dcc.find_element("#output-start")
    output_end = dash_dcc.find_element("#output-end")
    assert (
        output_start.text == "2021-01-15"
    ), f"Expected 2021-01-15 as start, got {output_start.text}"
    assert (
        output_end.text == "2021-02-15"
    ), f"Expected 2021-02-15 as end, got {output_end.text}"
def test_dtpr_cross_month_click_selection(dash_dcc):
    """Test click selection from 15th of first month (Jan) to 15th of second month (Feb)
    This should produce the same result as the drag test above"""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.DatePickerRange(
                id="dpr",
                initial_visible_month=datetime(2021, 1, 1),
                number_of_months_shown=2,
                stay_open_on_select=True,
            ),
            html.Div(id="output-start"),
            html.Div(id="output-end"),
        ]
    )

    @app.callback(
        Output("output-start", "children"),
        Output("output-end", "children"),
        Input("dpr", "start_date"),
        Input("dpr", "end_date"),
    )
    def update_output(start_date, end_date):
        return start_date or "No start", end_date or "No end"

    dash_dcc.start_server(app)

    # Open the calendar popover.
    dash_dcc.find_element("#dpr").click()
    dash_dcc._wait_until_day_is_clickable()

    # In-month "15" cells: index 0 is Jan 15, index 1 is Feb 15.
    fifteenths = []
    for cell in dash_dcc.find_elements(dash_dcc.date_picker_day_locator):
        if cell.text != "15":
            continue
        if "dash-datepicker-calendar-date-outside" in cell.get_attribute("class"):
            continue
        fifteenths.append(cell)

    fifteenths[0].click()  # Jan 15 (first occurrence)
    fifteenths[1].click()  # Feb 15 (second occurrence)

    # Verify both ends of the selected range.
    dash_dcc.wait_for_text_to_equal("#output-start", "2021-01-15", timeout=2)
    dash_dcc.wait_for_text_to_equal("#output-end", "2021-02-15", timeout=2)
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/calendar/test_multi_month_selection.py",
"license": "MIT License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/calendar/test_portal.py | from datetime import date
from dash import Dash, html, dcc
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from time import sleep
import pytest
def click_everything_in_datepicker(datepicker_id, dash_dcc):
    """Click on every clickable element in a datepicker calendar.

    Args:
        datepicker_id: CSS selector for the datepicker element (e.g., "#dpr")
        dash_dcc: The dash_dcc fixture
    """
    # Open the calendar by clicking the datepicker input.
    dash_dcc.find_element(datepicker_id).click()

    # Wait for the popover, then gather every interactive element inside it:
    # day cells, then inputs, then buttons in reverse DOM order so the
    # "close" button is clicked after all other buttons.
    popover = dash_dcc.find_element(".dash-datepicker-content")
    day_cells = popover.find_elements(
        By.CSS_SELECTOR, "td:not(.dash-datepicker-calendar-padding)"
    )
    text_inputs = popover.find_elements(By.CSS_SELECTOR, "input")
    buttons = list(reversed(popover.find_elements(By.CSS_SELECTOR, "button")))

    for el in day_cells + text_inputs + buttons:
        try:
            el.click()
            sleep(0.05)
        except Exception as e:
            # Surface the failure as an assertion with element details.
            print(e)
            assert (
                not e
            ), f"Unable to click on {el.tag_name} {el.get_attribute('class')})"
def test_dppt000_datepicker_single_default(dash_dcc):
    """Test DatePickerSingle with default (no portal) configuration.
    Verifies that the calendar opens without portal and all elements are clickable.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePickerSingle Default"),
            dcc.DatePickerSingle(
                id="dps-default",
                date=date(2024, 1, 15),
                stay_open_on_select=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    dash_dcc.wait_for_element("#dps-default")
    # Open the calendar and click every interactive element inside it.
    click_everything_in_datepicker("#dps-default", dash_dcc)
    # Close with Escape and confirm the calendar is removed from the DOM.
    dps_input = dash_dcc.find_element("#dps-default")
    dps_input.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # No browser console errors expected.
    assert dash_dcc.get_logs() == []
def test_dppt001_datepicker_single_with_portal(dash_dcc):
    """Test DatePickerSingle with with_portal=True.
    Verifies that the calendar opens in a portal (document.body) and all
    elements are clickable.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePickerSingle with Portal"),
            dcc.DatePickerSingle(
                id="dps-portal",
                date=date(2024, 1, 15),
                stay_open_on_select=True,
                with_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dps-portal")
    # Test DatePickerSingle with portal - click everything to verify all elements are accessible
    click_everything_in_datepicker("#dps-portal", dash_dcc)
    # Close the calendar by pressing escape
    dps_input = dash_dcc.find_element("#dps-portal")
    dps_input.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # No browser console errors expected.
    assert dash_dcc.get_logs() == []
def test_dppt006_fullscreen_portal_close_button_keyboard(dash_dcc):
    """Test fullscreen portal dismiss behavior and keyboard accessibility.
    Verifies clicking background doesn't close the portal and close button
    is keyboard-accessible.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.DatePickerSingle(
                id="dps-fullscreen",
                date=date(2024, 1, 15),
                with_full_screen_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    dash_dcc.wait_for_element("#dps-fullscreen")
    # Open the fullscreen portal calendar.
    dps = dash_dcc.find_element("#dps-fullscreen")
    dps.click()
    popover = dash_dcc.find_element(".dash-datepicker-content")
    assert popover.is_displayed()
    # Click at a 10,10 offset from the popover — presumably hits the overlay
    # background rather than the calendar body (confirm against layout).
    action = ActionChains(dash_dcc.driver)
    action.move_to_element_with_offset(popover, 10, 10).click().perform()
    sleep(0.2)
    # Re-find the popover: it must still be open after the background click.
    popover = dash_dcc.find_element(".dash-datepicker-content")
    assert (
        popover.is_displayed()
    ), "Fullscreen portal should not close when clicking background"
    # Ensure the close button exists, then activate it via keyboard.
    # NOTE(review): assumes a single TAB moves focus onto the close button —
    # confirm against the portal's tab order.
    dash_dcc.find_element(".dash-datepicker-close-button")
    action.send_keys(Keys.TAB).perform()
    sleep(0.1)
    action.send_keys(Keys.ENTER).perform()
    sleep(0.2)
    # Activating the close button should dismiss the portal.
    dash_dcc.wait_for_no_elements(".dash-datepicker-content", timeout=2)
    assert dash_dcc.get_logs() == []
def test_dppt007_portal_close_by_clicking_outside(dash_dcc):
    """Test regular portal closes when clicking outside the calendar."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.DatePickerSingle(
                id="dps-portal",
                date=date(2024, 1, 15),
                with_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    dash_dcc.wait_for_element("#dps-portal")
    # Open the portal calendar.
    dps = dash_dcc.find_element("#dps-portal")
    dps.click()
    popover = dash_dcc.find_element(".dash-datepicker-content")
    assert popover.is_displayed()
    # Clicking the popover element itself — presumably this lands on the
    # overlay background outside the calendar body (confirm against layout).
    popover.click()
    sleep(0.2)
    # A non-fullscreen portal should dismiss on an outside click.
    dash_dcc.wait_for_no_elements(".dash-datepicker-content", timeout=2)
    assert dash_dcc.get_logs() == []
def test_dppt001a_datepicker_range_default(dash_dcc):
    """Test DatePickerRange with default (no portal) configuration.
    Verifies that the calendar opens without portal and all elements are clickable.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePickerRange Default"),
            dcc.DatePickerRange(
                id="dpr-default",
                start_date=date(2024, 1, 1),
                end_date=date(2024, 1, 15),
                stay_open_on_select=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    dash_dcc.wait_for_element("#dpr-default")
    # Open the calendar and click every interactive element inside it.
    click_everything_in_datepicker("#dpr-default", dash_dcc)
    # Close with Escape and confirm the calendar is removed from the DOM.
    dpr_input = dash_dcc.find_element("#dpr-default")
    dpr_input.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # No browser console errors expected.
    assert dash_dcc.get_logs() == []
def test_dppt002_datepicker_range_with_portal(dash_dcc):
    """Test DatePickerRange with with_portal=True.
    Verifies that the calendar opens in a portal (document.body) and all
    elements are clickable.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePickerRange with Portal"),
            dcc.DatePickerRange(
                id="dpr-portal",
                start_date=date(2024, 1, 1),
                end_date=date(2024, 1, 15),
                stay_open_on_select=True,
                with_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dpr-portal")
    # Test DatePickerRange with portal - click everything to verify all elements are accessible
    click_everything_in_datepicker("#dpr-portal", dash_dcc)
    # Close the calendar by pressing escape
    dpr_input = dash_dcc.find_element("#dpr-portal")
    dpr_input.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # Consistency fix: every sibling portal test ends by asserting a clean
    # browser console; this one was missing the check.
    assert dash_dcc.get_logs() == []
def test_dppt003_datepicker_single_with_fullscreen_portal(dash_dcc):
    """Test DatePickerSingle with with_full_screen_portal=True.
    Verifies that the calendar opens in a full-screen portal overlay and all
    elements are clickable. Also verifies that the fullscreen CSS class is applied.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePickerSingle with Full Screen Portal"),
            dcc.DatePickerSingle(
                id="dps-fullscreen",
                date=date(2024, 1, 15),
                stay_open_on_select=True,
                with_full_screen_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dps-fullscreen")
    # Click to open the calendar
    dps = dash_dcc.find_element("#dps-fullscreen")
    dps.click()
    # Wait for calendar to open
    popover = dash_dcc.find_element(".dash-datepicker-content")
    # Verify fullscreen class is applied
    assert "dash-datepicker-fullscreen" in popover.get_attribute(
        "class"
    ), "Full screen portal should have dash-datepicker-fullscreen class"
    # Verify the popover has fixed positioning (full screen overlay)
    position = popover.value_of_css_property("position")
    assert position == "fixed", "Full screen portal should use fixed positioning"
    # Close to prepare for click everything test
    dps.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # Test clicking everything to verify all elements are accessible
    click_everything_in_datepicker("#dps-fullscreen", dash_dcc)
    # Consistency fix: sibling portal tests end by asserting a clean browser
    # console; this one was missing the check.
    assert dash_dcc.get_logs() == []
@pytest.mark.flaky(max_runs=3)
def test_dppt004_datepicker_range_with_fullscreen_portal(dash_dcc):
    """Test DatePickerRange with with_full_screen_portal=True.
    Verifies that the calendar opens in a full-screen portal overlay and all
    elements are clickable. Also verifies that the fullscreen CSS class is applied.
    Note: Marked as flaky due to headless Chrome layout issues with wide calendars
    (2 months shown by default in DatePickerRange). Test passes consistently in
    non-headless mode.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePickerRange with Full Screen Portal"),
            dcc.DatePickerRange(
                id="dpr-fullscreen",
                start_date=date(2024, 1, 1),
                end_date=date(2024, 1, 15),
                stay_open_on_select=True,
                with_full_screen_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dpr-fullscreen")
    # Click to open the calendar
    dpr = dash_dcc.find_element("#dpr-fullscreen")
    dpr.click()
    # Wait for calendar to open
    popover = dash_dcc.find_element(".dash-datepicker-content")
    # Verify fullscreen class is applied
    assert "dash-datepicker-fullscreen" in popover.get_attribute(
        "class"
    ), "Full screen portal should have dash-datepicker-fullscreen class"
    # Verify the popover has fixed positioning (full screen overlay)
    position = popover.value_of_css_property("position")
    assert position == "fixed", "Full screen portal should use fixed positioning"
    # Close to prepare for click everything test
    dpr.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # Test clicking everything to verify all elements are accessible
    click_everything_in_datepicker("#dpr-fullscreen", dash_dcc)
def test_dppt005_portal_has_correct_classes(dash_dcc):
    """Test that portal datepickers have the correct CSS classes.
    Verifies that default datepickers don't have portal classes, while
    with_portal=True datepickers have the portal class but not fullscreen class.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("Default (no portal)"),
            dcc.DatePickerSingle(
                id="dps-default",
                date=date(2024, 1, 15),
            ),
            html.H3("With portal", style={"marginTop": "50px"}),
            dcc.DatePickerSingle(
                id="dps-with-portal",
                date=date(2024, 1, 15),
                with_portal=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dps-default")
    dash_dcc.wait_for_element("#dps-with-portal")
    # Open default datepicker
    dps_default = dash_dcc.find_element("#dps-default")
    dps_default.click()
    # Wait for calendar to open
    popover_default = dash_dcc.find_element(".dash-datepicker-content")
    # Verify it doesn't have fullscreen class
    assert "dash-datepicker-fullscreen" not in popover_default.get_attribute(
        "class"
    ), "Default datepicker should not have fullscreen class"
    # Close default before opening the second picker so only one popover exists.
    dps_default.send_keys(Keys.ESCAPE)
    dash_dcc.wait_for_no_elements(".dash-datepicker-content", timeout=2)
    # Open portal datepicker
    dps_portal = dash_dcc.find_element("#dps-with-portal")
    dps_portal.click()
    # Wait for calendar to open
    popover_portal = dash_dcc.find_element(".dash-datepicker-content")
    # Verify it has portal class but not fullscreen class
    assert "dash-datepicker-portal" in popover_portal.get_attribute(
        "class"
    ), "Portal should have dash-datepicker-portal class"
    assert "dash-datepicker-fullscreen" not in popover_portal.get_attribute(
        "class"
    ), "Portal (non-fullscreen) should not have fullscreen class"
    # Verify it uses fixed positioning (both portal types use fixed positioning)
    position = popover_portal.value_of_css_property("position")
    assert position == "fixed", "Portal should use fixed positioning"
    # No browser console errors expected.
    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/calendar/test_portal.py",
"license": "MIT License",
"lines": 326,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/dropdown/test_a11y.py | import pytest
from dash import Dash, Input, Output
from dash.dcc import Dropdown
from dash.html import Div, Label, P
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
def test_a11y001_label_focuses_dropdown(dash_duo):
    """Clicking a wrapping <label> should open the dropdown it contains."""
    app = Dash(__name__)
    dropdown = Dropdown(
        id="dropdown",
        options=[1, 2, 3],
        multi=True,
        placeholder="Testing label that wraps a dropdown can trigger the dropdown",
    )
    app.layout = Label([P("Click me", id="label"), dropdown])
    dash_duo.start_server(app)
    dash_duo.wait_for_element("#dropdown")

    # The options list must not be open before the label is clicked.
    with pytest.raises(TimeoutException):
        dash_duo.wait_for_element(".dash-dropdown-options", timeout=0.25)

    # Clicking the label text should focus and open the dropdown.
    dash_duo.find_element("#label").click()
    dash_duo.wait_for_element(".dash-dropdown-options")
    assert dash_duo.get_logs() == []
def test_a11y002_label_with_htmlFor_can_focus_dropdown(dash_duo):
    """A <label htmlFor=...> pointing at the dropdown should open it on click."""
    app = Dash(__name__)
    dropdown = Dropdown(
        id="dropdown",
        options=[1, 2, 3],
        multi=True,
        placeholder="Testing label with `htmlFor` triggers the dropdown",
    )
    app.layout = Div([Label("Click me", htmlFor="dropdown", id="label"), dropdown])
    dash_duo.start_server(app)
    dash_duo.wait_for_element("#dropdown")

    # The options list must stay closed until the label is clicked.
    with pytest.raises(TimeoutException):
        dash_duo.wait_for_element(".dash-dropdown-options", timeout=0.25)

    dash_duo.find_element("#label").click()
    dash_duo.wait_for_element(".dash-dropdown-options")
    assert dash_duo.get_logs() == []
def test_a11y003_keyboard_navigation(dash_duo):
    """Exercise keyboard navigation of a multi-select dropdown: open/close
    with Enter/Escape/ArrowDown, search filtering, arrow navigation with
    wrap-around, and select/deselect with the spacebar."""

    def send_keys(key):
        actions = ActionChains(dash_duo.driver)
        actions.send_keys(key)
        actions.perform()

    app = Dash(__name__)
    app.layout = Div(
        [
            Dropdown(
                id="dropdown",
                # Fixed: was a redundant identity comprehension (ruff C416).
                options=list(range(100)),
                multi=True,
                placeholder="Testing keyboard navigation",
            ),
        ],
    )
    dash_duo.start_server(app)
    dropdown = dash_duo.find_element("#dropdown")
    dropdown.send_keys(Keys.ENTER)  # Open with Enter key
    dash_duo.wait_for_element(".dash-dropdown-options")
    send_keys(
        Keys.ESCAPE
    )  # Expecting focus to remain on the dropdown after escaping out
    with pytest.raises(TimeoutException):
        dash_duo.wait_for_element(".dash-dropdown-options", timeout=0.25)
    send_keys(Keys.ARROW_DOWN)  # Expecting the dropdown to open up
    dash_duo.wait_for_element(".dash-dropdown-search")
    num_elements = len(dash_duo.find_elements(".dash-dropdown-option"))
    assert num_elements == 100
    send_keys(1)  # Expecting to be typing into the search bar
    sleep(0.1)  # Wait for search filtering to complete
    # "1" matches 1, 10-19, and 21,31,...,91 -> 19 options.
    num_elements = len(dash_duo.find_elements(".dash-dropdown-option"))
    assert num_elements == 19
    send_keys(Keys.ARROW_DOWN)  # Expecting to be navigating through the options
    send_keys(Keys.SPACE)  # Expecting to be selecting
    value_items = dash_duo.find_elements(".dash-dropdown-value-item")
    assert len(value_items) == 1
    assert value_items[0].text == "1"
    send_keys(Keys.ARROW_DOWN)  # Expecting to be navigating through the options
    send_keys(Keys.SPACE)  # Expecting to be selecting
    value_items = dash_duo.find_elements(".dash-dropdown-value-item")
    assert len(value_items) == 2
    assert [item.text for item in value_items] == ["1", "10"]
    send_keys(Keys.SPACE)  # Expecting to be de-selecting
    value_items = dash_duo.find_elements(".dash-dropdown-value-item")
    assert len(value_items) == 1
    assert value_items[0].text == "1"
    send_keys(Keys.ARROW_UP)
    send_keys(Keys.ARROW_UP)
    send_keys(Keys.ARROW_UP)  # Expecting to wrap over to the last item
    send_keys(Keys.SPACE)
    value_items = dash_duo.find_elements(".dash-dropdown-value-item")
    assert len(value_items) == 2
    assert [item.text for item in value_items] == ["1", "91"]
    send_keys(
        Keys.ESCAPE
    )  # Expecting focus to remain on the dropdown after escaping out
    sleep(0.25)
    # Escaping must not clear the current selection.
    value_items = dash_duo.find_elements(".dash-dropdown-value-item")
    assert len(value_items) == 2
    assert [item.text for item in value_items] == ["1", "91"]
    assert dash_duo.get_logs() == []
def test_a11y004_selection_visibility_single(dash_duo):
    """Opening a single-select dropdown should show the selected option on screen."""
    app = Dash(__name__)
    # NOTE(review): the trailing comma makes app.layout a 1-tuple wrapping
    # the Dropdown — apparently accepted by Dash; confirm this is intentional.
    app.layout = (
        Dropdown(
            id="dropdown",
            options=[f"Option {i}" for i in range(0, 100)],
            value="Option 71",
            multi=False,
            placeholder="Testing selected item is visible on open",
        ),
    )
    dash_duo.start_server(app)
    dash_duo.wait_for_element("#dropdown")
    dash_duo.find_element("#dropdown").click()
    dash_duo.wait_for_element(".dash-dropdown-options")
    # Assert that the selected option is visible in the dropdown
    selected_option = dash_duo.find_element(".dash-dropdown-option.selected")
    assert selected_option.text == "Option 71"
    assert selected_option.is_displayed()
    # elements_are_visible checks the option lies inside the scroll viewport.
    assert elements_are_visible(
        dash_duo, selected_option
    ), "Selected option should be visible when the dropdown opens"
    assert dash_duo.get_logs() == []
def test_a11y005_selection_visibility_multi(dash_duo):
    """Opening a multi-select dropdown should show all selected options on screen."""
    app = Dash(__name__)
    # NOTE(review): the trailing comma makes app.layout a 1-tuple wrapping
    # the Dropdown — apparently accepted by Dash; confirm this is intentional.
    app.layout = (
        Dropdown(
            id="dropdown",
            options=[f"Option {i}" for i in range(0, 100)],
            value=[
                "Option 71",
                "Option 23",
                "Option 42",
            ],
            multi=True,
            placeholder="Testing selected item is visible on open",
        ),
    )
    dash_duo.start_server(app)
    dash_duo.wait_for_element("#dropdown")
    dash_duo.find_element("#dropdown").click()
    dash_duo.wait_for_element(".dash-dropdown-options")
    # Assert that the selected option is visible in the dropdown
    selected_options = dash_duo.find_elements(".dash-dropdown-option.selected")
    # elements_are_visible checks each option lies inside the scroll viewport.
    assert elements_are_visible(
        dash_duo, selected_options
    ), "Selected options should be visible when the dropdown opens"
    assert dash_duo.get_logs() == []
def test_a11y006_multi_select_keyboard_focus_retention(dash_duo):
    """After selecting an option with Space, keyboard focus must stay in the
    options list so a following ArrowDown/Space continues the selection."""

    def send_keys(key):
        # Send a key to the currently focused element via an action chain.
        actions = ActionChains(dash_duo.driver)
        actions.send_keys(key)
        actions.perform()

    app = Dash(__name__)
    app.layout = Div(
        [
            Dropdown(
                id="dropdown",
                options=[f"Option {i}" for i in range(0, 10)],
                value=[],
                multi=True,
                searchable=True,
            ),
            Div(id="output"),
        ]
    )

    @app.callback(
        Output("output", "children"),
        Input("dropdown", "value"),
    )
    def update_output(value):
        return f"Selected: {value}"

    dash_duo.start_server(app)
    dropdown = dash_duo.find_element("#dropdown")
    dropdown.click()
    dash_duo.wait_for_element(".dash-dropdown-options")
    # Select 3 items by alternating ArrowDown and Spacebar
    send_keys(Keys.ARROW_DOWN)  # Move to first option
    sleep(0.05)
    send_keys(Keys.SPACE)  # Select Option 0
    dash_duo.wait_for_text_to_equal("#output", "Selected: ['Option 0']")
    send_keys(Keys.ARROW_DOWN)  # Move to second option
    sleep(0.05)
    send_keys(Keys.SPACE)  # Select Option 1
    dash_duo.wait_for_text_to_equal("#output", "Selected: ['Option 0', 'Option 1']")
    send_keys(Keys.ARROW_DOWN)  # Move to third option
    sleep(0.05)
    send_keys(Keys.SPACE)  # Select Option 2
    dash_duo.wait_for_text_to_equal(
        "#output", "Selected: ['Option 0', 'Option 1', 'Option 2']"
    )
    assert dash_duo.get_logs() == []
def test_a11y007_opens_and_closes_without_races(dash_duo):
    """Repeatedly open/close the dropdown via keyboard and via mouse,
    verifying focus lands inside the dropdown on open and the selected
    value is never cleared by the open/close cycle."""

    def send_keys(key):
        actions = ActionChains(dash_duo.driver)
        actions.send_keys(key)
        actions.perform()

    app = Dash(__name__)
    app.layout = Div(
        [
            Dropdown(
                id="dropdown",
                options=[f"Option {i}" for i in range(0, 10)],
                value="Option 5",
                multi=False,
            ),
            Div(id="output"),
        ]
    )

    def assert_focus_in_dropdown():
        # Verify focus is inside the dropdown
        assert dash_duo.driver.execute_script(
            """
            const activeElement = document.activeElement;
            const dropdownContent = document.querySelector('.dash-dropdown-content');
            return dropdownContent && dropdownContent.contains(activeElement);
            """
        ), "Focus must be inside the dropdown when it opens"

    @app.callback(
        Output("output", "children"),
        Input("dropdown", "value"),
    )
    def update_output(value):
        return f"Selected: {value}"

    dash_duo.start_server(app)
    # Verify initial value is set
    dash_duo.wait_for_text_to_equal("#output", "Selected: Option 5")
    dropdown = dash_duo.find_element("#dropdown")
    # Test repeated open/close to confirm no race conditions or side effects.
    # Fixed: loop variables renamed to `_` (the index was unused).
    for _ in range(3):
        # Open with Enter
        dropdown.send_keys(Keys.ENTER)
        dash_duo.wait_for_element(".dash-dropdown-options")
        assert_focus_in_dropdown()
        # Verify the value is still "Option 5" (not cleared)
        dash_duo.wait_for_text_to_equal("#output", "Selected: Option 5")
        # Close with Escape
        send_keys(Keys.ESCAPE)
        sleep(0.1)
        # Verify the value is still "Option 5"
        dash_duo.wait_for_text_to_equal("#output", "Selected: Option 5")
    for _ in range(3):
        # Open with mouse
        dropdown.click()
        dash_duo.wait_for_element(".dash-dropdown-options")
        assert_focus_in_dropdown()
        # Verify the value is still "Option 5" (not cleared)
        dash_duo.wait_for_text_to_equal("#output", "Selected: Option 5")
        # Close by clicking the dropdown again (toggles it shut) —
        # the previous comment incorrectly said "Close with Escape".
        dropdown.click()
        sleep(0.1)
        # Verify the value is still "Option 5"
        dash_duo.wait_for_text_to_equal("#output", "Selected: Option 5")
    assert dash_duo.get_logs() == []
def test_a11y008_home_end_pageup_pagedown_navigation(dash_duo):
    """Verify Home/End/PageUp/PageDown navigation inside the options list.
    Home returns focus to the search input, End jumps to the last option,
    and PageUp/PageDown move focus by 10 entries at a time."""

    def send_keys(key):
        # Send a key to the currently focused element via an action chain.
        actions = ActionChains(dash_duo.driver)
        actions.send_keys(key)
        actions.perform()

    def get_focused_option_text():
        # Returns the trimmed text of the focused option row, or None when
        # focus is not inside an option.
        # NOTE(review): this queries .dash-options-list-option while the rest
        # of the file uses .dash-dropdown-option — confirm both classes exist.
        return dash_duo.driver.execute_script(
            """
            const focused = document.activeElement;
            if (focused && focused.closest('.dash-options-list-option')) {
                return focused.closest('.dash-options-list-option').textContent.trim();
            }
            return null;
            """
        )

    app = Dash(__name__)
    app.layout = Div(
        [
            Dropdown(
                id="dropdown",
                options=[f"Option {i}" for i in range(0, 50)],
                multi=True,
            ),
        ]
    )
    dash_duo.start_server(app)
    dropdown = dash_duo.find_element("#dropdown")
    dropdown.send_keys(Keys.ENTER)  # Open with Enter key
    dash_duo.wait_for_element(".dash-dropdown-options")
    # Navigate from search input to options
    send_keys(Keys.ARROW_DOWN)  # Move from search to first option
    sleep(0.05)
    send_keys(Keys.ARROW_DOWN)  # Move to second option
    sleep(0.05)
    send_keys(Keys.ARROW_DOWN)  # Move to third option
    sleep(0.05)
    send_keys(Keys.ARROW_DOWN)  # Move to fourth option
    sleep(0.05)
    assert get_focused_option_text() == "Option 3"
    send_keys(Keys.HOME)  # Should go back to search input (index 0)
    # Verify we're back at search input
    assert dash_duo.driver.execute_script(
        "return document.activeElement.type === 'search';"
    )
    # Now arrow down to first option
    send_keys(Keys.ARROW_DOWN)
    assert get_focused_option_text() == "Option 0"
    # Test End key - should go to last option
    send_keys(Keys.END)
    assert get_focused_option_text() == "Option 49"
    # Test PageUp - should jump up by 10
    send_keys(Keys.PAGE_UP)
    assert get_focused_option_text() == "Option 39"
    # Test PageDown - should jump down by 10
    send_keys(Keys.PAGE_DOWN)
    assert get_focused_option_text() == "Option 49"
    # Test PageUp from middle
    send_keys(Keys.HOME)  # Back to search input (index 0)
    send_keys(Keys.PAGE_DOWN)  # Jump to index 10 (Option 9)
    send_keys(Keys.PAGE_DOWN)  # Jump to index 20 (Option 19)
    assert get_focused_option_text() == "Option 19"
    send_keys(Keys.PAGE_UP)  # Jump to index 10 (Option 9)
    assert get_focused_option_text() == "Option 9"
    assert dash_duo.get_logs() == []
def elements_are_visible(dash_duo, elements):
    """Return True if every given option element lies fully inside the
    visible viewport of the dropdown's scroll container.

    Args:
        dash_duo: the dash_duo browser fixture.
        elements: a single WebElement or a list of WebElements.
    """
    # Accept a single element for caller convenience.
    if not isinstance(elements, list):
        elements = [elements]
    dropdown_content = dash_duo.find_element(".dash-dropdown-content")

    def is_visible(el):
        # Compare the option's bounding box to the container's: the option
        # must sit entirely between the container's top and bottom edges.
        return dash_duo.driver.execute_script(
            """
            const option = arguments[0];
            const container = arguments[1];
            const optionRect = option.getBoundingClientRect();
            const containerRect = container.getBoundingClientRect();
            return optionRect.top >= containerRect.top &&
                optionRect.bottom <= containerRect.bottom;
            """,
            el,
            dropdown_content,
        )

    # Fixed: generator instead of a throwaway list inside all() (ruff C419);
    # also short-circuits on the first hidden element.
    return all(is_visible(el) for el in elements)
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/dropdown/test_a11y.py",
"license": "MIT License",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/dropdown/test_localization.py | from time import sleep
from dash import Dash
from dash.dcc import Dropdown
from dash.html import Div
def test_ddlo001_translations(dash_duo):
    """All dropdown UI strings can be replaced via the `labels` prop
    (full French override set)."""
    app = Dash(__name__)
    app.layout = Div(
        [
            Dropdown(
                id="dropdown",
                options=[1, 2, 3],
                multi=True,
                labels={
                    "select_all": "Sélectionner tout",
                    "deselect_all": "Désélectionner tout",
                    "selected_count": "{num_selected} sélections",
                    "search": "Rechercher",
                    "clear_search": "Annuler",
                    "clear_selection": "Effacer les sélections",
                    "no_options_found": "Aucun d'options",
                },
            ),
        ]
    )
    dash_duo.start_server(app)
    dash_duo.find_element("#dropdown").click()
    # Action buttons show the translated labels.
    dash_duo.wait_for_contains_text(
        ".dash-dropdown-action-button:first-child", "Sélectionner tout"
    )
    dash_duo.wait_for_contains_text(
        ".dash-dropdown-action-button:last-child", "Désélectionner tout"
    )
    # The search input's accessible name is translated.
    assert (
        dash_duo.find_element(".dash-dropdown-search").accessible_name == "Rechercher"
    )
    # Typing into the search reveals the translated "clear search" control.
    dash_duo.find_element(".dash-dropdown-search").send_keys(1)
    sleep(0.1)
    assert dash_duo.find_element(".dash-dropdown-clear").accessible_name == "Annuler"
    dash_duo.find_element(".dash-dropdown-action-button:first-child").click()
    # Searching for a value with no match surfaces the "no options" message.
    dash_duo.find_element(".dash-dropdown-search").send_keys(9)
    sleep(0.1)
    assert dash_duo.find_element(".dash-dropdown-option").text == "Aucun d'options"
    # The trigger's clear-selection control uses the translated name.
    assert (
        dash_duo.find_element(
            ".dash-dropdown-trigger .dash-dropdown-clear"
        ).accessible_name
        == "Effacer les sélections"
    )
    assert dash_duo.get_logs() == []
def test_ddlo002_partial_translations(dash_duo):
    """Overriding only one label leaves every other UI string at its English
    default — partial `labels` dicts must not blank out the rest."""
    app = Dash(__name__)
    app.layout = Div(
        [
            Dropdown(
                id="dropdown",
                options=[1, 2, 3],
                multi=True,
                # Only the search placeholder is overridden.
                labels={
                    "search": "Lookup",
                },
            ),
        ]
    )
    dash_duo.start_server(app)
    dash_duo.find_element("#dropdown").click()
    # Untranslated strings fall back to the English defaults.
    dash_duo.wait_for_contains_text(
        ".dash-dropdown-action-button:first-child", "Select All"
    )
    dash_duo.wait_for_contains_text(
        ".dash-dropdown-action-button:last-child", "Deselect All"
    )
    assert dash_duo.find_element(".dash-dropdown-search").accessible_name == "Lookup"
    dash_duo.find_element(".dash-dropdown-search").send_keys(1)
    sleep(0.1)  # brief pause so the filtered option list updates — presumably debounced; confirm
    assert (
        dash_duo.find_element(".dash-dropdown-clear").accessible_name == "Clear search"
    )
    dash_duo.find_element(".dash-dropdown-action-button:first-child").click()
    # "9" matches no option, exercising the default empty-results message.
    dash_duo.find_element(".dash-dropdown-search").send_keys(9)
    sleep(0.1)
    assert dash_duo.find_element(".dash-dropdown-option").text == "No options found"
    assert (
        dash_duo.find_element(
            ".dash-dropdown-trigger .dash-dropdown-clear"
        ).accessible_name
        == "Clear selection"
    )
    assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/dropdown/test_localization.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/input/test_a11y_input.py | import pytest
from dash import Dash
from dash.dcc import Input
from dash.html import Div, Label, P
# Input types exercised by the parametrized accessibility tests below.
input_types = [
    "text",
    "number",
]
@pytest.mark.parametrize("input_type", input_types)
def test_a11y001_label_focuses_input(dash_duo, input_type):
    """Clicking text inside a wrapping <label> moves focus to the enclosed input."""
    app = Dash(__name__)
    app.layout = Label(
        [
            P("Click me", id="label"),
            Input(
                type=input_type,
                id="input",
                placeholder="Testing label that wraps a input can trigger the input",
            ),
        ],
    )
    dash_duo.start_server(app)
    dash_duo.wait_for_element("#input")
    # Clicking the label text should transfer focus to the input.
    dash_duo.find_element("#label").click()
    assert input_has_focus(dash_duo, "#input"), "Input element is not focused"
    # No browser console errors expected.
    assert dash_duo.get_logs() == []
@pytest.mark.parametrize("input_type", input_types)
def test_a11y002_label_with_htmlFor_can_focus_input(dash_duo, input_type):
    """A sibling <label htmlFor="..."> also focuses the referenced input on click."""
    app = Dash(__name__)
    app.layout = Div(
        [
            # `htmlFor` links the label to the input by id, no wrapping needed.
            Label("Click me", htmlFor="input", id="label"),
            Input(
                type=input_type,
                id="input",
                placeholder="Testing label with `htmlFor` triggers the dropdown",
            ),
        ],
    )
    dash_duo.start_server(app)
    dash_duo.wait_for_element("#input")
    dash_duo.find_element("#label").click()
    assert input_has_focus(dash_duo, "#input"), "Input element is not focused"
    # No browser console errors expected.
    assert dash_duo.get_logs() == []
def input_has_focus(dash_duo, id):
    """Return True when the element selected by *id* (or one of its
    descendants) currently holds keyboard focus in the browser."""
    target = dash_duo.find_element(id)
    script = """
        const container = arguments[0];
        const activeElement = document.activeElement;
        return container === activeElement || container.contains(activeElement);
        """
    return dash_duo.driver.execute_script(script, target)
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/input/test_a11y_input.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/misc/test_popover_visibility.py | from datetime import date
from dash import Dash, html, dcc
from selenium.webdriver.common.by import By
from time import sleep
def click_everything_in_datepicker(datepicker_id, dash_dcc):
    """Click on every clickable element in a datepicker calendar.
    Args:
        datepicker_id: CSS selector for the datepicker element (e.g., "#dpr")
        dash_dcc: The dash_dcc fixture
    Raises:
        AssertionError: if any interactive element in the popover cannot be
            clicked (e.g. because it is clipped or covered).
    """
    # Click on the datepicker to open calendar
    datepicker = dash_dcc.find_element(datepicker_id)
    datepicker.click()
    # Wait for calendar to open
    popover = dash_dcc.find_element(".dash-datepicker-content")
    interactive_elements = []
    # Day cells; padding cells are layout fillers and are excluded.
    interactive_elements.extend(
        popover.find_elements(
            By.CSS_SELECTOR, "td:not(.dash-datepicker-calendar-padding)"
        )
    )
    interactive_elements.extend(popover.find_elements(By.CSS_SELECTOR, "button"))
    interactive_elements.extend(popover.find_elements(By.CSS_SELECTOR, "input"))
    for el in interactive_elements:
        try:
            el.click()
            sleep(0.05)  # small pause so the UI can react between clicks
        except Exception as e:
            print(e)
            # An Exception instance is always truthy, so this fails with the
            # message below. (Fixed: the message had a stray trailing ")".)
            assert not e, f"Unable to click on {el.tag_name}"
def test_mspv001_popover_visibility_when_app_is_smaller_than_popup(dash_dcc):
    """
    This test clicks on each datepicker and verifies all calendar elements are clickable.
    It verifies that the calendar popover is properly positioned and not clipped.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("Popover Visibility when app is only a few pixels tall"),
            dcc.DatePickerSingle(
                id="dps",
                date=date(2024, 1, 15),
                stay_open_on_select=True,
            ),
            dcc.DatePickerRange(
                id="dpr",
                start_date=date(2024, 1, 1),
                end_date=date(2024, 1, 15),
                stay_open_on_select=True,
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False)
    # Fixed window size for a deterministic layout.
    dash_dcc.driver.set_window_size(1280, 1024)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dps")
    dash_dcc.wait_for_element("#dpr")
    # Test DatePickerSingle - click everything to verify all elements are accessible
    click_everything_in_datepicker("#dps", dash_dcc)
    # Close the calendar by pressing escape
    from selenium.webdriver.common.keys import Keys
    dps_input = dash_dcc.find_element("#dps")
    dps_input.send_keys(Keys.ESCAPE)
    # The single-picker popover must be gone before opening the range picker.
    dash_dcc.wait_for_no_elements(".dash-datepicker-calendar-container", timeout=2)
    # Test DatePickerRange - click everything to verify all elements are accessible
    click_everything_in_datepicker("#dpr", dash_dcc)
def test_mspv002_popover_visibility_when_app_is_scrolled_down(dash_dcc):
    """
    This test clicks on each datepicker scrolled far down the page and verifies
    that the popover contents are still visible
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            # NOTE(review): heading text looks copy-pasted from mspv001 —
            # presumably should mention scrolling; confirm (display-only string).
            html.H3("Popover Visibility when app is only a few pixels tall"),
            # Tall spacers above and below push the picker far down the page.
            html.P("", style={"height": "2000px"}),
            dcc.DatePickerSingle(
                id="dps",
                date=date(2024, 1, 1),
                stay_open_on_select=True,
            ),
            html.P("", style={"height": "2000px"}),
        ],
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dps")
    click_everything_in_datepicker("#dps", dash_dcc)
def test_mspv003_popover_contained_within_dash_app(dash_dcc):
    """Test that datepicker popovers are visible and clickable when multiple pickers are present.
    This test clicks on each datepicker and selects the first day of the month that appears.
    It verifies that the calendar popover is properly positioned and not clipped.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H1(
                "Test popover is visible inside an embedded app",
                style={"width": "200px"},
            ),
            # Simulates an embedded Dash app: overflow hidden would clip a
            # popover that is not positioned correctly.
            html.Div(
                [
                    html.H3("DatePicker Popover Visibility Test"),
                    dcc.DatePickerSingle(id="dps", date=date(2024, 1, 15)),
                    dcc.DatePickerRange(
                        id="dpr",
                        start_date=date(2024, 1, 1),
                        end_date=date(2024, 1, 15),
                        stay_open_on_select=True,
                    ),
                ],
                id="react-entry-point",
                style={"overflow": "hidden", "display": "inline-flex"},
            ),
            html.Div("This column is outside of embedded app"),
        ],
        style={
            "display": "inline-flex",
            "minHeight": "600px",
        },
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dpr")
    # Click everything in the datepicker to verify all elements are accessible
    click_everything_in_datepicker("#dpr", dash_dcc)
def test_mspv004_popover_inherits_container_styles(dash_dcc):
    """Test that calendar days inherit font color and size from container.
    This test verifies that when a datepicker is placed inside a container with
    specific font styles (color and size), the calendar days inherit those styles.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.H3("DatePicker Style Inheritance Test"),
            html.Div(
                [
                    dcc.DatePickerSingle(id="dps", date=date(2024, 1, 15)),
                ],
                style={"color": "limegreen", "fontSize": "24px"},
            ),
        ]
    )
    dash_dcc.start_server(app, debug=True, use_reloader=False, dev_tools_ui=False)
    # Wait for the page to load
    dash_dcc.wait_for_element("#dps")
    # Click to open the calendar
    dps_input = dash_dcc.find_element("#dps")
    dps_input.click()
    # Wait for calendar to open
    dash_dcc.wait_for_element(".dash-datepicker-calendar-container")
    # Find a calendar day element (inside date, not outside days)
    calendar_day = dash_dcc.find_element(".dash-datepicker-calendar-date-inside")
    # Get computed styles
    font_size = calendar_day.value_of_css_property("font-size")
    # Font size should be 24px
    # NOTE(review): only font-size is asserted; the inherited "limegreen"
    # color set on the container is not checked.
    assert font_size == "24px", "Expected calendar day to inherit its font size"
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/misc/test_popover_visibility.py",
"license": "MIT License",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/sliders/test_marks_density.py | from dash import Dash, dcc, html
def test_slsl_extreme_range_marks_density(dash_dcc):
"""
Test that extreme ranges don't generate too many overlapping marks.
With min=-1, max=480256671, and container width ~365px, we should have
no more than ~7 marks to prevent overlap (given the long labels).
"""
app = Dash(__name__)
app.layout = html.Div(
style={"width": "365px"},
children=[
dcc.RangeSlider(
id="rangeslider-extreme",
min=-1,
max=480256671,
value=[-1, 480256671],
)
],
)
dash_dcc.start_server(app)
# Wait for component to render
dash_dcc.wait_for_element("#rangeslider-extreme")
# Count the rendered marks
marks = dash_dcc.find_elements(".dash-slider-mark")
mark_count = len(marks)
# Get the actual mark text to verify what's rendered
mark_texts = [mark.text for mark in marks]
# Should have between 2 and 7 marks (min/max plus a few intermediate)
assert 2 <= mark_count <= 7, (
f"Expected 2-7 marks for extreme range, but found {mark_count}. "
f"This suggests overlapping marks. Labels: {mark_texts}"
)
# Verify min and max are included
assert "-1" in mark_texts, "Min value (-1) should be included in marks"
assert any(
"480" in text or "M" in text for text in mark_texts
), "Max value should be included in marks"
assert dash_dcc.get_logs() == []
def test_slsl_extreme_range_no_width(dash_dcc):
    """
    Test that extreme ranges work even before width is measured.
    This simulates the initial render state where sliderWidth is null.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        # No explicit width, so ResizeObserver will measure it
        children=[
            dcc.RangeSlider(
                id="rangeslider-no-width",
                min=-1,
                max=480256671,
                value=[-1, 480256671],
            )
        ],
    )
    dash_dcc.start_server(app)
    # Wait for component to render
    dash_dcc.wait_for_element("#rangeslider-no-width")
    # Count the rendered marks
    marks = dash_dcc.find_elements(".dash-slider-mark")
    mark_count = len(marks)
    # Before the width is measured the component presumably falls back to a
    # default of up to 11 marks — confirm against the component source.
    assert mark_count <= 11, f"Expected default 11 marks, but found {mark_count}"
    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/sliders/test_marks_density.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/sliders/test_sliders_keyboard_input.py | from dash import Dash, Input, Output, dcc, html
from selenium.webdriver.common.keys import Keys
def test_slkb001_input_constrained_by_min_max(dash_dcc):
    """Typed slider values are clamped to [min, max]; both `value` and
    `drag_value` reflect each committed (Tab) edit."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Slider(
                id="slider",
                min=1,
                max=20,
                value=5,
            ),
            html.Div(id="value"),
            html.Div(id="drag_value"),
        ]
    )
    @app.callback(Output("value", "children"), [Input("slider", "value")])
    def update_output(value):
        return f"value is {value}"
    @app.callback(Output("drag_value", "children"), [Input("slider", "drag_value")])
    def update_drag_value(value):
        return f"drag_value is {value}"
    dash_dcc.start_server(app)
    dash_dcc.driver.set_window_size(800, 600)
    dash_dcc.wait_for_text_to_equal("#value", "value is 5")
    inpt = dash_dcc.find_element("#slider .dash-range-slider-max-input")
    inpt.send_keys(Keys.BACKSPACE, 4, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 4")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is 4")
    # cannot enter a value greater than `max`
    inpt.send_keys(Keys.BACKSPACE, 42, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 20")
    # cannot enter a value less than `min`
    inpt.send_keys(Keys.ARROW_LEFT, Keys.ARROW_LEFT, "-", Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 1")
    # typing appends to the existing text — presumably "1" + "5" commits as 15; confirm
    inpt.send_keys(5, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 15")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is 15")
    assert dash_dcc.get_logs() == []
def test_slkb002_range_input_constrained_by_min_max(dash_dcc):
    """RangeSlider's min/max text inputs are each clamped to [min, max];
    committed edits propagate to both `value` and `drag_value`."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.RangeSlider(
                id="slider",
                min=1,
                max=20,
                value=[5, 7],
            ),
            html.Div(id="value"),
            html.Div(id="drag_value"),
        ]
    )
    @app.callback(Output("value", "children"), [Input("slider", "value")])
    def update_output(value):
        return f"value is {value}"
    @app.callback(Output("drag_value", "children"), [Input("slider", "drag_value")])
    def update_drag_value(value):
        return f"drag_value is {value}"
    dash_dcc.start_server(app)
    dash_dcc.driver.set_window_size(800, 600)
    dash_dcc.wait_for_text_to_equal("#value", "value is [5, 7]")
    min_inpt = dash_dcc.find_element("#slider .dash-range-slider-min-input")
    max_inpt = dash_dcc.find_element("#slider .dash-range-slider-max-input")
    max_inpt.send_keys(Keys.BACKSPACE, 8, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [5, 8]")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is [5, 8]")
    min_inpt.send_keys(Keys.BACKSPACE, 4, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [4, 8]")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is [4, 8]")
    # cannot enter a value greater than `max`
    max_inpt.send_keys(Keys.BACKSPACE, 42, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [4, 20]")
    # cannot enter a value less than `min`
    min_inpt.send_keys(Keys.ARROW_LEFT, Keys.ARROW_LEFT, "-", Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [1, 20]")
    # typing appends — presumably the max field shows "0" remnant? the commit
    # yields 5 and the range collapses toward the lower handle; confirm
    max_inpt.send_keys(5, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [1, 5]")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is [1, 5]")
    # min edit to 7 exceeds the current max (5): `value` clamps to [5, 5] while
    # `drag_value` keeps the raw [7, 5] — NOTE(review): intentional asymmetry? confirm
    min_inpt.send_keys(Keys.BACKSPACE, 7, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [5, 5]")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is [7, 5]")
    assert dash_dcc.get_logs() == []
def test_slkb003_input_constrained_by_step(dash_dcc):
    """Typed slider values snap to the `step` grid: entries not on the grid
    either keep the previous value or round to the nearest step."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Slider(
                id="slider",
                min=-20,
                max=20,
                step=5,
                value=5,
            ),
            html.Div(id="value"),
            html.Div(id="drag_value"),
        ]
    )
    @app.callback(Output("value", "children"), [Input("slider", "value")])
    def update_output(value):
        return f"value is {value}"
    @app.callback(Output("drag_value", "children"), [Input("slider", "drag_value")])
    def update_drag_value(value):
        return f"drag_value is {value}"
    dash_dcc.start_server(app)
    dash_dcc.driver.set_window_size(800, 600)
    dash_dcc.wait_for_text_to_equal("#value", "value is 5")
    inpt = dash_dcc.find_element("#slider .dash-range-slider-max-input")
    inpt.send_keys(Keys.BACKSPACE, -15, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is -15")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is -15")
    # appended digit makes an off-grid number; value stays at -15
    inpt.send_keys(Keys.BACKSPACE, 4, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is -15")
    inpt.send_keys(Keys.BACKSPACE, 2, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is -10")
    # clearing the field and typing "2" snaps to the nearest step (0)
    inpt.send_keys(Keys.BACKSPACE, Keys.BACKSPACE, Keys.BACKSPACE, 2, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 0")
    inpt.send_keys(20, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 20")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is 20")
    assert dash_dcc.get_logs() == []
def test_slkb004_range_input_constrained_by_step(dash_dcc):
    """RangeSlider text entries snap to the `step` grid (19 -> 20, -14 -> -15)."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.RangeSlider(
                id="slider",
                min=-20,
                max=20,
                step=5,
                value=[-5, 5],
            ),
            html.Div(id="value"),
            html.Div(id="drag_value"),
        ]
    )
    @app.callback(Output("value", "children"), [Input("slider", "value")])
    def update_output(value):
        return f"value is {value}"
    @app.callback(Output("drag_value", "children"), [Input("slider", "drag_value")])
    def update_drag_value(value):
        return f"drag_value is {value}"
    dash_dcc.start_server(app)
    dash_dcc.driver.set_window_size(800, 600)
    dash_dcc.wait_for_text_to_equal("#value", "value is [-5, 5]")
    min_inpt = dash_dcc.find_element("#slider .dash-range-slider-min-input")
    max_inpt = dash_dcc.find_element("#slider .dash-range-slider-max-input")
    # 19 is off-grid; snaps to 20
    max_inpt.send_keys(Keys.BACKSPACE, 19, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [-5, 20]")
    # -14 is off-grid; snaps to -15
    min_inpt.send_keys(Keys.BACKSPACE, Keys.BACKSPACE, -14, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is [-15, 20]")
    assert dash_dcc.get_logs() == []
def test_slkb005_input_decimals_precision(dash_dcc):
    """Decimal entries are rounded to the precision implied by `step`
    (step=0.01 -> two decimal places)."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Slider(
                id="slider",
                min=-20.5,
                max=20.5,
                step=0.01,
                value=5,
            ),
            html.Div(id="value"),
            html.Div(id="drag_value"),
        ]
    )
    @app.callback(Output("value", "children"), [Input("slider", "value")])
    def update_output(value):
        return f"value is {value}"
    @app.callback(Output("drag_value", "children"), [Input("slider", "drag_value")])
    def update_drag_value(value):
        return f"drag_value is {value}"
    dash_dcc.start_server(app)
    dash_dcc.driver.set_window_size(800, 600)
    dash_dcc.wait_for_text_to_equal("#value", "value is 5")
    inpt = dash_dcc.find_element("#slider .dash-range-slider-max-input")
    # value should respect the slider's `step` prop
    inpt.send_keys(Keys.BACKSPACE, 3.14159, Keys.TAB)
    dash_dcc.wait_for_text_to_equal("#value", "value is 3.14")
    dash_dcc.wait_for_text_to_equal("#drag_value", "drag_value is 3.14")
    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/sliders/test_sliders_keyboard_input.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/sliders/test_sliders_step.py | import pytest
from dash import Dash, Input, Output, dcc, html
from humanfriendly import parse_size
# Slider configurations covering integer, fractional, and very large steps/ranges.
test_cases = [
    {"step": 2, "min": 0, "max": 10, "value": 6},
    {"step": 3, "min": 0, "max": 100, "value": 33},
    {"step": 0.05, "min": 0, "max": 1, "value": 0.5},
    {"step": 1_000_000, "min": 1e9, "max": 1e10, "value": 1e10},
]
def slider_value_divisible_by_step(slider_args, slider_value) -> bool:
    """Return True if *slider_value* lies on the slider's step grid.

    Args:
        slider_args: dict with at least "min", "max" and "step" keys.
        slider_value: a number, or a string whose last whitespace-separated
            token parses as a float (e.g. "value is 6").

    The slider's own min and max are always considered valid, even when they
    are not multiples of the step.
    """
    # Accept display strings like "value is 6" by parsing the trailing token.
    if isinstance(slider_value, str):
        slider_value = float(slider_value.split()[-1])
    # min/max endpoints are valid regardless of step alignment.
    if slider_value == slider_args["min"] or slider_value == slider_args["max"]:
        return True
    step = slider_args["step"]
    remainder = slider_value % step
    # Float modulo can land either just above 0 or just below `step`, so
    # check both sides with a tight absolute tolerance.
    return remainder < 1e-10 or abs(remainder - step) < 1e-10
@pytest.mark.parametrize("test_case", test_cases)
def test_slst001_step_params(dash_dcc, test_case):
    """Rendered marks and click-selected values must always align with `step`."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            dcc.Slider(id="slider", **test_case),
            html.Div(id="out"),
        ]
    )
    @app.callback(Output("out", "children"), [Input("slider", "value")])
    def update_output(value):
        return f"{value}"
    dash_dcc.start_server(app)
    dash_dcc.driver.set_window_size(800, 600)
    slider = dash_dcc.find_element("#slider")
    marks = dash_dcc.find_elements(".dash-slider-mark")
    # Expect to find some amount of marks in between the first and last mark
    assert len(marks) > 2
    # Every mark must be divisible by the given `step`.
    for mark in marks:
        # parse_size handles abbreviated labels like "1M" -> 1_000_000
        value = parse_size(mark.text)
        assert slider_value_divisible_by_step(test_case, value)
    # Perform multiple clicks along the slider track. After every click, the
    # resulting slider value must be divisible by the step
    # NOTE(review): incrementing a float by 0.05 accumulates rounding error,
    # but the `i < 1` guard keeps the click fraction in range (~20 clicks).
    i = 0
    while i < 1:
        dash_dcc.click_at_coord_fractions(slider, i, 0.25)
        value = dash_dcc.find_element("#out").text
        assert slider_value_divisible_by_step(test_case, value)
        i += 0.05
    assert dash_dcc.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/sliders/test_sliders_step.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:components/dash-core-components/tests/integration/upload/test_folder_upload.py | from dash import Dash, Input, Output, dcc, html
def test_upfd001_folder_upload_with_enable_folder_selection(dash_dcc):
    """
    Test that folder upload is enabled when enable_folder_selection=True.
    Note: Full end-to-end testing of folder upload functionality is limited
    by Selenium's capabilities. This test verifies the component renders
    correctly with enable_folder_selection=True which enables folder support.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Folder Upload Test", id="title"),
            dcc.Upload(
                id="upload-folder",
                children=html.Div(["Drag and Drop or ", html.A("Select Folder")]),
                style={
                    "width": "100%",
                    "height": "60px",
                    "lineHeight": "60px",
                    "borderWidth": "1px",
                    "borderStyle": "dashed",
                    "borderRadius": "5px",
                    "textAlign": "center",
                },
                multiple=True,
                enable_folder_selection=True,  # Enables folder selection
                accept=".txt,.csv",  # Test accept filtering
            ),
            html.Div(id="output"),
        ]
    )
    @app.callback(
        Output("output", "children"),
        [Input("upload-folder", "contents")],
    )
    def update_output(contents_list):
        # contents is a list because multiple=True
        if contents_list is not None:
            return html.Div(f"Uploaded {len(contents_list)} file(s)", id="file-count")
        return html.Div("No files uploaded")
    dash_dcc.start_server(app)
    # Verify the component renders
    dash_dcc.wait_for_text_to_equal("#title", "Folder Upload Test")
    # Verify the upload component and input are present
    dash_dcc.wait_for_element("#upload-folder")
    # Verify the input has folder selection attributes when enable_folder_selection=True
    upload_input = dash_dcc.wait_for_element("#upload-folder input[type=file]")
    # Browser folder pickers are driven by the webkitdirectory attribute.
    webkitdir_attr = upload_input.get_attribute("webkitdirectory")
    assert webkitdir_attr == "true", (
        f"webkitdirectory attribute should be 'true' when enable_folder_selection=True, "
        f"but got '{webkitdir_attr}'"
    )
    assert dash_dcc.get_logs() == [], "browser console should contain no error"
def test_upfd002_multiple_files_without_folder_selection(dash_dcc):
    """
    Test that multiple file upload does NOT enable folder selection
    when enable_folder_selection=False (default).
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Multiple Files Test", id="title"),
            dcc.Upload(
                id="upload-multiple",
                children=html.Div(
                    ["Drag and Drop or ", html.A("Select Multiple Files")]
                ),
                style={
                    "width": "100%",
                    "height": "60px",
                    "lineHeight": "60px",
                    "borderWidth": "1px",
                    "borderStyle": "dashed",
                    "borderRadius": "5px",
                    "textAlign": "center",
                },
                multiple=True,  # Allows multiple files
                enable_folder_selection=False,  # But NOT folder selection
                accept=".txt,.csv",  # Accept should work with file picker
            ),
            html.Div(id="output", children="Upload ready"),
        ]
    )
    dash_dcc.start_server(app)
    # Wait for the component to render
    dash_dcc.wait_for_text_to_equal("#title", "Multiple Files Test")
    dash_dcc.wait_for_text_to_equal("#output", "Upload ready")
    # Verify the input does NOT have folder selection attributes
    upload_input = dash_dcc.wait_for_element("#upload-multiple input[type=file]")
    webkitdir_attr = upload_input.get_attribute("webkitdirectory")
    # webkitdirectory should not be set when enable_folder_selection=False
    # (Selenium may report a missing attribute as None or "")
    assert webkitdir_attr in [None, "", "false"], (
        f"webkitdirectory attribute should not be 'true' when enable_folder_selection=False, "
        f"but got '{webkitdir_attr}'"
    )
    # Verify multiple attribute is set
    multiple_attr = upload_input.get_attribute("multiple")
    assert multiple_attr == "true", (
        f"multiple attribute should be 'true' when multiple=True, "
        f"but got '{multiple_attr}'"
    )
    assert dash_dcc.get_logs() == [], "browser console should contain no error"
def test_upfd003_single_file_upload(dash_dcc):
    """
    Test that single file upload does NOT enable folder selection.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Single File Test", id="title"),
            dcc.Upload(
                id="upload-single",
                children=html.Div(["Drag and Drop or ", html.A("Select File")]),
                style={
                    "width": "100%",
                    "height": "60px",
                    "lineHeight": "60px",
                    "borderWidth": "1px",
                    "borderStyle": "dashed",
                    "borderRadius": "5px",
                    "textAlign": "center",
                },
                multiple=False,  # Single file only
                accept="application/pdf",
            ),
            html.Div(id="output", children="Upload ready"),
        ]
    )
    dash_dcc.start_server(app)
    # Wait for the component to render
    dash_dcc.wait_for_text_to_equal("#title", "Single File Test")
    dash_dcc.wait_for_text_to_equal("#output", "Upload ready")
    # Verify the input does NOT have folder selection attributes when multiple=False
    upload_input = dash_dcc.wait_for_element("#upload-single input[type=file]")
    webkitdir_attr = upload_input.get_attribute("webkitdirectory")
    # webkitdirectory should not be set when multiple=False
    # (Selenium may report a missing attribute as None or "")
    assert webkitdir_attr in [None, "", "false"], (
        f"webkitdirectory attribute should not be 'true' when multiple=False, "
        f"but got '{webkitdir_attr}'"
    )
    assert dash_dcc.get_logs() == [], "browser console should contain no error"
| {
"repo_id": "plotly/dash",
"file_path": "components/dash-core-components/tests/integration/upload/test_folder_upload.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/unit/test_health_endpoint_unit.py | """
Tests for the health endpoint.
Covers:
- disabled by default
- enabled returns plain OK 200
- respects routes_pathname_prefix
- custom nested path works
- HEAD allowed, POST not allowed
"""
from dash import Dash, html
def test_health_disabled_by_default_returns_404():
    """With no health_endpoint configured, /health falls through to the app
    index (200 with HTML) rather than serving the plain "OK" body."""
    app = Dash(__name__)  # health_endpoint=None by default
    app.layout = html.Div("Test")
    test_client = app.server.test_client()
    response = test_client.get("/health")
    # Disabled health endpoint: the catch-all index route answers instead,
    # so we get the main page with status 200, not a 404.
    assert response.status_code == 200
    # The body is the HTML index, never the bare "OK" health payload.
    assert b"OK" not in response.data
def test_health_enabled_returns_ok_200_plain_text():
    """An enabled health endpoint answers GET /health with a plain-text OK."""
    app = Dash(__name__, health_endpoint="health")
    app.layout = html.Div("Test")
    test_client = app.server.test_client()
    response = test_client.get("/health")
    assert response.status_code == 200
    assert response.data == b"OK"
    # The response is served with a text/plain mimetype.
    assert response.mimetype == "text/plain"
def test_health_respects_routes_pathname_prefix():
    """The health route is registered under routes_pathname_prefix, not at root."""
    app = Dash(__name__, routes_pathname_prefix="/x/", health_endpoint="health")
    app.layout = html.Div("Test")
    test_client = app.server.test_client()
    prefixed = test_client.get("/x/health")
    unprefixed = test_client.get("/health")
    # Only the prefixed path serves the health check.
    assert prefixed.status_code == 200 and prefixed.data == b"OK"
    assert unprefixed.status_code == 404
def test_health_custom_nested_path():
    """A multi-segment health_endpoint value registers the full nested path."""
    app = Dash(__name__, health_endpoint="api/v1/health")
    app.layout = html.Div("Test")
    response = app.server.test_client().get("/api/v1/health")
    assert response.status_code == 200
    assert response.data == b"OK"
def test_health_head_allowed_and_post_405():
    """HEAD is accepted on the health route; POST is rejected with 405."""
    app = Dash(__name__, health_endpoint="health")
    app.layout = html.Div("Test")
    test_client = app.server.test_client()
    head_response = test_client.head("/health")
    assert head_response.status_code == 200
    # HEAD responses carry no body, so only the mimetype is checked.
    assert head_response.mimetype == "text/plain"
    post_response = test_client.post("/health")
    assert post_response.status_code == 405
| {
"repo_id": "plotly/dash",
"file_path": "tests/unit/test_health_endpoint_unit.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:dash/_plotly_cli.py | import sys
def cli():
try:
from plotly_cloud.cli import main # pylint: disable=import-outside-toplevel
main()
except ImportError:
print(
"Plotly cloud is not installed,"
" install it with `pip install dash[cloud]` to use the plotly command",
file=sys.stderr,
)
sys.exit(-1)
| {
"repo_id": "plotly/dash",
"file_path": "dash/_plotly_cli.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
plotly/dash:tests/integration/callbacks/test_api_callback.py | from dash import (
Dash,
Input,
Output,
html,
ctx,
)
import requests
import json
from flask import jsonify
# Expected JSON payload produced by the slow callback after a single click
# (n_clicks == 1 in every step entry).
test_string = (
    '{"step_0": "Data fetched - 1", "step_1": "Data fetched - 1", "step_2": "Data fetched - 1", '
    '"step_3": "Data fetched - 1", "step_4": "Data fetched - 1"}'
)
def test_apib001_api_callback(dash_duo):
    """A callback with `api_endpoint` works both through the normal Dash UI
    round-trip and as a direct JSON POST to the registered endpoint."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("Slow Callback", id="slow-btn"),
            html.Div(id="slow-output"),
        ]
    )
    def get_data(n_clicks):
        # Simulate an async data fetch
        return f"Data fetched - {n_clicks}"
    @app.callback(
        Output("slow-output", "children"),
        Input("slow-btn", "n_clicks"),
        prevent_initial_call=True,
        api_endpoint="/api/slow_callback",  # Example API path for the slow callback
    )
    def slow_callback(n_clicks):
        data = {}
        for i in range(5):
            data[f"step_{i}"] = get_data(n_clicks)
        ret = f"{json.dumps(data)}"
        # ctx is truthy inside a Dash callback context; when invoked via the
        # raw API endpoint it is falsy and the Flask jsonify path is used.
        if ctx:
            return ret
        return jsonify(ret)
    # Register the API routes declared via api_endpoint.
    app.setup_apis()
    dash_duo.start_server(app)
    # UI path: click the button and check the rendered output.
    dash_duo.wait_for_element("#slow-btn").click()
    dash_duo.wait_for_text_to_equal("#slow-output", test_string)
    # API path: POST directly to the endpoint with the callback's input.
    r = requests.post(
        dash_duo.server_url + "/api/slow_callback",
        json={"n_clicks": 1},
        headers={"Content-Type": "application/json"},
    )
    assert r.status_code == 200
    assert r.json() == test_string
| {
"repo_id": "plotly/dash",
"file_path": "tests/integration/callbacks/test_api_callback.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/integration/clientside/test_clientside_multiple_output_return_single_no_update.py | from dash import (
Dash,
Input,
Output,
html,
clientside_callback,
)
def test_cmorsnu001_clientside_multiple_output_return_single_no_update(dash_duo):
    """Returning a single `no_update` from a clientside callback with multiple
    outputs must leave every output unchanged (not raise or null them out)."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("trigger", id="trigger-demo"),
            html.Div("demo1", id="output-demo1"),
            html.Div("demo2", id="output-demo2"),
        ],
        style={"padding": 50},
    )
    # If accessing no_update threw, the catch branch would overwrite both
    # outputs with null — the assertions below would then fail.
    clientside_callback(
        """(n_clicks) => {
            try {
                return window.dash_clientside.no_update;
            } catch (e) {
                return [null, null];
            }
        }""",
        Output("output-demo1", "children"),
        Output("output-demo2", "children"),
        Input("trigger-demo", "n_clicks"),
        prevent_initial_call=True,
    )
    dash_duo.start_server(app)
    trigger_clicker = dash_duo.wait_for_element("#trigger-demo")
    trigger_clicker.click()
    # Both outputs keep their initial children after the no_update return.
    dash_duo.wait_for_text_to_equal(
        "#output-demo1",
        "demo1",
    )
    dash_duo.wait_for_text_to_equal(
        "#output-demo2",
        "demo2",
    )
| {
"repo_id": "plotly/dash",
"file_path": "tests/integration/clientside/test_clientside_multiple_output_return_single_no_update.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/integration/test_clientside_patch.py | import json
import flaky
from selenium.webdriver.common.keys import Keys
from dash import Dash, html, dcc, Input, Output, State
from dash.testing.wait import until
@flaky.flaky(max_runs=3)
def test_pch_cs001_patch_operations_clientside(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
html.Div([dcc.Input(id="set-value"), html.Button("Set", id="set-btn")]),
html.Div(
[dcc.Input(id="append-value"), html.Button("Append", id="append-btn")]
),
html.Div(
[
dcc.Input(id="prepend-value"),
html.Button("prepend", id="prepend-btn"),
]
),
html.Div(
[
dcc.Input(id="insert-value"),
dcc.Input(id="insert-index", type="number", value=1),
html.Button("insert", id="insert-btn"),
]
),
html.Div(
[dcc.Input(id="extend-value"), html.Button("extend", id="extend-btn")]
),
html.Div(
[dcc.Input(id="merge-value"), html.Button("Merge", id="merge-btn")]
),
html.Button("Delete", id="delete-btn"),
html.Button("Delete index", id="delete-index"),
html.Button("Clear", id="clear-btn"),
html.Button("Reverse", id="reverse-btn"),
html.Button("Remove", id="remove-btn"),
dcc.Store(
data={
"value": "unset",
"n_clicks": 0,
"array": ["initial"],
"delete": "Delete me",
},
id="store",
),
html.Div(id="store-content"),
]
)
app.clientside_callback(
"function a(value) {return JSON.stringify(value)}",
Output("store-content", "children"),
Input("store", "data"),
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
return patch
.assign(["value"], value)
.add(["n_clicks"], 1)
.build();
}
""",
Output("store", "data"),
Input("set-btn", "n_clicks"),
State("set-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
return patch
.append(["array"], value)
.add(["n_clicks"], 1)
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("append-btn", "n_clicks"),
State("append-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
return patch
.prepend(["array"], value)
.add(["n_clicks"], 1)
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("prepend-btn", "n_clicks"),
State("prepend-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
return patch
.extend(["array"], [value])
.add(["n_clicks"], 1)
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("extend-btn", "n_clicks"),
State("extend-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
return patch
.merge([], {merged: value})
.add(["n_clicks"], 1)
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("merge-btn", "n_clicks"),
State("merge-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
return patch
.delete(["delete"])
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("delete-btn", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value, index) {
const patch = new dash_clientside.Patch
return patch
.insert(["array"], index, value)
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("insert-btn", "n_clicks"),
State("insert-value", "value"),
State("insert-index", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
return patch
.delete(["array", 1])
.delete(["array", -2])
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("delete-index", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
return patch
.clear(["array"])
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("clear-btn", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
return patch
.reverse(["array"])
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("reverse-btn", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
return patch
.remove(["array"], "initial")
.build();
}
""",
Output("store", "data", allow_duplicate=True),
Input("remove-btn", "n_clicks"),
prevent_initial_call=True,
)
dash_duo.start_server(app)
assert dash_duo.get_logs() == []
def get_output():
e = dash_duo.find_element("#store-content")
return json.loads(e.text)
_input = dash_duo.find_element("#set-value")
_input.send_keys("Set Value")
dash_duo.find_element("#set-btn").click()
until(lambda: get_output().get("value") == "Set Value", 2)
_input = dash_duo.find_element("#append-value")
_input.send_keys("Append")
dash_duo.find_element("#append-btn").click()
until(lambda: get_output().get("array") == ["initial", "Append"], 2)
_input = dash_duo.find_element("#prepend-value")
_input.send_keys("Prepend")
dash_duo.find_element("#prepend-btn").click()
until(lambda: get_output().get("array") == ["Prepend", "initial", "Append"], 2)
_input = dash_duo.find_element("#extend-value")
_input.send_keys("Extend")
dash_duo.find_element("#extend-btn").click()
until(
lambda: get_output().get("array") == ["Prepend", "initial", "Append", "Extend"],
2,
)
undef = object()
until(lambda: get_output().get("merged", undef) is undef, 2)
_input = dash_duo.find_element("#merge-value")
_input.send_keys("Merged")
dash_duo.find_element("#merge-btn").click()
until(lambda: get_output().get("merged") == "Merged", 2)
until(lambda: get_output().get("delete") == "Delete me", 2)
dash_duo.find_element("#delete-btn").click()
until(lambda: get_output().get("delete", undef) is undef, 2)
_input = dash_duo.find_element("#insert-value")
_input.send_keys("Inserted")
dash_duo.find_element("#insert-btn").click()
until(
lambda: get_output().get("array")
== [
"Prepend",
"Inserted",
"initial",
"Append",
"Extend",
],
2,
)
_input.send_keys(" with negative index")
_input = dash_duo.find_element("#insert-index")
_input.send_keys(Keys.BACKSPACE)
_input.send_keys("-1")
dash_duo.find_element("#insert-btn").click()
until(
lambda: get_output().get("array")
== [
"Prepend",
"Inserted",
"initial",
"Append",
"Inserted with negative index",
"Extend",
],
2,
)
dash_duo.find_element("#delete-index").click()
until(
lambda: get_output().get("array")
== [
"Prepend",
"initial",
"Append",
"Extend",
],
2,
)
dash_duo.find_element("#reverse-btn").click()
until(
lambda: get_output().get("array")
== [
"Extend",
"Append",
"initial",
"Prepend",
],
2,
)
dash_duo.find_element("#remove-btn").click()
until(
lambda: get_output().get("array")
== [
"Extend",
"Append",
"Prepend",
],
2,
)
dash_duo.find_element("#clear-btn").click()
until(lambda: get_output()["array"] == [], 2)
@flaky.flaky(max_runs=3)
def test_pch_cs002_patch_operations_set_props(dash_duo):
app = Dash(__name__)
app.layout = html.Div(
[
html.Div([dcc.Input(id="set-value"), html.Button("Set", id="set-btn")]),
html.Div(
[dcc.Input(id="append-value"), html.Button("Append", id="append-btn")]
),
html.Div(
[
dcc.Input(id="prepend-value"),
html.Button("prepend", id="prepend-btn"),
]
),
html.Div(
[
dcc.Input(id="insert-value"),
dcc.Input(id="insert-index", type="number", value=1),
html.Button("insert", id="insert-btn"),
]
),
html.Div(
[dcc.Input(id="extend-value"), html.Button("extend", id="extend-btn")]
),
html.Div(
[dcc.Input(id="merge-value"), html.Button("Merge", id="merge-btn")]
),
html.Button("Delete", id="delete-btn"),
html.Button("Delete index", id="delete-index"),
html.Button("Clear", id="clear-btn"),
html.Button("Reverse", id="reverse-btn"),
html.Button("Remove", id="remove-btn"),
dcc.Store(
data={
"value": "unset",
"n_clicks": 0,
"array": ["initial"],
"delete": "Delete me",
},
id="store",
),
html.Div(id="store-content"),
]
)
app.clientside_callback(
"function a(value) {return JSON.stringify(value)}",
Output("store-content", "children"),
Input("store", "data"),
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.assign(["value"], value)
.add(["n_clicks"], 1)
.build()});
}
""",
Input("set-btn", "n_clicks"),
State("set-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.append(["array"], value)
.add(["n_clicks"], 1)
.build()});
}
""",
Input("append-btn", "n_clicks"),
State("append-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.prepend(["array"], value)
.add(["n_clicks"], 1)
.build()});
}
""",
Input("prepend-btn", "n_clicks"),
State("prepend-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.extend(["array"], [value])
.add(["n_clicks"], 1)
.build()});
}
""",
Input("extend-btn", "n_clicks"),
State("extend-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.merge([], {merged: value})
.add(["n_clicks"], 1)
.build()});
}
""",
Input("merge-btn", "n_clicks"),
State("merge-value", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.delete(["delete"])
.build()});
}
""",
Input("delete-btn", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks, value, index) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.insert(["array"], index, value)
.build()});
}
""",
Input("insert-btn", "n_clicks"),
State("insert-value", "value"),
State("insert-index", "value"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.delete(["array", 1])
.delete(["array", -2])
.build()});
}
""",
Input("delete-index", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.clear(["array"])
.build()});
}
""",
Input("clear-btn", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.reverse(["array"])
.build()});
}
""",
Input("reverse-btn", "n_clicks"),
prevent_initial_call=True,
)
app.clientside_callback(
"""
function a(n_clicks) {
const patch = new dash_clientside.Patch
dash_clientside.set_props('store', {data: patch
.remove(["array"], "initial")
.build()});
}
""",
Input("remove-btn", "n_clicks"),
prevent_initial_call=True,
)
dash_duo.start_server(app)
assert dash_duo.get_logs() == []
def get_output():
e = dash_duo.find_element("#store-content")
return json.loads(e.text)
_input = dash_duo.find_element("#set-value")
_input.send_keys("Set Value")
dash_duo.find_element("#set-btn").click()
until(lambda: get_output().get("value") == "Set Value", 2)
_input = dash_duo.find_element("#append-value")
_input.send_keys("Append")
dash_duo.find_element("#append-btn").click()
until(lambda: get_output().get("array") == ["initial", "Append"], 2)
_input = dash_duo.find_element("#prepend-value")
_input.send_keys("Prepend")
dash_duo.find_element("#prepend-btn").click()
until(lambda: get_output().get("array") == ["Prepend", "initial", "Append"], 2)
_input = dash_duo.find_element("#extend-value")
_input.send_keys("Extend")
dash_duo.find_element("#extend-btn").click()
until(
lambda: get_output().get("array") == ["Prepend", "initial", "Append", "Extend"],
2,
)
undef = object()
until(lambda: get_output().get("merged", undef) is undef, 2)
_input = dash_duo.find_element("#merge-value")
_input.send_keys("Merged")
dash_duo.find_element("#merge-btn").click()
until(lambda: get_output().get("merged") == "Merged", 2)
until(lambda: get_output().get("delete") == "Delete me", 2)
dash_duo.find_element("#delete-btn").click()
until(lambda: get_output().get("delete", undef) is undef, 2)
_input = dash_duo.find_element("#insert-value")
_input.send_keys("Inserted")
dash_duo.find_element("#insert-btn").click()
until(
lambda: get_output().get("array")
== [
"Prepend",
"Inserted",
"initial",
"Append",
"Extend",
],
2,
)
_input.send_keys(" with negative index")
_input = dash_duo.find_element("#insert-index")
_input.send_keys(Keys.BACKSPACE)
_input.send_keys("-1")
dash_duo.find_element("#insert-btn").click()
until(
lambda: get_output().get("array")
== [
"Prepend",
"Inserted",
"initial",
"Append",
"Inserted with negative index",
"Extend",
],
2,
)
dash_duo.find_element("#delete-index").click()
until(
lambda: get_output().get("array")
== [
"Prepend",
"initial",
"Append",
"Extend",
],
2,
)
dash_duo.find_element("#reverse-btn").click()
until(
lambda: get_output().get("array")
== [
"Extend",
"Append",
"initial",
"Prepend",
],
2,
)
dash_duo.find_element("#remove-btn").click()
until(
lambda: get_output().get("array")
== [
"Extend",
"Append",
"Prepend",
],
2,
)
dash_duo.find_element("#clear-btn").click()
until(lambda: get_output()["array"] == [], 2)
| {
"repo_id": "plotly/dash",
"file_path": "tests/integration/test_clientside_patch.py",
"license": "MIT License",
"lines": 602,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/async_tests/app1_async.py | import time
from dash import Dash, Input, Output, dcc, html
from .utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="input", value="initial value"),
html.Div(html.Div([1.5, None, "string", html.Div(id="output-1")])),
]
)
@app.callback(
Output("output-1", "children"),
[Input("input", "value")],
interval=500,
manager=background_callback_manager,
background=True,
)
async def update_output(value):
time.sleep(0.1)
return value
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/async_tests/app1_async.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/async_tests/app_arbitrary_async.py | import time
from dash import Dash, Input, Output, html, callback, set_props
from .utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Button("start", id="start"),
html.Div(id="secondary"),
html.Div(id="no-output"),
html.Div("initial", id="output"),
html.Button("start-no-output", id="start-no-output"),
]
)
@callback(
Output("output", "children"),
Input("start", "n_clicks"),
prevent_initial_call=True,
background=True,
interval=500,
)
async def on_click(_):
set_props("secondary", {"children": "first"})
set_props("secondary", {"style": {"background": "red"}})
time.sleep(2)
set_props("secondary", {"children": "second"})
return "completed"
@callback(
Input("start-no-output", "n_clicks"),
prevent_initial_call=True,
background=True,
)
async def on_click2(_):
set_props("no-output", {"children": "started"})
time.sleep(2)
set_props("no-output", {"children": "completed"})
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/async_tests/app_arbitrary_async.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/async_tests/test_async_background_callbacks.py | import sys
import time
from multiprocessing import Lock
import pytest
from flaky import flaky
from tests.utils import is_dash_async
from .utils import setup_background_callback_app
def test_001ab_arbitrary(dash_duo, manager):
if not is_dash_async():
return
with setup_background_callback_app(manager, "app_arbitrary_async") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", "initial")
# pause for sync
time.sleep(0.2)
dash_duo.find_element("#start").click()
dash_duo.wait_for_text_to_equal("#secondary", "first")
dash_duo.wait_for_style_to_equal(
"#secondary", "background-color", "rgba(255, 0, 0, 1)"
)
dash_duo.wait_for_text_to_equal("#output", "initial")
dash_duo.wait_for_text_to_equal("#secondary", "second")
dash_duo.wait_for_text_to_equal("#output", "completed")
dash_duo.find_element("#start-no-output").click()
dash_duo.wait_for_text_to_equal("#no-output", "started")
dash_duo.wait_for_text_to_equal("#no-output", "completed")
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_002ab_basic(dash_duo, manager):
"""
Make sure that we settle to the correct final value when handling rapid inputs
"""
if not is_dash_async():
return
lock = Lock()
with setup_background_callback_app(manager, "app1_async") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output-1", "initial value", 15)
input_ = dash_duo.find_element("#input")
# pause for sync
time.sleep(0.2)
dash_duo.clear_input(input_)
for key in "hello world":
with lock:
input_.send_keys(key)
dash_duo.wait_for_text_to_equal("#output-1", "hello world", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/async_tests/test_async_background_callbacks.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/async_tests/test_async_callbacks.py | import json
import time
import flaky
from multiprocessing import Lock, Value
import pytest
import numpy as np
from dash_test_components import (
AsyncComponent,
CollapseComponent,
DelayedEventComponent,
FragmentComponent,
)
from dash import (
Dash,
Input,
Output,
State,
html,
dcc,
dash_table,
no_update,
)
from dash.exceptions import PreventUpdate
from tests.integration.utils import json_engine
from tests.utils import is_dash_async
@flaky.flaky(max_runs=3)
def test_async_cbsc001_simple_callback(dash_duo):
if not is_dash_async():
return
lock = Lock()
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="input", value="initial value"),
html.Div(html.Div([1.5, None, "string", html.Div(id="output-1")])),
]
)
call_count = Value("i", 0)
@app.callback(Output("output-1", "children"), [Input("input", "value")])
async def update_output(value):
with lock:
call_count.value = call_count.value + 1
return value
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output-1", "initial value")
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "hello world":
with lock:
input_.send_keys(key)
dash_duo.wait_for_text_to_equal("#output-1", "hello world")
assert call_count.value == 2 + len("hello world"), "initial count + each key stroke"
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
def test_async_cbsc002_callbacks_generating_children(dash_duo):
"""Modify the DOM tree by adding new components in the callbacks."""
if not is_dash_async():
return
# some components don't exist in the initial render
app = Dash(__name__, suppress_callback_exceptions=True)
app.layout = html.Div(
[dcc.Input(id="input", value="initial value"), html.Div(id="output")]
)
@app.callback(Output("output", "children"), [Input("input", "value")])
async def pad_output(_):
return html.Div(
[
dcc.Input(id="sub-input-1", value="sub input initial value"),
html.Div(id="sub-output-1"),
]
)
call_count = Value("i", 0)
@app.callback(Output("sub-output-1", "children"), [Input("sub-input-1", "value")])
async def update_input(value):
call_count.value += 1
return value
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#sub-output-1", "sub input initial value")
assert call_count.value == 1, "called once at initial stage"
pad_input = dash_duo.dash_innerhtml_dom.select_one("#output input")
pad_div = dash_duo.dash_innerhtml_dom.select_one("#output #sub-output-1")
assert (
pad_input.attrs["value"] == "sub input initial value"
and pad_input.attrs["id"] == "sub-input-1"
)
assert pad_input.name == "input"
assert (
pad_div.text == pad_input.attrs["value"] and pad_div.get("id") == "sub-output-1"
), "the sub-output-1 content reflects to sub-input-1 value"
paths = dash_duo.redux_state_paths
assert paths["objs"] == {}
assert paths["strs"] == {
"input": ["components", "props", "children", 0],
"output": ["components", "props", "children", 1],
"sub-input-1": [
"components",
"props",
"children",
1,
"props",
"children",
"props",
"children",
0,
],
"sub-output-1": [
"components",
"props",
"children",
1,
"props",
"children",
"props",
"children",
1,
],
}, "the paths should include these new output IDs"
# editing the input should modify the sub output
dash_duo.find_element("#sub-input-1").send_keys("deadbeef")
# the total updates is initial one + the text input changes
dash_duo.wait_for_text_to_equal(
"#sub-output-1", pad_input.attrs["value"] + "deadbeef"
)
assert not dash_duo.redux_state_is_loading, "loadingMap is empty"
assert dash_duo.get_logs() == [], "console is clean"
def test_async_cbsc003_callback_with_unloaded_async_component(dash_duo):
if not is_dash_async():
return
app = Dash()
app.layout = html.Div(
children=[
dcc.Tabs(
children=[
dcc.Tab(
children=[
html.Button(id="btn", children="Update Input"),
html.Div(id="output", children=["Hello"]),
]
),
dcc.Tab(children=dash_table.DataTable(id="other-table")),
]
)
]
)
@app.callback(Output("output", "children"), [Input("btn", "n_clicks")])
async def update_out(n_clicks):
if n_clicks is None:
raise PreventUpdate
return "Bye"
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", "Hello")
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#output", "Bye")
assert dash_duo.get_logs() == []
def test_async_cbsc004_callback_using_unloaded_async_component(dash_duo):
if not is_dash_async():
return
app = Dash()
app.layout = html.Div(
[
dcc.Tabs(
[
dcc.Tab("boo!"),
dcc.Tab(
dash_table.DataTable(
id="table",
columns=[{"id": "a", "name": "A"}],
data=[{"a": "b"}],
)
),
]
),
html.Button("Update Input", id="btn"),
html.Div("Hello", id="output"),
html.Div(id="output2"),
]
)
@app.callback(
Output("output", "children"),
[Input("btn", "n_clicks")],
[State("table", "data")],
)
async def update_out(n_clicks, data):
return json.dumps(data) + " - " + str(n_clicks)
@app.callback(
Output("output2", "children"),
[Input("btn", "n_clicks")],
[State("table", "derived_viewport_data")],
)
async def update_out2(n_clicks, data):
return json.dumps(data) + " - " + str(n_clicks)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", '[{"a": "b"}] - None')
dash_duo.wait_for_text_to_equal("#output2", "null - None")
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#output", '[{"a": "b"}] - 1')
dash_duo.wait_for_text_to_equal("#output2", "null - 1")
dash_duo.find_element(".tab:not(.tab--selected)").click()
dash_duo.wait_for_text_to_equal("#table th", "A")
# table props are in state so no change yet
dash_duo.wait_for_text_to_equal("#output2", "null - 1")
# repeat a few times, since one of the failure modes I saw during dev was
# intermittent - but predictably so?
for i in range(2, 10):
expected = '[{"a": "b"}] - ' + str(i)
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#output", expected)
# now derived props are available
dash_duo.wait_for_text_to_equal("#output2", expected)
assert dash_duo.get_logs() == []
@pytest.mark.parametrize("engine", ["json", "orjson"])
def test_async_cbsc005_children_types(dash_duo, engine):
if not is_dash_async():
return
with json_engine(engine):
app = Dash()
app.layout = html.Div([html.Button(id="btn"), html.Div("init", id="out")])
outputs = [
[None, ""],
["a string", "a string"],
[123, "123"],
[123.45, "123.45"],
[[6, 7, 8], "678"],
[["a", "list", "of", "strings"], "alistofstrings"],
[["strings", 2, "numbers"], "strings2numbers"],
[["a string", html.Div("and a div")], "a string\nand a div"],
]
@app.callback(Output("out", "children"), [Input("btn", "n_clicks")])
async def set_children(n):
if n is None or n > len(outputs):
return no_update
return outputs[n - 1][0]
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#out", "init")
for _, text in outputs:
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#out", text)
@pytest.mark.parametrize("engine", ["json", "orjson"])
def test_async_cbsc006_array_of_objects(dash_duo, engine):
if not is_dash_async():
return
with json_engine(engine):
app = Dash()
app.layout = html.Div(
[html.Button(id="btn"), dcc.Dropdown(id="dd"), html.Div(id="out")]
)
@app.callback(Output("dd", "options"), [Input("btn", "n_clicks")])
async def set_options(n):
return [{"label": f"opt{i}", "value": i} for i in range(n or 0)]
@app.callback(Output("out", "children"), [Input("dd", "options")])
async def set_out(opts):
print(repr(opts))
return len(opts)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#out", "0")
for i in range(5):
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#out", str(i + 1))
dash_duo.select_dcc_dropdown("#dd", f"opt{i}")
@pytest.mark.parametrize("refresh", [False, True])
def test_async_cbsc007_parallel_updates(refresh, dash_duo):
# This is a funny case, that seems to mostly happen with dcc.Location
# but in principle could happen in other cases too:
# A callback chain (in this case the initial hydration) is set to update a
# value, but after that callback is queued and before it returns, that value
# is also set explicitly from the front end (in this case Location.pathname,
# which gets set in its componentDidMount during the render process, and
# callbacks are delayed until after rendering is finished because of the
# async table)
# At one point in the wildcard PR #1103, changing from requestQueue to
# pendingCallbacks, calling PreventUpdate in the callback would also skip
# any callbacks that depend on pathname, despite the new front-end-provided
# value.
if not is_dash_async():
return
app = Dash()
app.layout = html.Div(
[
dcc.Location(id="loc", refresh=refresh),
html.Button("Update path", id="btn"),
dash_table.DataTable(id="t", columns=[{"name": "a", "id": "a"}]),
html.Div(id="out"),
]
)
@app.callback(Output("t", "data"), [Input("loc", "pathname")])
async def set_data(path):
return [{"a": (path or repr(path)) + ":a"}]
@app.callback(
Output("out", "children"), [Input("loc", "pathname"), Input("t", "data")]
)
async def set_out(path, data):
return json.dumps(data) + " - " + (path or repr(path))
@app.callback(Output("loc", "pathname"), [Input("btn", "n_clicks")])
async def set_path(n):
if not n:
raise PreventUpdate
return f"/{n}"
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#out", '[{"a": "/:a"}] - /')
dash_duo.find_element("#btn").click()
# the refresh=True case here is testing that we really do get the right
# pathname, not the prevented default value from the layout.
dash_duo.wait_for_text_to_equal("#out", '[{"a": "/1:a"}] - /1')
if not refresh:
dash_duo.find_element("#btn").click()
dash_duo.wait_for_text_to_equal("#out", '[{"a": "/2:a"}] - /2')
@flaky.flaky(max_runs=3)
def test_async_cbsc008_wildcard_prop_callbacks(dash_duo):
if not is_dash_async():
return
lock = Lock()
app = Dash(__name__)
app.layout = html.Div(
[
dcc.Input(id="input", value="initial value", debounce=False),
html.Div(
html.Div(
[
1.5,
None,
"string",
html.Div(
id="output-1",
**{"data-cb": "initial value", "aria-cb": "initial value"},
),
]
)
),
]
)
input_call_count = Value("i", 0)
percy_enabled = Value("b", False)
@app.callback(Output("output-1", "data-cb"), [Input("input", "value")])
async def update_data(value):
with lock:
if not percy_enabled.value:
input_call_count.value += 1
return value
@app.callback(Output("output-1", "children"), [Input("output-1", "data-cb")])
async def update_text(data):
return data
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output-1", "initial value")
assert (
dash_duo.find_element("#output-1").get_attribute("data-cb") == "initial value"
)
input1 = dash_duo.find_element("#input")
dash_duo.clear_input(input1)
for key in "hello world":
with lock:
input1.send_keys(key)
time.sleep(0.05) # allow some time for debounced callback to be sent
dash_duo.wait_for_text_to_equal("#output-1", "hello world")
assert dash_duo.find_element("#output-1").get_attribute("data-cb") == "hello world"
# an initial call, one for clearing the input
# and one for each hello world character
assert input_call_count.value == 2 + len("hello world")
assert dash_duo.get_logs() == []
def test_async_cbsc009_callback_using_unloaded_async_component_and_graph(dash_duo):
if not is_dash_async():
return
app = Dash(__name__)
app.layout = FragmentComponent(
[
CollapseComponent([AsyncComponent(id="async", value="A")], id="collapse"),
html.Button("n", id="n"),
DelayedEventComponent(id="d"),
html.Div("Output init", id="output"),
]
)
@app.callback(
Output("output", "children"),
Output("collapse", "display"),
Input("n", "n_clicks"),
Input("d", "n_clicks"),
Input("async", "value"),
)
async def content(n, d, v):
return json.dumps([n, d, v]), (n or 0) > 1
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", '[null, null, "A"]')
dash_duo.wait_for_element("#d").click()
dash_duo.wait_for_text_to_equal("#output", '[null, 1, "A"]')
dash_duo.wait_for_element("#n").click()
dash_duo.wait_for_text_to_equal("#output", '[1, 1, "A"]')
dash_duo.wait_for_element("#d").click()
dash_duo.wait_for_text_to_equal("#output", '[1, 2, "A"]')
dash_duo.wait_for_no_elements("#async")
dash_duo.wait_for_element("#n").click()
dash_duo.wait_for_text_to_equal("#output", '[2, 2, "A"]')
dash_duo.wait_for_text_to_equal("#async", "A")
assert dash_duo.get_logs() == []
def test_async_cbsc010_event_properties(dash_duo):
if not is_dash_async():
return
app = Dash(__name__)
app.layout = html.Div([html.Button("Click Me", id="button"), html.Div(id="output")])
call_count = Value("i", 0)
@app.callback(Output("output", "children"), [Input("button", "n_clicks")])
async def update_output(n_clicks):
if not n_clicks:
raise PreventUpdate
call_count.value += 1
return "Click"
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output", "")
assert call_count.value == 0
dash_duo.find_element("#button").click()
dash_duo.wait_for_text_to_equal("#output", "Click")
assert call_count.value == 1
def test_async_cbsc011_one_call_for_multiple_outputs_initial(dash_duo):
if not is_dash_async():
return
app = Dash(__name__)
call_count = Value("i", 0)
app.layout = html.Div(
[
html.Div(
[dcc.Input(value=f"Input {i}", id=f"input-{i}") for i in range(10)]
),
html.Div(id="container"),
dcc.RadioItems(),
]
)
@app.callback(
Output("container", "children"),
[Input(f"input-{i}", "value") for i in range(10)],
)
async def dynamic_output(*args):
call_count.value += 1
return json.dumps(args)
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#input-9", "Input 9")
dash_duo.wait_for_contains_text("#container", "Input 9")
assert call_count.value == 1
inputs = [f'"Input {i}"' for i in range(10)]
expected = f'[{", ".join(inputs)}]'
dash_duo.wait_for_text_to_equal("#container", expected)
assert dash_duo.get_logs() == []
def test_async_cbsc012_one_call_for_multiple_outputs_update(dash_duo):
    """Inputs inserted dynamically after a click trigger exactly one callback call."""
    if not is_dash_async():
        return
    # The ten inputs only exist after a click, so suppress the "unknown id"
    # callback validation.
    app = Dash(__name__, suppress_callback_exceptions=True)
    call_count = Value("i", 0)
    app.layout = html.Div(
        [
            html.Button(id="display-content", children="Display Content"),
            html.Div(id="container"),
            dcc.RadioItems(),
        ]
    )

    @app.callback(Output("container", "children"), Input("display-content", "n_clicks"))
    async def display_output(n_clicks):
        if not n_clicks:
            return ""
        return html.Div(
            [
                html.Div(
                    [dcc.Input(value=f"Input {i}", id=f"input-{i}") for i in range(10)]
                ),
                html.Div(id="dynamic-output"),
            ]
        )

    @app.callback(
        Output("dynamic-output", "children"),
        [Input(f"input-{i}", "value") for i in range(10)],
    )
    async def dynamic_output(*args):
        call_count.value += 1
        return json.dumps(args)

    dash_duo.start_server(app)
    dash_duo.find_element("#display-content").click()
    dash_duo.wait_for_text_to_equal("#input-9", "Input 9")
    ### order altered from the original, as these are non-blocking callbacks now
    inputs = [f'"Input {i}"' for i in range(10)]
    expected = f'[{", ".join(inputs)}]'
    dash_duo.wait_for_text_to_equal("#dynamic-output", expected)
    assert call_count.value == 1
    assert dash_duo.get_logs() == []
def test_async_cbsc013_multi_output_out_of_order(dash_duo):
    """Out-of-order responses of a multi-output callback settle on the last click."""
    if not is_dash_async():
        return
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("Click", id="input", n_clicks=0),
            html.Div(id="output1"),
            html.Div(id="output2"),
        ]
    )
    call_count = Value("i", 0)
    lock = Lock()

    @app.callback(
        Output("output1", "children"),
        Output("output2", "children"),
        Input("input", "n_clicks"),
    )
    async def update_output(n_clicks):
        call_count.value += 1
        if n_clicks == 1:
            # Stall the first click's response until the test releases the
            # lock, forcing it to arrive AFTER the second click's response.
            with lock:
                pass
        return n_clicks, n_clicks + 1

    dash_duo.start_server(app)
    button = dash_duo.find_element("#input")
    with lock:
        button.click()
        button.click()
    # Final UI state must reflect the latest click, not the delayed first one.
    dash_duo.wait_for_text_to_equal("#output1", "2")
    dash_duo.wait_for_text_to_equal("#output2", "3")
    # One initial call plus one per click.
    assert call_count.value == 3
    assert dash_duo.driver.execute_script("return !window.store.getState().isLoading;")
    assert dash_duo.get_logs() == []
def test_async_cbsc014_multiple_properties_update_at_same_time_on_same_component(
    dash_duo,
):
    """Two props of the same component changing together arrive in one call."""
    if not is_dash_async():
        return
    call_count = Value("i", 0)
    # -5 is a sentinel distinct from the buttons' initial -1 timestamps.
    timestamp_1 = Value("d", -5)
    timestamp_2 = Value("d", -5)
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div(id="container"),
            html.Button("Click 1", id="button-1", n_clicks=0, n_clicks_timestamp=-1),
            html.Button("Click 2", id="button-2", n_clicks=0, n_clicks_timestamp=-1),
        ]
    )

    @app.callback(
        Output("container", "children"),
        Input("button-1", "n_clicks"),
        Input("button-1", "n_clicks_timestamp"),
        Input("button-2", "n_clicks"),
        Input("button-2", "n_clicks_timestamp"),
    )
    async def update_output(n1, t1, n2, t2):
        call_count.value += 1
        timestamp_1.value = t1
        timestamp_2.value = t2
        return f"{n1}, {n2}"

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#container", "0, 0")
    assert timestamp_1.value == -1
    assert timestamp_2.value == -1
    assert call_count.value == 1
    dash_duo.find_element("#button-1").click()
    dash_duo.wait_for_text_to_equal("#container", "1, 0")
    # The click timestamp (ms) must be recent — within the last 24 hours.
    assert timestamp_1.value > ((time.time() - (24 * 60 * 60)) * 1000)
    assert timestamp_2.value == -1
    assert call_count.value == 2
    prev_timestamp_1 = timestamp_1.value
    dash_duo.find_element("#button-2").click()
    dash_duo.wait_for_text_to_equal("#container", "1, 1")
    assert timestamp_1.value == prev_timestamp_1
    assert timestamp_2.value > ((time.time() - 24 * 60 * 60) * 1000)
    assert call_count.value == 3
    prev_timestamp_2 = timestamp_2.value
    dash_duo.find_element("#button-2").click()
    dash_duo.wait_for_text_to_equal("#container", "1, 2")
    assert timestamp_1.value == prev_timestamp_1
    assert timestamp_2.value > prev_timestamp_2
    assert timestamp_2.value > timestamp_1.value
    assert call_count.value == 4
def test_async_cbsc016_extra_components_callback(dash_duo):
    """Components registered via app._extra_components can feed async callbacks."""
    if not is_dash_async():
        return
    lock = Lock()
    app = Dash(__name__)
    # pylint: disable=protected-access
    app._extra_components.append(dcc.Store(id="extra-store", data=123))
    app.layout = html.Div(
        [
            dcc.Input(id="input", value="initial value"),
            html.Div(html.Div([1.5, None, "string", html.Div(id="output-1")])),
        ]
    )
    store_data = Value("i", 0)

    @app.callback(
        Output("output-1", "children"),
        [Input("input", "value"), Input("extra-store", "data")],
    )
    async def update_output(value, data):
        with lock:
            store_data.value = data
        return value

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#output-1", "initial value")
    input_ = dash_duo.find_element("#input")
    dash_duo.clear_input(input_)
    input_.send_keys("A")
    dash_duo.wait_for_text_to_equal("#output-1", "A")
    # The extra store's data was delivered to the callback.
    assert store_data.value == 123
    assert dash_duo.get_logs() == []
def test_async_cbsc018_callback_ndarray_output(dash_duo):
    """A numpy ndarray returned from an async callback serializes without errors."""
    if not is_dash_async():
        return
    app = Dash(__name__)
    app.layout = html.Div([dcc.Store(id="output"), html.Button("click", id="clicker")])

    @app.callback(
        Output("output", "data"),
        Input("clicker", "n_clicks"),
    )
    async def on_click(_):
        return np.array([[1, 2, 3], [4, 5, 6]], np.int32)

    dash_duo.start_server(app)
    # No browser console errors means the ndarray was serialized cleanly.
    assert dash_duo.get_logs() == []
def test_async_cbsc019_callback_running(dash_duo):
    """The `running` argument toggles props while an async callback executes."""
    if not is_dash_async():
        return
    lock = Lock()
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("off", id="running"),
            html.Button("start", id="start"),
            html.Div(id="output"),
        ]
    )

    @app.callback(
        Output("output", "children"),
        Input("start", "n_clicks"),
        running=[[Output("running", "children"), html.B("on", id="content"), "off"]],
        prevent_initial_call=True,
    )
    async def on_click(_):
        # Block until the test releases the lock so the "running" state
        # stays observable long enough to assert on.
        with lock:
            pass
        return "done"

    dash_duo.start_server(app)
    with lock:
        dash_duo.find_element("#start").click()
        dash_duo.wait_for_text_to_equal("#content", "on")
    dash_duo.wait_for_text_to_equal("#output", "done")
    dash_duo.wait_for_text_to_equal("#running", "off")
def test_async_cbsc020_callback_running_non_existing_component(dash_duo):
    """A `running` target missing from the layout is tolerated when exceptions are suppressed."""
    if not is_dash_async():
        return
    lock = Lock()
    app = Dash(__name__, suppress_callback_exceptions=True)
    app.layout = html.Div(
        [
            html.Button("start", id="start"),
            html.Div(id="output"),
        ]
    )

    @app.callback(
        Output("output", "children"),
        Input("start", "n_clicks"),
        running=[
            [
                Output("non_existent_component", "children"),
                html.B("on", id="content"),
                "off",
            ]
        ],
        prevent_initial_call=True,
    )
    async def on_click(_):
        with lock:
            pass
        return "done"

    dash_duo.start_server(app)
    with lock:
        dash_duo.find_element("#start").click()
    # The callback still completes despite the bogus `running` target.
    dash_duo.wait_for_text_to_equal("#output", "done")
def test_async_cbsc021_callback_running_non_existing_component(dash_duo):
    """With devtools enabled, a missing `running` target surfaces two FE errors.

    Same app as cbsc020 but without suppress_callback_exceptions and with the
    debugger on, so the missing component must be reported once when switching
    the running prop on and once when switching it off.
    """
    if not is_dash_async():
        return
    lock = Lock()
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Button("start", id="start"),
            html.Div(id="output"),
        ]
    )

    @app.callback(
        Output("output", "children"),
        Input("start", "n_clicks"),
        running=[
            [
                Output("non_existent_component", "children"),
                html.B("on", id="content"),
                "off",
            ]
        ],
        prevent_initial_call=True,
    )
    async def on_click(_):
        with lock:
            pass
        return "done"

    dash_duo.start_server(
        app,
        debug=True,
        use_reloader=False,
        use_debugger=True,
        dev_tools_hot_reload=False,
    )
    with lock:
        dash_duo.find_element("#start").click()
    dash_duo.wait_for_text_to_equal("#output", "done")
    error_title = "ID running component not found in layout"
    error_message = [
        "Component defined in running keyword not found in layout.",
        'Component id: "non_existent_component"',
        "This ID was used in the callback(s) for Output(s):",
        "output.children",
        "You can suppress this exception by setting",
        "`suppress_callback_exceptions=True`.",
    ]
    # The error should show twice, once for trying to set running on and once for
    # turning it off.
    dash_duo.wait_for_text_to_equal(dash_duo.devtools_error_count_locator, "2")
    for error in dash_duo.find_elements(".dash-fe-error__title"):
        assert error.text == error_title
    for error_text in dash_duo.find_elements(".dash-backend-error"):
        # Fix: compare against the element's text, not the WebElement object —
        # `line in WebElement` raises TypeError whenever such an element exists.
        assert all(line in error_text.text for line in error_message)
| {
"repo_id": "plotly/dash",
"file_path": "tests/async_tests/test_async_callbacks.py",
"license": "MIT License",
"lines": 719,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/utils.py | import os
import sys
import shutil
import subprocess
import tempfile
import time
from contextlib import contextmanager
import psutil
import redis
from dash.background_callback import DiskcacheManager
# Module-global handle on the most recently built manager, set by
# get_background_callback_manager() so teardown code can reach running jobs.
manager = None
class TestDiskCacheManager(DiskcacheManager):
    """DiskcacheManager that records the pid of every job it launches.

    The recorded pids let test teardown terminate jobs still running after a
    test finishes.
    """

    def __init__(self, cache=None, cache_by=None, expire=None):
        super().__init__(cache=cache, cache_by=cache_by, expire=expire)
        # Pids returned by call_job_fn, in launch order.
        self.running_jobs = []

    def call_job_fn(
        self,
        key,
        job_fn,
        args,
        context,
    ):
        # Delegate to the real manager, then remember the spawned job's pid.
        pid = super().call_job_fn(key, job_fn, args, context)
        self.running_jobs.append(pid)
        return pid
def get_background_callback_manager():
    """Build the background-callback manager selected by environment variables.

    ``LONG_CALLBACK_MANAGER`` must be ``"celery"`` (Redis-backed CeleryManager)
    or ``"diskcache"`` (TestDiskCacheManager over a local diskcache dir).  The
    chosen manager gets a ``test_lock`` attribute tests use to hold callbacks
    open, and is stored in the module-global ``manager`` so teardown helpers
    can terminate outstanding jobs.

    Raises:
        ValueError: if LONG_CALLBACK_MANAGER is unset or unrecognized.
    """
    if os.environ.get("LONG_CALLBACK_MANAGER", None) == "celery":
        from dash.background_callback import CeleryManager
        from celery import Celery

        # Broker/backend URLs are exported by setup_background_callback_app.
        celery_app = Celery(
            __name__,
            broker=os.environ.get("CELERY_BROKER"),
            backend=os.environ.get("CELERY_BACKEND"),
        )
        background_callback_manager = CeleryManager(celery_app)
        # `redis` is already imported at module scope — the previous local
        # re-import was redundant and shadowed it.
        redis_conn = redis.Redis(host="localhost", port=6379, db=1)
        background_callback_manager.test_lock = redis_conn.lock("test-lock")
    elif os.environ.get("LONG_CALLBACK_MANAGER", None) == "diskcache":
        import diskcache

        cache = diskcache.Cache(os.environ.get("DISKCACHE_DIR"))
        background_callback_manager = TestDiskCacheManager(cache)
        background_callback_manager.test_lock = diskcache.Lock(cache, "test-lock")
    else:
        raise ValueError(
            "Invalid long callback manager specified as LONG_CALLBACK_MANAGER "
            "environment variable"
        )
    global manager
    manager = background_callback_manager
    return background_callback_manager
def kill(proc_pid):
    """Kill the process identified by *proc_pid* together with its whole subtree."""
    parent = psutil.Process(proc_pid)
    # Kill descendants first so none get reparented and left running.
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()
@contextmanager
def setup_background_callback_app(manager_name, app_name):
    """Import test app *app_name* configured for the given background backend.

    For "celery": points the app at the local Redis, clears stale cached
    values, starts a celery worker subprocess, and waits for its "ready"
    banner before yielding.  For "diskcache": creates a throwaway cache
    directory.  On exit, environment variables are removed, workers/jobs are
    terminated, and the pages registry is cleared.

    Yields:
        The imported app module (via dash.testing's import_app).
    """
    from dash.testing.application_runners import import_app

    if manager_name == "celery":
        os.environ["LONG_CALLBACK_MANAGER"] = "celery"
        redis_url = os.environ["REDIS_URL"].rstrip("/")
        os.environ["CELERY_BROKER"] = f"{redis_url}/0"
        os.environ["CELERY_BACKEND"] = f"{redis_url}/1"

        # Clear redis of cached values
        redis_conn = redis.Redis(host="localhost", port=6379, db=1)
        cache_keys = redis_conn.keys()
        if cache_keys:
            redis_conn.delete(*cache_keys)

        worker = subprocess.Popen(
            [
                sys.executable,
                "-m",
                "celery",
                "-A",
                f"tests.background_callback.{app_name}:handle",
                "worker",
                "-P",
                "prefork",
                "--concurrency",
                "2",
                "--loglevel=info",
            ],
            encoding="utf8",
            preexec_fn=os.setpgrp,
            stderr=subprocess.PIPE,
        )
        # Wait for the worker to be ready, if you cancel before it is ready, the job
        # will still be queued.
        lines = []
        for line in iter(worker.stderr.readline, ""):
            if "ready" in line:
                break
            lines.append(line)
        else:
            # for/else: stderr hit EOF without a "ready" banner — startup failed.
            error = "\n".join(lines)
            error += f"\nPath: {sys.path}"
            raise RuntimeError(f"celery failed to start: {error}")
        try:
            yield import_app(f"tests.background_callback.{app_name}")
        finally:
            # Interval may run one more time after settling on final app state
            # Sleep for 1 interval of time
            time.sleep(0.5)
            os.environ.pop("LONG_CALLBACK_MANAGER")
            os.environ.pop("CELERY_BROKER")
            os.environ.pop("CELERY_BACKEND")
            kill(worker.pid)
            from dash import page_registry

            page_registry.clear()
    elif manager_name == "diskcache":
        os.environ["LONG_CALLBACK_MANAGER"] = "diskcache"
        cache_directory = tempfile.mkdtemp(prefix="lc-diskcache-")
        print(cache_directory)
        os.environ["DISKCACHE_DIR"] = cache_directory
        try:
            app = import_app(f"tests.background_callback.{app_name}")
            yield app
        finally:
            # Interval may run one more time after settling on final app state
            # Sleep for a couple of intervals
            time.sleep(2.0)
            # Terminate any jobs the diskcache manager still tracks as running.
            if hasattr(manager, "running_jobs"):
                for job in manager.running_jobs:
                    manager.terminate_job(job)
                time.sleep(1)
            shutil.rmtree(cache_directory, ignore_errors=True)
            os.environ.pop("LONG_CALLBACK_MANAGER")
            os.environ.pop("DISKCACHE_DIR")
            from dash import page_registry

            page_registry.clear()
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/utils.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/compliance/test_typing.py | import os
import shlex
import subprocess
import sys
import json
import sysconfig
import pytest
component_template = """
from dash_generator_test_component_typescript import TypeScriptComponent
t = TypeScriptComponent({0})
"""
basic_app_template = """
from dash import Dash, html, dcc, callback, Input, Output
app = Dash()
{0}
app.layout = {1}
@callback(Output("out", "children"), Input("btn", "n_clicks"))
def on_click() -> html.Div:
return {2}
"""
valid_layout = """html.Div([
html.H2('Valid'),
'String in middle',
123,
404.4,
dcc.Input(value='', id='in')
])
"""
valid_layout_list = """[
html.H2('Valid'),
'String in middle',
123,
404.4,
dcc.Input(value='', id='in')
]
"""
valid_layout_function = """
def layout() -> html.Div:
return html.Div(["hello layout"])
"""
invalid_layout = """html.Div([
{"invalid": "dictionary in children"}
])
"""
# There is not invalid layout for function & list as explicitly typed as Any to avoid special cases.
valid_callback = "html.Div('Valid')"
invalid_callback = "[]"
def run_module(codefile: str, module: str, extra: str = ""):
    """Type-check *codefile* with *module* ("pyright" or "mypy") in a subprocess.

    Builds the per-checker environment — a temporary pyrightconfig.json for
    pyright, MYPYPATH for mypy — so editable installs and the generated test
    components resolve correctly.

    Returns:
        Tuple of (stdout, stderr, returncode) from the checker process.
    """
    config_file_to_cleanup = None
    # For pyright, create a pyrightconfig.json to help it find installed packages
    # and adjust the command to use relative path
    if module == "pyright":
        config_dir = os.path.dirname(codefile)
        config_file = os.path.join(config_dir, "pyrightconfig.json")
        # For editable installs, we need to find the actual source location
        # The test component is installed as an editable package
        project_root = os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        )
        # Get the site-packages directory for standard packages
        site_packages = sysconfig.get_path("purelib")
        # Check if dash is installed as editable or regular install
        # If editable, we need project root first; if regular, site-packages first
        import dash

        dash_file = dash.__file__
        is_editable = project_root in dash_file
        if is_editable:
            # Editable install: prioritize project root
            extra_paths = [project_root, site_packages]
        else:
            # Regular install (CI): prioritize site-packages
            extra_paths = [site_packages, project_root]
        # Add the test component source directories
        # They are in the @plotly subdirectory of the project root
        test_components_dir = os.path.join(project_root, "@plotly")
        if os.path.exists(test_components_dir):
            for component in os.listdir(test_components_dir):
                component_path = os.path.join(test_components_dir, component)
                if os.path.isdir(component_path):
                    extra_paths.append(component_path)
        # For files in /tmp (component tests), we need a different approach
        # Include the directory containing the test file
        test_file_dir = os.path.dirname(codefile)
        config = {
            "pythonVersion": f"{sys.version_info.major}.{sys.version_info.minor}",
            "pythonPlatform": sys.platform,
            "executionEnvironments": [
                {"root": project_root, "extraPaths": extra_paths},
                {"root": test_file_dir, "extraPaths": extra_paths},
            ],
        }
        # Write config to project root instead of test directory
        config_file = os.path.join(project_root, "pyrightconfig.json")
        config_file_to_cleanup = config_file  # Store for cleanup later
        with open(config_file, "w") as f:
            json.dump(config, f)
        # Run pyright from project root with absolute path to test file
        codefile_arg = codefile
        cwd = project_root
    else:
        codefile_arg = codefile
        cwd = None
    cmd = shlex.split(
        f"{sys.executable} -m {module} {codefile_arg}{extra}",
        posix=sys.platform != "win32",
        comments=True,
    )
    env = os.environ.copy()
    # For mypy, set MYPYPATH to help it find editable installs
    # Note: mypy doesn't want site-packages in MYPYPATH
    if module == "mypy":
        project_root = os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        )
        test_components_dir = os.path.join(project_root, "@plotly")
        mypy_paths = [project_root]
        if os.path.exists(test_components_dir):
            for component in os.listdir(test_components_dir):
                component_path = os.path.join(test_components_dir, component)
                if os.path.isdir(component_path):
                    mypy_paths.append(component_path)
        env["MYPYPATH"] = os.pathsep.join(mypy_paths)
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        cwd=cwd,
    )
    out, err = proc.communicate()
    # Cleanup pyrightconfig.json if we created it
    if config_file_to_cleanup and os.path.exists(config_file_to_cleanup):
        try:
            os.remove(config_file_to_cleanup)
        except OSError:
            pass  # Ignore cleanup errors
    return out.decode(), err.decode(), proc.poll()
def assert_output(
    codefile: str,
    code: str,
    expected_outputs=tuple(),
    expected_errors=tuple(),
    expected_status=0,
    module="pyright",
):
    """Run the type checker on *codefile*; assert exit status and output substrings.

    *code* appears only in assertion failure messages.  NOTE(review):
    *expected_errors* is accepted but never checked against stderr — confirm
    whether that is intentional.
    """
    output, error, status = run_module(codefile, module)
    assert (
        status == expected_status
    ), f"Status: {status}\nOutput: {output}\nError: {error}\nCode: {code}"
    for ex_out in expected_outputs:
        assert ex_out in output, f"Invalid output:\n {output}\n\nCode: {code}"
def format_template_and_save(template, filename, *args):
    """Render *template* with positional *args*, write it to *filename*, return the text."""
    rendered = template.format(*args)
    with open(filename, "w") as handle:
        handle.write(rendered)
    return rendered
def expect(status=None, outputs=None, modular=False):
    """Build the kwargs dict consumed by assert_output.

    Only the values actually supplied are included; *modular* is added only
    when truthy (meaning the expectations are per type-checker module).
    """
    candidates = (("expected_status", status), ("expected_outputs", outputs))
    expectations = {key: value for key, value in candidates if value is not None}
    if modular:
        # The expectations are per module.
        expectations["modular"] = modular
    return expectations
@pytest.fixture()
def change_dir():
    """Yield a chdir helper; the original working directory is restored on teardown."""
    original_dir = os.getcwd()

    def change(dirname):
        os.chdir(dirname)

    yield change
    # Teardown: undo whatever directory changes the test made.
    os.chdir(original_dir)
# Each case: a kwargs string for TypeScriptComponent plus the pyright result
# expected for it (exit status and, optionally, substrings of its output).
@pytest.mark.parametrize(
    "arguments, assertions",
    [
        (
            "a_string=4",
            {
                "expected_status": 1,
                "expected_outputs": [
                    'Argument of type "Literal[4]" cannot be assigned to parameter "a_string" of type "str | None"'
                ],
            },
        ),
        (
            "a_string='FooBar'",
            {
                "expected_status": 0,
            },
        ),
        (
            "a_number=''",
            {
                "expected_status": 1,
                "expected_outputs": [
                    'Argument of type "Literal[\'\']" cannot be assigned to parameter "a_number" ',
                    '"__float__" is not present',
                    '"__int__" is not present',
                    '"__complex__" is not present',
                ],
            },
        ),
        (
            "a_number=0",
            {
                "expected_status": 0,
            },
        ),
        (
            "a_number=2.2",
            {
                "expected_status": 0,
            },
        ),
        (
            "a_bool=4",
            {
                "expected_status": 1,
            },
        ),
        (
            "a_bool=True",
            {
                "expected_status": 0,
            },
        ),
        (
            "array_string={}",
            {
                "expected_status": 1,
                "expected_outputs": [
                    'Argument of type "dict[Any, Any]" cannot be assigned to parameter "array_string" '
                    'of type "Sequence[str] | None"'
                ],
            },
        ),
        (
            "array_string=[]",
            {
                "expected_status": 0,
            },
        ),
        (
            "array_string=[1,2,4]",
            {
                "expected_status": 1,
            },
        ),
        (
            "array_number=[1,2]",
            {
                "expected_status": 0,
            },
        ),
        (
            "array_number=['not','a', 'number']",
            {
                "expected_status": 1,
            },
        ),
        (
            "array_obj=[{'a': 'b'}]",
            {
                "expected_status": 0,
            },
        ),
        (
            "array_obj=[1]",
            {
                "expected_status": 1,
            },
        ),
        (
            "array_obj=[1, {}]",
            {
                "expected_status": 1,
            },
        ),
        (
            "union='Union'",
            {
                "expected_status": 0,
            },
        ),
        (
            "union=1",
            {
                "expected_status": 0,
            },
        ),
        (
            "union=0.42",
            {
                "expected_status": 0,
            },
        ),
        (
            "union=[]",
            {
                "expected_status": 1,
            },
        ),
        (
            "element=[]",
            {
                "expected_status": 0,
            },
        ),
        (
            "element=[TypeScriptComponent()]",
            {
                "expected_status": 0,
            },
        ),
        (
            "element=TypeScriptComponent()",
            {
                "expected_status": 0,
            },
        ),
        pytest.param(
            "element=set()",
            {
                "expected_status": 1,
            },
            marks=pytest.mark.skip(reason="Ignoring element=set() test case"),
        ),
        (
            "a_tuple=(1,2)",
            {
                "expected_status": 1,
                "expected_outputs": [
                    'Argument of type "tuple[Literal[1], Literal[2]]" cannot be assigned '
                    'to parameter "a_tuple" of type "Tuple[NumberType, str] | None'
                ],
            },
        ),
        (
            "a_tuple=(1, 'tuple')",
            {
                "expected_status": 0,
            },
        ),
        (
            "obj=set()",
            {
                "expected_status": 1,
            },
        ),
        (
            "obj={}",
            {
                "expected_status": 1,
                "expected_outputs": [
                    '"dict[Any, Any]" cannot be assigned to parameter "obj" of type "Obj | None"'
                ],
            },
        ),
        (
            "obj={'value': 'a', 'label': 1}",
            {
                "expected_status": 1,
                "expected_outputs": [
                    '"dict[str, str | int]" cannot be assigned to parameter "obj" of type "Obj | None"'
                ],
            },
        ),
        (
            "obj={'value': 'a', 'label': 'lab'}",
            {
                "expected_status": 0,
            },
        ),
    ],
)
def test_typi001_component_typing(arguments, assertions, tmp_path):
    """Instantiate TypeScriptComponent with *arguments* and pyright-check the file."""
    codefile = os.path.join(tmp_path, "code.py")
    code = format_template_and_save(component_template, codefile, arguments)
    assert_output(codefile, code, module="pyright", **assertions)
# pyright always runs; mypy is added on 3.10+ only — presumably because the
# generated apps use `X | Y` union syntax mypy can't parse earlier (TODO confirm).
typing_modules = ["pyright"]
if sys.version_info.minor >= 10:
    typing_modules.append("mypy")
@pytest.mark.parametrize("typing_module", typing_modules)
@pytest.mark.parametrize(
    "prelayout, layout, callback_return, assertions",
    [
        ("", valid_layout, valid_callback, expect(status=0)),
        ("", valid_layout_list, valid_callback, expect(status=0)),
        (valid_layout_function, "layout", valid_callback, expect(status=0)),
        ("", valid_layout, invalid_callback, expect(status=1)),
        ("", invalid_layout, valid_callback, expect(status=1)),
    ],
)
def test_typi002_typing_compliance(
    typing_module, prelayout, layout, callback_return, assertions, tmp_path
):
    """Generated Dash apps pass or fail each type checker exactly as expected."""
    codefile = os.path.join(tmp_path, "code.py")
    code = format_template_and_save(
        basic_app_template, codefile, prelayout, layout, callback_return
    )
    assert_output(codefile, code, module=typing_module, **assertions)
| {
"repo_id": "plotly/dash",
"file_path": "tests/compliance/test_typing.py",
"license": "MIT License",
"lines": 402,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/async_tests/utils.py | # pylint: disable=import-outside-toplevel,global-statement,subprocess-popen-preexec-fn,W0201
import os
import shutil
import subprocess
import tempfile
import time
from contextlib import contextmanager
import psutil
import redis
from dash.background_callback import DiskcacheManager
# Module-global handle on the most recently built manager, set by
# get_background_callback_manager() so teardown code can reach running jobs.
manager = None
class TestDiskCacheManager(DiskcacheManager):
    """DiskcacheManager that records the pid of every job it launches.

    The recorded pids let test teardown terminate jobs still running after a
    test finishes.
    """

    def __init__(self, cache=None, cache_by=None, expire=None):
        super().__init__(cache=cache, cache_by=cache_by, expire=expire)
        # Pids returned by call_job_fn, in launch order.
        self.running_jobs = []

    def call_job_fn(
        self,
        key,
        job_fn,
        args,
        context,
    ):
        # Delegate to the real manager, then remember the spawned job's pid.
        pid = super().call_job_fn(key, job_fn, args, context)
        self.running_jobs.append(pid)
        return pid
def get_background_callback_manager():
    """
    Get the long callback manager configured by environment variables.

    LONG_CALLBACK_MANAGER selects "celery" (Redis-backed CeleryManager) or
    "diskcache" (TestDiskCacheManager).  The chosen manager gets a
    `test_lock` attribute and is stored in the module-global `manager`.

    Raises:
        ValueError: if LONG_CALLBACK_MANAGER is unset or unrecognized.
    """
    if os.environ.get("LONG_CALLBACK_MANAGER", None) == "celery":
        from dash.background_callback import CeleryManager
        from celery import Celery

        # Broker/backend URLs are exported by setup_background_callback_app.
        celery_app = Celery(
            __name__,
            broker=os.environ.get("CELERY_BROKER"),
            backend=os.environ.get("CELERY_BACKEND"),
        )
        background_callback_manager = CeleryManager(celery_app)
        redis_conn = redis.Redis(host="localhost", port=6379, db=1)
        background_callback_manager.test_lock = redis_conn.lock("test-lock")
    elif os.environ.get("LONG_CALLBACK_MANAGER", None) == "diskcache":
        import diskcache

        cache = diskcache.Cache(os.environ.get("DISKCACHE_DIR"))
        background_callback_manager = TestDiskCacheManager(cache)
        background_callback_manager.test_lock = diskcache.Lock(cache, "test-lock")
    else:
        raise ValueError(
            "Invalid long callback manager specified as LONG_CALLBACK_MANAGER "
            "environment variable"
        )
    global manager
    manager = background_callback_manager
    return background_callback_manager
def kill(proc_pid):
    """Kill the process identified by *proc_pid* together with its whole subtree."""
    parent = psutil.Process(proc_pid)
    # Kill descendants first so none get reparented and left running.
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()
@contextmanager
def setup_background_callback_app(manager_name, app_name):
    """Import async test app *app_name* configured for the given backend.

    Mirrors tests.background_callback.utils.setup_background_callback_app
    but imports modules from ``tests.async_tests``.  Sets up Redis/celery or
    a throwaway diskcache directory before yielding; tears down environment
    variables, worker processes, jobs, and the pages registry afterwards.

    Yields:
        The imported app module (via dash.testing's import_app).
    """
    from dash.testing.application_runners import import_app

    if manager_name == "celery":
        os.environ["LONG_CALLBACK_MANAGER"] = "celery"
        redis_url = os.environ["REDIS_URL"].rstrip("/")
        os.environ["CELERY_BROKER"] = f"{redis_url}/0"
        os.environ["CELERY_BACKEND"] = f"{redis_url}/1"

        # Clear redis of cached values
        redis_conn = redis.Redis(host="localhost", port=6379, db=1)
        cache_keys = redis_conn.keys()
        if cache_keys:
            redis_conn.delete(*cache_keys)

        worker = subprocess.Popen(
            [
                "celery",
                "-A",
                f"tests.async_tests.{app_name}:handle",
                "worker",
                "-P",
                "prefork",
                "--concurrency",
                "2",
                "--loglevel=info",
            ],
            encoding="utf8",
            preexec_fn=os.setpgrp,
            stderr=subprocess.PIPE,
        )
        # Wait for the worker to be ready, if you cancel before it is ready, the job
        # will still be queued.
        lines = []
        for line in iter(worker.stderr.readline, ""):
            if "ready" in line:
                break
            lines.append(line)
        else:
            # for/else: stderr hit EOF without a "ready" banner — startup failed.
            error = "\n".join(lines)
            raise RuntimeError(f"celery failed to start: {error}")
        try:
            yield import_app(f"tests.async_tests.{app_name}")
        finally:
            # Interval may run one more time after settling on final app state
            # Sleep for 1 interval of time
            time.sleep(0.5)
            os.environ.pop("LONG_CALLBACK_MANAGER")
            os.environ.pop("CELERY_BROKER")
            os.environ.pop("CELERY_BACKEND")
            kill(worker.pid)
            from dash import page_registry

            page_registry.clear()
    elif manager_name == "diskcache":
        os.environ["LONG_CALLBACK_MANAGER"] = "diskcache"
        cache_directory = tempfile.mkdtemp(prefix="lc-diskcache-")
        print(cache_directory)
        os.environ["DISKCACHE_DIR"] = cache_directory
        try:
            app = import_app(f"tests.async_tests.{app_name}")
            yield app
        finally:
            # Interval may run one more time after settling on final app state
            # Sleep for a couple of intervals
            time.sleep(2.0)
            # Terminate any jobs the diskcache manager still tracks as running.
            if hasattr(manager, "running_jobs"):
                for job in manager.running_jobs:
                    manager.terminate_job(job)
            shutil.rmtree(cache_directory, ignore_errors=True)
            os.environ.pop("LONG_CALLBACK_MANAGER")
            os.environ.pop("DISKCACHE_DIR")
            from dash import page_registry

            page_registry.clear()
| {
"repo_id": "plotly/dash",
"file_path": "tests/async_tests/utils.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_ctx_cookies.py | from dash import Dash, Input, Output, html, callback, ctx
from tests.background_callback.utils import get_background_callback_manager
# Test app: a foreground callback sets a cookie; a background callback reads
# it back via ctx.cookies.  `handle` is the celery task handle the worker
# process imports.
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle

app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
    [
        html.Button("set-cookies", id="set-cookies"),
        html.Button("use-cookies", id="use-cookies"),
        html.Div(id="intermediate"),
        html.Div("output", id="output"),
    ]
)
# Expose the manager's lock so tests can hold background callbacks open.
app.test_lock = lock = background_callback_manager.test_lock


@callback(
    Output("intermediate", "children"),
    Input("set-cookies", "n_clicks"),
    prevent_initial_call=True,
)
def set_cookies(_):
    """Foreground callback: store a cookie on the HTTP response."""
    ctx.response.set_cookie("bg-cookie", "cookie-value")
    return "ok"


@callback(
    Output("output", "children"),
    Input("use-cookies", "n_clicks"),
    prevent_initial_call=True,
    background=True,
)
def use_cookies(_):
    """Background callback: read back the cookie set by set_cookies."""
    value = ctx.cookies.get("bg-cookie")
    return value


if __name__ == "__main__":
    app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_ctx_cookies.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_ctx_cookies.py | from tests.background_callback.utils import setup_background_callback_app
def test_lcbc019_ctx_cookies(dash_duo, manager):
    """A cookie set by a regular callback is visible to a background callback."""
    with setup_background_callback_app(manager, "app_ctx_cookies") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("#set-cookies").click()
        dash_duo.wait_for_contains_text("#intermediate", "ok")
        dash_duo.find_element("#use-cookies").click()
        dash_duo.wait_for_contains_text("#output", "cookie-value")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_ctx_cookies.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_arbitrary.py | from dash import Dash, Input, Output, html, callback, set_props
import time
from tests.background_callback.utils import get_background_callback_manager
# Test app: background callbacks push arbitrary prop updates via set_props
# while running.  `handle` is the celery task handle the worker imports.
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle

app = Dash(__name__, background_callback_manager=background_callback_manager)
# Expose the manager's lock so tests can hold background callbacks open.
app.test_lock = lock = background_callback_manager.test_lock

app.layout = html.Div(
    [
        html.Button("start", id="start"),
        html.Div(id="secondary"),
        html.Div(id="no-output"),
        html.Div("initial", id="output"),
        html.Button("start-no-output", id="start-no-output"),
    ]
)


@callback(
    Output("output", "children"),
    Input("start", "n_clicks"),
    prevent_initial_call=True,
    background=True,
    # interval=500: poll interval (ms) for delivering job progress to the client.
    interval=500,
)
def on_click(_):
    """Push interim prop updates via set_props before the final return value."""
    set_props("secondary", {"children": "first"})
    set_props("secondary", {"style": {"background": "red"}})
    time.sleep(2)
    set_props("secondary", {"children": "second"})
    return "completed"
@callback(
    Input("start-no-output", "n_clicks"),
    prevent_initial_call=True,
    background=True,
)
def on_click_no_output(_):
    """Background callback with no Output: communicates only via set_props.

    Renamed from ``on_click`` to stop shadowing the callback defined above
    (flake8 F811).  Dash registers callbacks at decoration time, so the
    registered behavior is unchanged.
    """
    set_props("no-output", {"children": "started"})
    time.sleep(2)
    set_props("no-output", {"children": "completed"})


if __name__ == "__main__":
    app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_arbitrary.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_bg_on_error.py | from dash import Dash, Input, Output, html, set_props
from tests.background_callback.utils import get_background_callback_manager
# Test app: background callbacks that always raise, handled either by a
# per-callback on_error hook or by the app-wide on_error hook.
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle


def global_error_handler(err):
    """App-wide on_error hook: report the error in #global-output."""
    set_props("global-output", {"children": f"global: {err}"})


app = Dash(
    __name__,
    background_callback_manager=background_callback_manager,
    on_error=global_error_handler,
)
app.layout = [
    html.Button("callback on_error", id="start-cb-onerror"),
    html.Div(id="cb-output"),
    html.Button("global on_error", id="start-global-onerror"),
    html.Div(id="global-output"),
]


def callback_on_error(err):
    """Per-callback on_error hook: report the error in #cb-output."""
    set_props("cb-output", {"children": f"callback: {err}"})


@app.callback(
    Output("cb-output", "children"),
    Input("start-cb-onerror", "n_clicks"),
    prevent_initial_call=True,
    background=True,
    on_error=callback_on_error,
)
def on_click(_):
    # Always fails; the per-callback handler above must catch it.
    raise Exception("callback error")


@app.callback(
    Output("global-output", "children"),
    Input("start-global-onerror", "n_clicks"),
    prevent_initial_call=True,
    background=True,
)
def on_click_global(_):
    # Always fails; no per-callback handler, so the app-level hook fires.
    raise Exception("global error")


if __name__ == "__main__":
    app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_bg_on_error.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_callback_ctx.py | import json
from dash import Dash, Input, Output, html, callback, ALL, ctx
from tests.background_callback.utils import get_background_callback_manager
# Test app: a background callback on pattern-matched (ALL) button inputs that
# reports which button triggered it via ctx.triggered.
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle

app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
    [
        html.Button(id={"type": "run-button", "index": 0}, children="Run 1"),
        html.Button(id={"type": "run-button", "index": 1}, children="Run 2"),
        html.Button(id={"type": "run-button", "index": 2}, children="Run 3"),
        html.Div(id="result", children="No results"),
        html.Div(id="running"),
    ]
)
# Expose the manager's lock so tests can hold background callbacks open.
app.test_lock = lock = background_callback_manager.test_lock


@callback(
    Output("result", "children"),
    [Input({"type": "run-button", "index": ALL}, "n_clicks")],
    background=True,
    prevent_initial_call=True,
    running=[(Output("running", "children"), "on", "off")],
)
def update_output(n_clicks):
    """Report the triggering button's id dict and its click count as JSON."""
    # prop_id looks like '{"index":0,"type":"run-button"}.n_clicks'; the part
    # before the first "." is the JSON-encoded pattern-matching id.
    triggered = json.loads(ctx.triggered[0]["prop_id"].split(".")[0])
    return json.dumps(dict(triggered=triggered, value=n_clicks[triggered["index"]]))


if __name__ == "__main__":
    app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_callback_ctx.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_diff_outputs.py | from dash import Dash, Input, Output, html
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Button("click 1", id="button-1"),
html.Button("click 2", id="button-2"),
html.Div(id="output-1"),
html.Div(id="output-2"),
]
)
def gen_callback(index):
@app.callback(
Output(f"output-{index}", "children"),
Input(f"button-{index}", "n_clicks"),
background=True,
prevent_initial_call=True,
)
def callback_name(_):
return f"Clicked on {index}"
for i in range(1, 3):
gen_callback(i)
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_diff_outputs.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_error.py | import time
import dash
from dash import html, no_update
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = dash.Dash(__name__, background_callback_manager=background_callback_manager)
app.enable_dev_tools(debug=True, dev_tools_ui=True)
app.layout = html.Div(
[
html.Div([html.P(id="output", children=["Button not clicked"])]),
html.Button(id="button", children="Run Job!"),
html.Div(id="output-status"),
html.Div(id="output1"),
html.Div(id="output2"),
html.Div(id="output3"),
html.Button("multi-output", id="multi-output"),
]
)
app.test_lock = lock = background_callback_manager.test_lock
@app.callback(
output=Output("output", "children"),
inputs=Input("button", "n_clicks"),
running=[
(Output("button", "disabled"), True, False),
],
prevent_initial_call=True,
background=True,
)
def callback(n_clicks):
time.sleep(1)
if n_clicks == 2:
raise Exception("bad error")
if n_clicks == 4:
raise PreventUpdate
return f"Clicked {n_clicks} times"
@app.callback(
output=[Output("output-status", "children")]
+ [Output(f"output{i}", "children") for i in range(1, 4)],
inputs=[Input("multi-output", "n_clicks")],
running=[
(Output("multi-output", "disabled"), True, False),
],
prevent_initial_call=True,
background=True,
)
def long_multi(n_clicks):
time.sleep(1)
return (
[f"Updated: {n_clicks}"]
+ [i for i in range(1, n_clicks + 1)]
+ [no_update for _ in range(n_clicks + 1, 4)]
)
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_error.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_page_cancel.py | from dash import Dash, Input, Output, dcc, html, page_container, register_page
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(
__name__,
use_pages=True,
pages_folder="",
background_callback_manager=background_callback_manager,
)
app.layout = html.Div(
[
dcc.Link("page1", "/"),
dcc.Link("page2", "/2"),
html.Button("Cancel", id="shared_cancel"),
page_container,
]
)
app.test_lock = lock = background_callback_manager.test_lock
register_page(
"one",
"/",
layout=html.Div(
[
html.Button("start", id="start1"),
html.Button("cancel1", id="cancel1"),
html.Div("idle", id="progress1"),
html.Div("initial", id="output1"),
html.Div("no-cancel-btn", id="no-cancel-btn"),
html.Div("no-cancel", id="no-cancel-output"),
]
),
)
register_page(
"two",
"/2",
layout=html.Div(
[
html.Button("start2", id="start2"),
html.Button("cancel2", id="cancel2"),
html.Div("idle", id="progress2"),
html.Div("initial", id="output2"),
]
),
)
@app.callback(
Output("no-cancel-output", "children"),
Input("no-cancel-btn", "n_clicks"),
background=True,
prevent_initial_call=True,
)
def on_click_no_cancel(_):
return "Not Canceled"
@app.callback(
Output("output1", "children"),
Input("start1", "n_clicks"),
running=[
(Output("progress1", "children"), "running", "idle"),
],
cancel=[
Input("cancel1", "n_clicks"),
Input("shared_cancel", "n_clicks"),
],
background=True,
prevent_initial_call=True,
interval=300,
)
def on_click1(n_clicks):
with lock:
pass
return f"Click {n_clicks}"
@app.callback(
Output("output2", "children"),
Input("start2", "n_clicks"),
running=[
(Output("progress2", "children"), "running", "idle"),
],
cancel=[
Input("cancel2", "n_clicks"),
Input("shared_cancel", "n_clicks"),
],
background=True,
prevent_initial_call=True,
interval=300,
)
def on_click1(n_clicks):
with lock:
pass
return f"Click {n_clicks}"
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_page_cancel.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_pattern_matching.py | from dash import Dash, Input, Output, html, callback, ALL
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Button(id={"type": "run-button", "index": 0}, children="Run 1"),
html.Button(id={"type": "run-button", "index": 1}, children="Run 2"),
html.Button(id={"type": "run-button", "index": 2}, children="Run 3"),
html.Div(id="result", children="No results"),
]
)
app.test_lock = lock = background_callback_manager.test_lock
@callback(
Output("result", "children"),
[Input({"type": "run-button", "index": ALL}, "n_clicks")],
background=True,
prevent_initial_call=True,
)
def update_output(n_clicks):
found = max(x for x in n_clicks if x is not None)
return f"Clicked '{found}'"
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_pattern_matching.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_progress_delete.py | from dash import Dash, Input, Output, State, html, clientside_callback
import time
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Button("Start", id="start"),
html.Div(id="output"),
html.Div(id="progress-output"),
html.Div(0, id="progress-counter"),
]
)
clientside_callback(
"function(_, previous) { return parseInt(previous) + 1;}",
Output("progress-counter", "children"),
Input("progress-output", "children"),
State("progress-counter", "children"),
prevent_initial_call=True,
)
@app.callback(
Output("output", "children"),
Input("start", "n_clicks"),
progress=Output("progress-output", "children"),
interval=200,
background=True,
prevent_initial_call=True,
)
def on_bg_progress(set_progress, _):
set_progress("start")
time.sleep(2)
set_progress("stop")
return "done"
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_progress_delete.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_short_interval.py | from dash import Dash, Input, Output, html, callback
import time
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Button(id="run-button", children="Run"),
html.Button(id="cancel-button", children="Cancel"),
html.Div(id="status", children="Finished"),
html.Div(id="result", children="No results"),
]
)
app.test_lock = lock = background_callback_manager.test_lock
@callback(
Output("result", "children"),
[Input("run-button", "n_clicks")],
background=True,
progress=Output("status", "children"),
progress_default="Finished",
cancel=[Input("cancel-button", "n_clicks")],
interval=0,
prevent_initial_call=True,
)
def update_output(set_progress, n_clicks):
for i in range(4):
set_progress(f"Progress {i}/4")
time.sleep(1)
return f"Clicked '{n_clicks}'"
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_short_interval.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_side_update.py | from dash import Dash, Input, Output, html, callback
import time
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Button(id="run-button", children="Run"),
html.Button(id="cancel-button", children="Cancel"),
html.Div(id="status", children="Finished"),
html.Div(id="result", children="No results"),
html.Div(id="side-status"),
]
)
app.test_lock = lock = background_callback_manager.test_lock
@callback(
Output("result", "children"),
[Input("run-button", "n_clicks")],
background=True,
progress=Output("status", "children"),
progress_default="Finished",
cancel=[Input("cancel-button", "n_clicks")],
interval=0,
prevent_initial_call=True,
)
def update_output(set_progress, n_clicks):
for i in range(4):
set_progress(f"Progress {i}/4")
time.sleep(1)
return f"Clicked '{n_clicks}'"
@callback(
Output("side-status", "children"),
[Input("status", "children")],
prevent_initial_call=True,
)
def update_side(progress):
return f"Side {progress}"
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_side_update.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/app_unordered.py | from dash import Dash, Input, Output, dcc, State, html, callback
from tests.background_callback.utils import get_background_callback_manager
background_callback_manager = get_background_callback_manager()
handle = background_callback_manager.handle
app = Dash(__name__, background_callback_manager=background_callback_manager)
app.layout = html.Div(
[
html.Div(id="output"),
html.Button("click", id="click"),
dcc.Store(data="stored", id="stored"),
]
)
@callback(
Output("output", "children"),
State("stored", "data"),
Input("click", "n_clicks"),
background=True,
prevent_initial_call=True,
)
def update_output(stored, n_clicks):
return stored
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/app_unordered.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback001.py | import sys
from multiprocessing import Lock
import pytest
from flaky import flaky
from .utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc001_fast_input(dash_duo, manager):
"""
Make sure that we settle to the correct final value when handling rapid inputs
"""
lock = Lock()
with setup_background_callback_app(manager, "app1") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#output-1", "initial value", 15)
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "hello world":
with lock:
input_.send_keys(key)
dash_duo.wait_for_text_to_equal("#output-1", "hello world", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback001.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback002.py | import sys
import pytest
from flaky import flaky
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc002_long_callback_running(dash_duo, manager):
with setup_background_callback_app(manager, "app2") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#result", "Not clicked", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
# Click button and check that status has changed to "Running"
dash_duo.find_element("#button-1").click()
dash_duo.wait_for_text_to_equal("#status", "Running", 8)
# Wait for calculation to finish, then check that status is "Finished"
dash_duo.wait_for_text_to_equal("#result", "Clicked 1 time(s)", 12)
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
# Click button twice and check that status has changed to "Running"
dash_duo.find_element("#button-1").click()
dash_duo.find_element("#button-1").click()
dash_duo.wait_for_text_to_equal("#status", "Running", 8)
# Wait for calculation to finish, then check that status is "Finished"
dash_duo.wait_for_text_to_equal("#result", "Clicked 3 time(s)", 12)
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback002.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback003.py | import sys
from multiprocessing import Lock
import pytest
from flaky import flaky
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc003_long_callback_running_cancel(dash_duo, manager):
lock = Lock()
with setup_background_callback_app(manager, "app3") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#result", "No results", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 6)
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#result", "Processed 'initial value'", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 6)
# Update input text box
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "hello world":
with lock:
input_.send_keys(key)
# Click run button and check that status has changed to "Running"
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Running", 8)
# Then click Cancel button and make sure that the status changes to finish
# without update result
dash_duo.find_element("#cancel-button").click()
dash_duo.wait_for_text_to_equal("#result", "Processed 'initial value'", 12)
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
# Click run button again, and let it finish
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Running", 8)
dash_duo.wait_for_text_to_equal("#result", "Processed 'hello world'", 8)
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback003.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback004.py | import sys
import pytest
from flaky import flaky
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc004_long_callback_progress(dash_duo, manager):
with setup_background_callback_app(manager, "app4") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result", "No results", 8)
# click run and check that status eventually cycles to 2/4
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
# Then click Cancel button and make sure that the status changes to finish
# without updating result
dash_duo.find_element("#cancel-button").click()
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result", "No results", 8)
# Click run button and allow callback to finish
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result", "Processed 'hello, world'", 8)
# Click run button again with same input.
# without caching, this should rerun callback and display progress
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result", "Processed 'hello, world'", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback004.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback005.py | import sys
from multiprocessing import Lock
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@pytest.mark.skip(reason="Timeout often")
def test_lcbc005_long_callback_caching(dash_duo, manager):
lock = Lock()
with setup_background_callback_app(manager, "app5") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result", "Result for 'AAA'", 8)
# Update input text box to BBB
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "BBB":
with lock:
input_.send_keys(key)
# Click run button and check that status eventually cycles to 2/4
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result", "Result for 'BBB'", 8)
# Update input text box back to AAA
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "AAA":
with lock:
input_.send_keys(key)
# Click run button and this time the cached result is used,
# So we can get the result right away
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result", "Result for 'AAA'", 8)
# Update input text box back to BBB
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "BBB":
with lock:
input_.send_keys(key)
# Click run button and this time the cached result is used,
# So we can get the result right away
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result", "Result for 'BBB'", 8)
# Update input text box back to AAA
input_ = dash_duo.find_element("#input")
dash_duo.clear_input(input_)
for key in "AAA":
with lock:
input_.send_keys(key)
# Change cache key
app._cache_key.value = 1
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result", "Result for 'AAA'", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback005.py",
"license": "MIT License",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback006.py | import sys
from multiprocessing import Lock
import pytest
from flaky import flaky
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc006_long_callback_caching_multi(dash_duo, manager):
lock = Lock()
with setup_background_callback_app(manager, "app6") as app:
dash_duo.start_server(app)
dash_duo.wait_for_text_to_equal("#status1", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#status1", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result1", "Result for 'AAA'", 8)
# Check initial status/output of second long_callback
# prevent_initial_callback=True means no calculation should have run yet
dash_duo.wait_for_text_to_equal("#status2", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result2", "No results", 8)
# Click second run button
dash_duo.find_element("#run-button2").click()
dash_duo.wait_for_text_to_equal("#status2", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#result2", "Result for 'aaa'", 8)
# Update input text box to BBB
input_ = dash_duo.find_element("#input1")
dash_duo.clear_input(input_)
for key in "BBB":
with lock:
input_.send_keys(key)
# Click run button and check that status eventually cycles to 2/4
dash_duo.find_element("#run-button1").click()
dash_duo.wait_for_text_to_equal("#status1", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status1", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result1", "Result for 'BBB'", 8)
# Check there were no changes in second long_callback output
dash_duo.wait_for_text_to_equal("#status2", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result2", "Result for 'aaa'", 8)
# Update input text box back to AAA
input_ = dash_duo.find_element("#input1")
dash_duo.clear_input(input_)
for key in "AAA":
with lock:
input_.send_keys(key)
# Click run button and this time the cached result is used,
# So we can get the result right away
dash_duo.find_element("#run-button1").click()
dash_duo.wait_for_text_to_equal("#status1", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result1", "Result for 'AAA'", 8)
# Update input text box back to BBB
input_ = dash_duo.find_element("#input1")
dash_duo.clear_input(input_)
for key in "BBB":
with lock:
input_.send_keys(key)
# Click run button and this time the cached result is used,
# So we can get the result right away
dash_duo.find_element("#run-button1").click()
dash_duo.wait_for_text_to_equal("#status1", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result1", "Result for 'BBB'", 8)
# Update second input text box to BBB, make sure there is not a cache hit
input_ = dash_duo.find_element("#input2")
dash_duo.clear_input(input_)
for key in "BBB":
with lock:
input_.send_keys(key)
dash_duo.find_element("#run-button2").click()
dash_duo.wait_for_text_to_equal("#status2", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status2", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result2", "Result for 'BBB'", 8)
# Update second input text box back to aaa, check for cache hit
input_ = dash_duo.find_element("#input2")
dash_duo.clear_input(input_)
for key in "aaa":
with lock:
input_.send_keys(key)
dash_duo.find_element("#run-button2").click()
dash_duo.wait_for_text_to_equal("#status2", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result2", "Result for 'aaa'", 8)
# Update input text box back to AAA
input_ = dash_duo.find_element("#input1")
dash_duo.clear_input(input_)
for key in "AAA":
with lock:
input_.send_keys(key)
# Change cache key to cause cache miss
app._cache_key.value = 1
# Check for cache miss for first long_callback
dash_duo.find_element("#run-button1").click()
dash_duo.wait_for_text_to_equal("#status1", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status1", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result1", "Result for 'AAA'", 8)
# Check for cache miss for second long_callback
dash_duo.find_element("#run-button2").click()
dash_duo.wait_for_text_to_equal("#status2", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status2", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result2", "Result for 'aaa'", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback006.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback007.py | import sys
import pytest
from flaky import flaky
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc007_validation_layout(dash_duo, manager):
with setup_background_callback_app(manager, "app7") as app:
dash_duo.start_server(app)
# Show layout
dash_duo.find_element("#show-layout-button").click()
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result", "No results", 8)
# click run and check that status eventually cycles to 2/4
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
# Then click Cancel button and make sure that the status changes to finish
# without updating result
dash_duo.find_element("#cancel-button").click()
dash_duo.wait_for_text_to_equal("#status", "Finished", 8)
dash_duo.wait_for_text_to_equal("#result", "No results", 8)
# Click run button and allow callback to finish
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result", "Processed 'hello, world'", 8)
# Click run button again with same input.
# without caching, this should rerun callback and display progress
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 15)
dash_duo.wait_for_text_to_equal("#status", "Finished", 15)
dash_duo.wait_for_text_to_equal("#result", "Processed 'hello, world'", 8)
assert not dash_duo.redux_state_is_loading
assert dash_duo.get_logs() == []
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback007.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback008.py | import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc008_long_callbacks_error(dash_duo, manager):
with setup_background_callback_app(manager, "app_error") as app:
dash_duo.start_server(
app,
debug=True,
use_reloader=False,
use_debugger=True,
dev_tools_hot_reload=False,
dev_tools_ui=True,
)
clicker = dash_duo.wait_for_element("#button")
def click_n_wait():
clicker.click()
dash_duo.wait_for_element("#button:disabled")
dash_duo.wait_for_element("#button:not([disabled])")
clicker.click()
dash_duo.wait_for_text_to_equal("#output", "Clicked 1 times")
click_n_wait()
dash_duo.wait_for_element(".dash-fe-error__title").click()
dash_duo.driver.switch_to.frame(dash_duo.find_element("iframe"))
assert (
"dash.exceptions.BackgroundCallbackError: "
"An error occurred inside a background callback:"
in dash_duo.wait_for_element(".errormsg").text
)
dash_duo.driver.switch_to.default_content()
click_n_wait()
dash_duo.wait_for_text_to_equal("#output", "Clicked 3 times")
click_n_wait()
dash_duo.wait_for_text_to_equal("#output", "Clicked 3 times")
click_n_wait()
dash_duo.wait_for_text_to_equal("#output", "Clicked 5 times")
def make_expect(n):
return [str(x) for x in range(1, n + 1)] + ["" for _ in range(n + 1, 4)]
multi = dash_duo.wait_for_element("#multi-output")
for i in range(1, 4):
with app.test_lock:
multi.click()
dash_duo.wait_for_element("#multi-output:disabled")
expect = make_expect(i)
dash_duo.wait_for_text_to_equal("#output-status", f"Updated: {i}")
for j, e in enumerate(expect):
assert dash_duo.find_element(f"#output{j + 1}").text == e
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback008.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback009.py | import sys
import time
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc009_short_interval(dash_duo, manager):
with setup_background_callback_app(manager, "app_short_interval") as app:
dash_duo.start_server(app)
dash_duo.find_element("#run-button").click()
dash_duo.wait_for_text_to_equal("#status", "Progress 2/4", 20)
dash_duo.wait_for_text_to_equal("#status", "Finished", 12)
dash_duo.wait_for_text_to_equal("#result", "Clicked '1'")
time.sleep(2)
# Ensure the progress is still not running
assert dash_duo.find_element("#status").text == "Finished"
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback009.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback010.py | import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc010_side_updates(dash_duo, manager):
    """Side-channel progress updates are delivered while the callback runs."""
    with setup_background_callback_app(manager, "app_side_update") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("#run-button").click()
        # Each intermediate progress value 1/4..3/4 must appear in order.
        for i in range(1, 4):
            dash_duo.wait_for_text_to_equal("#side-status", f"Side Progress {i}/4")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback010.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback011.py | import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc011_long_pattern_matching(dash_duo, manager):
    """Pattern-matching background callbacks report the button that fired them."""
    with setup_background_callback_app(manager, "app_pattern_matching") as app:
        dash_duo.start_server(app)
        for i in range(1, 4):
            # Click the i-th button i times, then verify the result reflects it.
            for _ in range(i):
                dash_duo.find_element(f"button:nth-child({i})").click()
            dash_duo.wait_for_text_to_equal("#result", f"Clicked '{i}'")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback011.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback012.py | import json
import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc012_long_callback_ctx(dash_duo, manager):
    """The callback context (triggered input) is available inside a background callback."""
    with setup_background_callback_app(manager, "app_callback_ctx") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("button:nth-child(1)").click()
        # Wait until the callback has completed before reading its output.
        dash_duo.wait_for_text_to_equal("#running", "off")
        # The app serializes ctx info as JSON into #result; check the trigger index.
        output = json.loads(dash_duo.find_element("#result").text)
        assert output["triggered"]["index"] == 0
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback012.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback013.py | import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc013_unordered_state_input(dash_duo, manager):
    """A background callback with State declared before Input still fires correctly."""
    with setup_background_callback_app(manager, "app_unordered") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("#click").click()
        dash_duo.wait_for_text_to_equal("#output", "stored")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback013.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback014.py | import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc014_progress_delete(dash_duo, manager):
    """Progress resources are cleaned up after the callback completes."""
    with setup_background_callback_app(manager, "app_progress_delete") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("#start").click()
        dash_duo.wait_for_text_to_equal("#output", "done")
        # The app exposes how many progress updates were counted; expect exactly 2.
        assert dash_duo.find_element("#progress-counter").text == "2"
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback014.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback015.py | import sys
import pytest
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 7), reason="Python 3.6 long callbacks tests hangs up"
)
def test_lcbc015_diff_outputs_same_func(dash_duo, manager):
    """One callback function registered for different outputs updates each independently."""
    with setup_background_callback_app(manager, "app_diff_outputs") as app:
        dash_duo.start_server(app)
        for i in range(1, 3):
            dash_duo.find_element(f"#button-{i}").click()
            dash_duo.wait_for_text_to_equal(f"#output-{i}", f"Clicked on {i}")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback015.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback016.py | import sys
import pytest
from flaky import flaky
from tests.background_callback.utils import setup_background_callback_app
@pytest.mark.skipif(
    sys.version_info < (3, 9), reason="Python 3.8 long callbacks tests hangs up"
)
@flaky(max_runs=3)
def test_lcbc016_multi_page_cancel(dash_duo, manager):
    """Both a shared cancel button and per-page cancel buttons stop a running
    background callback on each page of a multi-page app."""
    with setup_background_callback_app(manager, "app_page_cancel") as app:
        dash_duo.start_server(app)
        # Page 1: cancel via the shared cancel button.
        with app.test_lock:
            dash_duo.find_element("#start1").click()
            dash_duo.wait_for_text_to_equal("#progress1", "running")
            dash_duo.find_element("#shared_cancel").click()
            dash_duo.wait_for_text_to_equal("#progress1", "idle")
        # Output must be untouched since the callback was cancelled mid-run.
        dash_duo.wait_for_text_to_equal("#output1", "initial")
        # Page 1: cancel via the page-specific cancel button.
        with app.test_lock:
            dash_duo.find_element("#start1").click()
            dash_duo.wait_for_text_to_equal("#progress1", "running")
            dash_duo.find_element("#cancel1").click()
            dash_duo.wait_for_text_to_equal("#progress1", "idle")
        dash_duo.wait_for_text_to_equal("#output1", "initial")
        # Navigate to page 2 and repeat both cancellation paths there.
        dash_duo.server_url = dash_duo.server_url + "/2"
        with app.test_lock:
            dash_duo.find_element("#start2").click()
            dash_duo.wait_for_text_to_equal("#progress2", "running")
            dash_duo.find_element("#shared_cancel").click()
            dash_duo.wait_for_text_to_equal("#progress2", "idle")
        dash_duo.wait_for_text_to_equal("#output2", "initial")
        with app.test_lock:
            dash_duo.find_element("#start2").click()
            dash_duo.wait_for_text_to_equal("#progress2", "running")
            dash_duo.find_element("#cancel2").click()
            dash_duo.wait_for_text_to_equal("#progress2", "idle")
        dash_duo.wait_for_text_to_equal("#output2", "initial")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback016.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback017.py | from tests.background_callback.utils import setup_background_callback_app
def test_lcbc017_long_callback_set_props(dash_duo, manager):
    """set_props from a background callback updates arbitrary components,
    both with and without a declared output."""
    with setup_background_callback_app(manager, "app_arbitrary") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("#start").click()
        # Intermediate set_props effects arrive while the callback is running.
        dash_duo.wait_for_text_to_equal("#secondary", "first")
        dash_duo.wait_for_style_to_equal(
            "#secondary", "background-color", "rgba(255, 0, 0, 1)"
        )
        # The declared output is still untouched at this point.
        dash_duo.wait_for_text_to_equal("#output", "initial")
        dash_duo.wait_for_text_to_equal("#secondary", "second")
        dash_duo.wait_for_text_to_equal("#output", "completed")
        # A callback with no declared output can still push updates via set_props.
        dash_duo.find_element("#start-no-output").click()
        dash_duo.wait_for_text_to_equal("#no-output", "started")
        dash_duo.wait_for_text_to_equal("#no-output", "completed")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback017.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
plotly/dash:tests/background_callback/test_basic_long_callback018.py | from tests.background_callback.utils import setup_background_callback_app
def test_lcbc018_background_callback_on_error(dash_duo, manager):
    """Errors raised in background callbacks reach both the per-callback
    on_error handler and the app-level (global) on_error handler."""
    with setup_background_callback_app(manager, "app_bg_on_error") as app:
        dash_duo.start_server(app)
        dash_duo.find_element("#start-cb-onerror").click()
        dash_duo.wait_for_contains_text("#cb-output", "callback error")
        dash_duo.find_element("#start-global-onerror").click()
        dash_duo.wait_for_contains_text("#global-output", "global error")
| {
"repo_id": "plotly/dash",
"file_path": "tests/background_callback/test_basic_long_callback018.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/type_ignore_with_other_comment.py | import pandas as pd
interval_td = pd.Interval(
pd.Timedelta("1 days"), pd.Timedelta("2 days"), closed="neither"
)
_td = ( # pyright: ignore[reportOperatorIssue,reportUnknownVariableType]
interval_td
- pd.Interval( # type: ignore[operator]
pd.Timedelta(1, "ns"), pd.Timedelta(2, "ns")
)
)
# output
import pandas as pd
interval_td = pd.Interval(
pd.Timedelta("1 days"), pd.Timedelta("2 days"), closed="neither"
)
_td = ( # pyright: ignore[reportOperatorIssue,reportUnknownVariableType]
interval_td
- pd.Interval( # type: ignore[operator]
pd.Timedelta(1, "ns"), pd.Timedelta(2, "ns")
)
)
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/type_ignore_with_other_comment.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/test_concurrency_manager_shutdown.py | from __future__ import annotations
import asyncio
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Optional
import black.concurrency as concurrency
from black import Mode, WriteBack
from black.report import Report
class FakeManager:
    """Test double for ``multiprocessing.Manager``: records shutdown calls."""

    # Flipped to True the first time shutdown() is invoked.
    shutdown_called: bool = False

    def Lock(self) -> object:
        # Any unique object suffices as a stand-in for a managed lock.
        return object()

    def shutdown(self) -> None:
        self.shutdown_called = True
def test_manager_shutdown_called_for_diff(monkeypatch: Any, tmp_path: Path) -> None:
    """
    schedule_formatting() creates multiprocessing.Manager() for DIFF/COLOR_DIFF
    and must shut it down deterministically.
    """
    # Replace the real Manager factory so we can observe the shutdown call.
    fake_manager = FakeManager()
    monkeypatch.setattr(concurrency, "Manager", lambda: fake_manager)
    # Stub the formatter: assert the manager-provided lock was passed through,
    # and report "no changes" so no file is rewritten.
    def fake_format_file_in_place(
        src: Path,
        fast: bool,
        mode: Mode,
        write_back: WriteBack,
        lock: Optional[object],
    ) -> bool:
        assert lock is not None
        return False
    monkeypatch.setattr(concurrency, "format_file_in_place", fake_format_file_in_place)
    src = tmp_path / "a.py"
    src.write_text("x=1\n", encoding="utf8")
    # Drive schedule_formatting through a real event loop with a single-worker
    # executor so the coroutine runs to completion.
    async def run() -> None:
        loop = asyncio.get_running_loop()
        with ThreadPoolExecutor(max_workers=1) as executor:
            await concurrency.schedule_formatting(
                sources={src},
                fast=False,
                write_back=WriteBack.DIFF,
                mode=Mode(),
                report=Report(),
                loop=loop,
                executor=executor,
                no_cache=True,
            )
    asyncio.run(run())
    # The Manager created for DIFF mode must have been shut down.
    assert fake_manager.shutdown_called is True
| {
"repo_id": "psf/black",
"file_path": "tests/test_concurrency_manager_shutdown.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/fmtskip_multiple_in_clause.py | # Multiple fmt: skip in multi-part if-clause
class ClassWithALongName:
Constant1 = 1
Constant2 = 2
Constant3 = 3
def test():
if (
"cond1" == "cond1"
and "cond2" == "cond2"
and 1 in (
ClassWithALongName.Constant1,
ClassWithALongName.Constant2,
ClassWithALongName.Constant3, # fmt: skip
) # fmt: skip
):
return True
return False
# output
# Multiple fmt: skip in multi-part if-clause
class ClassWithALongName:
Constant1 = 1
Constant2 = 2
Constant3 = 3
def test():
if (
"cond1" == "cond1"
and "cond2" == "cond2"
and 1 in (
ClassWithALongName.Constant1,
ClassWithALongName.Constant2,
ClassWithALongName.Constant3, # fmt: skip
) # fmt: skip
):
return True
return False
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/fmtskip_multiple_in_clause.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/fmtskip_multiple_strings.py | # Multiple fmt: skip on string literals
a = (
"this should " # fmt: skip
"be fine"
)
b = (
"this is " # fmt: skip
"not working" # fmt: skip
)
c = (
"and neither " # fmt: skip
"is this " # fmt: skip
"working"
)
d = (
"nor "
"is this " # fmt: skip
"working" # fmt: skip
)
e = (
"and this " # fmt: skip
"is definitely "
"not working" # fmt: skip
)
# Dictionary entries with fmt: skip (covers issue with long lines)
hotkeys = {
"editor:swap-line-down": [{"key": "ArrowDown", "modifiers": ["Alt", "Mod"]}], # fmt: skip
"editor:swap-line-up": [{"key": "ArrowUp", "modifiers": ["Alt", "Mod"]}], # fmt: skip
"editor:toggle-source": [{"key": "S", "modifiers": ["Alt", "Mod"]}], # fmt: skip
}
# output
# Multiple fmt: skip on string literals
a = (
"this should " # fmt: skip
"be fine"
)
b = (
"this is " # fmt: skip
"not working" # fmt: skip
)
c = (
"and neither " # fmt: skip
"is this " # fmt: skip
"working"
)
d = (
"nor "
"is this " # fmt: skip
"working" # fmt: skip
)
e = (
"and this " # fmt: skip
"is definitely "
"not working" # fmt: skip
)
# Dictionary entries with fmt: skip (covers issue with long lines)
hotkeys = {
"editor:swap-line-down": [{"key": "ArrowDown", "modifiers": ["Alt", "Mod"]}], # fmt: skip
"editor:swap-line-up": [{"key": "ArrowUp", "modifiers": ["Alt", "Mod"]}], # fmt: skip
"editor:toggle-source": [{"key": "S", "modifiers": ["Alt", "Mod"]}], # fmt: skip
}
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/fmtskip_multiple_strings.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/fmtskip13.py | t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{"foo": "bar"},
)
t = (
{
"foo": "very long string",
"bar": "another very long string",
"baz": "we should run out of space by now",
}, # fmt: skip
{"foo": "bar"},
)
t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{"foo": "bar",},
)
t = (
{
"foo": "very long string",
"bar": "another very long string",
"baz": "we should run out of space by now",
}, # fmt: skip
{"foo": "bar",},
)
# output
t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{"foo": "bar"},
)
t = (
{
"foo": "very long string",
"bar": "another very long string",
"baz": "we should run out of space by now",
}, # fmt: skip
{"foo": "bar"},
)
t = (
{"foo": "very long string", "bar": "another very long string", "baz": "we should run out of space by now"}, # fmt: skip
{
"foo": "bar",
},
)
t = (
{
"foo": "very long string",
"bar": "another very long string",
"baz": "we should run out of space by now",
}, # fmt: skip
{
"foo": "bar",
},
)
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/fmtskip13.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/remove_parens_from_lhs.py | # Remove unnecessary parentheses from LHS of assignments
def a():
return [1, 2, 3]
# Single variable with unnecessary parentheses
(b) = a()[0]
# Tuple unpacking with unnecessary parentheses
(c, *_) = a()
# These should not be changed - parentheses are necessary
(d,) = a() # single-element tuple
e = (1 + 2) * 3 # RHS has precedence needs
# output
# Remove unnecessary parentheses from LHS of assignments
def a():
return [1, 2, 3]
# Single variable with unnecessary parentheses
b = a()[0]
# Tuple unpacking with unnecessary parentheses
c, *_ = a()
# These should not be changed - parentheses are necessary
(d,) = a() # single-element tuple
e = (1 + 2) * 3 # RHS has precedence needs
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/remove_parens_from_lhs.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/fmtskip12.py | with open("file.txt") as f: content = f.read() # fmt: skip
# Ideally, only the last line would be ignored
# But ignoring only part of the asexpr_test causes a parse error
# Same with ignoring the asexpr_test without also ignoring the entire with_stmt
with open (
"file.txt" ,
) as f: content = f.read() # fmt: skip
# output
with open("file.txt") as f: content = f.read() # fmt: skip
# Ideally, only the last line would be ignored
# But ignoring only part of the asexpr_test causes a parse error
# Same with ignoring the asexpr_test without also ignoring the entire with_stmt
with open (
"file.txt" ,
) as f: content = f.read() # fmt: skip
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/fmtskip12.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/jupytext_markdown_fmt.py | # Test that Jupytext markdown comments are preserved before fmt:off/on blocks
# %% [markdown]
# fmt: off
# fmt: on
# Also test with other comments
# Some comment
# %% [markdown]
# Another comment
# fmt: off
x = 1
# fmt: on
# Test multiple markdown comments
# %% [markdown]
# First markdown
# %% [code]
# Code cell
# fmt: off
y = 2
# fmt: on | {
"repo_id": "psf/black",
"file_path": "tests/data/cases/jupytext_markdown_fmt.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/pep_750.py | # flags: --minimum-version=3.14
x = t"foo"
x = t'foo {{ {2 + 2}bar {{ baz'
x = t"foo {f'abc'} bar"
x = t"""foo {{ a
foo {2 + 2}bar {{ baz
x = f"foo {{ {
2 + 2 # comment
}bar"
{{ baz
}} buzz
{print("abc" + "def"
)}
abc"""
t'{(abc:=10)}'
t'''This is a really long string, but just make sure that you reflow tstrings {
2+2:d
}'''
t'This is a really long string, but just make sure that you reflow tstrings correctly {2+2:d}'
t"{ 2 + 2 = }"
t'{
X
!r
}'
tr'\{{\}}'
t'''
WITH {f'''
{1}_cte AS ()'''}
'''
# output
x = t"foo"
x = t"foo {{ {2 + 2}bar {{ baz"
x = t"foo {f'abc'} bar"
x = t"""foo {{ a
foo {2 + 2}bar {{ baz
x = f"foo {{ {
2 + 2 # comment
}bar"
{{ baz
}} buzz
{print("abc" + "def"
)}
abc"""
t"{(abc:=10)}"
t"""This is a really long string, but just make sure that you reflow tstrings {
2+2:d
}"""
t"This is a really long string, but just make sure that you reflow tstrings correctly {2+2:d}"
t"{ 2 + 2 = }"
t"{
X
!r
}"
rt"\{{\}}"
t"""
WITH {f'''
{1}_cte AS ()'''}
"""
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/pep_750.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:src/blackd/client.py | import aiohttp
from aiohttp.typedefs import StrOrURL
import black
_DEFAULT_HEADERS = {"Content-Type": "text/plain; charset=utf-8"}
class BlackDClient:
    """Async HTTP client for a running ``blackd`` formatting server."""

    def __init__(
        self,
        url: StrOrURL = "http://localhost:9090",
        line_length: int | None = None,
        skip_source_first_line: bool = False,
        skip_string_normalization: bool = False,
        skip_magic_trailing_comma: bool = False,
        preview: bool = False,
        fast: bool = False,
        python_variant: str | None = None,
        diff: bool = False,
        headers: dict[str, str] | None = None,
    ):
        """
        Initialize a BlackDClient object.
        :param url: The URL of the BlackD server.
        :param line_length: The maximum line length.
            Corresponds to the ``--line-length`` CLI option.
        :param skip_source_first_line: True to skip the first line of the source.
            Corresponds to the ``--skip-source-first-line`` CLI option.
        :param skip_string_normalization: True to skip string normalization.
            Corresponds to the ``--skip-string-normalization`` CLI option.
        :param skip_magic_trailing_comma: True to skip magic trailing comma.
            Corresponds to the ``--skip-magic-trailing-comma`` CLI option.
        :param preview: True to enable experimental preview mode.
            Corresponds to the ``--preview`` CLI option.
        :param fast: True to enable fast mode.
            Corresponds to the ``--fast`` CLI option.
        :param python_variant: The Python variant to use.
            Corresponds to the ``--pyi`` CLI option if this is "pyi".
            Otherwise, corresponds to the ``--target-version`` CLI option.
        :param diff: True to enable diff mode.
            Corresponds to the ``--diff`` CLI option.
        :param headers: A dictionary of additional custom headers to send with
            the request.
        """
        self.url = url
        self.headers = _DEFAULT_HEADERS.copy()
        # Map each option to its blackd protocol header; a None value means
        # "option not requested, send no header". Order matches the CLI options.
        header_specs = [
            ("X-Line-Length", None if line_length is None else str(line_length)),
            ("X-Skip-Source-First-Line", "yes" if skip_source_first_line else None),
            ("X-Skip-String-Normalization", "yes" if skip_string_normalization else None),
            ("X-Skip-Magic-Trailing-Comma", "yes" if skip_magic_trailing_comma else None),
            ("X-Preview", "yes" if preview else None),
            ("X-Fast-Or-Safe", "fast" if fast else None),
            ("X-Python-Variant", python_variant),
            ("X-Diff", "yes" if diff else None),
        ]
        self.headers.update(
            (name, value) for name, value in header_specs if value is not None
        )
        # Custom headers take precedence over everything set above.
        if headers is not None:
            self.headers.update(headers)

    async def format_code(self, unformatted_code: str) -> str:
        """Send *unformatted_code* to blackd and return the formatted source.

        Raises ``black.InvalidInput`` on a syntax error (HTTP 400) and
        ``RuntimeError`` on any other failure.
        """
        payload = unformatted_code.encode("utf-8")
        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.url, headers=self.headers, data=payload
            ) as response:
                status = response.status
                if status == 204:
                    # Input is already well-formatted
                    return unformatted_code
                if status == 200:
                    # Formatting was needed
                    return await response.text()
                if status == 400:
                    # Input contains a syntax error
                    error_message = await response.text()
                    raise black.InvalidInput(error_message)
                if status == 500:
                    # Other kind of error while formatting
                    error_message = await response.text()
                    raise RuntimeError(f"Error while formatting: {error_message}")
                # Unexpected response status code
                raise RuntimeError(
                    f"Unexpected response status code: {response.status}"
                )
| {
"repo_id": "psf/black",
"file_path": "src/blackd/client.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
psf/black:tests/data/cases/type_expansion.py | # flags: --minimum-version=3.12
def f1[T: (int, str)](a,): pass
def f2[T: (int, str)](a: int, b,): pass
def g1[T: (int,)](a,): pass
def g2[T: (int, str, bytes)](a,): pass
def g3[T: ((int, str), (bytes,))](a,): pass
def g4[T: (int, (str, bytes))](a,): pass
def g5[T: ((int,),)](a: int, b,): pass
# output
def f1[T: (int, str)](
a,
):
pass
def f2[T: (int, str)](
a: int,
b,
):
pass
def g1[T: (int,)](
a,
):
pass
def g2[T: (int, str, bytes)](
a,
):
pass
def g3[T: ((int, str), (bytes,))](
a,
):
pass
def g4[T: (int, (str, bytes))](
a,
):
pass
def g5[T: ((int,),)](
a: int,
b,
):
pass
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/type_expansion.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/module_docstring_after_comment.py | #!/python
# regression test for #4762
"""
docstring
"""
from __future__ import annotations
import os
# output
#!/python
# regression test for #4762
"""
docstring
"""
from __future__ import annotations
import os
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/module_docstring_after_comment.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/preview_wrap_comprehension_in.py | # flags: --preview --line-length=79
[a for graph_path_expression in refined_constraint.condition_as_predicate.variables]
[
a
for graph_path_expression in refined_constraint.condition_as_predicate.variables
]
[
a
for graph_path_expression
in refined_constraint.condition_as_predicate.variables
]
[
a
for graph_path_expression in (
refined_constraint.condition_as_predicate.variables
)
]
[
(foobar_very_long_key, foobar_very_long_value)
for foobar_very_long_key, foobar_very_long_value in foobar_very_long_dictionary.items()
]
# Don't split the `in` if it's not too long
lcomp3 = [
element.split("\n", 1)[0]
for element in collection.select_elements()
# right
if element is not None
]
# Don't remove parens around ternaries
expected = [i for i in (a if b else c)]
# Nested arrays
# First in will not be split because it would still be too long
[[
x
for x in bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
for y in xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
]]
# Multiple comprehensions, only split the second `in`
graph_path_expressions_in_local_constraint_refinements = [
graph_path_expression
for refined_constraint in self._local_constraint_refinements.values()
if refined_constraint is not None
for graph_path_expression in refined_constraint.condition_as_predicate.variables
]
# Dictionary comprehensions
dict_with_really_long_names = {
really_really_long_key_name: an_even_longer_really_really_long_key_value
for really_really_long_key_name, an_even_longer_really_really_long_key_value in really_really_really_long_dict_name.items()
}
{
key_with_super_really_long_name: key_with_super_really_long_name
for key_with_super_really_long_name in dictionary_with_super_really_long_name
}
{
key_with_super_really_long_name: key_with_super_really_long_name
for key_with_super_really_long_name
in dictionary_with_super_really_long_name
}
{
key_with_super_really_long_name: key_with_super_really_long_name
for key in (
dictionary
)
}
# output
[
a
for graph_path_expression in (
refined_constraint.condition_as_predicate.variables
)
]
[
a
for graph_path_expression in (
refined_constraint.condition_as_predicate.variables
)
]
[
a
for graph_path_expression in (
refined_constraint.condition_as_predicate.variables
)
]
[
a
for graph_path_expression in (
refined_constraint.condition_as_predicate.variables
)
]
[
(foobar_very_long_key, foobar_very_long_value)
for foobar_very_long_key, foobar_very_long_value in (
foobar_very_long_dictionary.items()
)
]
# Don't split the `in` if it's not too long
lcomp3 = [
element.split("\n", 1)[0]
for element in collection.select_elements()
# right
if element is not None
]
# Don't remove parens around ternaries
expected = [i for i in (a if b else c)]
# Nested arrays
# First in will not be split because it would still be too long
[
[
x
for x in bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
for y in (
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
)
]
]
# Multiple comprehensions, only split the second `in`
graph_path_expressions_in_local_constraint_refinements = [
graph_path_expression
for refined_constraint in self._local_constraint_refinements.values()
if refined_constraint is not None
for graph_path_expression in (
refined_constraint.condition_as_predicate.variables
)
]
# Dictionary comprehensions
dict_with_really_long_names = {
really_really_long_key_name: an_even_longer_really_really_long_key_value
for really_really_long_key_name, an_even_longer_really_really_long_key_value in (
really_really_really_long_dict_name.items()
)
}
{
key_with_super_really_long_name: key_with_super_really_long_name
for key_with_super_really_long_name in (
dictionary_with_super_really_long_name
)
}
{
key_with_super_really_long_name: key_with_super_really_long_name
for key_with_super_really_long_name in (
dictionary_with_super_really_long_name
)
}
{
key_with_super_really_long_name: key_with_super_really_long_name
for key in dictionary
}
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/preview_wrap_comprehension_in.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/line_ranges_decorator_edge_case.py | # flags: --line-ranges=6-7
class Foo:
@overload
def foo(): ...
def fox(self):
print()
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/line_ranges_decorator_edge_case.py",
"license": "MIT License",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/remove_except_types_parens.py | # flags: --minimum-version=3.14
# SEE PEP 758 FOR MORE DETAILS
# remains unchanged
try:
pass
except:
pass
# remains unchanged
try:
pass
except ValueError:
pass
try:
pass
except* ValueError:
pass
# parenthesis are removed
try:
pass
except (ValueError):
pass
try:
pass
except* (ValueError):
pass
# parenthesis are removed
try:
pass
except (ValueError) as e:
pass
try:
pass
except* (ValueError) as e:
pass
# remains unchanged
try:
pass
except (ValueError,):
pass
try:
pass
except* (ValueError,):
pass
# remains unchanged
try:
pass
except (ValueError,) as e:
pass
try:
pass
except* (ValueError,) as e:
pass
# remains unchanged
try:
pass
except ValueError, TypeError, KeyboardInterrupt:
pass
try:
pass
except* ValueError, TypeError, KeyboardInterrupt:
pass
# parenthesis are removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt):
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt):
pass
# parenthesis are not removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt) as e:
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt) as e:
pass
# parenthesis are removed
try:
pass
except (ValueError if True else TypeError):
pass
try:
pass
except* (ValueError if True else TypeError):
pass
# inner except: parenthesis are removed
# outer except: parenthsis are not removed
try:
try:
pass
except (TypeError, KeyboardInterrupt):
pass
except (ValueError,):
pass
try:
try:
pass
except* (TypeError, KeyboardInterrupt):
pass
except* (ValueError,):
pass
# output
# SEE PEP 758 FOR MORE DETAILS
# remains unchanged
try:
pass
except:
pass
# remains unchanged
try:
pass
except ValueError:
pass
try:
pass
except* ValueError:
pass
# parenthesis are removed
try:
pass
except ValueError:
pass
try:
pass
except* ValueError:
pass
# parenthesis are removed
try:
pass
except ValueError as e:
pass
try:
pass
except* ValueError as e:
pass
# remains unchanged
try:
pass
except (ValueError,):
pass
try:
pass
except* (ValueError,):
pass
# remains unchanged
try:
pass
except (ValueError,) as e:
pass
try:
pass
except* (ValueError,) as e:
pass
# remains unchanged
try:
pass
except ValueError, TypeError, KeyboardInterrupt:
pass
try:
pass
except* ValueError, TypeError, KeyboardInterrupt:
pass
# parenthesis are removed
try:
pass
except ValueError, TypeError, KeyboardInterrupt:
pass
try:
pass
except* ValueError, TypeError, KeyboardInterrupt:
pass
# parenthesis are not removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt) as e:
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt) as e:
pass
# parenthesis are removed
try:
pass
except ValueError if True else TypeError:
pass
try:
pass
except* ValueError if True else TypeError:
pass
# inner except: parenthesis are removed
# outer except: parenthsis are not removed
try:
try:
pass
except TypeError, KeyboardInterrupt:
pass
except (ValueError,):
pass
try:
try:
pass
except* TypeError, KeyboardInterrupt:
pass
except* (ValueError,):
pass
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/remove_except_types_parens.py",
"license": "MIT License",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/remove_except_types_parens_pre_py314.py | # flags: --minimum-version=3.11
# SEE PEP 758 FOR MORE DETAILS
# remains unchanged
try:
pass
except:
pass
# remains unchanged
try:
pass
except ValueError:
pass
try:
pass
except* ValueError:
pass
# parenthesis are removed
try:
pass
except (ValueError):
pass
try:
pass
except* (ValueError):
pass
# parenthesis are removed
try:
pass
except (ValueError) as e:
pass
try:
pass
except* (ValueError) as e:
pass
# remains unchanged
try:
pass
except (ValueError,):
pass
try:
pass
except* (ValueError,):
pass
# remains unchanged
try:
pass
except (ValueError,) as e:
pass
try:
pass
except* (ValueError,) as e:
pass
# parenthesis are not removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt):
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt):
pass
# parenthesis are not removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt) as e:
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt) as e:
pass
# parenthesis are removed
try:
pass
except (ValueError if True else TypeError):
pass
try:
pass
except* (ValueError if True else TypeError):
pass
# parenthesis are not removed
try:
try:
pass
except (TypeError, KeyboardInterrupt):
pass
except (ValueError,):
pass
try:
try:
pass
except* (TypeError, KeyboardInterrupt):
pass
except* (ValueError,):
pass
# output
# SEE PEP 758 FOR MORE DETAILS
# remains unchanged
try:
pass
except:
pass
# remains unchanged
try:
pass
except ValueError:
pass
try:
pass
except* ValueError:
pass
# parenthesis are removed
try:
pass
except ValueError:
pass
try:
pass
except* ValueError:
pass
# parenthesis are removed
try:
pass
except ValueError as e:
pass
try:
pass
except* ValueError as e:
pass
# remains unchanged
try:
pass
except (ValueError,):
pass
try:
pass
except* (ValueError,):
pass
# remains unchanged
try:
pass
except (ValueError,) as e:
pass
try:
pass
except* (ValueError,) as e:
pass
# parenthesis are not removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt):
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt):
pass
# parenthesis are not removed
try:
pass
except (ValueError, TypeError, KeyboardInterrupt) as e:
pass
try:
pass
except* (ValueError, TypeError, KeyboardInterrupt) as e:
pass
# parenthesis are removed
try:
pass
except ValueError if True else TypeError:
pass
try:
pass
except* ValueError if True else TypeError:
pass
# parenthesis are not removed
try:
try:
pass
except (TypeError, KeyboardInterrupt):
pass
except (ValueError,):
pass
try:
try:
pass
except* (TypeError, KeyboardInterrupt):
pass
except* (ValueError,):
pass
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/remove_except_types_parens_pre_py314.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/target_version_flag.py | # flags: --minimum-version=3.12 --target-version=py312
# this is invalid in versions below py312
class ClassA[T: str]:
def method1(self) -> T:
...
# output
# this is invalid in versions below py312
class ClassA[T: str]:
def method1(self) -> T: ...
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/target_version_flag.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
psf/black:tests/data/cases/tuple_with_stmt.py | # don't remove the brackets here, it changes the meaning of the code.
# even though the code will always trigger a runtime error
with (name_5, name_4), name_5:
pass
with c, (a, b):
pass
with c, (a, b), d:
pass
with c, (a, b, e, f, g), d:
pass
def test_tuple_as_contextmanager():
from contextlib import nullcontext
try:
with (nullcontext(), nullcontext()), nullcontext():
pass
except TypeError:
# test passed
pass
else:
# this should be a type error
assert False
| {
"repo_id": "psf/black",
"file_path": "tests/data/cases/tuple_with_stmt.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
pydantic/pydantic:tests/types/test_dataclass.py | import dataclasses
from typing import Any
import pytest
import pydantic
from pydantic import ConfigDict, SerializationInfo, TypeAdapter, model_serializer
@pytest.mark.parametrize('config', [True, False, None])
@pytest.mark.parametrize('runtime', [True, False, None])
@pytest.mark.parametrize('dataclass_decorator', [dataclasses.dataclass, pydantic.dataclasses.dataclass])
def test_polymorphic_serialization(config: bool, runtime: bool, dataclass_decorator: Any) -> None:
@dataclass_decorator
class ClassA:
if config is not None:
__pydantic_config__ = ConfigDict(polymorphic_serialization=config)
a: int
@dataclass_decorator
class ClassB(ClassA):
b: str
kwargs = {}
if runtime is not None:
kwargs['polymorphic_serialization'] = runtime
serializer = TypeAdapter(ClassA).serializer
assert serializer.to_python(ClassA(a=123), **kwargs) == {'a': 123}
assert serializer.to_json(ClassA(a=123), **kwargs) == b'{"a":123}'
polymorphism_enabled = runtime if runtime is not None else config
# FIXME: stdlib dataclass does not serialize with polymorphism yet
if polymorphism_enabled and dataclass_decorator is pydantic.dataclasses.dataclass:
assert serializer.to_python(ClassB(a=123, b='test'), **kwargs) == {'a': 123, 'b': 'test'}
assert serializer.to_json(ClassB(a=123, b='test'), **kwargs) == b'{"a":123,"b":"test"}'
else:
assert serializer.to_python(ClassB(a=123, b='test'), **kwargs) == {'a': 123}
assert serializer.to_json(ClassB(a=123, b='test'), **kwargs) == b'{"a":123}'
@pytest.mark.parametrize('config', [True, False, None])
@pytest.mark.parametrize('runtime', [True, False, None])
@pytest.mark.parametrize('dataclass_decorator', [dataclasses.dataclass, pydantic.dataclasses.dataclass])
def test_polymorphic_serialization_with_model_serializer(config: bool, runtime: bool, dataclass_decorator: Any) -> None:
@dataclass_decorator
class ClassA:
if config is not None:
__pydantic_config__ = ConfigDict(polymorphic_serialization=config)
a: int
@model_serializer
def serialize(self, info: SerializationInfo) -> str:
assert info.polymorphic_serialization is runtime
return 'ClassA'
@dataclass_decorator
class ClassB(ClassA):
b: str
@model_serializer
def serialize(self, info: SerializationInfo) -> str:
assert info.polymorphic_serialization is runtime
return 'ClassB'
kwargs = {}
if runtime is not None:
kwargs['polymorphic_serialization'] = runtime
serializer = TypeAdapter(ClassA).serializer
kwargs = {}
if runtime is not None:
kwargs['polymorphic_serialization'] = runtime
assert serializer.to_python(ClassA(a=123), **kwargs) == 'ClassA'
assert serializer.to_json(ClassA(a=123), **kwargs) == b'"ClassA"'
polymorphism_enabled = runtime if runtime is not None else config
# FIXME: stdlib dataclass does not serialize with polymorphism yet
if polymorphism_enabled and dataclass_decorator is pydantic.dataclasses.dataclass:
assert serializer.to_python(ClassB(a=123, b='test'), **kwargs) == 'ClassB'
assert serializer.to_json(ClassB(a=123, b='test'), **kwargs) == b'"ClassB"'
else:
assert serializer.to_python(ClassB(a=123, b='test'), **kwargs) == 'ClassA'
assert serializer.to_json(ClassB(a=123, b='test'), **kwargs) == b'"ClassA"'
| {
"repo_id": "pydantic/pydantic",
"file_path": "tests/types/test_dataclass.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.