sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
modelcontextprotocol/python-sdk:src/mcp/server/auth/json_response.py | from typing import Any
from starlette.responses import JSONResponse
class PydanticJSONResponse(JSONResponse):
    """JSONResponse variant that serializes content via pydantic.

    Using the model's own serializer instead of the stock `json.dumps`
    allows content containing pydantic types (e.g. AnyHttpUrl) to be
    rendered correctly.
    """

    def render(self, content: Any) -> bytes:
        # Let the pydantic model serialize itself; exclude_none keeps
        # unset optional fields out of the payload.
        serialized = content.model_dump_json(exclude_none=True)
        return serialized.encode("utf-8")
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/json_response.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:src/mcp/server/auth/middleware/auth_context.py | import contextvars
from starlette.types import ASGIApp, Receive, Scope, Send
from mcp.server.auth.middleware.bearer_auth import AuthenticatedUser
from mcp.server.auth.provider import AccessToken
# Contextvar holding the authenticated user for the current request context.
# The default is None, indicating no authenticated user is present.
auth_context_var = contextvars.ContextVar[AuthenticatedUser | None]("auth_context", default=None)
def get_access_token() -> AccessToken | None:
    """Return the access token of the currently authenticated user, if any.

    Returns:
        The AccessToken stored in the auth contextvar, or None when no
        authenticated user is set for the current context.
    """
    current_user = auth_context_var.get()
    if current_user is None:
        return None
    return current_user.access_token
class AuthContextMiddleware:
    """ASGI middleware that mirrors the authenticated user into a contextvar.

    Install after the AuthenticationMiddleware so that scope["user"] is
    already populated; the user then becomes reachable anywhere in the
    request lifecycle (e.g. via get_access_token()).
    """

    def __init__(self, app: ASGIApp):
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send):
        candidate = scope.get("user")
        if not isinstance(candidate, AuthenticatedUser):
            # Unauthenticated request: nothing to record, just pass through.
            await self.app(scope, receive, send)
            return
        # Record the user for the duration of this request only; the reset
        # in the finally block prevents leakage across contexts.
        reset_token = auth_context_var.set(candidate)
        try:
            await self.app(scope, receive, send)
        finally:
            auth_context_var.reset(reset_token)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/middleware/auth_context.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:src/mcp/server/auth/middleware/bearer_auth.py | import json
import time
from typing import Any
from pydantic import AnyHttpUrl
from starlette.authentication import AuthCredentials, AuthenticationBackend, SimpleUser
from starlette.requests import HTTPConnection
from starlette.types import Receive, Scope, Send
from mcp.server.auth.provider import AccessToken, TokenVerifier
class AuthenticatedUser(SimpleUser):
    """Starlette user that carries the MCP access-token details it was built from."""

    def __init__(self, auth_info: AccessToken):
        # The user's display name is the OAuth client_id from the token.
        super().__init__(auth_info.client_id)
        self.access_token = auth_info
        self.scopes = auth_info.scopes
class BearerAuthBackend(AuthenticationBackend):
    """Starlette authentication backend that validates Bearer tokens with a TokenVerifier."""

    def __init__(self, token_verifier: TokenVerifier):
        self.token_verifier = token_verifier

    async def authenticate(self, conn: HTTPConnection):
        # Locate the Authorization header, matching its name case-insensitively.
        header_value = None
        for header_name in conn.headers:
            if header_name.lower() == "authorization":
                header_value = conn.headers.get(header_name)
                break
        if not header_value or not header_value.lower().startswith("bearer "):
            return None
        bearer_token = header_value[7:]  # strip the "Bearer " prefix
        # Ask the verifier whether the token is valid at all.
        auth_info = await self.token_verifier.verify_token(bearer_token)
        if not auth_info:
            return None
        # Reject tokens whose expiry timestamp has already passed.
        if auth_info.expires_at and auth_info.expires_at < int(time.time()):
            return None
        return AuthCredentials(auth_info.scopes), AuthenticatedUser(auth_info)
class RequireAuthMiddleware:
    """ASGI middleware that rejects requests lacking a valid Bearer token.

    Expects an upstream authentication middleware to have populated
    scope["user"] and scope["auth"]; replies with a JSON error body and an
    RFC 6750 style WWW-Authenticate challenge (401/403) when the
    requirements are not met.
    """

    def __init__(
        self,
        app: Any,
        required_scopes: list[str],
        resource_metadata_url: AnyHttpUrl | None = None,
    ):
        """Initialize the middleware.

        Args:
            app: ASGI application
            required_scopes: List of scopes that the token must have
            resource_metadata_url: Optional protected resource metadata URL for WWW-Authenticate header
        """
        self.app = app
        self.required_scopes = required_scopes
        self.resource_metadata_url = resource_metadata_url

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        if not isinstance(scope.get("user"), AuthenticatedUser):
            # No authenticated user present -> 401.
            await self._send_auth_error(
                send, status_code=401, error="invalid_token", description="Authentication required"
            )
            return
        credentials = scope.get("auth")
        for needed in self.required_scopes:
            # credentials should always be present here; the None check is defensive.
            if credentials is None or needed not in credentials.scopes:
                await self._send_auth_error(
                    send, status_code=403, error="insufficient_scope", description=f"Required scope: {needed}"
                )
                return
        await self.app(scope, receive, send)

    async def _send_auth_error(self, send: Send, status_code: int, error: str, description: str) -> None:
        """Send a JSON error response carrying a WWW-Authenticate challenge header."""
        challenge_params = [f'error="{error}"', f'error_description="{description}"']
        if self.resource_metadata_url:  # pragma: no cover
            challenge_params.append(f'resource_metadata="{self.resource_metadata_url}"')
        www_authenticate = f"Bearer {', '.join(challenge_params)}"

        payload = json.dumps({"error": error, "error_description": description}).encode()
        await send(
            {
                "type": "http.response.start",
                "status": status_code,
                "headers": [
                    (b"content-type", b"application/json"),
                    (b"content-length", str(len(payload)).encode()),
                    (b"www-authenticate", www_authenticate.encode()),
                ],
            }
        )
        await send(
            {
                "type": "http.response.body",
                "body": payload,
            }
        )
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/middleware/bearer_auth.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/auth/middleware/client_auth.py | import base64
import binascii
import hmac
import time
from typing import Any
from urllib.parse import unquote
from starlette.requests import Request
from mcp.server.auth.provider import OAuthAuthorizationServerProvider
from mcp.shared.auth import OAuthClientInformationFull
class AuthenticationError(Exception):
    """Raised when client authentication for a /token call fails."""

    def __init__(self, message: str):
        # Keep the message as an attribute so handlers can surface it verbatim.
        self.message = message
class ClientAuthenticator:
    """ClientAuthenticator is a callable which validates requests from a client
    application, used to verify /token calls.

    If, during registration, the client requested to be issued a secret, the
    authenticator asserts that /token calls must be authenticated with
    that same secret.

    NOTE: clients can opt for no authentication during registration, in which case this
    logic is skipped.
    """

    def __init__(self, provider: OAuthAuthorizationServerProvider[Any, Any, Any]):
        """Initialize the authenticator.

        Args:
            provider: Provider to look up client information
        """
        self.provider = provider

    async def authenticate_request(self, request: Request) -> OAuthClientInformationFull:
        """Authenticate a client from an HTTP request.

        Extracts client credentials from the appropriate location based on the
        client's registered authentication method and validates them.

        Args:
            request: The HTTP request containing client credentials

        Returns:
            The authenticated client information

        Raises:
            AuthenticationError: If authentication fails
        """
        form_data = await request.form()
        client_id = form_data.get("client_id")
        if not client_id:
            raise AuthenticationError("Missing client_id")
        client = await self.provider.get_client(str(client_id))
        if not client:
            raise AuthenticationError("Invalid client_id")  # pragma: no cover
        request_client_secret: str | None = None
        auth_header = request.headers.get("Authorization", "")
        if client.token_endpoint_auth_method == "client_secret_basic":
            # Credentials arrive base64-encoded as "client_id:client_secret".
            if not auth_header.startswith("Basic "):
                raise AuthenticationError("Missing or invalid Basic authentication in Authorization header")
            try:
                encoded_credentials = auth_header[6:]  # Remove "Basic " prefix
                decoded = base64.b64decode(encoded_credentials).decode("utf-8")
                if ":" not in decoded:
                    raise ValueError("Invalid Basic auth format")
                basic_client_id, request_client_secret = decoded.split(":", 1)
                # URL-decode both parts per RFC 6749 Section 2.3.1
                basic_client_id = unquote(basic_client_id)
                request_client_secret = unquote(request_client_secret)
                # The id inside the Basic header must match the form's client_id.
                # AuthenticationError is not in the except tuple below, so this
                # particular failure propagates to the caller unchanged.
                if basic_client_id != client_id:
                    raise AuthenticationError("Client ID mismatch in Basic auth")
            except (ValueError, UnicodeDecodeError, binascii.Error):
                raise AuthenticationError("Invalid Basic authentication header")
        elif client.token_endpoint_auth_method == "client_secret_post":
            raw_form_data = form_data.get("client_secret")
            # form_data.get() can return an UploadFile or None, so we need to check if it's a string
            if isinstance(raw_form_data, str):
                request_client_secret = str(raw_form_data)
        elif client.token_endpoint_auth_method == "none":
            # Public client: no secret is expected at the token endpoint.
            request_client_secret = None
        else:
            raise AuthenticationError(  # pragma: no cover
                f"Unsupported auth method: {client.token_endpoint_auth_method}"
            )
        # If client from the store expects a secret, validate that the request provides
        # that secret
        if client.client_secret:
            if not request_client_secret:
                raise AuthenticationError("Client secret is required")
            # hmac.compare_digest requires that both arguments are either bytes or a `str` containing
            # only ASCII characters. Since we do not control `request_client_secret`, we encode both
            # arguments to bytes. compare_digest gives a constant-time comparison.
            if not hmac.compare_digest(client.client_secret.encode(), request_client_secret.encode()):
                raise AuthenticationError("Invalid client_secret")
            if client.client_secret_expires_at and client.client_secret_expires_at < int(time.time()):
                raise AuthenticationError("Client secret has expired")  # pragma: no cover
        return client
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/middleware/client_auth.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/auth/provider.py | from dataclasses import dataclass
from typing import Generic, Literal, Protocol, TypeVar
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
from pydantic import AnyUrl, BaseModel
from mcp.shared.auth import OAuthClientInformationFull, OAuthToken
class AuthorizationParams(BaseModel):
    """Validated parameters of an OAuth /authorize request."""

    state: str | None
    scopes: list[str] | None
    code_challenge: str
    redirect_uri: AnyUrl
    redirect_uri_provided_explicitly: bool
    resource: str | None = None  # RFC 8707 resource indicator


class AuthorizationCode(BaseModel):
    """An issued authorization code plus the context needed to redeem it."""

    code: str
    scopes: list[str]
    expires_at: float
    client_id: str
    code_challenge: str
    redirect_uri: AnyUrl
    redirect_uri_provided_explicitly: bool
    resource: str | None = None  # RFC 8707 resource indicator


class RefreshToken(BaseModel):
    """A refresh token and the grant it belongs to."""

    token: str
    client_id: str
    scopes: list[str]
    expires_at: int | None = None


class AccessToken(BaseModel):
    """An access token and the grant it belongs to."""

    token: str
    client_id: str
    scopes: list[str]
    expires_at: int | None = None
    resource: str | None = None  # RFC 8707 resource indicator
# Error codes for dynamic client registration failures (RFC 7591 §3.2.2).
RegistrationErrorCode = Literal[
    "invalid_redirect_uri",
    "invalid_client_metadata",
    "invalid_software_statement",
    "unapproved_software_statement",
]


@dataclass(frozen=True)
class RegistrationError(Exception):
    """Raised when dynamic client registration fails."""

    error: RegistrationErrorCode
    error_description: str | None = None


# Error codes for /authorize failures (RFC 6749 §4.1.2.1).
AuthorizationErrorCode = Literal[
    "invalid_request",
    "unauthorized_client",
    "access_denied",
    "unsupported_response_type",
    "invalid_scope",
    "server_error",
    "temporarily_unavailable",
]


@dataclass(frozen=True)
class AuthorizeError(Exception):
    """Raised when an authorization request is invalid."""

    error: AuthorizationErrorCode
    error_description: str | None = None


# Error codes for /token failures (RFC 6749 §5.2).
TokenErrorCode = Literal[
    "invalid_request",
    "invalid_client",
    "invalid_grant",
    "unauthorized_client",
    "unsupported_grant_type",
    "invalid_scope",
]


@dataclass(frozen=True)
class TokenError(Exception):
    """Raised when a token request is invalid."""

    error: TokenErrorCode
    error_description: str | None = None
class TokenVerifier(Protocol):
    """Protocol for verifying bearer tokens."""

    async def verify_token(self, token: str) -> AccessToken | None:
        """Verify a bearer token and return access info if valid."""


# NOTE: MCPServer doesn't render any of these types in the user response, so it's
# OK to add fields to subclasses which should not be exposed externally.
AuthorizationCodeT = TypeVar("AuthorizationCodeT", bound=AuthorizationCode)
RefreshTokenT = TypeVar("RefreshTokenT", bound=RefreshToken)
AccessTokenT = TypeVar("AccessTokenT", bound=AccessToken)
class OAuthAuthorizationServerProvider(Protocol, Generic[AuthorizationCodeT, RefreshTokenT, AccessTokenT]):
    """Protocol for the pluggable OAuth authorization-server backend.

    Implementations supply client storage and registration, the
    authorization-code and refresh-token grant flows, access-token lookup,
    and token revocation.
    """

    async def get_client(self, client_id: str) -> OAuthClientInformationFull | None:
        """Retrieves client information by client ID.

        Implementors MAY raise NotImplementedError if dynamic client registration is
        disabled in ClientRegistrationOptions.

        Args:
            client_id: The ID of the client to retrieve.

        Returns:
            The client information, or None if the client does not exist.
        """

    async def register_client(self, client_info: OAuthClientInformationFull) -> None:
        """Saves client information as part of registering it.

        Implementors MAY raise NotImplementedError if dynamic client registration is
        disabled in ClientRegistrationOptions.

        Args:
            client_info: The client metadata to register.

        Raises:
            RegistrationError: If the client metadata is invalid.
        """

    async def authorize(self, client: OAuthClientInformationFull, params: AuthorizationParams) -> str:
        """Handle the /authorize endpoint and return a URL that the client
        will be redirected to.

        Many MCP implementations will redirect to a third-party provider to perform
        a second OAuth exchange with that provider. In this sort of setup, the client
        has an OAuth connection with the MCP server, and the MCP server has an OAuth
        connection with the 3rd-party provider. At the end of this flow, the client
        should be redirected to the redirect_uri from params.redirect_uri.

        +--------+     +------------+     +-------------------+
        |        |     |            |     |                   |
        | Client | --> | MCP Server | --> | 3rd Party OAuth   |
        |        |     |            |     | Server            |
        +--------+     +------------+     +-------------------+
                           |   ^                  |
        +------------+     |   |                  |
        |            |     |   |    Redirect      |
        |redirect_uri|<----+   +------------------+
        |            |
        +------------+

        Implementations will need to define another handler on the MCP server's return
        flow to perform the second redirect, and generate and store an authorization
        code as part of completing the OAuth authorization step.

        Implementations SHOULD generate an authorization code with at least 160 bits of
        entropy, and MUST generate an authorization code with at least 128 bits of entropy.
        See https://datatracker.ietf.org/doc/html/rfc6749#section-10.10.

        Args:
            client: The client requesting authorization.
            params: The parameters of the authorization request.

        Returns:
            A URL to redirect the client to for authorization.

        Raises:
            AuthorizeError: If the authorization request is invalid.
        """
        ...

    async def load_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: str
    ) -> AuthorizationCodeT | None:
        """Loads an AuthorizationCode by its code.

        Args:
            client: The client that requested the authorization code.
            authorization_code: The authorization code to get the challenge for.

        Returns:
            The AuthorizationCode, or None if not found.
        """
        ...

    async def exchange_authorization_code(
        self, client: OAuthClientInformationFull, authorization_code: AuthorizationCodeT
    ) -> OAuthToken:
        """Exchanges an authorization code for an access token and refresh token.

        Args:
            client: The client exchanging the authorization code.
            authorization_code: The authorization code to exchange.

        Returns:
            The OAuth token, containing access and refresh tokens.

        Raises:
            TokenError: If the request is invalid.
        """
        ...

    async def load_refresh_token(self, client: OAuthClientInformationFull, refresh_token: str) -> RefreshTokenT | None:
        """Loads a RefreshToken by its token string.

        Args:
            client: The client that is requesting to load the refresh token.
            refresh_token: The refresh token string to load.

        Returns:
            The RefreshToken object if found, or None if not found.
        """
        ...

    async def exchange_refresh_token(
        self,
        client: OAuthClientInformationFull,
        refresh_token: RefreshTokenT,
        scopes: list[str],
    ) -> OAuthToken:
        """Exchanges a refresh token for an access token and refresh token.

        Implementations SHOULD rotate both the access token and refresh token.

        Args:
            client: The client exchanging the refresh token.
            refresh_token: The refresh token to exchange.
            scopes: Optional scopes to request with the new access token.

        Returns:
            The OAuth token, containing access and refresh tokens.

        Raises:
            TokenError: If the request is invalid.
        """
        ...

    async def load_access_token(self, token: str) -> AccessTokenT | None:
        """Loads an access token by its token string.

        Args:
            token: The access token to verify.

        Returns:
            The access token, or None if the token is invalid.
        """

    async def revoke_token(
        self,
        token: AccessTokenT | RefreshTokenT,
    ) -> None:
        """Revokes an access or refresh token.

        If the given token is invalid or already revoked, this method should do nothing.

        Implementations SHOULD revoke both the access token and its corresponding
        refresh token, regardless of which of the access token or refresh token is
        provided.

        Args:
            token: The token to revoke.
        """
def construct_redirect_uri(redirect_uri_base: str, **params: str | None) -> str:
parsed_uri = urlparse(redirect_uri_base)
query_params = [(k, v) for k, vs in parse_qs(parsed_uri.query).items() for v in vs]
for k, v in params.items():
if v is not None:
query_params.append((k, v))
redirect_uri = urlunparse(parsed_uri._replace(query=urlencode(query_params)))
return redirect_uri
class ProviderTokenVerifier(TokenVerifier):
    """Token verifier backed by an OAuthAuthorizationServerProvider.

    Kept for backwards compatibility with existing auth_server_provider
    configurations; deployments using AS/RS separation should prefer a
    dedicated TokenVerifier implementation such as IntrospectionTokenVerifier.
    """

    def __init__(self, provider: "OAuthAuthorizationServerProvider[AuthorizationCode, RefreshToken, AccessToken]"):
        self.provider = provider

    async def verify_token(self, token: str) -> AccessToken | None:
        """Delegate verification to the provider's load_access_token."""
        return await self.provider.load_access_token(token)
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/provider.py",
"license": "MIT License",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
modelcontextprotocol/python-sdk:src/mcp/server/auth/routes.py | from collections.abc import Awaitable, Callable
from typing import Any
from urllib.parse import urlparse
from pydantic import AnyHttpUrl
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route, request_response # type: ignore
from starlette.types import ASGIApp
from mcp.server.auth.handlers.authorize import AuthorizationHandler
from mcp.server.auth.handlers.metadata import MetadataHandler, ProtectedResourceMetadataHandler
from mcp.server.auth.handlers.register import RegistrationHandler
from mcp.server.auth.handlers.revoke import RevocationHandler
from mcp.server.auth.handlers.token import TokenHandler
from mcp.server.auth.middleware.client_auth import ClientAuthenticator
from mcp.server.auth.provider import OAuthAuthorizationServerProvider
from mcp.server.auth.settings import ClientRegistrationOptions, RevocationOptions
from mcp.server.streamable_http import MCP_PROTOCOL_VERSION_HEADER
from mcp.shared.auth import OAuthMetadata, ProtectedResourceMetadata
def validate_issuer_url(url: AnyHttpUrl):
"""Validate that the issuer URL meets OAuth 2.0 requirements.
Args:
url: The issuer URL to validate.
Raises:
ValueError: If the issuer URL is invalid.
"""
# RFC 8414 requires HTTPS, but we allow loopback/localhost HTTP for testing
if url.scheme != "https" and url.host not in ("localhost", "127.0.0.1", "[::1]"):
raise ValueError("Issuer URL must be HTTPS")
# No fragments or query parameters allowed
if url.fragment:
raise ValueError("Issuer URL must not have a fragment")
if url.query:
raise ValueError("Issuer URL must not have a query string")
# Paths at which the OAuth endpoints are mounted, appended to the issuer URL.
AUTHORIZATION_PATH = "/authorize"
TOKEN_PATH = "/token"
REGISTRATION_PATH = "/register"
REVOCATION_PATH = "/revoke"
def cors_middleware(
    handler: Callable[[Request], Response | Awaitable[Response]],
    allow_methods: list[str],
) -> ASGIApp:
    """Wrap a request handler in a permissive CORS layer.

    Any origin is allowed for the given methods, and the MCP protocol
    version header is permitted; intended for endpoints that browser-based
    OAuth clients must be able to call directly.
    """
    wrapped_app = request_response(handler)
    return CORSMiddleware(
        app=wrapped_app,
        allow_origins="*",
        allow_methods=allow_methods,
        allow_headers=[MCP_PROTOCOL_VERSION_HEADER],
    )
def create_auth_routes(
    provider: OAuthAuthorizationServerProvider[Any, Any, Any],
    issuer_url: AnyHttpUrl,
    service_documentation_url: AnyHttpUrl | None = None,
    client_registration_options: ClientRegistrationOptions | None = None,
    revocation_options: RevocationOptions | None = None,
) -> list[Route]:
    """Build the Starlette routes for an OAuth authorization server.

    Always mounts the metadata, authorize, and token endpoints; registration
    and revocation endpoints are added only when the corresponding options
    enable them.

    Args:
        provider: Backend implementing the OAuth server behavior.
        issuer_url: Issuer URL; validated before any routes are built.
        service_documentation_url: Optional documentation URL for the metadata.
        client_registration_options: Dynamic registration settings; defaults used when None.
        revocation_options: Revocation settings; defaults used when None.

    Returns:
        The list of routes to mount on the server.

    Raises:
        ValueError: If the issuer URL is invalid.
    """
    validate_issuer_url(issuer_url)
    client_registration_options = client_registration_options or ClientRegistrationOptions()
    revocation_options = revocation_options or RevocationOptions()
    metadata = build_metadata(
        issuer_url,
        service_documentation_url,
        client_registration_options,
        revocation_options,
    )
    client_authenticator = ClientAuthenticator(provider)

    # Create routes
    # Allow CORS requests for endpoints meant to be hit by the OAuth client
    # (with the client secret). This is intended to support things like MCP Inspector,
    # where the client runs in a web browser.
    routes = [
        Route(
            "/.well-known/oauth-authorization-server",
            endpoint=cors_middleware(
                MetadataHandler(metadata).handle,
                ["GET", "OPTIONS"],
            ),
            methods=["GET", "OPTIONS"],
        ),
        Route(
            AUTHORIZATION_PATH,
            # do not allow CORS for authorization endpoint;
            # clients should just redirect to this
            endpoint=AuthorizationHandler(provider).handle,
            methods=["GET", "POST"],
        ),
        Route(
            TOKEN_PATH,
            endpoint=cors_middleware(
                TokenHandler(provider, client_authenticator).handle,
                ["POST", "OPTIONS"],
            ),
            methods=["POST", "OPTIONS"],
        ),
    ]
    if client_registration_options.enabled:  # pragma: no branch
        registration_handler = RegistrationHandler(
            provider,
            options=client_registration_options,
        )
        routes.append(
            Route(
                REGISTRATION_PATH,
                endpoint=cors_middleware(
                    registration_handler.handle,
                    ["POST", "OPTIONS"],
                ),
                methods=["POST", "OPTIONS"],
            )
        )
    if revocation_options.enabled:  # pragma: no branch
        revocation_handler = RevocationHandler(provider, client_authenticator)
        routes.append(
            Route(
                REVOCATION_PATH,
                endpoint=cors_middleware(
                    revocation_handler.handle,
                    ["POST", "OPTIONS"],
                ),
                methods=["POST", "OPTIONS"],
            )
        )
    return routes
def build_metadata(
    issuer_url: AnyHttpUrl,
    service_documentation_url: AnyHttpUrl | None,
    client_registration_options: ClientRegistrationOptions,
    revocation_options: RevocationOptions,
) -> OAuthMetadata:
    """Assemble authorization-server metadata for this issuer.

    Endpoint URLs are formed by appending the well-known paths to the
    issuer URL; registration and revocation endpoints are included only
    when the corresponding options enable them.
    """
    authorization_url = AnyHttpUrl(str(issuer_url).rstrip("/") + AUTHORIZATION_PATH)
    token_url = AnyHttpUrl(str(issuer_url).rstrip("/") + TOKEN_PATH)
    # Create metadata
    metadata = OAuthMetadata(
        issuer=issuer_url,
        authorization_endpoint=authorization_url,
        token_endpoint=token_url,
        scopes_supported=client_registration_options.valid_scopes,
        response_types_supported=["code"],
        response_modes_supported=None,
        grant_types_supported=["authorization_code", "refresh_token"],
        token_endpoint_auth_methods_supported=["client_secret_post", "client_secret_basic"],
        token_endpoint_auth_signing_alg_values_supported=None,
        service_documentation=service_documentation_url,
        ui_locales_supported=None,
        op_policy_uri=None,
        op_tos_uri=None,
        introspection_endpoint=None,
        code_challenge_methods_supported=["S256"],
    )
    # Add registration endpoint if supported
    if client_registration_options.enabled:  # pragma: no branch
        metadata.registration_endpoint = AnyHttpUrl(str(issuer_url).rstrip("/") + REGISTRATION_PATH)
    # Add revocation endpoint if supported
    if revocation_options.enabled:  # pragma: no branch
        metadata.revocation_endpoint = AnyHttpUrl(str(issuer_url).rstrip("/") + REVOCATION_PATH)
        metadata.revocation_endpoint_auth_methods_supported = ["client_secret_post", "client_secret_basic"]
    return metadata
def build_resource_metadata_url(resource_server_url: AnyHttpUrl) -> AnyHttpUrl:
    """Build RFC 9728 compliant protected resource metadata URL.

    Inserts /.well-known/oauth-protected-resource between host and resource
    path as specified in RFC 9728 §3.1.

    Args:
        resource_server_url: The resource server URL (e.g., https://example.com/mcp)

    Returns:
        The metadata URL (e.g., https://example.com/.well-known/oauth-protected-resource/mcp)
    """
    parts = urlparse(str(resource_server_url))
    # A bare "/" path is treated as no path at all.
    suffix = "" if parts.path == "/" else parts.path
    return AnyHttpUrl(f"{parts.scheme}://{parts.netloc}/.well-known/oauth-protected-resource{suffix}")
def create_protected_resource_routes(
    resource_url: AnyHttpUrl,
    authorization_servers: list[AnyHttpUrl],
    scopes_supported: list[str] | None = None,
    resource_name: str | None = None,
    resource_documentation: AnyHttpUrl | None = None,
) -> list[Route]:
    """Create routes for OAuth 2.0 Protected Resource Metadata (RFC 9728).

    Args:
        resource_url: The URL of this resource server
        authorization_servers: List of authorization servers that can issue tokens
        scopes_supported: Optional list of scopes supported by this resource
        resource_name: Optional human-readable name for this resource
        resource_documentation: Optional URL to documentation for this resource

    Returns:
        List of Starlette routes for protected resource metadata
    """
    metadata = ProtectedResourceMetadata(
        resource=resource_url,
        authorization_servers=authorization_servers,
        scopes_supported=scopes_supported,
        resource_name=resource_name,
        resource_documentation=resource_documentation,
        # bearer_methods_supported defaults to ["header"] in the model
    )
    handler = ProtectedResourceMetadataHandler(metadata)
    # RFC 9728 §3.1: Register route at /.well-known/oauth-protected-resource + resource path
    metadata_url = build_resource_metadata_url(resource_url)
    # Extract just the path part for route registration; scheme and host are
    # determined by the deployment, only the path is mounted here.
    parsed = urlparse(str(metadata_url))
    well_known_path = parsed.path
    return [
        Route(
            well_known_path,
            # GET-only metadata endpoint, CORS-open so browser clients can fetch it.
            endpoint=cors_middleware(handler.handle, ["GET", "OPTIONS"]),
            methods=["GET", "OPTIONS"],
        )
    ]
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/routes.py",
"license": "MIT License",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:src/mcp/server/auth/settings.py | from pydantic import AnyHttpUrl, BaseModel, Field
class ClientRegistrationOptions(BaseModel):
    """Settings controlling dynamic client registration."""

    enabled: bool = False
    client_secret_expiry_seconds: int | None = None
    valid_scopes: list[str] | None = None
    default_scopes: list[str] | None = None
class RevocationOptions(BaseModel):
    """Settings controlling the token revocation endpoint."""

    enabled: bool = False
class AuthSettings(BaseModel):
    """Top-level authentication configuration for an MCP server."""

    issuer_url: AnyHttpUrl = Field(
        ...,
        description="OAuth authorization server URL that issues tokens for this resource server.",
    )
    service_documentation_url: AnyHttpUrl | None = None
    client_registration_options: ClientRegistrationOptions | None = None
    revocation_options: RevocationOptions | None = None
    required_scopes: list[str] | None = None

    # Resource Server settings (when operating as RS only)
    # NOTE(review): Field(...) makes this required even though the annotation
    # allows None — callers must pass it explicitly; confirm this is intended.
    resource_server_url: AnyHttpUrl | None = Field(
        ...,
        description="The URL of the MCP server to be used as the resource identifier "
        "and base route to look up OAuth Protected Resource Metadata.",
    )
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/server/auth/settings.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
modelcontextprotocol/python-sdk:src/mcp/shared/auth.py | from typing import Any, Literal
from pydantic import AnyHttpUrl, AnyUrl, BaseModel, Field, field_validator
class OAuthToken(BaseModel):
    """See https://datatracker.ietf.org/doc/html/rfc6749#section-5.1"""

    access_token: str
    token_type: Literal["Bearer"] = "Bearer"
    expires_in: int | None = None
    scope: str | None = None
    refresh_token: str | None = None

    @field_validator("token_type", mode="before")
    @classmethod
    def normalize_token_type(cls, v: str | None) -> str | None:
        """Accept any casing of the token type and normalize it."""
        if isinstance(v, str):
            # Bearer is title-cased in the spec, so we normalize it
            # https://datatracker.ietf.org/doc/html/rfc6750#section-4
            return v.title()
        return v  # pragma: no cover
class InvalidScopeError(Exception):
    """Raised when a client requests a scope it was not registered with."""

    def __init__(self, message: str):
        # Expose the message as an attribute for callers that surface it.
        self.message = message
class InvalidRedirectUriError(Exception):
    """Raised when a redirect URI is missing or not registered for the client."""

    def __init__(self, message: str):
        # Expose the message as an attribute for callers that surface it.
        self.message = message
class OAuthClientMetadata(BaseModel):
    """RFC 7591 OAuth 2.0 Dynamic Client Registration Metadata.

    See https://datatracker.ietf.org/doc/html/rfc7591#section-2
    """

    redirect_uris: list[AnyUrl] | None = Field(..., min_length=1)
    # supported auth methods for the token endpoint
    token_endpoint_auth_method: (
        Literal["none", "client_secret_post", "client_secret_basic", "private_key_jwt"] | None
    ) = None
    # supported grant_types of this implementation
    grant_types: list[
        Literal["authorization_code", "refresh_token", "urn:ietf:params:oauth:grant-type:jwt-bearer"] | str
    ] = [
        "authorization_code",
        "refresh_token",
    ]
    # The MCP spec requires the "code" response type, but OAuth
    # servers may also return additional types they support
    response_types: list[str] = ["code"]
    scope: str | None = None
    # these fields are currently unused, but we support & store them for potential
    # future use
    client_name: str | None = None
    client_uri: AnyHttpUrl | None = None
    logo_uri: AnyHttpUrl | None = None
    contacts: list[str] | None = None
    tos_uri: AnyHttpUrl | None = None
    policy_uri: AnyHttpUrl | None = None
    jwks_uri: AnyHttpUrl | None = None
    jwks: Any | None = None
    software_id: str | None = None
    software_version: str | None = None

    def validate_scope(self, requested_scope: str | None) -> list[str] | None:
        """Check a space-delimited scope string against the registered scope.

        Args:
            requested_scope: Space-delimited scopes from the request, or None.

        Returns:
            The requested scopes as a list, or None if none were requested.

        Raises:
            InvalidScopeError: If any requested scope was not registered.
        """
        if requested_scope is None:
            return None
        requested_scopes = requested_scope.split(" ")
        allowed_scopes = [] if self.scope is None else self.scope.split(" ")
        for scope in requested_scopes:
            if scope not in allowed_scopes:  # pragma: no branch
                raise InvalidScopeError(f"Client was not registered with scope {scope}")
        return requested_scopes  # pragma: no cover

    def validate_redirect_uri(self, redirect_uri: AnyUrl | None) -> AnyUrl:
        """Resolve and validate the redirect URI for an authorization request.

        Args:
            redirect_uri: The redirect URI from the request, or None to fall
                back to the single registered URI.

        Returns:
            The validated redirect URI.

        Raises:
            InvalidRedirectUriError: If the URI is not registered, or if it
                was omitted while multiple URIs are registered.
        """
        if redirect_uri is not None:
            # Validate redirect_uri against client's registered redirect URIs
            if self.redirect_uris is None or redirect_uri not in self.redirect_uris:
                raise InvalidRedirectUriError(f"Redirect URI '{redirect_uri}' not registered for client")
            return redirect_uri
        elif self.redirect_uris is not None and len(self.redirect_uris) == 1:
            return self.redirect_uris[0]
        else:
            raise InvalidRedirectUriError("redirect_uri must be specified when client has multiple registered URIs")
class OAuthClientInformationFull(OAuthClientMetadata):
    """RFC 7591 OAuth 2.0 Dynamic Client Registration full response
    (client information plus metadata).
    """

    # Server-issued identifiers/credentials; all default to None
    # (presumably populated at registration time — confirm with callers).
    client_id: str | None = None
    client_secret: str | None = None
    client_id_issued_at: int | None = None
    client_secret_expires_at: int | None = None
class OAuthMetadata(BaseModel):
    """RFC 8414 OAuth 2.0 Authorization Server Metadata.

    See https://datatracker.ietf.org/doc/html/rfc8414#section-2
    """

    # Required fields
    issuer: AnyHttpUrl
    authorization_endpoint: AnyHttpUrl
    token_endpoint: AnyHttpUrl
    # Optional dynamic client registration endpoint (RFC 7591)
    registration_endpoint: AnyHttpUrl | None = None
    scopes_supported: list[str] | None = None
    # Only the "code" response type is advertised by default
    response_types_supported: list[str] = ["code"]
    response_modes_supported: list[str] | None = None
    grant_types_supported: list[str] | None = None
    token_endpoint_auth_methods_supported: list[str] | None = None
    token_endpoint_auth_signing_alg_values_supported: list[str] | None = None
    service_documentation: AnyHttpUrl | None = None
    ui_locales_supported: list[str] | None = None
    op_policy_uri: AnyHttpUrl | None = None
    op_tos_uri: AnyHttpUrl | None = None
    # Token revocation (RFC 7009) endpoint metadata
    revocation_endpoint: AnyHttpUrl | None = None
    revocation_endpoint_auth_methods_supported: list[str] | None = None
    revocation_endpoint_auth_signing_alg_values_supported: list[str] | None = None
    # Token introspection (RFC 7662) endpoint metadata
    introspection_endpoint: AnyHttpUrl | None = None
    introspection_endpoint_auth_methods_supported: list[str] | None = None
    introspection_endpoint_auth_signing_alg_values_supported: list[str] | None = None
    # PKCE (RFC 7636) challenge methods, e.g. ["S256"]
    code_challenge_methods_supported: list[str] | None = None
    client_id_metadata_document_supported: bool | None = None
class ProtectedResourceMetadata(BaseModel):
    """RFC 9728 OAuth 2.0 Protected Resource Metadata.

    See https://datatracker.ietf.org/doc/html/rfc9728#section-2
    """

    # The resource identifier (required)
    resource: AnyHttpUrl
    # At least one authorization server must be listed
    authorization_servers: list[AnyHttpUrl] = Field(..., min_length=1)
    jwks_uri: AnyHttpUrl | None = None
    scopes_supported: list[str] | None = None
    bearer_methods_supported: list[str] | None = Field(default=["header"])  # MCP only supports header method
    resource_signing_alg_values_supported: list[str] | None = None
    # Human-readable metadata
    resource_name: str | None = None
    resource_documentation: AnyHttpUrl | None = None
    resource_policy_uri: AnyHttpUrl | None = None
    resource_tos_uri: AnyHttpUrl | None = None
    # tls_client_certificate_bound_access_tokens default is False, but omitted here for clarity
    tls_client_certificate_bound_access_tokens: bool | None = None
    authorization_details_types_supported: list[str] | None = None
    # DPoP (RFC 9449) support metadata
    dpop_signing_alg_values_supported: list[str] | None = None
    # dpop_bound_access_tokens_required default is False, but omitted here for clarity
    dpop_bound_access_tokens_required: bool | None = None
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "src/mcp/shared/auth.py",
"license": "MIT License",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
modelcontextprotocol/python-sdk:tests/server/auth/middleware/test_auth_context.py | """Tests for the AuthContext middleware components."""
import time
import pytest
from starlette.types import Message, Receive, Scope, Send
from mcp.server.auth.middleware.auth_context import (
AuthContextMiddleware,
auth_context_var,
get_access_token,
)
from mcp.server.auth.middleware.bearer_auth import AuthenticatedUser
from mcp.server.auth.provider import AccessToken
class MockApp:
    """Mock ASGI app for testing.

    Records the ASGI arguments it was called with, plus the access token
    that was visible via the auth contextvar while the call was running.
    """

    def __init__(self):
        self.called = False
        self.scope: Scope | None = None
        self.receive: Receive | None = None
        self.send: Send | None = None
        self.access_token_during_call: AccessToken | None = None

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        self.called = True
        self.scope = scope
        self.receive = receive
        self.send = send
        # Check the context during the call: the middleware resets the
        # contextvar on exit, so this is the only window to observe it.
        self.access_token_during_call = get_access_token()
@pytest.fixture
def valid_access_token() -> AccessToken:
    """Create a valid access token (read/write scopes, expires in one hour)."""
    return AccessToken(
        token="valid_token",
        client_id="test_client",
        scopes=["read", "write"],
        expires_at=int(time.time()) + 3600,  # 1 hour from now
    )
@pytest.mark.anyio
async def test_auth_context_middleware_with_authenticated_user(valid_access_token: AccessToken):
    """Test middleware with an authenticated user in scope."""
    inner_app = MockApp()
    middleware = AuthContextMiddleware(inner_app)

    # Put an authenticated user into the ASGI scope, as the upstream
    # authentication middleware would have done.
    user = AuthenticatedUser(valid_access_token)
    http_scope: Scope = {"type": "http", "user": user}

    async def receive() -> Message:  # pragma: no cover
        return {"type": "http.request"}

    async def send(message: Message) -> None:  # pragma: no cover
        pass

    # The contextvar must be unset before the middleware runs.
    assert auth_context_var.get() is None
    assert get_access_token() is None

    await middleware(http_scope, receive, send)

    # The wrapped app was invoked with the original ASGI arguments...
    assert inner_app.called
    assert inner_app.scope == http_scope
    assert inner_app.receive == receive
    assert inner_app.send == send

    # ...and the access token was visible while it was running.
    assert inner_app.access_token_during_call == valid_access_token

    # The contextvar is reset once the middleware returns.
    assert auth_context_var.get() is None
    assert get_access_token() is None
@pytest.mark.anyio
async def test_auth_context_middleware_with_no_user():
    """Test middleware with no user in scope."""
    inner_app = MockApp()
    middleware = AuthContextMiddleware(inner_app)

    # No "user" key: the request was never authenticated.
    http_scope: Scope = {"type": "http"}

    async def receive() -> Message:  # pragma: no cover
        return {"type": "http.request"}

    async def send(message: Message) -> None:  # pragma: no cover
        pass

    # The contextvar must be unset before the middleware runs.
    assert auth_context_var.get() is None
    assert get_access_token() is None

    await middleware(http_scope, receive, send)

    # The wrapped app was still invoked with the original ASGI arguments...
    assert inner_app.called
    assert inner_app.scope == http_scope
    assert inner_app.receive == receive
    assert inner_app.send == send

    # ...but no access token was visible during the call.
    assert inner_app.access_token_during_call is None

    # The context stays empty after the middleware returns.
    assert auth_context_var.get() is None
    assert get_access_token() is None
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/auth/middleware/test_auth_context.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/auth/middleware/test_bearer_auth.py | """Tests for the BearerAuth middleware components."""
import time
from typing import Any, cast
import pytest
from starlette.authentication import AuthCredentials
from starlette.datastructures import Headers
from starlette.requests import Request
from starlette.types import Message, Receive, Scope, Send
from mcp.server.auth.middleware.bearer_auth import AuthenticatedUser, BearerAuthBackend, RequireAuthMiddleware
from mcp.server.auth.provider import AccessToken, OAuthAuthorizationServerProvider, ProviderTokenVerifier
class MockOAuthProvider:
    """Mock OAuth provider for testing.

    This is a simplified version that only implements the methods needed for testing
    the BearerAuthMiddleware components.
    """

    def __init__(self):
        # Maps raw token string -> AccessToken record.
        self.tokens: dict[str, AccessToken] = {}  # token -> AccessToken

    def add_token(self, token: str, access_token: AccessToken) -> None:
        """Add a token to the provider."""
        self.tokens[token] = access_token

    async def load_access_token(self, token: str) -> AccessToken | None:
        """Load an access token; returns None for unknown tokens."""
        return self.tokens.get(token)
def add_token_to_provider(
    provider: OAuthAuthorizationServerProvider[Any, Any, Any],
    token: str,
    access_token: AccessToken,
) -> None:
    """Register *token* on the mock provider hidden behind the generic interface.

    The fixture exposes the provider under the abstract provider type, so cast
    back to MockOAuthProvider before calling its test-only helper.
    """
    cast(MockOAuthProvider, provider).add_token(token, access_token)
class MockApp:
    """Mock ASGI app for testing.

    Records whether it was invoked and with which ASGI arguments, so tests
    can assert that middleware forwarded (or blocked) the request.
    """

    def __init__(self):
        self.called = False
        self.scope: Scope | None = None
        self.receive: Receive | None = None
        self.send: Send | None = None

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        self.called = True
        self.scope = scope
        self.receive = receive
        self.send = send
@pytest.fixture
def mock_oauth_provider() -> OAuthAuthorizationServerProvider[Any, Any, Any]:
    """Create a mock OAuth provider."""
    # Use type casting to satisfy the type checker; only the token-loading
    # subset of the provider interface is actually implemented by the mock.
    return cast(OAuthAuthorizationServerProvider[Any, Any, Any], MockOAuthProvider())
@pytest.fixture
def valid_access_token() -> AccessToken:
    """Create a valid access token (read/write scopes, expires in one hour)."""
    return AccessToken(
        token="valid_token",
        client_id="test_client",
        scopes=["read", "write"],
        expires_at=int(time.time()) + 3600,  # 1 hour from now
    )
@pytest.fixture
def expired_access_token() -> AccessToken:
    """Create an expired access token (expiry one hour in the past)."""
    return AccessToken(
        token="expired_token",
        client_id="test_client",
        scopes=["read"],
        expires_at=int(time.time()) - 3600,  # 1 hour ago
    )
@pytest.fixture
def no_expiry_access_token() -> AccessToken:
    """Create an access token with no expiry (expires_at=None)."""
    return AccessToken(
        token="no_expiry_token",
        client_id="test_client",
        scopes=["read", "write"],
        expires_at=None,
    )
@pytest.mark.anyio
class TestBearerAuthBackend:
    """Tests for the BearerAuthBackend class.

    Each test builds a bare ASGI request and checks what ``authenticate``
    returns for a given Authorization header.
    """

    async def test_no_auth_header(self, mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any]):
        """Test authentication with no Authorization header."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        request = Request({"type": "http", "headers": []})
        result = await backend.authenticate(request)
        # A missing header means "not authenticated", not an error.
        assert result is None

    async def test_non_bearer_auth_header(self, mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any]):
        """Test authentication with non-Bearer Authorization header."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        request = Request(
            {
                "type": "http",
                "headers": [(b"authorization", b"Basic dXNlcjpwYXNz")],
            }
        )
        result = await backend.authenticate(request)
        # Only the Bearer scheme is handled by this backend.
        assert result is None

    async def test_invalid_token(self, mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any]):
        """Test authentication with invalid token."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        request = Request(
            {
                "type": "http",
                "headers": [(b"authorization", b"Bearer invalid_token")],
            }
        )
        result = await backend.authenticate(request)
        # Token unknown to the provider -> unauthenticated.
        assert result is None

    async def test_expired_token(
        self,
        mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        expired_access_token: AccessToken,
    ):
        """Test authentication with expired token."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        add_token_to_provider(mock_oauth_provider, "expired_token", expired_access_token)
        request = Request(
            {
                "type": "http",
                "headers": [(b"authorization", b"Bearer expired_token")],
            }
        )
        result = await backend.authenticate(request)
        # A token known to the provider but past its expiry is rejected.
        assert result is None

    async def test_valid_token(
        self,
        mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        valid_access_token: AccessToken,
    ):
        """Test authentication with valid token."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        add_token_to_provider(mock_oauth_provider, "valid_token", valid_access_token)
        request = Request(
            {
                "type": "http",
                "headers": [(b"authorization", b"Bearer valid_token")],
            }
        )
        result = await backend.authenticate(request)
        assert result is not None
        credentials, user = result
        assert isinstance(credentials, AuthCredentials)
        assert isinstance(user, AuthenticatedUser)
        # The credentials carry the token's scopes; the user wraps the token.
        assert credentials.scopes == ["read", "write"]
        assert user.display_name == "test_client"
        assert user.access_token == valid_access_token
        assert user.scopes == ["read", "write"]

    async def test_token_without_expiry(
        self,
        mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        no_expiry_access_token: AccessToken,
    ):
        """Test authentication with token that has no expiry."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        add_token_to_provider(mock_oauth_provider, "no_expiry_token", no_expiry_access_token)
        request = Request(
            {
                "type": "http",
                "headers": [(b"authorization", b"Bearer no_expiry_token")],
            }
        )
        result = await backend.authenticate(request)
        # expires_at=None must be treated as "never expires".
        assert result is not None
        credentials, user = result
        assert isinstance(credentials, AuthCredentials)
        assert isinstance(user, AuthenticatedUser)
        assert credentials.scopes == ["read", "write"]
        assert user.display_name == "test_client"
        assert user.access_token == no_expiry_access_token
        assert user.scopes == ["read", "write"]

    async def test_lowercase_bearer_prefix(
        self,
        mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        valid_access_token: AccessToken,
    ):
        """Test with lowercase 'bearer' prefix in Authorization header"""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        add_token_to_provider(mock_oauth_provider, "valid_token", valid_access_token)
        # The auth scheme name is matched case-insensitively.
        headers = Headers({"Authorization": "bearer valid_token"})
        scope = {"type": "http", "headers": headers.raw}
        request = Request(scope)
        result = await backend.authenticate(request)
        assert result is not None
        credentials, user = result
        assert isinstance(credentials, AuthCredentials)
        assert isinstance(user, AuthenticatedUser)
        assert credentials.scopes == ["read", "write"]
        assert user.display_name == "test_client"
        assert user.access_token == valid_access_token

    async def test_mixed_case_bearer_prefix(
        self,
        mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        valid_access_token: AccessToken,
    ):
        """Test with mixed 'BeArEr' prefix in Authorization header"""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        add_token_to_provider(mock_oauth_provider, "valid_token", valid_access_token)
        headers = Headers({"authorization": "BeArEr valid_token"})
        scope = {"type": "http", "headers": headers.raw}
        request = Request(scope)
        result = await backend.authenticate(request)
        assert result is not None
        credentials, user = result
        assert isinstance(credentials, AuthCredentials)
        assert isinstance(user, AuthenticatedUser)
        assert credentials.scopes == ["read", "write"]
        assert user.display_name == "test_client"
        assert user.access_token == valid_access_token

    async def test_mixed_case_authorization_header(
        self,
        mock_oauth_provider: OAuthAuthorizationServerProvider[Any, Any, Any],
        valid_access_token: AccessToken,
    ):
        """Test authentication with mixed 'Authorization' header."""
        backend = BearerAuthBackend(token_verifier=ProviderTokenVerifier(mock_oauth_provider))
        add_token_to_provider(mock_oauth_provider, "valid_token", valid_access_token)
        # Header names are case-insensitive as well.
        headers = Headers({"AuThOrIzAtIoN": "BeArEr valid_token"})
        scope = {"type": "http", "headers": headers.raw}
        request = Request(scope)
        result = await backend.authenticate(request)
        assert result is not None
        credentials, user = result
        assert isinstance(credentials, AuthCredentials)
        assert isinstance(user, AuthenticatedUser)
        assert credentials.scopes == ["read", "write"]
        assert user.display_name == "test_client"
        assert user.access_token == valid_access_token
@pytest.mark.anyio
class TestRequireAuthMiddleware:
    """Tests for the RequireAuthMiddleware class.

    Covers the three outcomes: 401 (no/unauthenticated user), 403 (missing
    required scope or missing credentials), and pass-through on success.
    """

    async def test_no_user(self):
        """Test middleware with no user in scope."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=["read"])
        scope: Scope = {"type": "http"}

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        sent_messages: list[Message] = []

        async def send(message: Message) -> None:
            sent_messages.append(message)

        await middleware(scope, receive, send)

        # Check that a 401 response was sent
        assert len(sent_messages) == 2  # http.response.start + http.response.body
        assert sent_messages[0]["type"] == "http.response.start"
        assert sent_messages[0]["status"] == 401
        # The challenge is advertised via a WWW-Authenticate header
        assert any(h[0] == b"www-authenticate" for h in sent_messages[0]["headers"])
        assert not app.called

    async def test_non_authenticated_user(self):
        """Test middleware with non-authenticated user in scope."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=["read"])
        # A "user" that is not an AuthenticatedUser instance
        scope: Scope = {"type": "http", "user": object()}

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        sent_messages: list[Message] = []

        async def send(message: Message) -> None:
            sent_messages.append(message)

        await middleware(scope, receive, send)

        # Check that a 401 response was sent
        assert len(sent_messages) == 2
        assert sent_messages[0]["type"] == "http.response.start"
        assert sent_messages[0]["status"] == 401
        assert any(h[0] == b"www-authenticate" for h in sent_messages[0]["headers"])
        assert not app.called

    async def test_missing_required_scope(self, valid_access_token: AccessToken):
        """Test middleware with user missing required scope."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=["admin"])

        # Create a user with read/write scopes but not admin
        user = AuthenticatedUser(valid_access_token)
        auth = AuthCredentials(["read", "write"])
        scope: Scope = {"type": "http", "user": user, "auth": auth}

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        sent_messages: list[Message] = []

        async def send(message: Message) -> None:
            sent_messages.append(message)

        await middleware(scope, receive, send)

        # Check that a 403 response was sent (authenticated but not authorized)
        assert len(sent_messages) == 2
        assert sent_messages[0]["type"] == "http.response.start"
        assert sent_messages[0]["status"] == 403
        assert any(h[0] == b"www-authenticate" for h in sent_messages[0]["headers"])
        assert not app.called

    async def test_no_auth_credentials(self, valid_access_token: AccessToken):
        """Test middleware with no auth credentials in scope."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=["read"])

        # Create a user with read/write scopes
        user = AuthenticatedUser(valid_access_token)
        scope: Scope = {"type": "http", "user": user}  # No auth credentials

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        sent_messages: list[Message] = []

        async def send(message: Message) -> None:
            sent_messages.append(message)

        await middleware(scope, receive, send)

        # Check that a 403 response was sent: with no credentials object
        # there are no granted scopes, so the scope check fails.
        assert len(sent_messages) == 2
        assert sent_messages[0]["type"] == "http.response.start"
        assert sent_messages[0]["status"] == 403
        assert any(h[0] == b"www-authenticate" for h in sent_messages[0]["headers"])
        assert not app.called

    async def test_has_required_scopes(self, valid_access_token: AccessToken):
        """Test middleware with user having all required scopes."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=["read"])

        # Create a user with read/write scopes
        user = AuthenticatedUser(valid_access_token)
        auth = AuthCredentials(["read", "write"])
        scope: Scope = {"type": "http", "user": user, "auth": auth}

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        async def send(message: Message) -> None:  # pragma: no cover
            pass

        await middleware(scope, receive, send)

        # Request is forwarded untouched to the wrapped app.
        assert app.called
        assert app.scope == scope
        assert app.receive == receive
        assert app.send == send

    async def test_multiple_required_scopes(self, valid_access_token: AccessToken):
        """Test middleware with multiple required scopes."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=["read", "write"])

        # Create a user with read/write scopes
        user = AuthenticatedUser(valid_access_token)
        auth = AuthCredentials(["read", "write"])
        scope: Scope = {"type": "http", "user": user, "auth": auth}

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        async def send(message: Message) -> None:  # pragma: no cover
            pass

        await middleware(scope, receive, send)

        # All required scopes are granted, so the request passes through.
        assert app.called
        assert app.scope == scope
        assert app.receive == receive
        assert app.send == send

    async def test_no_required_scopes(self, valid_access_token: AccessToken):
        """Test middleware with no required scopes."""
        app = MockApp()
        middleware = RequireAuthMiddleware(app, required_scopes=[])

        # Create a user with read/write scopes
        user = AuthenticatedUser(valid_access_token)
        auth = AuthCredentials(["read", "write"])
        scope: Scope = {"type": "http", "user": user, "auth": auth}

        # Create dummy async functions for receive and send
        async def receive() -> Message:  # pragma: no cover
            return {"type": "http.request"}

        async def send(message: Message) -> None:  # pragma: no cover
            pass

        await middleware(scope, receive, send)

        # An empty requirement list only requires authentication.
        assert app.called
        assert app.scope == scope
        assert app.receive == receive
        assert app.send == send
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/auth/middleware/test_bearer_auth.py",
"license": "MIT License",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/server/auth/test_error_handling.py | """Tests for OAuth error handling in the auth handlers."""
import base64
import hashlib
import secrets
import unittest.mock
from typing import Any
from urllib.parse import parse_qs, urlparse
import httpx
import pytest
from httpx import ASGITransport
from pydantic import AnyHttpUrl
from starlette.applications import Starlette
from mcp.server.auth.provider import AuthorizeError, RegistrationError, TokenError
from mcp.server.auth.routes import create_auth_routes
from mcp.server.auth.settings import ClientRegistrationOptions, RevocationOptions
from tests.server.mcpserver.auth.test_auth_integration import MockOAuthProvider
@pytest.fixture
def oauth_provider() -> MockOAuthProvider:
    """Return a MockOAuthProvider instance that can be configured to raise errors."""
    return MockOAuthProvider()
@pytest.fixture
def app(oauth_provider: MockOAuthProvider):
    """Starlette app exposing the OAuth routes backed by the mock provider."""
    # Enable client registration
    client_registration_options = ClientRegistrationOptions(enabled=True)
    revocation_options = RevocationOptions(enabled=True)

    # Create auth routes
    auth_routes = create_auth_routes(
        oauth_provider,
        issuer_url=AnyHttpUrl("http://localhost"),
        client_registration_options=client_registration_options,
        revocation_options=revocation_options,
    )

    # Create Starlette app with routes directly
    return Starlette(routes=auth_routes)
@pytest.fixture
def client(app: Starlette):
    """In-process HTTP client wired to the app via ASGITransport (no sockets)."""
    transport = ASGITransport(app=app)
    # Use base_url without a path since routes are directly on the app
    return httpx.AsyncClient(transport=transport, base_url="http://localhost")
@pytest.fixture
def pkce_challenge():
    """Create a PKCE challenge with code_verifier and code_challenge.

    Uses the S256 method: challenge = BASE64URL(SHA256(verifier)) with
    base64 padding stripped.
    """
    verifier = secrets.token_urlsafe(64)[:128]
    digest = hashlib.sha256(verifier.encode("ascii")).digest()
    challenge = base64.urlsafe_b64encode(digest).decode().rstrip("=")
    return {"code_verifier": verifier, "code_challenge": challenge}
@pytest.fixture
async def registered_client(client: httpx.AsyncClient) -> dict[str, Any]:
    """Create and register a test client.

    Returns the registration response body (client_id, client_secret,
    redirect_uris, ...) for use by the authorize/token tests.
    """
    # Default client metadata
    client_metadata = {
        "redirect_uris": ["https://client.example.com/callback"],
        "token_endpoint_auth_method": "client_secret_post",
        "grant_types": ["authorization_code", "refresh_token"],
        "response_types": ["code"],
        "client_name": "Test Client",
    }

    response = await client.post("/register", json=client_metadata)
    assert response.status_code == 201, f"Failed to register client: {response.content}"

    client_info = response.json()
    return client_info
@pytest.mark.anyio
async def test_registration_error_handling(client: httpx.AsyncClient, oauth_provider: MockOAuthProvider):
    """A RegistrationError from the provider surfaces as a 400 JSON error body."""
    # Mock the register_client method to raise a registration error
    with unittest.mock.patch.object(
        oauth_provider,
        "register_client",
        side_effect=RegistrationError(
            error="invalid_redirect_uri",
            error_description="The redirect URI is invalid",
        ),
    ):
        # Prepare a client registration request
        client_data = {
            "redirect_uris": ["https://client.example.com/callback"],
            "token_endpoint_auth_method": "client_secret_post",
            "grant_types": ["authorization_code", "refresh_token"],
            "response_types": ["code"],
            "client_name": "Test Client",
        }

        # Send the registration request
        response = await client.post(
            "/register",
            json=client_data,
        )

        # Verify the response: error and error_description are passed through
        assert response.status_code == 400, response.content
        data = response.json()
        assert data["error"] == "invalid_redirect_uri"
        assert data["error_description"] == "The redirect URI is invalid"
@pytest.mark.anyio
async def test_authorize_error_handling(
    client: httpx.AsyncClient,
    oauth_provider: MockOAuthProvider,
    registered_client: dict[str, Any],
    pkce_challenge: dict[str, str],
):
    """An AuthorizeError is reported via redirect query params, not a JSON body."""
    # Mock the authorize method to raise an authorize error
    with unittest.mock.patch.object(
        oauth_provider,
        "authorize",
        side_effect=AuthorizeError(error="access_denied", error_description="The user denied the request"),
    ):
        # Register the client
        client_id = registered_client["client_id"]
        redirect_uri = registered_client["redirect_uris"][0]

        # Prepare an authorization request
        params = {
            "client_id": client_id,
            "redirect_uri": redirect_uri,
            "response_type": "code",
            "code_challenge": pkce_challenge["code_challenge"],
            "code_challenge_method": "S256",
            "state": "test_state",
        }

        # Send the authorization request
        response = await client.get("/authorize", params=params)

        # Verify the response is a redirect with error parameters
        assert response.status_code == 302
        redirect_url = response.headers["location"]
        parsed_url = urlparse(redirect_url)
        query_params = parse_qs(parsed_url.query)

        assert query_params["error"][0] == "access_denied"
        assert "error_description" in query_params
        # The opaque client state must be echoed back unchanged
        assert query_params["state"][0] == "test_state"
@pytest.mark.anyio
async def test_token_error_handling_auth_code(
    client: httpx.AsyncClient,
    oauth_provider: MockOAuthProvider,
    registered_client: dict[str, Any],
    pkce_challenge: dict[str, str],
):
    """A TokenError during code exchange surfaces as a 400 OAuth error body."""
    # Register the client and get an auth code
    client_id = registered_client["client_id"]
    client_secret = registered_client["client_secret"]
    redirect_uri = registered_client["redirect_uris"][0]

    # First get an authorization code (provider not mocked yet, so this succeeds)
    auth_response = await client.get(
        "/authorize",
        params={
            "client_id": client_id,
            "redirect_uri": redirect_uri,
            "response_type": "code",
            "code_challenge": pkce_challenge["code_challenge"],
            "code_challenge_method": "S256",
            "state": "test_state",
        },
    )

    redirect_url = auth_response.headers["location"]
    parsed_url = urlparse(redirect_url)
    query_params = parse_qs(parsed_url.query)
    code = query_params["code"][0]

    # Mock the exchange_authorization_code method to raise a token error
    with unittest.mock.patch.object(
        oauth_provider,
        "exchange_authorization_code",
        side_effect=TokenError(
            error="invalid_grant",
            error_description="The authorization code is invalid",
        ),
    ):
        # Try to exchange the code for tokens
        token_response = await client.post(
            "/token",
            data={
                "grant_type": "authorization_code",
                "code": code,
                "redirect_uri": redirect_uri,
                "client_id": client_id,
                "client_secret": client_secret,
                "code_verifier": pkce_challenge["code_verifier"],
            },
        )

        # Verify the response: error and error_description are passed through
        assert token_response.status_code == 400
        data = token_response.json()
        assert data["error"] == "invalid_grant"
        assert data["error_description"] == "The authorization code is invalid"
@pytest.mark.anyio
async def test_token_error_handling_refresh_token(
    client: httpx.AsyncClient,
    oauth_provider: MockOAuthProvider,
    registered_client: dict[str, Any],
    pkce_challenge: dict[str, str],
):
    """A TokenError during refresh surfaces as a 400 OAuth error body."""
    # Register the client and get tokens
    client_id = registered_client["client_id"]
    client_secret = registered_client["client_secret"]
    redirect_uri = registered_client["redirect_uris"][0]

    # First get an authorization code (full happy-path flow, provider not mocked)
    auth_response = await client.get(
        "/authorize",
        params={
            "client_id": client_id,
            "redirect_uri": redirect_uri,
            "response_type": "code",
            "code_challenge": pkce_challenge["code_challenge"],
            "code_challenge_method": "S256",
            "state": "test_state",
        },
    )
    assert auth_response.status_code == 302, auth_response.content

    redirect_url = auth_response.headers["location"]
    parsed_url = urlparse(redirect_url)
    query_params = parse_qs(parsed_url.query)
    code = query_params["code"][0]

    # Exchange the code for tokens
    token_response = await client.post(
        "/token",
        data={
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": redirect_uri,
            "client_id": client_id,
            "client_secret": client_secret,
            "code_verifier": pkce_challenge["code_verifier"],
        },
    )

    tokens = token_response.json()
    refresh_token = tokens["refresh_token"]

    # Mock the exchange_refresh_token method to raise a token error
    with unittest.mock.patch.object(
        oauth_provider,
        "exchange_refresh_token",
        side_effect=TokenError(
            error="invalid_scope",
            error_description="The requested scope is invalid",
        ),
    ):
        # Try to use the refresh token
        refresh_response = await client.post(
            "/token",
            data={
                "grant_type": "refresh_token",
                "refresh_token": refresh_token,
                "client_id": client_id,
                "client_secret": client_secret,
            },
        )

        # Verify the response: error and error_description are passed through
        assert refresh_response.status_code == 400
        data = refresh_response.json()
        assert data["error"] == "invalid_scope"
        assert data["error_description"] == "The requested scope is invalid"
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/server/auth/test_error_handling.py",
"license": "MIT License",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
modelcontextprotocol/python-sdk:tests/client/test_resource_cleanup.py | from typing import Any
from unittest.mock import patch
import anyio
import pytest
from pydantic import TypeAdapter
from mcp.shared.message import SessionMessage
from mcp.shared.session import BaseSession, RequestId, SendResultT
from mcp.types import ClientNotification, ClientRequest, ClientResult, EmptyResult, ErrorData, PingRequest
@pytest.mark.anyio
async def test_send_request_stream_cleanup():
    """Test that send_request properly cleans up streams when an exception occurs.

    This test mocks out most of the session functionality to focus on stream cleanup.
    """

    # Create a mock session with the minimal required functionality
    class TestSession(BaseSession[ClientRequest, ClientNotification, ClientResult, Any, Any]):
        async def _send_response(
            self, request_id: RequestId, response: SendResultT | ErrorData
        ) -> None:  # pragma: no cover
            pass

        @property
        def _receive_request_adapter(self) -> TypeAdapter[Any]:
            return TypeAdapter(object)  # pragma: no cover

        @property
        def _receive_notification_adapter(self) -> TypeAdapter[Any]:
            return TypeAdapter(object)  # pragma: no cover

    # Create streams (capacity 1 is plenty — nothing is actually delivered)
    write_stream_send, write_stream_receive = anyio.create_memory_object_stream[SessionMessage](1)
    read_stream_send, read_stream_receive = anyio.create_memory_object_stream[SessionMessage](1)

    # Create the session
    session = TestSession(read_stream_receive, write_stream_send)

    # Create a test request
    request = PingRequest()

    # Patch the _write_stream.send method to raise an exception,
    # simulating a transport failure mid-request
    async def mock_send(*args: Any, **kwargs: Any):
        raise RuntimeError("Simulated network error")

    # Record the response streams before the test
    initial_stream_count = len(session._response_streams)

    # Run the test with the patched method
    with patch.object(session._write_stream, "send", mock_send):
        with pytest.raises(RuntimeError):
            await session.send_request(request, EmptyResult)

    # Verify that no response streams were leaked: the failed request's
    # response stream must have been removed again
    assert len(session._response_streams) == initial_stream_count, (
        f"Expected {initial_stream_count} response streams after request, but found {len(session._response_streams)}"
    )

    # Clean up
    await write_stream_send.aclose()
    await write_stream_receive.aclose()
    await read_stream_send.aclose()
    await read_stream_receive.aclose()
| {
"repo_id": "modelcontextprotocol/python-sdk",
"file_path": "tests/client/test_resource_cleanup.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/ipam/ui/panels.py | from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from netbox.ui import actions, panels
class FHRPGroupAssignmentsPanel(panels.ObjectPanel):
    """
    A panel which lists all FHRP group assignments for a given object.
    """
    template_name = 'ipam/panels/fhrp_groups.html'
    title = _('FHRP Groups')

    # Action URL parameters are resolved lazily (callables taking the render
    # context) because the target object is only known at render time.
    actions = [
        # Create a new FHRP group; return_url sends the user back to the
        # assignment form pre-populated with this object's interface.
        actions.AddObject(
            'ipam.FHRPGroup',
            url_params={
                'return_url': lambda ctx: reverse(
                    'ipam:fhrpgroupassignment_add',
                    query={
                        'interface_type': ContentType.objects.get_for_model(ctx['object']).pk,
                        'interface_id': ctx['object'].pk,
                    },
                ),
            },
            label=_('Create Group'),
        ),
        # Assign an existing FHRP group directly to this object.
        actions.AddObject(
            'ipam.FHRPGroupAssignment',
            url_params={
                'interface_type': lambda ctx: ContentType.objects.get_for_model(ctx['object']).pk,
                'interface_id': lambda ctx: ctx['object'].pk,
            },
            label=_('Assign Group'),
        ),
    ]
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/ipam/ui/panels.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/tests/test_tables.py | from django.test import RequestFactory, TestCase, tag
from users.models import Token
from users.tables import TokenTable
class TokenTableTest(TestCase):

    @tag('regression')
    def test_every_orderable_field_does_not_throw_exception(self):
        """Ordering the token table by any orderable column (both directions) must not raise."""
        queryset = Token.objects.all()
        excluded = {'actions'}
        sortable = [
            column.name
            for column in TokenTable(queryset).columns
            if column.orderable and column.name not in excluded
        ]
        dummy_request = RequestFactory().get("/")
        for column_name in sortable:
            for direction in ('-', ''):
                with self.subTest(col=column_name, direction=direction):
                    # Build a fresh table per ordering to avoid cross-test state
                    table = TokenTable(queryset)
                    table.order_by = f'{direction}{column_name}'
                    table.as_html(dummy_request)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/tests/test_tables.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/virtualization/ui/panels.py | from django.utils.translation import gettext_lazy as _
from netbox.ui import attrs, panels
class ClusterPanel(panels.ObjectAttributesPanel):
    """Key attributes of a virtualization cluster."""
    name = attrs.TextAttr('name')
    type = attrs.RelatedObjectAttr('type', linkify=True)
    status = attrs.ChoiceAttr('status')
    description = attrs.TextAttr('description')
    group = attrs.RelatedObjectAttr('group', linkify=True)
    # Tenant is rendered grouped under its tenant group
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    scope = attrs.GenericForeignKeyAttr('scope', linkify=True)
class VirtualMachinePanel(panels.ObjectAttributesPanel):
    """Core attributes of a virtual machine."""
    name = attrs.TextAttr('name')
    status = attrs.ChoiceAttr('status')
    start_on_boot = attrs.ChoiceAttr('start_on_boot')
    role = attrs.RelatedObjectAttr('role', linkify=True)
    platform = attrs.NestedObjectAttr('platform', linkify=True, max_depth=3)
    description = attrs.TextAttr('description')
    serial = attrs.TextAttr('serial', label=_('Serial number'), style='font-monospace', copy_button=True)
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    config_template = attrs.RelatedObjectAttr('config_template', linkify=True)
    # Both primary IPs share one template for consistent address rendering
    primary_ip4 = attrs.TemplatedAttr(
        'primary_ip4',
        label=_('Primary IPv4'),
        template_name='virtualization/virtualmachine/attrs/ipaddress.html',
    )
    primary_ip6 = attrs.TemplatedAttr(
        'primary_ip6',
        label=_('Primary IPv6'),
        template_name='virtualization/virtualmachine/attrs/ipaddress.html',
    )
class VirtualMachineClusterPanel(panels.ObjectAttributesPanel):
    """Cluster/host placement details for a virtual machine."""
    title = _('Cluster')
    site = attrs.RelatedObjectAttr('site', linkify=True, grouped_by='group')
    cluster = attrs.RelatedObjectAttr('cluster', linkify=True)
    cluster_type = attrs.RelatedObjectAttr('cluster.type', linkify=True)
    device = attrs.RelatedObjectAttr('device', linkify=True)
class VirtualDiskPanel(panels.ObjectAttributesPanel):
    """Attributes of a virtual disk."""
    virtual_machine = attrs.RelatedObjectAttr('virtual_machine', linkify=True, label=_('Virtual Machine'))
    name = attrs.TextAttr('name')
    # Size uses a template to render the value with units
    size = attrs.TemplatedAttr('size', template_name='virtualization/virtualdisk/attrs/size.html')
    description = attrs.TextAttr('description')
class VMInterfacePanel(panels.ObjectAttributesPanel):
    """General attributes of a VM interface."""
    virtual_machine = attrs.RelatedObjectAttr('virtual_machine', linkify=True, label=_('Virtual Machine'))
    name = attrs.TextAttr('name')
    enabled = attrs.BooleanAttr('enabled')
    parent = attrs.RelatedObjectAttr('parent_interface', linkify=True)
    bridge = attrs.RelatedObjectAttr('bridge', linkify=True)
    description = attrs.TextAttr('description')
    mtu = attrs.TextAttr('mtu', label=_('MTU'))
    mode = attrs.ChoiceAttr('mode', label=_('802.1Q Mode'))
    qinq_svlan = attrs.RelatedObjectAttr('qinq_svlan', linkify=True, label=_('Q-in-Q SVLAN'))
    # Links through the termination to the tunnel itself
    tunnel_termination = attrs.RelatedObjectAttr('tunnel_termination.tunnel', linkify=True, label=_('Tunnel'))
class VMInterfaceAddressingPanel(panels.ObjectAttributesPanel):
    """Addressing attributes (MAC, VRF, VLAN translation) of a VM interface."""
    title = _('Addressing')
    primary_mac_address = attrs.TextAttr(
        'primary_mac_address', label=_('MAC Address'), style='font-monospace', copy_button=True
    )
    vrf = attrs.RelatedObjectAttr('vrf', linkify=True, label=_('VRF'))
    vlan_translation_policy = attrs.RelatedObjectAttr(
        'vlan_translation_policy', linkify=True, label=_('VLAN Translation')
    )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/virtualization/ui/panels.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/core/tests/test_data_backends.py | from unittest import skipIf
from unittest.mock import patch
from django.test import TestCase
from core.data_backends import url_has_embedded_credentials
try:
import dulwich # noqa: F401
DULWICH_AVAILABLE = True
except ImportError:
DULWICH_AVAILABLE = False
class URLEmbeddedCredentialsTests(TestCase):
    """Exercise url_has_embedded_credentials() against a variety of Git remote URLs."""

    def test_url_with_embedded_username(self):
        result = url_has_embedded_credentials('https://myuser@bitbucket.org/workspace/repo.git')
        self.assertTrue(result)

    def test_url_without_embedded_username(self):
        result = url_has_embedded_credentials('https://bitbucket.org/workspace/repo.git')
        self.assertFalse(result)

    def test_url_with_username_and_password(self):
        result = url_has_embedded_credentials('https://user:pass@bitbucket.org/workspace/repo.git')
        self.assertTrue(result)

    def test_various_providers_with_embedded_username(self):
        candidates = (
            'https://user@bitbucket.org/workspace/repo.git',
            'https://user@github.com/owner/repo.git',
            'https://deploy-key@gitlab.com/group/project.git',
            'http://user@internal-git.example.com/repo.git',
        )
        for candidate in candidates:
            with self.subTest(url=candidate):
                self.assertTrue(url_has_embedded_credentials(candidate))

    def test_various_providers_without_embedded_username(self):
        """Various Git providers without embedded usernames."""
        candidates = (
            'https://bitbucket.org/workspace/repo.git',
            'https://github.com/owner/repo.git',
            'https://gitlab.com/group/project.git',
            'http://internal-git.example.com/repo.git',
        )
        for candidate in candidates:
            with self.subTest(url=candidate):
                self.assertFalse(url_has_embedded_credentials(candidate))

    def test_ssh_url(self):
        # git@host:path format doesn't parse as having a username in the traditional sense
        self.assertFalse(url_has_embedded_credentials('git@github.com:owner/repo.git'))

    def test_file_url(self):
        self.assertFalse(url_has_embedded_credentials('file:///path/to/repo'))
@skipIf(not DULWICH_AVAILABLE, "dulwich is not installed")
class GitBackendCredentialIntegrationTests(TestCase):
    """
    Integration tests that verify GitBackend correctly applies credential logic.

    These tests require dulwich to be installed and verify the full integration
    of the credential handling in GitBackend.fetch().
    """

    def _get_clone_kwargs(self, url, **params):
        """
        Run GitBackend.fetch() with dulwich's clone() patched out and return the
        keyword arguments that would have been passed to clone(). Returns an
        empty dict if clone() was never invoked.
        """
        from core.data_backends import GitBackend

        backend = GitBackend(url=url, **params)
        # Patch clone (and its output stream) so no network I/O takes place
        with patch('dulwich.porcelain.clone') as mock_clone, \
                patch('dulwich.porcelain.NoneStream'):
            try:
                with backend.fetch():
                    pass
            except Exception:
                # Failures after the mocked clone are irrelevant; only the
                # arguments passed to clone() matter for these tests.
                pass
        if mock_clone.called:
            return mock_clone.call_args.kwargs
        return {}

    def test_url_with_embedded_username_skips_explicit_credentials(self):
        # URL-embedded credentials win: explicit username/password must be dropped
        kwargs = self._get_clone_kwargs(
            url='https://myuser@bitbucket.org/workspace/repo.git',
            username='myuser',
            password='my-api-key'
        )
        self.assertEqual(kwargs.get('username'), None)
        self.assertEqual(kwargs.get('password'), None)

    def test_url_without_embedded_username_passes_explicit_credentials(self):
        # No credentials in the URL: explicit ones are forwarded to clone()
        kwargs = self._get_clone_kwargs(
            url='https://bitbucket.org/workspace/repo.git',
            username='myuser',
            password='my-api-key'
        )
        self.assertEqual(kwargs.get('username'), 'myuser')
        self.assertEqual(kwargs.get('password'), 'my-api-key')

    def test_url_with_embedded_username_no_explicit_credentials(self):
        kwargs = self._get_clone_kwargs(
            url='https://myuser@bitbucket.org/workspace/repo.git'
        )
        self.assertEqual(kwargs.get('username'), None)
        self.assertEqual(kwargs.get('password'), None)

    def test_public_repo_no_credentials(self):
        kwargs = self._get_clone_kwargs(
            url='https://github.com/public/repo.git'
        )
        self.assertEqual(kwargs.get('username'), None)
        self.assertEqual(kwargs.get('password'), None)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/tests/test_data_backends.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/graphql/pagination.py | import strawberry
from strawberry.types.unset import UNSET
from strawberry_django.pagination import _QS, apply
__all__ = (
'OffsetPaginationInfo',
'OffsetPaginationInput',
'apply_pagination',
)
@strawberry.type
class OffsetPaginationInfo:
    # Standard offset/limit pagination fields, extended with an optional
    # `start` cursor (a primary-key lower bound — see apply_pagination()).
    offset: int = 0
    limit: int | None = UNSET
    start: int | None = UNSET
@strawberry.input
class OffsetPaginationInput(OffsetPaginationInfo):
    """
    Customized implementation of OffsetPaginationInput to support cursor-based pagination.

    Inherits `offset`, `limit`, and the cursor field `start` from OffsetPaginationInfo.
    """
    pass
def apply_pagination(
    self,
    queryset: _QS,
    pagination: OffsetPaginationInput | None = None,
    *,
    related_field_id: str | None = None,
) -> _QS:
    """
    Replacement for the `apply_pagination()` method on StrawberryDjangoField to support cursor-based pagination.

    When `pagination.start` is set, the queryset is restricted to records whose
    primary key is >= `start` (ordered by pk), and `offset` must not be used.
    """
    cursor_requested = pagination is not None and pagination.start not in (None, UNSET)
    if cursor_requested:
        # `start` and `offset` are mutually exclusive pagination modes
        if pagination.offset:
            raise ValueError('Cannot specify both `start` and `offset` in pagination.')
        if pagination.start < 0:
            raise ValueError('`start` must be greater than or equal to zero.')
        # Restrict to records at or beyond the cursor; force ordering by primary
        # key so pagination remains consistent across all records.
        queryset = queryset.order_by('pk').filter(pk__gte=pagination.start)
        # `offset` is meaningless once the cursor has been applied
        pagination.offset = 0
    return apply(pagination, queryset, related_field_id=related_field_id)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/graphql/pagination.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/ipam/tests/test_tables.py | from django.test import RequestFactory, TestCase
from netaddr import IPNetwork
from ipam.models import IPAddress, IPRange, Prefix
from ipam.tables import AnnotatedIPAddressTable
from ipam.utils import annotate_ip_space
class AnnotatedIPAddressTableTest(TestCase):
    """
    Tests for AnnotatedIPAddressTable rendering and for annotate_ip_space(),
    covering available-space calculations across IPv4/IPv6 prefix types.
    """

    @classmethod
    def setUpTestData(cls):
        # A /24 containing one assigned address and one IP range
        cls.prefix = Prefix.objects.create(
            prefix=IPNetwork('10.1.1.0/24'),
            status='active'
        )
        cls.ip_address = IPAddress.objects.create(
            address='10.1.1.1/24',
            status='active'
        )
        cls.ip_range = IPRange.objects.create(
            start_address=IPNetwork('10.1.1.2/24'),
            end_address=IPNetwork('10.1.1.10/24'),
            status='active'
        )

    def test_ipaddress_has_checkbox_iprange_does_not(self):
        """Only IPAddress rows should render a bulk-selection checkbox."""
        data = annotate_ip_space(self.prefix)
        table = AnnotatedIPAddressTable(data, orderable=False)
        table.columns.show('pk')
        request = RequestFactory().get('/')
        html = table.as_html(request)
        ipaddress_checkbox_count = html.count(f'name="pk" value="{self.ip_address.pk}"')
        self.assertEqual(ipaddress_checkbox_count, 1)
        iprange_checkbox_count = html.count(f'name="pk" value="{self.ip_range.pk}"')
        self.assertEqual(iprange_checkbox_count, 0)

    def test_annotate_ip_space_ipv4_non_pool_excludes_network_and_broadcast(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('192.0.2.0/29'),  # 8 addresses total
            status='active',
            is_pool=False,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # /29 non-pool: exclude .0 (network) and .7 (broadcast)
        self.assertEqual(available.first_ip, '192.0.2.1/29')
        self.assertEqual(available.size, 6)

    def test_annotate_ip_space_ipv4_pool_includes_network_and_broadcast(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('192.0.2.8/29'),  # 8 addresses total
            status='active',
            is_pool=True,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # Pool: all addresses are usable, including network/broadcast
        self.assertEqual(available.first_ip, '192.0.2.8/29')
        self.assertEqual(available.size, 8)

    def test_annotate_ip_space_ipv4_31_includes_all_ips(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('192.0.2.16/31'),  # 2 addresses total
            status='active',
            is_pool=False,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # /31: fully usable
        self.assertEqual(available.first_ip, '192.0.2.16/31')
        self.assertEqual(available.size, 2)

    def test_annotate_ip_space_ipv4_32_includes_single_ip(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('192.0.2.100/32'),  # 1 address total
            status='active',
            is_pool=False,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # /32: single usable address
        self.assertEqual(available.first_ip, '192.0.2.100/32')
        self.assertEqual(available.size, 1)

    def test_annotate_ip_space_ipv6_non_pool_excludes_anycast_first_ip(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('2001:db8::/126'),  # 4 addresses total
            status='active',
            is_pool=False,
        )
        data = annotate_ip_space(prefix)
        # No child records -> expect one AvailableIPSpace entry
        self.assertEqual(len(data), 1)
        available = data[0]
        # For IPv6 non-pool prefixes (except /127-/128), the first address is reserved (subnet-router anycast)
        self.assertEqual(available.first_ip, '2001:db8::1/126')
        self.assertEqual(available.size, 3)  # 4 total - 1 reserved anycast

    def test_annotate_ip_space_ipv6_127_includes_all_ips(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('2001:db8::/127'),  # 2 addresses total
            status='active',
            is_pool=False,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # /127 is fully usable (no anycast exclusion)
        self.assertEqual(available.first_ip, '2001:db8::/127')
        self.assertEqual(available.size, 2)

    def test_annotate_ip_space_ipv6_128_includes_single_ip(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('2001:db8::1/128'),  # 1 address total
            status='active',
            is_pool=False,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # /128 is fully usable (single host address)
        self.assertEqual(available.first_ip, '2001:db8::1/128')
        self.assertEqual(available.size, 1)

    def test_annotate_ip_space_ipv6_pool_includes_anycast_first_ip(self):
        prefix = Prefix.objects.create(
            prefix=IPNetwork('2001:db8:1::/126'),  # 4 addresses total
            status='active',
            is_pool=True,
        )
        data = annotate_ip_space(prefix)
        self.assertEqual(len(data), 1)
        available = data[0]
        # Pools are fully usable
        self.assertEqual(available.first_ip, '2001:db8:1::/126')
        self.assertEqual(available.size, 4)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/ipam/tests/test_tables.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/core/graphql/enums.py | import strawberry
from core.choices import *
__all__ = (
    'DataSourceStatusEnum',
    'ObjectChangeActionEnum',
)

# GraphQL enumerations derived from the corresponding ChoiceSet classes.
# The `prefix` argument controls member naming in ChoiceSet.as_enum().
DataSourceStatusEnum = strawberry.enum(DataSourceStatusChoices.as_enum(prefix='status'))
ObjectChangeActionEnum = strawberry.enum(ObjectChangeActionChoices.as_enum(prefix='action'))
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/graphql/enums.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/dcim/cable_profiles.py | from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from dcim.choices import CableEndChoices
from dcim.models import CableTermination
class BaseCableProfile:
    """Base class for representing a cable profile."""

    # Mappings of connectors to the number of positions presented by each, at either end of the cable. For example, a
    # 12-strand MPO fiber cable would have one connector at either end with six positions (six bidirectional fiber
    # pairs).
    a_connectors = {}
    b_connectors = {}

    # Defines a mapping of A/B connector & position pairings. If not defined, all positions are presumed to be
    # symmetrical (i.e. 1:1 on side A maps to 1:1 on side B). If defined, it must be constructed as a dictionary of
    # two-item tuples, e.g. {(1, 1): (1, 1)}.
    _mapping = None

    def clean(self, cable):
        """
        Validate that the number of terminations on each side of the cable does not exceed
        the number of connectors defined by this profile.
        """
        # Enforce maximum terminations limits
        a_terminations_count = len(cable.a_terminations)
        b_terminations_count = len(cable.b_terminations)
        max_a_terminations = len(self.a_connectors)
        max_b_terminations = len(self.b_connectors)
        if a_terminations_count > max_a_terminations:
            raise ValidationError({
                'a_terminations': _(
                    'A side of cable has {count} terminations but only {max} are permitted for profile {profile}'
                ).format(
                    count=a_terminations_count,
                    profile=cable.get_profile_display(),
                    max=max_a_terminations,
                )
            })
        if b_terminations_count > max_b_terminations:
            raise ValidationError({
                'b_terminations': _(
                    'B side of cable has {count} terminations but only {max} are permitted for profile {profile}'
                ).format(
                    count=b_terminations_count,
                    profile=cable.get_profile_display(),
                    max=max_b_terminations,
                )
            })

    def get_mapped_position(self, side, connector, position):
        """
        Return the mapped far-end connector & position for a given cable end and the local connector & position.

        May return None if a mapping table is defined but contains no entry for the given pair.
        The base implementation ignores `side`; side-specific subclasses may override.
        """
        # By default, assume all positions are symmetrical.
        if self._mapping:
            return self._mapping.get((connector, position))
        return connector, position

    def get_peer_termination(self, termination, position):
        """
        Given a terminating object, return the peer terminating object (if any) on the opposite end of the cable.

        Returns a (termination, position) tuple, or (None, None) if the far-end position is unoccupied.
        Raises ValueError if the local connector/position cannot be mapped to the far end.
        """
        try:
            connector, position = self.get_mapped_position(
                termination.cable_end,
                termination.cable_connector,
                position
            )
        except TypeError:
            # get_mapped_position() returned None (no mapping entry), which fails tuple unpacking
            raise ValueError(
                f"Could not map connector {termination.cable_connector} position {position} on side "
                f"{termination.cable_end}"
            )
        try:
            # Look up the far-end termination occupying the mapped connector/position
            ct = CableTermination.objects.get(
                cable=termination.cable,
                cable_end=termination.opposite_cable_end,
                connector=connector,
                positions__contains=[position],
            )
            return ct.termination, position
        except CableTermination.DoesNotExist:
            return None, None

    @staticmethod
    def get_position_list(n):
        """Return a list of integers from 1 to n, inclusive."""
        return list(range(1, n + 1))
# Profile naming:
# - Single: One connector per side, with one or more positions
# - Trunk: Two or more connectors per side, with one or more positions per connector
# - Breakout: One or more connectors on the A side which map to a greater number of B side connectors
# - Shuffle: A cable with nonlinear position mappings between sides
class Single1C1PCableProfile(BaseCableProfile):
    """One connector per side, with one position."""
    a_connectors = {
        1: 1,
    }
    b_connectors = a_connectors


class Single1C2PCableProfile(BaseCableProfile):
    """One connector per side, with two positions."""
    a_connectors = {
        1: 2,
    }
    b_connectors = a_connectors


class Single1C4PCableProfile(BaseCableProfile):
    """One connector per side, with four positions."""
    a_connectors = {
        1: 4,
    }
    b_connectors = a_connectors


class Single1C6PCableProfile(BaseCableProfile):
    """One connector per side, with six positions."""
    a_connectors = {
        1: 6,
    }
    b_connectors = a_connectors


class Single1C8PCableProfile(BaseCableProfile):
    """One connector per side, with eight positions."""
    a_connectors = {
        1: 8,
    }
    b_connectors = a_connectors


class Single1C12PCableProfile(BaseCableProfile):
    """One connector per side, with twelve positions."""
    a_connectors = {
        1: 12,
    }
    b_connectors = a_connectors


class Single1C16PCableProfile(BaseCableProfile):
    """One connector per side, with sixteen positions."""
    a_connectors = {
        1: 16,
    }
    b_connectors = a_connectors
class Trunk2C1PCableProfile(BaseCableProfile):
    """Two connectors per side, with one position each."""
    a_connectors = {
        1: 1,
        2: 1,
    }
    b_connectors = a_connectors


class Trunk2C2PCableProfile(BaseCableProfile):
    """Two connectors per side, with two positions each."""
    a_connectors = {
        1: 2,
        2: 2,
    }
    b_connectors = a_connectors


class Trunk2C4PCableProfile(BaseCableProfile):
    """Two connectors per side, with four positions each."""
    a_connectors = {
        1: 4,
        2: 4,
    }
    b_connectors = a_connectors


class Trunk2C6PCableProfile(BaseCableProfile):
    """Two connectors per side, with six positions each."""
    a_connectors = {
        1: 6,
        2: 6,
    }
    b_connectors = a_connectors


class Trunk2C8PCableProfile(BaseCableProfile):
    """Two connectors per side, with eight positions each."""
    a_connectors = {
        1: 8,
        2: 8,
    }
    b_connectors = a_connectors


class Trunk2C12PCableProfile(BaseCableProfile):
    """Two connectors per side, with twelve positions each."""
    a_connectors = {
        1: 12,
        2: 12,
    }
    b_connectors = a_connectors


class Trunk4C1PCableProfile(BaseCableProfile):
    """Four connectors per side, with one position each."""
    a_connectors = {
        1: 1,
        2: 1,
        3: 1,
        4: 1,
    }
    b_connectors = a_connectors


class Trunk4C2PCableProfile(BaseCableProfile):
    """Four connectors per side, with two positions each."""
    a_connectors = {
        1: 2,
        2: 2,
        3: 2,
        4: 2,
    }
    b_connectors = a_connectors


class Trunk4C4PCableProfile(BaseCableProfile):
    """Four connectors per side, with four positions each."""
    a_connectors = {
        1: 4,
        2: 4,
        3: 4,
        4: 4,
    }
    b_connectors = a_connectors


class Trunk4C6PCableProfile(BaseCableProfile):
    """Four connectors per side, with six positions each."""
    a_connectors = {
        1: 6,
        2: 6,
        3: 6,
        4: 6,
    }
    b_connectors = a_connectors


class Trunk4C8PCableProfile(BaseCableProfile):
    """Four connectors per side, with eight positions each."""
    a_connectors = {
        1: 8,
        2: 8,
        3: 8,
        4: 8,
    }
    b_connectors = a_connectors


class Trunk8C4PCableProfile(BaseCableProfile):
    """Eight connectors per side, with four positions each."""
    a_connectors = {
        1: 4,
        2: 4,
        3: 4,
        4: 4,
        5: 4,
        6: 4,
        7: 4,
        8: 4,
    }
    b_connectors = a_connectors
class Breakout1C4Px4C1PCableProfile(BaseCableProfile):
    """One four-position connector on side A breaking out to four single-position connectors on side B."""
    a_connectors = {
        1: 4,
    }
    b_connectors = {
        1: 1,
        2: 1,
        3: 1,
        4: 1,
    }
    # One table serves lookups from either side, so it contains both the A->B
    # entries and their B->A inverses. (1, 1) is symmetric, so one entry suffices.
    _mapping = {
        (1, 1): (1, 1),
        (1, 2): (2, 1),
        (1, 3): (3, 1),
        (1, 4): (4, 1),
        (2, 1): (1, 2),
        (3, 1): (1, 3),
        (4, 1): (1, 4),
    }


class Breakout1C6Px6C1PCableProfile(BaseCableProfile):
    """One six-position connector on side A breaking out to six single-position connectors on side B."""
    a_connectors = {
        1: 6,
    }
    b_connectors = {
        1: 1,
        2: 1,
        3: 1,
        4: 1,
        5: 1,
        6: 1,
    }
    # Bidirectional table: A->B entries plus their B->A inverses
    _mapping = {
        (1, 1): (1, 1),
        (1, 2): (2, 1),
        (1, 3): (3, 1),
        (1, 4): (4, 1),
        (1, 5): (5, 1),
        (1, 6): (6, 1),
        (2, 1): (1, 2),
        (3, 1): (1, 3),
        (4, 1): (1, 4),
        (5, 1): (1, 5),
        (6, 1): (1, 6),
    }
class Trunk2C4PShuffleCableProfile(BaseCableProfile):
    """Two four-position connectors per side with positions shuffled across connectors."""
    a_connectors = {
        1: 4,
        2: 4,
    }
    b_connectors = a_connectors
    # This mapping is its own inverse (an involution), so the same table
    # resolves lookups from either side of the cable.
    _mapping = {
        (1, 1): (1, 1),
        (1, 2): (1, 2),
        (1, 3): (2, 1),
        (1, 4): (2, 2),
        (2, 1): (1, 3),
        (2, 2): (1, 4),
        (2, 3): (2, 3),
        (2, 4): (2, 4),
    }


class Trunk4C4PShuffleCableProfile(BaseCableProfile):
    """Four four-position connectors per side with positions shuffled across connectors."""
    a_connectors = {
        1: 4,
        2: 4,
        3: 4,
        4: 4,
    }
    b_connectors = a_connectors
    # Connector/position transpose: (c, p) maps to (p, c). The table is its own
    # inverse, so it serves lookups from either side.
    _mapping = {
        (1, 1): (1, 1),
        (1, 2): (2, 1),
        (1, 3): (3, 1),
        (1, 4): (4, 1),
        (2, 1): (1, 2),
        (2, 2): (2, 2),
        (2, 3): (3, 2),
        (2, 4): (4, 2),
        (3, 1): (1, 3),
        (3, 2): (2, 3),
        (3, 3): (3, 3),
        (3, 4): (4, 3),
        (4, 1): (1, 4),
        (4, 2): (2, 4),
        (4, 3): (3, 4),
        (4, 4): (4, 4),
    }
class Breakout2C4Px8C1PShuffleCableProfile(BaseCableProfile):
    """
    Two four-position connectors on side A breaking out to eight single-position connectors
    on side B, with a shuffled position assignment. Because the mapping is not symmetric,
    separate per-side tables are maintained and get_mapped_position() dispatches on `side`.
    """
    a_connectors = {
        1: 4,
        2: 4,
    }
    b_connectors = {
        1: 1,
        2: 1,
        3: 1,
        4: 1,
        5: 1,
        6: 1,
        7: 1,
        8: 1,
    }
    # A-side (connector, position) -> B-side (connector, position)
    _a_mapping = {
        (1, 1): (1, 1),
        (1, 2): (2, 1),
        (1, 3): (5, 1),
        (1, 4): (6, 1),
        (2, 1): (3, 1),
        (2, 2): (4, 1),
        (2, 3): (7, 1),
        (2, 4): (8, 1),
    }
    # B-side (connector, position) -> A-side (connector, position); inverse of _a_mapping
    _b_mapping = {
        (1, 1): (1, 1),
        (2, 1): (1, 2),
        (3, 1): (2, 1),
        (4, 1): (2, 2),
        (5, 1): (1, 3),
        (6, 1): (1, 4),
        (7, 1): (2, 3),
        (8, 1): (2, 4),
    }

    def get_mapped_position(self, side, connector, position):
        """Resolve the far-end connector & position using the table for the originating side."""
        # Normalize case before comparing against the A-side choice value
        if side.upper() == CableEndChoices.SIDE_A:
            return self._a_mapping.get((connector, position))
        return self._b_mapping.get((connector, position))
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/dcim/cable_profiles.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/dcim/models/base.py | from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from dcim.constants import PORT_POSITION_MAX, PORT_POSITION_MIN
__all__ = (
'PortMappingBase',
)
class PortMappingBase(models.Model):
    """
    Base class for PortMapping and PortTemplateMapping
    """
    # Position on the front port occupied by this mapping
    front_port_position = models.PositiveSmallIntegerField(
        default=1,
        validators=(
            MinValueValidator(PORT_POSITION_MIN),
            MaxValueValidator(PORT_POSITION_MAX),
        ),
    )
    # Position on the rear port occupied by this mapping
    rear_port_position = models.PositiveSmallIntegerField(
        default=1,
        validators=(
            MinValueValidator(PORT_POSITION_MIN),
            MaxValueValidator(PORT_POSITION_MAX),
        ),
    )

    # Flag marking this model as internal to NetBox
    _netbox_private = True

    class Meta:
        abstract = True
        # Each position on a given port may be mapped at most once
        constraints = (
            models.UniqueConstraint(
                fields=('front_port', 'front_port_position'),
                name='%(app_label)s_%(class)s_unique_front_port_position'
            ),
            models.UniqueConstraint(
                fields=('rear_port', 'rear_port_position'),
                name='%(app_label)s_%(class)s_unique_rear_port_position'
            ),
        )

    def clean(self):
        super().clean()

        # Validate rear port position
        # NOTE(review): only the rear port position is validated against its port's
        # capacity here — confirm whether front_port_position needs a parallel check.
        if self.rear_port_position > self.rear_port.positions:
            raise ValidationError({
                "rear_port_position": _(
                    "Invalid rear port position ({rear_port_position}): Rear port {name} has only {positions} "
                    "positions."
                ).format(
                    rear_port_position=self.rear_port_position,
                    name=self.rear_port.name,
                    positions=self.rear_port.positions
                )
            })
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/dcim/models/base.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/dcim/tests/test_cablepaths2.py | from unittest import skip
from circuits.models import CircuitTermination
from dcim.choices import CableProfileChoices
from dcim.models import *
from dcim.svg import CableTraceSVG
from dcim.tests.utils import CablePathTestCase
class CablePathTests(CablePathTestCase):
"""
Test the creation of CablePaths for Cables with different profiles applied.
Tests are numbered as follows:
1XX: Test direct connections using each profile
2XX: Topology tests replicated from the legacy test case and adapted to use profiles
"""
    def test_101_cable_profile_single_1c1p(self):
        """
        [IF1] --C1-- [IF2]

        Cable profile: Single connector, single position
        """
        interfaces = [
            Interface.objects.create(device=self.device, name='Interface 1'),
            Interface.objects.create(device=self.device, name='Interface 2'),
        ]

        # Create cable 1
        cable1 = Cable(
            profile=CableProfileChoices.SINGLE_1C1P,
            a_terminations=[interfaces[0]],
            b_terminations=[interfaces[1]],
        )
        cable1.clean()
        cable1.save()

        # A complete, active path should exist in each direction
        path1 = self.assertPathExists(
            (interfaces[0], cable1, interfaces[1]),
            is_complete=True,
            is_active=True
        )
        path2 = self.assertPathExists(
            (interfaces[1], cable1, interfaces[0]),
            is_complete=True,
            is_active=True
        )
        self.assertEqual(CablePath.objects.count(), 2)
        interfaces[0].refresh_from_db()
        interfaces[1].refresh_from_db()
        self.assertPathIsSet(interfaces[0], path1)
        self.assertPathIsSet(interfaces[1], path2)
        # Both terminations occupy connector 1, position 1
        self.assertEqual(interfaces[0].cable_connector, 1)
        self.assertEqual(interfaces[0].cable_positions, [1])
        self.assertEqual(interfaces[1].cable_connector, 1)
        self.assertEqual(interfaces[1].cable_positions, [1])

        # Test SVG generation
        CableTraceSVG(interfaces[0]).render()

        # Delete cable 1
        cable1.delete()

        # Check that all CablePaths have been deleted
        self.assertEqual(CablePath.objects.count(), 0)
    def test_102_cable_profile_single_1c2p(self):
        """
        [IF1] --C1-- [FP1][RP1] --C3-- [RP2][FP3] --C4-- [IF3]
        [IF2] --C2-- [FP2]                  [FP4] --C5-- [IF4]

        Cable profile: Single connector, multiple positions
        """
        interfaces = [
            Interface.objects.create(device=self.device, name='Interface 1'),
            Interface.objects.create(device=self.device, name='Interface 2'),
            Interface.objects.create(device=self.device, name='Interface 3'),
            Interface.objects.create(device=self.device, name='Interface 4'),
        ]
        rear_ports = [
            RearPort.objects.create(device=self.device, name='Rear Port 1', positions=2),
            RearPort.objects.create(device=self.device, name='Rear Port 2', positions=2),
        ]
        front_ports = [
            FrontPort.objects.create(device=self.device, name='Front Port 1'),
            FrontPort.objects.create(device=self.device, name='Front Port 2'),
            FrontPort.objects.create(device=self.device, name='Front Port 3'),
            FrontPort.objects.create(device=self.device, name='Front Port 4'),
        ]
        # Map two front ports onto each rear port (one per rear position)
        PortMapping.objects.bulk_create([
            PortMapping(
                device=self.device,
                front_port=front_ports[0],
                front_port_position=1,
                rear_port=rear_ports[0],
                rear_port_position=1,
            ),
            PortMapping(
                device=self.device,
                front_port=front_ports[1],
                front_port_position=1,
                rear_port=rear_ports[0],
                rear_port_position=2,
            ),
            PortMapping(
                device=self.device,
                front_port=front_ports[2],
                front_port_position=1,
                rear_port=rear_ports[1],
                rear_port_position=1,
            ),
            PortMapping(
                device=self.device,
                front_port=front_ports[3],
                front_port_position=1,
                rear_port=rear_ports[1],
                rear_port_position=2,
            ),
        ])

        # Create cables
        cable1 = Cable(
            a_terminations=[interfaces[0]],
            b_terminations=[front_ports[0]],
        )
        cable1.clean()
        cable1.save()
        cable2 = Cable(
            a_terminations=[interfaces[1]],
            b_terminations=[front_ports[1]],
        )
        cable2.clean()
        cable2.save()
        # The trunk cable carries both positions between the rear ports
        cable3 = Cable(
            profile=CableProfileChoices.SINGLE_1C2P,
            a_terminations=[rear_ports[0]],
            b_terminations=[rear_ports[1]],
        )
        cable3.clean()
        cable3.save()
        cable4 = Cable(
            a_terminations=[interfaces[2]],
            b_terminations=[front_ports[2]],
        )
        cable4.clean()
        cable4.save()
        cable5 = Cable(
            a_terminations=[interfaces[3]],
            b_terminations=[front_ports[3]],
        )
        cable5.clean()
        cable5.save()

        # Each interface pair should be joined end-to-end through the trunk cable
        path1 = self.assertPathExists(
            (
                interfaces[0], cable1, front_ports[0], rear_ports[0], cable3, rear_ports[1], front_ports[2], cable4,
                interfaces[2],
            ),
            is_complete=True,
            is_active=True
        )
        path2 = self.assertPathExists(
            (
                interfaces[1], cable2, front_ports[1], rear_ports[0], cable3, rear_ports[1], front_ports[3], cable5,
                interfaces[3],
            ),
            is_complete=True,
            is_active=True
        )
        path3 = self.assertPathExists(
            (
                interfaces[2], cable4, front_ports[2], rear_ports[1], cable3, rear_ports[0], front_ports[0], cable1,
                interfaces[0],
            ),
            is_complete=True,
            is_active=True
        )
        path4 = self.assertPathExists(
            (
                interfaces[3], cable5, front_ports[3], rear_ports[1], cable3, rear_ports[0], front_ports[1], cable2,
                interfaces[1],
            ),
            is_complete=True,
            is_active=True
        )
        self.assertEqual(CablePath.objects.count(), 4)
        for iface in interfaces:
            iface.refresh_from_db()
        self.assertPathIsSet(interfaces[0], path1)
        self.assertPathIsSet(interfaces[1], path2)
        self.assertPathIsSet(interfaces[2], path3)
        self.assertPathIsSet(interfaces[3], path4)
        # Both rear ports occupy connector 1 with both positions in use
        for rear_port in rear_ports:
            rear_port.refresh_from_db()
        self.assertEqual(rear_ports[0].cable_connector, 1)
        self.assertEqual(rear_ports[0].cable_positions, [1, 2])
        self.assertEqual(rear_ports[1].cable_connector, 1)
        self.assertEqual(rear_ports[1].cable_positions, [1, 2])

        # Test SVG generation
        CableTraceSVG(interfaces[0]).render()
    def test_103_cable_profile_trunk_2c1p(self):
        """
        [IF1] --C1-- [IF3]
        [IF2]        [IF4]

        Cable profile: Multiple connectors, single position
        """
        interfaces = [
            Interface.objects.create(device=self.device, name='Interface 1'),
            Interface.objects.create(device=self.device, name='Interface 2'),
            Interface.objects.create(device=self.device, name='Interface 3'),
            Interface.objects.create(device=self.device, name='Interface 4'),
        ]

        # Create cable 1: a two-connector trunk terminating two interfaces per side
        cable1 = Cable(
            profile=CableProfileChoices.TRUNK_2C1P,
            a_terminations=[interfaces[0], interfaces[1]],
            b_terminations=[interfaces[2], interfaces[3]],
        )
        cable1.clean()
        cable1.save()

        # Paths should pair IF1<->IF3 (connector 1) and IF2<->IF4 (connector 2)
        path1 = self.assertPathExists(
            (interfaces[0], cable1, interfaces[2]),
            is_complete=True,
            is_active=True
        )
        path2 = self.assertPathExists(
            (interfaces[1], cable1, interfaces[3]),
            is_complete=True,
            is_active=True
        )
        path3 = self.assertPathExists(
            (interfaces[2], cable1, interfaces[0]),
            is_complete=True,
            is_active=True
        )
        path4 = self.assertPathExists(
            (interfaces[3], cable1, interfaces[1]),
            is_complete=True,
            is_active=True
        )
        self.assertEqual(CablePath.objects.count(), 4)
        for interface in interfaces:
            interface.refresh_from_db()
        self.assertPathIsSet(interfaces[0], path1)
        self.assertPathIsSet(interfaces[1], path2)
        self.assertPathIsSet(interfaces[2], path3)
        self.assertPathIsSet(interfaces[3], path4)
        self.assertEqual(interfaces[0].cable_connector, 1)
        self.assertEqual(interfaces[0].cable_positions, [1])
        self.assertEqual(interfaces[1].cable_connector, 2)
        self.assertEqual(interfaces[1].cable_positions, [1])
        self.assertEqual(interfaces[2].cable_connector, 1)
        self.assertEqual(interfaces[2].cable_positions, [1])
        self.assertEqual(interfaces[3].cable_connector, 2)
        self.assertEqual(interfaces[3].cable_positions, [1])

        # Test SVG generation
        CableTraceSVG(interfaces[0]).render()

        # Delete cable 1
        cable1.delete()

        # Check that all CablePaths have been deleted
        self.assertEqual(CablePath.objects.count(), 0)
def test_104_cable_profile_trunk_2c2p(self):
"""
[IF1] --C1-- [FP1][RP1] --C9-- [RP3][FP5] --C5-- [IF5]
[IF2] --C2-- [FP2] [FP6] --C6-- [IF6]
[IF3] --C3-- [FP3][RP2] [RP4][FP7] --C7-- [IF7]
[IF4] --C4-- [FP4] [FP8] --C8-- [IF8]
Cable profile: Multiple connectors, multiple positions
"""
interfaces = [
Interface.objects.create(device=self.device, name='Interface 1'),
Interface.objects.create(device=self.device, name='Interface 2'),
Interface.objects.create(device=self.device, name='Interface 3'),
Interface.objects.create(device=self.device, name='Interface 4'),
Interface.objects.create(device=self.device, name='Interface 5'),
Interface.objects.create(device=self.device, name='Interface 6'),
Interface.objects.create(device=self.device, name='Interface 7'),
Interface.objects.create(device=self.device, name='Interface 8'),
]
rear_ports = [
RearPort.objects.create(device=self.device, name='Rear Port 1', positions=2),
RearPort.objects.create(device=self.device, name='Rear Port 2', positions=2),
RearPort.objects.create(device=self.device, name='Rear Port 3', positions=2),
RearPort.objects.create(device=self.device, name='Rear Port 4', positions=2),
]
front_ports = [
FrontPort.objects.create(device=self.device, name='Front Port 1'),
FrontPort.objects.create(device=self.device, name='Front Port 2'),
FrontPort.objects.create(device=self.device, name='Front Port 3'),
FrontPort.objects.create(device=self.device, name='Front Port 4'),
FrontPort.objects.create(device=self.device, name='Front Port 5'),
FrontPort.objects.create(device=self.device, name='Front Port 6'),
FrontPort.objects.create(device=self.device, name='Front Port 7'),
FrontPort.objects.create(device=self.device, name='Front Port 8'),
]
PortMapping.objects.bulk_create([
PortMapping(
device=self.device,
front_port=front_ports[0],
front_port_position=1,
rear_port=rear_ports[0],
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=front_ports[1],
front_port_position=1,
rear_port=rear_ports[0],
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=front_ports[2],
front_port_position=1,
rear_port=rear_ports[1],
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=front_ports[3],
front_port_position=1,
rear_port=rear_ports[1],
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=front_ports[4],
front_port_position=1,
rear_port=rear_ports[2],
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=front_ports[5],
front_port_position=1,
rear_port=rear_ports[2],
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=front_ports[6],
front_port_position=1,
rear_port=rear_ports[3],
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=front_ports[7],
front_port_position=1,
rear_port=rear_ports[3],
rear_port_position=2,
),
])
# Create cables
cable1 = Cable(a_terminations=[interfaces[0]], b_terminations=[front_ports[0]])
cable1.clean()
cable1.save()
cable2 = Cable(a_terminations=[interfaces[1]], b_terminations=[front_ports[1]])
cable2.clean()
cable2.save()
cable3 = Cable(a_terminations=[interfaces[2]], b_terminations=[front_ports[2]])
cable3.clean()
cable3.save()
cable4 = Cable(a_terminations=[interfaces[3]], b_terminations=[front_ports[3]])
cable4.clean()
cable4.save()
cable5 = Cable(a_terminations=[interfaces[4]], b_terminations=[front_ports[4]])
cable5.clean()
cable5.save()
cable6 = Cable(a_terminations=[interfaces[5]], b_terminations=[front_ports[5]])
cable6.clean()
cable6.save()
cable7 = Cable(a_terminations=[interfaces[6]], b_terminations=[front_ports[6]])
cable7.clean()
cable7.save()
cable8 = Cable(a_terminations=[interfaces[7]], b_terminations=[front_ports[7]])
cable8.clean()
cable8.save()
cable9 = Cable(
profile=CableProfileChoices.TRUNK_2C2P,
a_terminations=[rear_ports[0], rear_ports[1]],
b_terminations=[rear_ports[2], rear_ports[3]]
)
cable9.clean()
cable9.save()
path1 = self.assertPathExists(
(
interfaces[0], cable1, front_ports[0], rear_ports[0], cable9, rear_ports[2], front_ports[4], cable5,
interfaces[4],
),
is_complete=True,
is_active=True
)
path2 = self.assertPathExists(
(
interfaces[1], cable2, front_ports[1], rear_ports[0], cable9, rear_ports[2], front_ports[5], cable6,
interfaces[5],
),
is_complete=True,
is_active=True
)
path3 = self.assertPathExists(
(
interfaces[2], cable3, front_ports[2], rear_ports[1], cable9, rear_ports[3], front_ports[6], cable7,
interfaces[6],
),
is_complete=True,
is_active=True
)
path4 = self.assertPathExists(
(
interfaces[3], cable4, front_ports[3], rear_ports[1], cable9, rear_ports[3], front_ports[7], cable8,
interfaces[7],
),
is_complete=True,
is_active=True
)
path5 = self.assertPathExists(
(
interfaces[4], cable5, front_ports[4], rear_ports[2], cable9, rear_ports[0], front_ports[0], cable1,
interfaces[0],
),
is_complete=True,
is_active=True
)
path6 = self.assertPathExists(
(
interfaces[5], cable6, front_ports[5], rear_ports[2], cable9, rear_ports[0], front_ports[1], cable2,
interfaces[1],
),
is_complete=True,
is_active=True
)
path7 = self.assertPathExists(
(
interfaces[6], cable7, front_ports[6], rear_ports[3], cable9, rear_ports[1], front_ports[2], cable3,
interfaces[2],
),
is_complete=True,
is_active=True
)
path8 = self.assertPathExists(
(
interfaces[7], cable8, front_ports[7], rear_ports[3], cable9, rear_ports[1], front_ports[3], cable4,
interfaces[3],
),
is_complete=True,
is_active=True
)
self.assertEqual(CablePath.objects.count(), 8)
for iface in interfaces:
iface.refresh_from_db()
self.assertPathIsSet(interfaces[0], path1)
self.assertPathIsSet(interfaces[1], path2)
self.assertPathIsSet(interfaces[2], path3)
self.assertPathIsSet(interfaces[3], path4)
self.assertPathIsSet(interfaces[4], path5)
self.assertPathIsSet(interfaces[5], path6)
self.assertPathIsSet(interfaces[6], path7)
self.assertPathIsSet(interfaces[7], path8)
for rear_port in rear_ports:
rear_port.refresh_from_db()
self.assertEqual(rear_ports[0].cable_connector, 1)
self.assertEqual(rear_ports[0].cable_positions, [1, 2])
self.assertEqual(rear_ports[1].cable_connector, 2)
self.assertEqual(rear_ports[1].cable_positions, [1, 2])
self.assertEqual(rear_ports[2].cable_connector, 1)
self.assertEqual(rear_ports[2].cable_positions, [1, 2])
self.assertEqual(rear_ports[3].cable_connector, 2)
self.assertEqual(rear_ports[3].cable_positions, [1, 2])
# Test SVG generation
CableTraceSVG(interfaces[0]).render()
def test_105_cable_profile_breakout(self):
"""
[IF1] --C1-- [FP1][RP1] --C2-- [IF5]
[IF2] --C3-- [FP2] [IF6]
[IF3] --C4-- [FP3] [IF7]
[IF4] --C5-- [FP4] [IF8]
Cable profile: 1:4 breakout
"""
interfaces = [
Interface.objects.create(device=self.device, name='Interface 1'),
Interface.objects.create(device=self.device, name='Interface 2'),
Interface.objects.create(device=self.device, name='Interface 3'),
Interface.objects.create(device=self.device, name='Interface 4'),
Interface.objects.create(device=self.device, name='Interface 5'),
Interface.objects.create(device=self.device, name='Interface 6'),
Interface.objects.create(device=self.device, name='Interface 7'),
Interface.objects.create(device=self.device, name='Interface 8'),
]
rear_ports = [
RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4),
]
front_ports = [
FrontPort.objects.create(device=self.device, name='Front Port 1'),
FrontPort.objects.create(device=self.device, name='Front Port 2'),
FrontPort.objects.create(device=self.device, name='Front Port 3'),
FrontPort.objects.create(device=self.device, name='Front Port 4'),
]
PortMapping.objects.bulk_create([
PortMapping(
device=self.device,
front_port=front_ports[0],
front_port_position=1,
rear_port=rear_ports[0],
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=front_ports[1],
front_port_position=1,
rear_port=rear_ports[0],
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=front_ports[2],
front_port_position=1,
rear_port=rear_ports[0],
rear_port_position=3,
),
PortMapping(
device=self.device,
front_port=front_ports[3],
front_port_position=1,
rear_port=rear_ports[0],
rear_port_position=4,
),
])
# Create cables
cable1 = Cable(a_terminations=[interfaces[0]], b_terminations=[front_ports[0]])
cable1.clean()
cable1.save()
cable2 = Cable(a_terminations=[interfaces[1]], b_terminations=[front_ports[1]])
cable2.clean()
cable2.save()
cable3 = Cable(a_terminations=[interfaces[2]], b_terminations=[front_ports[2]])
cable3.clean()
cable3.save()
cable4 = Cable(a_terminations=[interfaces[3]], b_terminations=[front_ports[3]])
cable4.clean()
cable4.save()
cable5 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[rear_ports[0]],
b_terminations=interfaces[4:8],
)
cable5.clean()
cable5.save()
path1 = self.assertPathExists(
(interfaces[0], cable1, front_ports[0], rear_ports[0], cable5, interfaces[4]),
is_complete=True,
is_active=True
)
path2 = self.assertPathExists(
(interfaces[1], cable2, front_ports[1], rear_ports[0], cable5, interfaces[5]),
is_complete=True,
is_active=True
)
path3 = self.assertPathExists(
(interfaces[2], cable3, front_ports[2], rear_ports[0], cable5, interfaces[6]),
is_complete=True,
is_active=True
)
path4 = self.assertPathExists(
(interfaces[3], cable4, front_ports[3], rear_ports[0], cable5, interfaces[7]),
is_complete=True,
is_active=True
)
path5 = self.assertPathExists(
(interfaces[4], cable5, rear_ports[0], front_ports[0], cable1, interfaces[0]),
is_complete=True,
is_active=True
)
path6 = self.assertPathExists(
(interfaces[5], cable5, rear_ports[0], front_ports[1], cable2, interfaces[1]),
is_complete=True,
is_active=True
)
path7 = self.assertPathExists(
(interfaces[6], cable5, rear_ports[0], front_ports[2], cable3, interfaces[2]),
is_complete=True,
is_active=True
)
path8 = self.assertPathExists(
(interfaces[7], cable5, rear_ports[0], front_ports[3], cable4, interfaces[3]),
is_complete=True,
is_active=True
)
self.assertEqual(CablePath.objects.count(), 8)
for interface in interfaces:
interface.refresh_from_db()
self.assertPathIsSet(interfaces[0], path1)
self.assertPathIsSet(interfaces[1], path2)
self.assertPathIsSet(interfaces[2], path3)
self.assertPathIsSet(interfaces[3], path4)
self.assertPathIsSet(interfaces[4], path5)
self.assertPathIsSet(interfaces[5], path6)
self.assertPathIsSet(interfaces[6], path7)
self.assertPathIsSet(interfaces[7], path8)
self.assertEqual(interfaces[4].cable_connector, 1)
self.assertEqual(interfaces[4].cable_positions, [1])
self.assertEqual(interfaces[5].cable_connector, 2)
self.assertEqual(interfaces[5].cable_positions, [1])
self.assertEqual(interfaces[6].cable_connector, 3)
self.assertEqual(interfaces[6].cable_positions, [1])
self.assertEqual(interfaces[7].cable_connector, 4)
self.assertEqual(interfaces[7].cable_positions, [1])
rear_ports[0].refresh_from_db()
self.assertEqual(rear_ports[0].cable_connector, 1)
self.assertEqual(rear_ports[0].cable_positions, [1, 2, 3, 4])
# Test SVG generation
CableTraceSVG(interfaces[0]).render()
    def test_106_cable_profile_shuffle(self):
        """
        [IF1] --C1-- [FP1][RP1] --C17-- [RP3][FP9]  --C9--  [IF9]
        [IF2] --C2-- [FP2]              [FP10] --C10-- [IF10]
        [IF3] --C3-- [FP3]              [FP11] --C11-- [IF11]
        [IF4] --C4-- [FP4]              [FP12] --C12-- [IF12]
        [IF5] --C5-- [FP5][RP2]         [RP4][FP13] --C13-- [IF13]
        [IF6] --C6-- [FP6]              [FP14] --C14-- [IF14]
        [IF7] --C7-- [FP7]              [FP15] --C15-- [IF15]
        [IF8] --C8-- [FP8]              [FP16] --C16-- [IF16]
        Cable profile: Shuffle (2x2 MPO8)
        """
        interfaces = [
            # A side
            Interface.objects.create(device=self.device, name='Interface 1:1'),
            Interface.objects.create(device=self.device, name='Interface 1:2'),
            Interface.objects.create(device=self.device, name='Interface 1:3'),
            Interface.objects.create(device=self.device, name='Interface 1:4'),
            Interface.objects.create(device=self.device, name='Interface 2:1'),
            Interface.objects.create(device=self.device, name='Interface 2:2'),
            Interface.objects.create(device=self.device, name='Interface 2:3'),
            Interface.objects.create(device=self.device, name='Interface 2:4'),
            # B side
            Interface.objects.create(device=self.device, name='Interface 3:1'),
            Interface.objects.create(device=self.device, name='Interface 3:2'),
            Interface.objects.create(device=self.device, name='Interface 3:3'),
            Interface.objects.create(device=self.device, name='Interface 3:4'),
            Interface.objects.create(device=self.device, name='Interface 4:1'),
            Interface.objects.create(device=self.device, name='Interface 4:2'),
            Interface.objects.create(device=self.device, name='Interface 4:3'),
            Interface.objects.create(device=self.device, name='Interface 4:4'),
        ]
        rear_ports = [
            RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4),
            RearPort.objects.create(device=self.device, name='Rear Port 2', positions=4),
            RearPort.objects.create(device=self.device, name='Rear Port 3', positions=4),
            RearPort.objects.create(device=self.device, name='Rear Port 4', positions=4),
        ]
        front_ports = [
            FrontPort.objects.create(device=self.device, name='Front Port 1'),
            FrontPort.objects.create(device=self.device, name='Front Port 2'),
            FrontPort.objects.create(device=self.device, name='Front Port 3'),
            FrontPort.objects.create(device=self.device, name='Front Port 4'),
            FrontPort.objects.create(device=self.device, name='Front Port 5'),
            FrontPort.objects.create(device=self.device, name='Front Port 6'),
            FrontPort.objects.create(device=self.device, name='Front Port 7'),
            FrontPort.objects.create(device=self.device, name='Front Port 8'),
            FrontPort.objects.create(device=self.device, name='Front Port 9'),
            FrontPort.objects.create(device=self.device, name='Front Port 10'),
            FrontPort.objects.create(device=self.device, name='Front Port 11'),
            FrontPort.objects.create(device=self.device, name='Front Port 12'),
            FrontPort.objects.create(device=self.device, name='Front Port 13'),
            FrontPort.objects.create(device=self.device, name='Front Port 14'),
            FrontPort.objects.create(device=self.device, name='Front Port 15'),
            FrontPort.objects.create(device=self.device, name='Front Port 16'),
        ]
        # Front port i maps to position (i % 4) + 1 of rear port i // 4
        port_mappings = []
        for i, front_port in enumerate(front_ports):
            port_mappings.append(
                PortMapping(
                    device=self.device,
                    front_port=front_ports[i],
                    front_port_position=1,
                    rear_port=rear_ports[int(i / 4)],
                    rear_port_position=(i % 4) + 1,
                ),
            )
        PortMapping.objects.bulk_create(port_mappings)
        # Create cables
        cables = []
        for interface, front_port in zip(interfaces, front_ports):
            cable = Cable(a_terminations=[interface], b_terminations=[front_port])
            cable.clean()
            cable.save()
            cables.append(cable)
        # Shuffle trunk between the A-side and B-side rear port pairs (C17 in the diagram)
        shuffle_cable = Cable(
            profile=CableProfileChoices.TRUNK_2C4P_SHUFFLE,
            a_terminations=rear_ports[0:2],
            b_terminations=rear_ports[2:4],
        )
        shuffle_cable.clean()
        shuffle_cable.save()
        # Per the paths asserted below, the shuffle splits each A-side connector's
        # four positions across the two B-side connectors (positions 1-2 land on
        # one B connector, 3-4 on the other) — presumed 2x2 MPO8 shuffle semantics.
        paths = [
            # A-to-B paths
            self.assertPathExists(
                (
                    interfaces[0], cables[0], front_ports[0], rear_ports[0], shuffle_cable, rear_ports[2],
                    front_ports[8], cables[8], interfaces[8],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[1], cables[1], front_ports[1], rear_ports[0], shuffle_cable, rear_ports[2],
                    front_ports[9], cables[9], interfaces[9],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[2], cables[2], front_ports[2], rear_ports[0], shuffle_cable, rear_ports[3],
                    front_ports[12], cables[12], interfaces[12],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[3], cables[3], front_ports[3], rear_ports[0], shuffle_cable, rear_ports[3],
                    front_ports[13], cables[13], interfaces[13],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[4], cables[4], front_ports[4], rear_ports[1], shuffle_cable, rear_ports[2],
                    front_ports[10], cables[10], interfaces[10],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[5], cables[5], front_ports[5], rear_ports[1], shuffle_cable, rear_ports[2],
                    front_ports[11], cables[11], interfaces[11],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[6], cables[6], front_ports[6], rear_ports[1], shuffle_cable, rear_ports[3],
                    front_ports[14], cables[14], interfaces[14],
                ),
                is_complete=True,
                is_active=True
            ),
            self.assertPathExists(
                (
                    interfaces[7], cables[7], front_ports[7], rear_ports[1], shuffle_cable, rear_ports[3],
                    front_ports[15], cables[15], interfaces[15],
                ),
                is_complete=True,
                is_active=True
            ),
        ]
        # 8 forward paths asserted above, plus their 8 reverse counterparts
        self.assertEqual(CablePath.objects.count(), len(paths) * 2)
        # NOTE(review): `paths` holds only the 8 A-to-B paths, so this zip stops after
        # interfaces[7]; the B-side interfaces' reverse paths are counted above but
        # never individually asserted.
        for i, (interface, path) in enumerate(zip(interfaces, paths)):
            interface.refresh_from_db()
            self.assertPathIsSet(interface, path)
        for i, rear_port in enumerate(rear_ports):
            rear_port.refresh_from_db()
            self.assertEqual(rear_port.cable_connector, (i % 2) + 1)
            self.assertEqual(rear_port.cable_positions, [1, 2, 3, 4])
        # Test SVG generation
        CableTraceSVG(interfaces[0]).render()
def test_202_single_path_via_pass_through_with_breakouts(self):
"""
[IF1] --C1-- [FP1] [RP1] --C2-- [IF3]
[IF2] [IF4]
"""
interfaces = [
Interface.objects.create(device=self.device, name='Interface 1'),
Interface.objects.create(device=self.device, name='Interface 2'),
Interface.objects.create(device=self.device, name='Interface 3'),
Interface.objects.create(device=self.device, name='Interface 4'),
]
rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4)
frontport1 = FrontPort.objects.create(device=self.device, name='Front Port 1', positions=4)
PortMapping.objects.bulk_create([
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=1,
rear_port=rearport1,
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=2,
rear_port=rearport1,
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=3,
rear_port=rearport1,
rear_port_position=3,
),
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=4,
rear_port=rearport1,
rear_port_position=4,
),
])
# Create cables
cable1 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[frontport1],
b_terminations=[interfaces[0], interfaces[1]],
)
cable1.clean()
cable1.save()
cable2 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[rearport1],
b_terminations=[interfaces[2], interfaces[3]]
)
cable2.clean()
cable2.save()
paths = [
self.assertPathExists(
(interfaces[0], cable1, frontport1, rearport1, cable2, interfaces[2]),
is_complete=True,
is_active=True
),
self.assertPathExists(
(interfaces[1], cable1, frontport1, rearport1, cable2, interfaces[3]),
is_complete=True,
is_active=True
),
self.assertPathExists(
(interfaces[2], cable2, rearport1, frontport1, cable1, interfaces[0]),
is_complete=True,
is_active=True
),
self.assertPathExists(
(interfaces[3], cable2, rearport1, frontport1, cable1, interfaces[1]),
is_complete=True,
is_active=True
),
]
self.assertEqual(CablePath.objects.count(), 4)
for interface in interfaces:
interface.refresh_from_db()
self.assertPathIsSet(interfaces[0], paths[0])
self.assertPathIsSet(interfaces[1], paths[1])
self.assertPathIsSet(interfaces[2], paths[2])
self.assertPathIsSet(interfaces[3], paths[3])
# Test SVG generation
CableTraceSVG(interfaces[0]).render()
def test_204_multiple_paths_via_pass_through_with_breakouts(self):
"""
[IF1] --C1-- [FP1] [RP1] --C3-- [RP2] [FP3] --C4-- [IF5]
[IF2] [IF6]
[IF3] --C2-- [FP2] [FP4] --C5-- [IF7]
[IF4] [IF8]
"""
interfaces = [
Interface.objects.create(device=self.device, name='Interface 1'),
Interface.objects.create(device=self.device, name='Interface 2'),
Interface.objects.create(device=self.device, name='Interface 3'),
Interface.objects.create(device=self.device, name='Interface 4'),
Interface.objects.create(device=self.device, name='Interface 5'),
Interface.objects.create(device=self.device, name='Interface 6'),
Interface.objects.create(device=self.device, name='Interface 7'),
Interface.objects.create(device=self.device, name='Interface 8'),
]
rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1', positions=8)
rearport2 = RearPort.objects.create(device=self.device, name='Rear Port 2', positions=8)
frontport1 = FrontPort.objects.create(device=self.device, name='Front Port 1:1', positions=4)
frontport2 = FrontPort.objects.create(device=self.device, name='Front Port 1:2', positions=4)
frontport3 = FrontPort.objects.create(device=self.device, name='Front Port 2:1', positions=4)
frontport4 = FrontPort.objects.create(device=self.device, name='Front Port 2:2', positions=4)
PortMapping.objects.bulk_create([
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=1,
rear_port=rearport1,
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=2,
rear_port=rearport1,
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=frontport2,
front_port_position=1,
rear_port=rearport1,
rear_port_position=5,
),
PortMapping(
device=self.device,
front_port=frontport2,
front_port_position=2,
rear_port=rearport1,
rear_port_position=6,
),
PortMapping(
device=self.device,
front_port=frontport3,
front_port_position=1,
rear_port=rearport2,
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=frontport3,
front_port_position=2,
rear_port=rearport2,
rear_port_position=2,
),
PortMapping(
device=self.device,
front_port=frontport4,
front_port_position=1,
rear_port=rearport2,
rear_port_position=5,
),
PortMapping(
device=self.device,
front_port=frontport4,
front_port_position=2,
rear_port=rearport2,
rear_port_position=6,
),
])
# Create cables
cable1 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[frontport1],
b_terminations=[interfaces[0], interfaces[1]],
)
cable1.clean()
cable1.save()
cable2 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[frontport2],
b_terminations=[interfaces[2], interfaces[3]],
)
cable2.clean()
cable2.save()
cable3 = Cable(
profile=CableProfileChoices.SINGLE_1C8P,
a_terminations=[rearport1],
b_terminations=[rearport2]
)
cable3.clean()
cable3.save()
cable4 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[frontport3],
b_terminations=[interfaces[4], interfaces[5]],
)
cable4.clean()
cable4.save()
cable5 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[frontport4],
b_terminations=[interfaces[6], interfaces[7]],
)
cable5.clean()
cable5.save()
paths = [
self.assertPathExists(
(
interfaces[0], cable1, frontport1, rearport1, cable3, rearport2, frontport3, cable4,
interfaces[4],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[1], cable1, frontport1, rearport1, cable3, rearport2, frontport3, cable4,
interfaces[5],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[2], cable2, frontport2, rearport1, cable3, rearport2, frontport4, cable5,
interfaces[6],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[3], cable2, frontport2, rearport1, cable3, rearport2, frontport4, cable5,
interfaces[7],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[4], cable4, frontport3, rearport2, cable3, rearport1, frontport1, cable1,
interfaces[0],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[5], cable4, frontport3, rearport2, cable3, rearport1, frontport1, cable1,
interfaces[1],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[6], cable5, frontport4, rearport2, cable3, rearport1, frontport2, cable2,
interfaces[2],
),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(
interfaces[7], cable5, frontport4, rearport2, cable3, rearport1, frontport2, cable2,
interfaces[3],
),
is_complete=True,
is_active=True,
),
]
self.assertEqual(CablePath.objects.count(), 8)
for interface in interfaces:
interface.refresh_from_db()
self.assertPathIsSet(interfaces[0], paths[0])
self.assertPathIsSet(interfaces[1], paths[1])
self.assertPathIsSet(interfaces[2], paths[2])
self.assertPathIsSet(interfaces[3], paths[3])
self.assertPathIsSet(interfaces[4], paths[4])
self.assertPathIsSet(interfaces[5], paths[5])
self.assertPathIsSet(interfaces[6], paths[6])
self.assertPathIsSet(interfaces[7], paths[7])
# Test SVG generation
CableTraceSVG(interfaces[0]).render()
def test_212_interface_to_interface_via_circuit_with_breakouts(self):
"""
[IF1] --C1-- [CT1] [CT2] --C2-- [IF3]
[IF2] [IF4]
"""
interfaces = [
Interface.objects.create(device=self.device, name='Interface 1'),
Interface.objects.create(device=self.device, name='Interface 2'),
Interface.objects.create(device=self.device, name='Interface 3'),
Interface.objects.create(device=self.device, name='Interface 4'),
]
circuittermination1 = CircuitTermination.objects.create(
circuit=self.circuit,
termination=self.site,
term_side='A'
)
circuittermination2 = CircuitTermination.objects.create(
circuit=self.circuit,
termination=self.site,
term_side='Z'
)
# Create cables
cable1 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[circuittermination1],
b_terminations=[interfaces[0], interfaces[1]],
)
cable1.clean()
cable1.save()
cable2 = Cable(
profile=CableProfileChoices.BREAKOUT_1C4P_4C1P,
a_terminations=[circuittermination2],
b_terminations=[interfaces[2], interfaces[3]]
)
cable2.clean()
cable2.save()
# Check for two complete paths in either direction
paths = [
self.assertPathExists(
(interfaces[0], cable1, circuittermination1, circuittermination2, cable2, interfaces[2]),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(interfaces[1], cable1, circuittermination1, circuittermination2, cable2, interfaces[3]),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(interfaces[2], cable2, circuittermination2, circuittermination1, cable1, interfaces[0]),
is_complete=True,
is_active=True,
),
self.assertPathExists(
(interfaces[3], cable2, circuittermination2, circuittermination1, cable1, interfaces[1]),
is_complete=True,
is_active=True,
),
]
self.assertEqual(CablePath.objects.count(), 4)
for interface in interfaces:
interface.refresh_from_db()
self.assertPathIsSet(interfaces[0], paths[0])
self.assertPathIsSet(interfaces[1], paths[1])
self.assertPathIsSet(interfaces[2], paths[2])
self.assertPathIsSet(interfaces[3], paths[3])
# Test SVG generation
CableTraceSVG(interfaces[0]).render()
    # TBD: Is this a topology we want to support?
    @skip("Test applicability TBD")
    def test_217_interface_to_interface_via_rear_ports(self):
        """
        [IF1] --C1-- [FP1] [RP1] --C2-- [RP3] [FP3] --C3-- [IF2]
                     [FP2] [RP2]        [RP4] [FP4]
        """
        interfaces = [
            Interface.objects.create(device=self.device, name='Interface 1'),
            Interface.objects.create(device=self.device, name='Interface 2'),
        ]
        rear_ports = [
            RearPort.objects.create(device=self.device, name='Rear Port 1'),
            RearPort.objects.create(device=self.device, name='Rear Port 2'),
            RearPort.objects.create(device=self.device, name='Rear Port 3'),
            RearPort.objects.create(device=self.device, name='Rear Port 4'),
        ]
        front_ports = [
            FrontPort.objects.create(device=self.device, name='Front Port 1'),
            FrontPort.objects.create(device=self.device, name='Front Port 2'),
            FrontPort.objects.create(device=self.device, name='Front Port 3'),
            FrontPort.objects.create(device=self.device, name='Front Port 4'),
        ]
        # One-to-one mapping: front port i pairs with rear port i at position 1
        PortMapping.objects.bulk_create([
            PortMapping(
                device=self.device,
                front_port=front_ports[0],
                front_port_position=1,
                rear_port=rear_ports[0],
                rear_port_position=1,
            ),
            PortMapping(
                device=self.device,
                front_port=front_ports[1],
                front_port_position=1,
                rear_port=rear_ports[1],
                rear_port_position=1,
            ),
            PortMapping(
                device=self.device,
                front_port=front_ports[2],
                front_port_position=1,
                rear_port=rear_ports[2],
                rear_port_position=1,
            ),
            PortMapping(
                device=self.device,
                front_port=front_ports[3],
                front_port_position=1,
                rear_port=rear_ports[3],
                rear_port_position=1,
            ),
        ])
        # Create cables
        # Cable 1 fans interface 1 out to both front ports (2 connectors, 1 position)
        cable1 = Cable(
            profile=CableProfileChoices.SINGLE_2C1P,
            a_terminations=[interfaces[0]],
            b_terminations=[front_ports[0], front_ports[1]]
        )
        cable1.clean()
        cable1.save()
        # Cable 2 joins the rear port pairs with no explicit profile
        cable2 = Cable(
            a_terminations=[rear_ports[0], rear_ports[1]],
            b_terminations=[rear_ports[2], rear_ports[3]]
        )
        cable2.clean()
        cable2.save()
        cable3 = Cable(
            profile=CableProfileChoices.SINGLE_2C1P,
            a_terminations=[interfaces[1]],
            b_terminations=[front_ports[2], front_ports[3]]
        )
        cable3.clean()
        cable3.save()
        # Check for one complete path in either direction
        # (inner tuples group the fanned-out terminations traversed at a single hop)
        paths = [
            self.assertPathExists(
                (
                    interfaces[0], cable1, (front_ports[0], front_ports[1]), (rear_ports[0], rear_ports[1]), cable2,
                    (rear_ports[2], rear_ports[3]), (front_ports[2], front_ports[3]), cable3, interfaces[1]
                ),
                is_complete=True
            ),
            self.assertPathExists(
                (
                    interfaces[1], cable3, (front_ports[2], front_ports[3]), (rear_ports[2], rear_ports[3]), cable2,
                    (rear_ports[0], rear_ports[1]), (front_ports[0], front_ports[1]), cable1, interfaces[0]
                ),
                is_complete=True
            ),
        ]
        self.assertEqual(CablePath.objects.count(), 2)
        for interface in interfaces:
            interface.refresh_from_db()
        self.assertPathIsSet(interfaces[0], paths[0])
        self.assertPathIsSet(interfaces[1], paths[1])
        # Test SVG generation
        CableTraceSVG(interfaces[0]).render()
def test_223_single_path_via_multiple_pass_throughs_with_breakouts(self):
"""
[IF1] --C1-- [FP1] [RP1] --C2-- [IF3]
[IF2] [FP2] [RP2] [IF4]
"""
interfaces = [
Interface.objects.create(device=self.device, name='Interface 1'),
Interface.objects.create(device=self.device, name='Interface 2'),
Interface.objects.create(device=self.device, name='Interface 3'),
Interface.objects.create(device=self.device, name='Interface 4'),
]
rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1')
rearport2 = RearPort.objects.create(device=self.device, name='Rear Port 2')
frontport1 = FrontPort.objects.create(device=self.device, name='Front Port 1')
frontport2 = FrontPort.objects.create(device=self.device, name='Front Port 2')
PortMapping.objects.bulk_create([
PortMapping(
device=self.device,
front_port=frontport1,
front_port_position=1,
rear_port=rearport1,
rear_port_position=1,
),
PortMapping(
device=self.device,
front_port=frontport2,
front_port_position=1,
rear_port=rearport2,
rear_port_position=1,
),
])
# Create cables
cable1 = Cable(
profile=CableProfileChoices.TRUNK_2C2P,
a_terminations=[interfaces[0], interfaces[1]],
b_terminations=[frontport1, frontport2]
)
cable1.clean()
cable1.save()
cable2 = Cable(
profile=CableProfileChoices.TRUNK_2C2P,
a_terminations=[rearport1, rearport2],
b_terminations=[interfaces[2], interfaces[3]]
)
cable2.clean()
cable2.save()
# Validate paths
self.assertPathExists(
(interfaces[0], cable1, frontport1, rearport1, cable2, interfaces[2]),
is_complete=True,
is_active=True
)
self.assertPathExists(
(interfaces[1], cable1, frontport2, rearport2, cable2, interfaces[3]),
is_complete=True,
is_active=True
)
self.assertPathExists(
(interfaces[2], cable2, rearport1, frontport1, cable1, interfaces[0]),
is_complete=True,
is_active=True
)
self.assertPathExists(
(interfaces[3], cable2, rearport2, frontport2, cable1, interfaces[1]),
is_complete=True,
is_active=True
)
self.assertEqual(CablePath.objects.count(), 4)
def test_304_add_port_mapping_between_connected_ports(self):
"""
[IF1] --C1-- [FP1] [RP1] --C2-- [IF2]
"""
interface1 = Interface.objects.create(device=self.device, name='Interface 1')
interface2 = Interface.objects.create(device=self.device, name='Interface 2')
frontport1 = FrontPort.objects.create(device=self.device, name='Front Port 1')
rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1')
cable1 = Cable(
a_terminations=[interface1],
b_terminations=[frontport1]
)
cable1.save()
cable2 = Cable(
a_terminations=[interface2],
b_terminations=[rearport1]
)
cable2.save()
# Check for incomplete paths
self.assertPathExists(
(interface1, cable1, frontport1),
is_complete=False,
is_active=True
)
self.assertPathExists(
(interface2, cable2, rearport1),
is_complete=False,
is_active=True
)
# Create a PortMapping between frontport1 and rearport1
PortMapping.objects.create(
device=self.device,
front_port=frontport1,
front_port_position=1,
rear_port=rearport1,
rear_port_position=1,
)
# Check that paths are now complete
self.assertPathExists(
(interface1, cable1, frontport1, rearport1, cable2, interface2),
is_complete=True,
is_active=True
)
self.assertPathExists(
(interface2, cable2, rearport1, frontport1, cable1, interface1),
is_complete=True,
is_active=True
)
def test_305_delete_port_mapping_between_connected_ports(self):
    """
    [IF1] --C1-- [FP1] [RP1] --C2-- [IF2]

    Verify that deleting the PortMapping between a cabled front port and
    rear port splits the complete paths back into partial paths.
    """
    interface1 = Interface.objects.create(device=self.device, name='Interface 1')
    interface2 = Interface.objects.create(device=self.device, name='Interface 2')
    frontport1 = FrontPort.objects.create(device=self.device, name='Front Port 1')
    rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1')
    cable1 = Cable(
        a_terminations=[interface1],
        b_terminations=[frontport1]
    )
    cable1.save()
    cable2 = Cable(
        a_terminations=[interface2],
        b_terminations=[rearport1]
    )
    cable2.save()
    # Map the front port to the rear port so both cables form one path
    portmapping1 = PortMapping.objects.create(
        device=self.device,
        front_port=frontport1,
        front_port_position=1,
        rear_port=rearport1,
        rear_port_position=1,
    )
    # Check for complete paths
    self.assertPathExists(
        (interface1, cable1, frontport1, rearport1, cable2, interface2),
        is_complete=True,
        is_active=True
    )
    self.assertPathExists(
        (interface2, cable2, rearport1, frontport1, cable1, interface1),
        is_complete=True,
        is_active=True
    )
    # Delete the PortMapping between frontport1 and rearport1
    portmapping1.delete()
    # Check that paths are no longer complete
    self.assertPathExists(
        (interface1, cable1, frontport1),
        is_complete=False,
        is_active=True
    )
    self.assertPathExists(
        (interface2, cable2, rearport1),
        is_complete=False,
        is_active=True
    )
def test_306_change_port_mapping_between_connected_ports(self):
    """
    [IF1] --C1-- [FP1] [RP1] --C3-- [IF3]
    [IF2] --C2-- [FP2] [RP3] --C4-- [IF4]

    Verify that replacing a PortMapping re-routes the affected paths to the
    newly mapped ports.
    """
    interface1 = Interface.objects.create(device=self.device, name='Interface 1')
    interface2 = Interface.objects.create(device=self.device, name='Interface 2')
    interface3 = Interface.objects.create(device=self.device, name='Interface 3')
    interface4 = Interface.objects.create(device=self.device, name='Interface 4')
    frontport1 = FrontPort.objects.create(device=self.device, name='Front Port 1')
    frontport2 = FrontPort.objects.create(device=self.device, name='Front Port 2')
    rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1')
    rearport2 = RearPort.objects.create(device=self.device, name='Rear Port 2')
    cable1 = Cable(
        a_terminations=[interface1],
        b_terminations=[frontport1]
    )
    cable1.save()
    cable2 = Cable(
        a_terminations=[interface2],
        b_terminations=[frontport2]
    )
    cable2.save()
    cable3 = Cable(
        a_terminations=[interface3],
        b_terminations=[rearport1]
    )
    cable3.save()
    cable4 = Cable(
        a_terminations=[interface4],
        b_terminations=[rearport2]
    )
    cable4.save()
    # Initial mapping: FP1 <-> RP1 (connects interface1 to interface3)
    portmapping1 = PortMapping.objects.create(
        device=self.device,
        front_port=frontport1,
        front_port_position=1,
        rear_port=rearport1,
        rear_port_position=1,
    )
    # Verify expected initial paths
    self.assertPathExists(
        (interface1, cable1, frontport1, rearport1, cable3, interface3),
        is_complete=True,
        is_active=True
    )
    self.assertPathExists(
        (interface3, cable3, rearport1, frontport1, cable1, interface1),
        is_complete=True,
        is_active=True
    )
    # Delete and replace the PortMapping to connect interface1 to interface4
    portmapping1.delete()
    portmapping2 = PortMapping.objects.create(
        device=self.device,
        front_port=frontport1,
        front_port_position=1,
        rear_port=rearport2,
        rear_port_position=1,
    )
    # Verify expected new paths
    self.assertPathExists(
        (interface1, cable1, frontport1, rearport2, cable4, interface4),
        is_complete=True,
        is_active=True
    )
    self.assertPathExists(
        (interface4, cable4, rearport2, frontport1, cable1, interface1),
        is_complete=True,
        is_active=True
    )
    # Delete and replace the PortMapping to connect interface2 to interface4
    portmapping2.delete()
    PortMapping.objects.create(
        device=self.device,
        front_port=frontport2,
        front_port_position=1,
        rear_port=rearport2,
        rear_port_position=1,
    )
    # Verify expected new paths
    self.assertPathExists(
        (interface2, cable2, frontport2, rearport2, cable4, interface4),
        is_complete=True,
        is_active=True
    )
    self.assertPathExists(
        (interface4, cable4, rearport2, frontport2, cable2, interface2),
        is_complete=True,
        is_active=True
    )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/dcim/tests/test_cablepaths2.py",
"license": "Apache License 2.0",
"lines": 1465,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/dcim/tests/utils.py | from django.test import TestCase
from circuits.models import *
from dcim.models import *
from dcim.utils import object_to_path_node
__all__ = (
'CablePathTestCase',
)
class CablePathTestCase(TestCase):
    """
    Base class for test cases for cable paths.

    Provides shared fixtures (site, device, power panel, circuit) and
    assertion helpers for verifying CablePath contents.
    """
    @classmethod
    def setUpTestData(cls):
        manufacturer = Manufacturer.objects.create(name='Generic', slug='generic')
        device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Test Device')
        role = DeviceRole.objects.create(name='Device Role', slug='device-role')
        provider = Provider.objects.create(name='Provider', slug='provider')
        circuit_type = CircuitType.objects.create(name='Circuit Type', slug='circuit-type')

        # Create reusable test objects
        cls.site = Site.objects.create(name='Site', slug='site')
        cls.device = Device.objects.create(site=cls.site, device_type=device_type, role=role, name='Test Device')
        cls.powerpanel = PowerPanel.objects.create(site=cls.site, name='Power Panel')
        cls.circuit = Circuit.objects.create(provider=provider, type=circuit_type, cid='Circuit 1')

    def _get_cablepath(self, nodes, **kwargs):
        """
        Return a given cable path

        :param nodes: Iterable of steps, with each step being either a single node or a list of nodes
        :return: The matching CablePath (if any), or None if no match was found
        """
        path = []
        for step in nodes:
            # Each step is normalized to a list of serialized path nodes
            if type(step) in (list, tuple):
                path.append([object_to_path_node(node) for node in step])
            else:
                path.append([object_to_path_node(step)])
        return CablePath.objects.filter(path=path, **kwargs).first()

    def assertPathExists(self, nodes, **kwargs):
        """
        Assert that a CablePath from origin to destination with a specific intermediate path exists. Returns the
        first matching CablePath, if found.

        :param nodes: Iterable of steps, with each step being either a single node or a list of nodes
        """
        cablepath = self._get_cablepath(nodes, **kwargs)
        self.assertIsNotNone(cablepath, msg='CablePath not found')
        return cablepath

    def assertPathDoesNotExist(self, nodes, **kwargs):
        """
        Assert that a specific CablePath does *not* exist.

        :param nodes: Iterable of steps, with each step being either a single node or a list of nodes
        """
        cablepath = self._get_cablepath(nodes, **kwargs)
        self.assertIsNone(cablepath, msg='Unexpected CablePath found')

    def assertPathIsSet(self, origin, cablepath, msg=None):
        """
        Assert that a specific CablePath instance is set as the path on the origin.

        :param origin: The originating path endpoint
        :param cablepath: The CablePath instance originating from this endpoint
        :param msg: Custom failure message (optional)
        """
        if msg is None:
            msg = f"Path #{cablepath.pk} not set on originating endpoint {origin}"
        self.assertEqual(origin._path_id, cablepath.pk, msg=msg)

    def assertPathIsNotSet(self, origin, msg=None):
        """
        Assert that *no* CablePath instance is set as the path on the origin.

        :param origin: The originating path endpoint
        :param msg: Custom failure message (optional)
        """
        if msg is None:
            msg = f"Path #{origin._path_id} set as origin on {origin}; should be None!"
        self.assertIsNone(origin._path_id, msg=msg)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/dcim/tests/utils.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/dcim/ui/panels.py | from django.utils.translation import gettext_lazy as _
from netbox.ui import attrs, panels
class SitePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for Site objects."""
    region = attrs.NestedObjectAttr('region', linkify=True)
    group = attrs.NestedObjectAttr('group', linkify=True)
    name = attrs.TextAttr('name')
    status = attrs.ChoiceAttr('status')
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    facility = attrs.TextAttr('facility')
    description = attrs.TextAttr('description')
    timezone = attrs.TimezoneAttr('time_zone')
    physical_address = attrs.AddressAttr('physical_address', map_url=True)
    shipping_address = attrs.AddressAttr('shipping_address', map_url=True)
    gps_coordinates = attrs.GPSCoordinatesAttr()
class LocationPanel(panels.NestedGroupObjectPanel):
    """Detail-view attribute panel for Location objects."""
    site = attrs.RelatedObjectAttr('site', linkify=True, grouped_by='group')
    status = attrs.ChoiceAttr('status')
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    facility = attrs.TextAttr('facility')
class RackDimensionsPanel(panels.ObjectAttributesPanel):
    """Panel showing the physical dimensions of a Rack."""
    form_factor = attrs.ChoiceAttr('form_factor')
    width = attrs.ChoiceAttr('width')
    height = attrs.TextAttr('u_height', format_string='{}U', label=_('Height'))
    outer_width = attrs.NumericAttr('outer_width', unit_accessor='get_outer_unit_display')
    outer_height = attrs.NumericAttr('outer_height', unit_accessor='get_outer_unit_display')
    outer_depth = attrs.NumericAttr('outer_depth', unit_accessor='get_outer_unit_display')
    mounting_depth = attrs.TextAttr('mounting_depth', format_string=_('{} millimeters'))
class RackNumberingPanel(panels.ObjectAttributesPanel):
    """Panel showing how a Rack's units are numbered."""
    starting_unit = attrs.TextAttr('starting_unit')
    desc_units = attrs.BooleanAttr('desc_units', label=_('Descending units'))
class RackPanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for Rack objects."""
    region = attrs.NestedObjectAttr('site.region', linkify=True)
    site = attrs.RelatedObjectAttr('site', linkify=True, grouped_by='group')
    location = attrs.NestedObjectAttr('location', linkify=True)
    name = attrs.TextAttr('name')
    facility_id = attrs.TextAttr('facility_id', label=_('Facility ID'))
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    status = attrs.ChoiceAttr('status')
    rack_type = attrs.RelatedObjectAttr('rack_type', linkify=True, grouped_by='manufacturer')
    role = attrs.RelatedObjectAttr('role', linkify=True)
    description = attrs.TextAttr('description')
    serial = attrs.TextAttr('serial', label=_('Serial number'), style='font-monospace', copy_button=True)
    asset_tag = attrs.TextAttr('asset_tag', style='font-monospace', copy_button=True)
    airflow = attrs.ChoiceAttr('airflow')
    space_utilization = attrs.UtilizationAttr('get_utilization')
    power_utilization = attrs.UtilizationAttr('get_power_utilization')
class RackWeightPanel(panels.ObjectAttributesPanel):
    """Panel showing a Rack's weight attributes."""
    weight = attrs.NumericAttr('weight', unit_accessor='get_weight_unit_display')
    max_weight = attrs.NumericAttr('max_weight', unit_accessor='get_weight_unit_display', label=_('Maximum weight'))
    total_weight = attrs.TemplatedAttr('total_weight', template_name='dcim/rack/attrs/total_weight.html')
class RackRolePanel(panels.OrganizationalObjectPanel):
    """Detail-view attribute panel for RackRole objects."""
    color = attrs.ColorAttr('color')
class RackReservationPanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for RackReservation objects."""
    units = attrs.TextAttr('unit_list')
    status = attrs.ChoiceAttr('status')
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    user = attrs.RelatedObjectAttr('user')
    description = attrs.TextAttr('description')
class RackTypePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for RackType objects."""
    manufacturer = attrs.RelatedObjectAttr('manufacturer', linkify=True)
    model = attrs.TextAttr('model')
    description = attrs.TextAttr('description')
class DevicePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for Device objects (location & identity)."""
    region = attrs.NestedObjectAttr('site.region', linkify=True)
    site = attrs.RelatedObjectAttr('site', linkify=True, grouped_by='group')
    location = attrs.NestedObjectAttr('location', linkify=True)
    rack = attrs.TemplatedAttr('rack', template_name='dcim/device/attrs/rack.html')
    virtual_chassis = attrs.RelatedObjectAttr('virtual_chassis', linkify=True)
    parent_device = attrs.TemplatedAttr('parent_bay', template_name='dcim/device/attrs/parent_device.html')
    gps_coordinates = attrs.GPSCoordinatesAttr()
    tenant = attrs.RelatedObjectAttr('tenant', linkify=True, grouped_by='group')
    description = attrs.TextAttr('description')
    airflow = attrs.ChoiceAttr('airflow')
    serial = attrs.TextAttr('serial', label=_('Serial number'), style='font-monospace', copy_button=True)
    asset_tag = attrs.TextAttr('asset_tag', style='font-monospace', copy_button=True)
    config_template = attrs.RelatedObjectAttr('config_template', linkify=True)
class DeviceManagementPanel(panels.ObjectAttributesPanel):
    """Panel showing a Device's management attributes (status, role, IPs)."""
    title = _('Management')
    status = attrs.ChoiceAttr('status')
    role = attrs.NestedObjectAttr('role', linkify=True, max_depth=3)
    platform = attrs.NestedObjectAttr('platform', linkify=True, max_depth=3)
    primary_ip4 = attrs.TemplatedAttr(
        'primary_ip4',
        label=_('Primary IPv4'),
        template_name='dcim/device/attrs/ipaddress.html',
    )
    primary_ip6 = attrs.TemplatedAttr(
        'primary_ip6',
        label=_('Primary IPv6'),
        template_name='dcim/device/attrs/ipaddress.html',
    )
    oob_ip = attrs.TemplatedAttr(
        'oob_ip',
        label=_('Out-of-band IP'),
        template_name='dcim/device/attrs/ipaddress.html',
    )
    cluster = attrs.RelatedObjectAttr('cluster', linkify=True)
class DeviceDeviceTypePanel(panels.ObjectAttributesPanel):
    """Panel summarizing the DeviceType of a Device."""
    title = _('Device Type')
    manufacturer = attrs.RelatedObjectAttr('device_type.manufacturer', linkify=True)
    model = attrs.RelatedObjectAttr('device_type', linkify=True)
    height = attrs.TemplatedAttr('device_type.u_height', template_name='dcim/devicetype/attrs/height.html')
    front_image = attrs.ImageAttr('device_type.front_image')
    rear_image = attrs.ImageAttr('device_type.rear_image')
class DeviceDimensionsPanel(panels.ObjectAttributesPanel):
    """Panel showing a Device's physical dimensions."""
    title = _('Dimensions')
    total_weight = attrs.TemplatedAttr('total_weight', template_name='dcim/device/attrs/total_weight.html')
class DeviceRolePanel(panels.NestedGroupObjectPanel):
    """Detail-view attribute panel for DeviceRole objects."""
    color = attrs.ColorAttr('color')
    vm_role = attrs.BooleanAttr('vm_role', label=_('VM role'))
    config_template = attrs.RelatedObjectAttr('config_template', linkify=True)
class DeviceTypePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for DeviceType objects."""
    manufacturer = attrs.RelatedObjectAttr('manufacturer', linkify=True)
    model = attrs.TextAttr('model')
    part_number = attrs.TextAttr('part_number')
    default_platform = attrs.RelatedObjectAttr('default_platform', linkify=True)
    description = attrs.TextAttr('description')
    height = attrs.TemplatedAttr('u_height', template_name='dcim/devicetype/attrs/height.html')
    exclude_from_utilization = attrs.BooleanAttr('exclude_from_utilization')
    full_depth = attrs.BooleanAttr('is_full_depth')
    weight = attrs.NumericAttr('weight', unit_accessor='get_weight_unit_display')
    subdevice_role = attrs.ChoiceAttr('subdevice_role', label=_('Parent/child'))
    airflow = attrs.ChoiceAttr('airflow')
    front_image = attrs.ImageAttr('front_image')
    rear_image = attrs.ImageAttr('rear_image')
class ModulePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for Module objects."""
    device = attrs.RelatedObjectAttr('device', linkify=True)
    device_type = attrs.RelatedObjectAttr('device.device_type', linkify=True, grouped_by='manufacturer')
    module_bay = attrs.NestedObjectAttr('module_bay', linkify=True)
    status = attrs.ChoiceAttr('status')
    description = attrs.TextAttr('description')
    serial = attrs.TextAttr('serial', label=_('Serial number'), style='font-monospace', copy_button=True)
    asset_tag = attrs.TextAttr('asset_tag', style='font-monospace', copy_button=True)
class ModuleTypeProfilePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for ModuleTypeProfile objects."""
    name = attrs.TextAttr('name')
    description = attrs.TextAttr('description')
class ModuleTypePanel(panels.ObjectAttributesPanel):
    """Detail-view attribute panel for ModuleType objects."""
    profile = attrs.RelatedObjectAttr('profile', linkify=True)
    manufacturer = attrs.RelatedObjectAttr('manufacturer', linkify=True)
    model = attrs.TextAttr('model', label=_('Model name'))
    part_number = attrs.TextAttr('part_number')
    description = attrs.TextAttr('description')
    airflow = attrs.ChoiceAttr('airflow')
    weight = attrs.NumericAttr('weight', unit_accessor='get_weight_unit_display')
class PlatformPanel(panels.NestedGroupObjectPanel):
    """Detail-view attribute panel for Platform objects."""
    manufacturer = attrs.RelatedObjectAttr('manufacturer', linkify=True)
    config_template = attrs.RelatedObjectAttr('config_template', linkify=True)
class VirtualChassisMembersPanel(panels.ObjectPanel):
    """
    A panel which lists all members of a virtual chassis.
    """
    template_name = 'dcim/panels/virtual_chassis_members.html'
    title = _('Virtual Chassis Members')

    def get_context(self, context):
        # Extend the base panel context with the VC members passed by the view
        ctx = super().get_context(context)
        ctx['vc_members'] = context.get('vc_members')
        return ctx

    def render(self, context):
        # Suppress the panel entirely when no members were supplied
        if context.get('vc_members'):
            return super().render(context)
        return ''
class PowerUtilizationPanel(panels.ObjectPanel):
    """
    A panel which displays the power utilization statistics for a device.
    """
    template_name = 'dcim/panels/power_utilization.html'
    title = _('Power Utilization')

    def get_context(self, context):
        # NOTE(review): 'vc_members' appears copied from VirtualChassisMembersPanel;
        # confirm whether the power_utilization template actually uses it.
        return {
            **super().get_context(context),
            'vc_members': context.get('vc_members'),
        }

    def render(self, context):
        # Render only when the device has both power ports and power outlets
        obj = context['object']
        if not obj.powerports.exists() or not obj.poweroutlets.exists():
            return ''
        return super().render(context)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/dcim/ui/panels.py",
"license": "Apache License 2.0",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/extras/ui/panels.py | from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from netbox.ui import actions, panels
from utilities.data import resolve_attr_path
__all__ = (
'CustomFieldsPanel',
'ImageAttachmentsPanel',
'TagsPanel',
)
class CustomFieldsPanel(panels.ObjectPanel):
    """
    A panel showing the value of all custom fields defined on an object.
    """
    template_name = 'extras/panels/custom_fields.html'
    title = _('Custom Fields')

    def get_context(self, context):
        # Resolve the target object from the template context and attach its
        # custom field values, grouped for display.
        obj = resolve_attr_path(context, self.accessor)
        return {
            **super().get_context(context),
            'custom_fields': obj.get_custom_fields_by_group(),
        }

    def render(self, context):
        ctx = self.get_context(context)

        # Hide the panel if no custom fields exist
        if not ctx['custom_fields']:
            return ''

        # Reuse the context computed above rather than calling get_context()
        # a second time (the original recomputed it, duplicating the
        # get_custom_fields_by_group() work for every render).
        return render_to_string(self.template_name, ctx)
class ImageAttachmentsPanel(panels.ObjectsTablePanel):
    """
    A panel showing all images attached to the object.
    """
    # "Attach an image" action; URL params are resolved lazily from the
    # render context so the panel class can be declared statically.
    actions = [
        actions.AddObject(
            'extras.imageattachment',
            url_params={
                'object_type': lambda ctx: ContentType.objects.get_for_model(ctx['object']).pk,
                'object_id': lambda ctx: ctx['object'].pk,
                'return_url': lambda ctx: ctx['object'].get_absolute_url(),
            },
            label=_('Attach an image'),
        ),
    ]

    def __init__(self, **kwargs):
        # Restrict the table to attachments belonging to the context object
        super().__init__(
            'extras.imageattachment',
            filters={
                'object_type_id': lambda ctx: ContentType.objects.get_for_model(ctx['object']).pk,
                'object_id': lambda ctx: ctx['object'].pk,
            },
            **kwargs,
        )
class TagsPanel(panels.ObjectPanel):
    """
    A panel showing the tags assigned to the object.
    """
    template_name = 'extras/panels/tags.html'
    title = _('Tags')

    def get_context(self, context):
        # Expose the resolved target object to the tags template
        ctx = super().get_context(context)
        ctx['object'] = resolve_attr_path(context, self.accessor)
        return ctx
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/extras/ui/panels.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/api/gfk_fields.py | from drf_spectacular.utils import extend_schema_field
from rest_framework import serializers
from utilities.api import get_serializer_for_model
__all__ = (
'GFKSerializerField',
)
@extend_schema_field(serializers.JSONField(allow_null=True, read_only=True))
class GFKSerializerField(serializers.Field):
    """Serializes a generic foreign key target via its model's nested serializer."""

    def to_representation(self, instance, **kwargs):
        # A null GFK serializes to None
        if instance is None:
            return None
        serializer_class = get_serializer_for_model(instance)
        return serializer_class(
            instance,
            nested=True,
            context={'request': self.context['request']},
        ).data
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/api/gfk_fields.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/api/serializers/bulk.py | from rest_framework import serializers
from .features import ChangeLogMessageSerializer
__all__ = (
'BulkOperationSerializer',
)
class BulkOperationSerializer(ChangeLogMessageSerializer):
    """Serializer identifying a single object (by PK) targeted by a bulk operation."""
    id = serializers.IntegerField()
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/api/serializers/bulk.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/api/serializers/models.py | from rest_framework import serializers
from users.api.serializers_.mixins import OwnerMixin
from .features import NetBoxModelSerializer
__all__ = (
'NestedGroupModelSerializer',
'OrganizationalModelSerializer',
'PrimaryModelSerializer',
)
class PrimaryModelSerializer(OwnerMixin, NetBoxModelSerializer):
    """
    Base serializer class for models inheriting from PrimaryModel.
    """
    pass
class NestedGroupModelSerializer(OwnerMixin, NetBoxModelSerializer):
    """
    Base serializer class for models inheriting from NestedGroupModel.
    """
    # Read-only nesting depth, sourced from the model's MPTT/tree `level` field
    _depth = serializers.IntegerField(source='level', read_only=True)
class OrganizationalModelSerializer(OwnerMixin, NetBoxModelSerializer):
    """
    Base serializer class for models inheriting from OrganizationalModel.
    """
    pass
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/api/serializers/models.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/forms/bulk_edit.py | from django import forms
from django.utils.translation import gettext_lazy as _
from core.models import ObjectType
from extras.choices import *
from extras.models import Tag
from utilities.forms import BulkEditForm
from utilities.forms.fields import CommentField, DynamicModelMultipleChoiceField
from .mixins import ChangelogMessageMixin, CustomFieldsMixin, OwnerMixin
__all__ = (
'NestedGroupModelBulkEditForm',
'NetBoxModelBulkEditForm',
'OrganizationalModelBulkEditForm',
'PrimaryModelBulkEditForm',
)
class NetBoxModelBulkEditForm(ChangelogMessageMixin, CustomFieldsMixin, BulkEditForm):
    """
    Base form for modifying multiple NetBox objects (of the same type) in bulk via the UI. Adds support for custom
    fields and adding/removing tags.

    Attributes:
        fieldsets: An iterable of two-tuples which define a heading and field set to display per section of
            the rendered form (optional). If not defined, the all fields will be rendered as a single section.
    """
    fieldsets = None

    # Hidden field carrying the PKs of the objects being edited; its queryset
    # is bound to the concrete model in __init__()
    pk = forms.ModelMultipleChoiceField(
        queryset=None,  # Set from self.model on init
        widget=forms.MultipleHiddenInput
    )
    add_tags = DynamicModelMultipleChoiceField(
        label=_('Add tags'),
        queryset=Tag.objects.all(),
        required=False
    )
    remove_tags = DynamicModelMultipleChoiceField(
        label=_('Remove tags'),
        queryset=Tag.objects.all(),
        required=False
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['pk'].queryset = self.model.objects.all()

        # Restrict tag fields by model
        object_type = ObjectType.objects.get_for_model(self.model)
        self.fields['add_tags'].widget.add_query_param('for_object_type_id', object_type.pk)
        self.fields['remove_tags'].widget.add_query_param('for_object_type_id', object_type.pk)

        self._extend_nullable_fields()

    def _get_form_field(self, customfield):
        # Bulk-edit fields are never required and carry no initial value
        return customfield.to_form_field(set_initial=False, enforce_required=False)

    def _extend_nullable_fields(self):
        """Extend self.nullable_fields with owner and eligible custom fields."""
        nullable_common_fields = ['owner']
        # Only optional, UI-editable custom fields may be nulled in bulk
        nullable_custom_fields = [
            name for name, customfield in self.custom_fields.items()
            if (not customfield.required and customfield.ui_editable == CustomFieldUIEditableChoices.YES)
        ]
        self.nullable_fields = (
            *self.nullable_fields,
            *nullable_common_fields,
            *nullable_custom_fields,
        )
class PrimaryModelBulkEditForm(OwnerMixin, NetBoxModelBulkEditForm):
    """
    Bulk edit form for models which inherit from PrimaryModel.
    """
    description = forms.CharField(
        label=_('Description'),
        max_length=100,
        required=False
    )
    comments = CommentField()
class OrganizationalModelBulkEditForm(OwnerMixin, NetBoxModelBulkEditForm):
    """
    Bulk edit form for models which inherit from OrganizationalModel.
    """
    description = forms.CharField(
        label=_('Description'),
        max_length=200,
        required=False
    )
    comments = CommentField()
class NestedGroupModelBulkEditForm(OwnerMixin, NetBoxModelBulkEditForm):
    """
    Bulk edit form for models which inherit from NestedGroupModel.
    """
    description = forms.CharField(
        label=_('Description'),
        max_length=200,
        required=False
    )
    comments = CommentField()
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/forms/bulk_edit.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/forms/bulk_import.py | from django import forms
from django.db import models
from django.utils.translation import gettext_lazy as _
from extras.choices import *
from extras.models import CustomField, Tag
from users.models import Owner
from utilities.forms import CSVModelForm
from utilities.forms.fields import CSVModelChoiceField, CSVModelMultipleChoiceField, SlugField
from .model_forms import NetBoxModelForm
__all__ = (
'NestedGroupModelImportForm',
'NetBoxModelImportForm',
'OrganizationalModelImportForm',
'OwnerCSVMixin',
'PrimaryModelImportForm'
)
class NetBoxModelImportForm(CSVModelForm, NetBoxModelForm):
    """
    Base form for creating NetBox objects from CSV data. Used for bulk importing.
    """
    tags = CSVModelMultipleChoiceField(
        label=_('Tags'),
        queryset=Tag.objects.all(),
        required=False,
        to_field_name='slug',
        help_text=_('Tag slugs separated by commas, encased with double quotes (e.g. "tag1,tag2,tag3")')
    )

    def _get_custom_fields(self, content_type):
        # Return only custom fields that are editable in the UI
        return [
            field for field in CustomField.objects.get_for_model(content_type.model_class())
            if field.ui_editable == CustomFieldUIEditableChoices.YES
        ]

    def _get_form_field(self, customfield):
        return customfield.to_form_field(for_csv_import=True)

    def clean(self):
        """
        Clean form data, normalizing empty strings to None for any concrete
        model fields which permit null values.
        """
        super().clean()
        cleaned = self.cleaned_data

        model = getattr(self._meta, "model", None)
        if not model:
            return cleaned

        for field in model._meta.get_fields():
            # Only forward, DB-backed fields (skip M2M & reverse relations)
            if not isinstance(field, models.Field) or not field.concrete or field.many_to_many:
                continue
            # Only nullable fields which are actually present in the cleaned data
            if not getattr(field, "null", False) or field.name not in cleaned:
                continue
            value = cleaned[field.name]
            # Only coerce empty strings; leave other types alone
            if isinstance(value, str) and not value.strip():
                cleaned[field.name] = None

        return cleaned
class OwnerCSVMixin(forms.Form):
    """Mixin adding an optional owner column (matched by name) to CSV import forms."""
    owner = CSVModelChoiceField(
        queryset=Owner.objects.all(),
        required=False,
        to_field_name='name',
        help_text=_("Name of the object's owner")
    )
class PrimaryModelImportForm(OwnerCSVMixin, NetBoxModelImportForm):
    """
    Bulk import form for models which inherit from PrimaryModel.
    """
    pass
class OrganizationalModelImportForm(OwnerCSVMixin, NetBoxModelImportForm):
    """
    Bulk import form for models which inherit from OrganizationalModel.
    """
    slug = SlugField()
class NestedGroupModelImportForm(OwnerCSVMixin, NetBoxModelImportForm):
    """
    Bulk import form for models which inherit from NestedGroupModel.
    """
    slug = SlugField()
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/forms/bulk_import.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/netbox/forms/filtersets.py | from django import forms
from django.utils.translation import gettext_lazy as _
from extras.choices import *
from utilities.forms.fields import QueryField
from utilities.forms.mixins import FilterModifierMixin
from .mixins import CustomFieldsMixin, OwnerFilterMixin, SavedFiltersMixin
__all__ = (
'NestedGroupModelFilterSetForm',
'NetBoxModelFilterSetForm',
'OrganizationalModelFilterSetForm',
'PrimaryModelFilterSetForm',
)
class NetBoxModelFilterSetForm(FilterModifierMixin, CustomFieldsMixin, SavedFiltersMixin, forms.Form):
    """
    Base form for FilerSet forms. These are used to filter object lists in the NetBox UI. Note that the
    corresponding FilterSet *must* provide a `q` filter.

    Attributes:
        model: The model class associated with the form
        fieldsets: An iterable of two-tuples which define a heading and field set to display per section of
            the rendered form (optional). If not defined, the all fields will be rendered as a single section.
        selector_fields: An iterable of names of fields to display by default when rendering the form as
            a selector widget
    """
    q = QueryField(
        required=False,
        label=_('Search')
    )

    selector_fields = ('filter_id', 'q')

    def _get_custom_fields(self, content_type):
        # Return only non-hidden custom fields for which filtering is enabled (excluding JSON fields)
        return [
            cf for cf in super()._get_custom_fields(content_type) if (
                cf.filter_logic != CustomFieldFilterLogicChoices.FILTER_DISABLED and
                cf.type != CustomFieldTypeChoices.TYPE_JSON
            )
        ]

    def _get_form_field(self, customfield):
        # Filter fields are always optional and ignore UI visibility rules
        return customfield.to_form_field(
            set_initial=False, enforce_required=False, enforce_visibility=False, for_filterset_form=True
        )
class PrimaryModelFilterSetForm(OwnerFilterMixin, NetBoxModelFilterSetForm):
    """
    FilterSet form for models which inherit from PrimaryModel.
    """
    pass
class OrganizationalModelFilterSetForm(OwnerFilterMixin, NetBoxModelFilterSetForm):
    """
    FilterSet form for models which inherit from OrganizationalModel.
    """
    pass
class NestedGroupModelFilterSetForm(OwnerFilterMixin, NetBoxModelFilterSetForm):
    """
    FilterSet form for models which inherit from NestedGroupModel.
    """
    pass
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/forms/filtersets.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/forms/model_forms.py | import json
from django import forms
from django.contrib.contenttypes.models import ContentType
from extras.choices import *
from utilities.forms.fields import CommentField, SlugField
from utilities.forms.mixins import CheckLastUpdatedMixin
from .mixins import ChangelogMessageMixin, CustomFieldsMixin, OwnerMixin, TagsMixin
__all__ = (
'NestedGroupModelForm',
'NetBoxModelForm',
'OrganizationalModelForm',
'PrimaryModelForm',
)
class NetBoxModelForm(
    ChangelogMessageMixin,
    CheckLastUpdatedMixin,
    CustomFieldsMixin,
    TagsMixin,
    forms.ModelForm
):
    """
    Base form for creating & editing NetBox models. Extends Django's ModelForm to add support for custom fields.

    Attributes:
        fieldsets: An iterable of FieldSets which define a name and set of fields to display per section of
            the rendered form (optional). If not defined, the all fields will be rendered as a single section.
    """
    fieldsets = ()

    def _get_content_type(self):
        return ContentType.objects.get_for_model(self._meta.model)

    def _get_form_field(self, customfield):
        """Build the form field for a custom field, seeding it from the instance when editing."""
        if self.instance.pk:
            form_field = customfield.to_form_field(set_initial=False)
            initial = self.instance.custom_field_data.get(customfield.name)
            if customfield.type == CustomFieldTypeChoices.TYPE_JSON:
                # JSON values are edited as serialized text
                form_field.initial = json.dumps(initial)
            else:
                form_field.initial = initial
            return form_field

        return customfield.to_form_field()

    def clean(self):
        """Copy cleaned custom field values onto the instance's custom_field_data."""
        # Save custom field data on instance
        for cf_name, customfield in self.custom_fields.items():
            if cf_name not in self.fields:
                # Custom fields may be absent when performing bulk updates via import
                continue

            key = cf_name[3:]  # Strip "cf_" from field name
            value = self.cleaned_data.get(cf_name)

            # Convert "empty" values to null
            if value in self.fields[cf_name].empty_values:
                self.instance.custom_field_data[key] = None
            else:
                # JSON custom fields arrive as serialized strings; decode before storing
                if customfield.type == CustomFieldTypeChoices.TYPE_JSON and type(value) is str:
                    value = json.loads(value)
                self.instance.custom_field_data[key] = customfield.serialize(value)

        return super().clean()

    def _post_clean(self):
        """
        Override BaseModelForm's _post_clean() to store many-to-many field values on the model instance.
        """
        self.instance._m2m_values = {}
        for field in self.instance._meta.local_many_to_many:
            if field.name in self.cleaned_data:
                self.instance._m2m_values[field.name] = list(self.cleaned_data[field.name])

        return super()._post_clean()
class PrimaryModelForm(OwnerMixin, NetBoxModelForm):
    """
    Form for models which inherit from PrimaryModel.
    """
    # PrimaryModel instances support free-form comments
    comments = CommentField()
class OrganizationalModelForm(OwnerMixin, NetBoxModelForm):
    """
    Form for models which inherit from OrganizationalModel.
    """
    # URL-friendly identifier (see utilities.forms.fields.SlugField for population behavior)
    slug = SlugField()
    # OrganizationalModel instances support free-form comments
    comments = CommentField()
class NestedGroupModelForm(OwnerMixin, NetBoxModelForm):
    """
    Form for models which inherit from NestedGroupModel.
    """
    # URL-friendly identifier (see utilities.forms.fields.SlugField for population behavior)
    slug = SlugField()
    # NestedGroupModel instances support free-form comments
    comments = CommentField()
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/forms/model_forms.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/netbox/forms/search.py | import re
from django import forms
from django.utils.translation import gettext_lazy as _
from netbox.search import LookupTypes
from netbox.search.backends import search_backend
LOOKUP_CHOICES = (
('', _('Partial match')),
(LookupTypes.EXACT, _('Exact match')),
(LookupTypes.STARTSWITH, _('Starts with')),
(LookupTypes.ENDSWITH, _('Ends with')),
(LookupTypes.REGEX, _('Regex')),
)
class SearchForm(forms.Form):
    """
    Global search form.

    Accepts a free-text query (q), an optional set of object types to restrict the search,
    and a lookup type (partial/exact/startswith/endswith/regex).
    """
    q = forms.CharField(
        label=_('Search'),
        widget=forms.TextInput(
            attrs={
                'hx-get': '',
                'hx-target': '#object_list',
                'hx-trigger': 'keyup[target.value.length >= 3] changed delay:500ms',
            }
        )
    )
    obj_types = forms.MultipleChoiceField(
        choices=[],
        required=False,
        label=_('Object type(s)')
    )
    lookup = forms.ChoiceField(
        choices=LOOKUP_CHOICES,
        initial=LookupTypes.PARTIAL,
        required=False,
        label=_('Lookup')
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Object type choices are populated at runtime from the active search backend
        self.fields['obj_types'].choices = search_backend.get_object_types()

    def clean(self):
        cleaned_data = super().clean()
        # Validate regular expressions. Use .get() rather than direct indexing: if a field
        # failed its own validation its key is absent from cleaned_data, and direct indexing
        # would raise an unhandled KeyError instead of surfacing a form error.
        if cleaned_data.get('lookup') == LookupTypes.REGEX:
            try:
                re.compile(cleaned_data.get('q', ''))
            except re.error as e:
                raise forms.ValidationError({
                    'q': f'Invalid regular expression: {e}'
                })
        return cleaned_data
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/forms/search.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/graphql/filters.py | from dataclasses import dataclass
from typing import TYPE_CHECKING
import strawberry_django
from strawberry import ID
from strawberry_django import ComparisonFilterLookup, StrFilterLookup
from core.graphql.filter_mixins import ChangeLoggingMixin
from extras.graphql.filter_mixins import CustomFieldsFilterMixin, JournalEntriesFilterMixin, TagsFilterMixin
if TYPE_CHECKING:
from .filters import *
__all__ = (
'BaseModelFilter',
'ChangeLoggedModelFilter',
'NestedGroupModelFilter',
'NetBoxModelFilter',
'OrganizationalModelFilter',
'PrimaryModelFilter',
)
@dataclass
class BaseModelFilter:
    """Root GraphQL filter: every model supports filtering by primary key."""
    # ID lookups support comparison operators (exact, gt, lt, etc.)
    id: ComparisonFilterLookup[ID] | None = strawberry_django.filter_field()
class ChangeLoggedModelFilter(ChangeLoggingMixin, BaseModelFilter):
    """GraphQL filter for change-logged models which are not full NetBox models."""
    pass
class NetBoxModelFilter(
    CustomFieldsFilterMixin,
    JournalEntriesFilterMixin,
    TagsFilterMixin,
    ChangeLoggingMixin,
    BaseModelFilter
):
    """
    GraphQL filter for full-featured NetBox models: layers custom field, journal entry,
    and tag filtering on top of change logging and ID lookups.
    """
    pass
@dataclass
class NestedGroupModelFilter(NetBoxModelFilter):
    """GraphQL filter for models which inherit from NestedGroupModel."""
    name: StrFilterLookup[str] | None = strawberry_django.filter_field()
    slug: StrFilterLookup[str] | None = strawberry_django.filter_field()
    description: StrFilterLookup[str] | None = strawberry_django.filter_field()
    # Hierarchical models may additionally be filtered by their parent's ID
    parent_id: ID | None = strawberry_django.filter_field()
@dataclass
class OrganizationalModelFilter(NetBoxModelFilter):
    """GraphQL filter for models which inherit from OrganizationalModel."""
    name: StrFilterLookup[str] | None = strawberry_django.filter_field()
    slug: StrFilterLookup[str] | None = strawberry_django.filter_field()
    description: StrFilterLookup[str] | None = strawberry_django.filter_field()
    comments: StrFilterLookup[str] | None = strawberry_django.filter_field()
@dataclass
class PrimaryModelFilter(NetBoxModelFilter):
    """GraphQL filter for models which inherit from PrimaryModel."""
    description: StrFilterLookup[str] | None = strawberry_django.filter_field()
    comments: StrFilterLookup[str] | None = strawberry_django.filter_field()
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/graphql/filters.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/tests/test_base_classes.py | from django.apps import apps
from django.test import TestCase
from django.utils.module_loading import import_string
from netbox.api.serializers import (
NestedGroupModelSerializer,
NetBoxModelSerializer,
OrganizationalModelSerializer,
PrimaryModelSerializer,
)
from netbox.filtersets import (
NestedGroupModelFilterSet,
NetBoxModelFilterSet,
OrganizationalModelFilterSet,
PrimaryModelFilterSet,
)
from netbox.forms.bulk_edit import (
NestedGroupModelBulkEditForm,
NetBoxModelBulkEditForm,
OrganizationalModelBulkEditForm,
PrimaryModelBulkEditForm,
)
from netbox.forms.bulk_import import (
NestedGroupModelImportForm,
NetBoxModelImportForm,
OrganizationalModelImportForm,
PrimaryModelImportForm,
)
from netbox.forms.filtersets import (
NestedGroupModelFilterSetForm,
NetBoxModelFilterSetForm,
OrganizationalModelFilterSetForm,
PrimaryModelFilterSetForm,
)
from netbox.forms.model_forms import (
NestedGroupModelForm,
NetBoxModelForm,
OrganizationalModelForm,
PrimaryModelForm,
)
from netbox.graphql.types import (
NestedGroupObjectType,
NetBoxObjectType,
OrganizationalObjectType,
PrimaryObjectType,
)
from netbox.models import NestedGroupModel, NetBoxModel, OrganizationalModel, PrimaryModel
from netbox.tables import (
NestedGroupModelTable,
NetBoxTable,
OrganizationalModelTable,
PrimaryModelTable,
)
class FormClassesTestCase(TestCase):

    @staticmethod
    def get_form_for_model(model, prefix=''):
        """
        Import and return the form class for a given model.
        """
        return import_string(f'{model._meta.app_label}.forms.{model.__name__}{prefix}Form')

    @staticmethod
    def _resolve_base_class(model, base_class_map):
        """
        Map a model to its expected base class. Entries are checked in order, so the most
        specific model class must appear first. Models from the dummy test plugin are exempt.
        """
        if model._meta.app_label == 'dummy_plugin':
            return None
        for model_class, base_class in base_class_map:
            if issubclass(model, model_class):
                return base_class
        return None

    @staticmethod
    def get_model_form_base_class(model):
        """
        Return the base form class for creating/editing the given model.
        """
        return FormClassesTestCase._resolve_base_class(model, (
            (PrimaryModel, PrimaryModelForm),
            (OrganizationalModel, OrganizationalModelForm),
            (NestedGroupModel, NestedGroupModelForm),
            (NetBoxModel, NetBoxModelForm),
        ))

    @staticmethod
    def get_bulk_edit_form_base_class(model):
        """
        Return the base form class for bulk editing the given model.
        """
        return FormClassesTestCase._resolve_base_class(model, (
            (PrimaryModel, PrimaryModelBulkEditForm),
            (OrganizationalModel, OrganizationalModelBulkEditForm),
            (NestedGroupModel, NestedGroupModelBulkEditForm),
            (NetBoxModel, NetBoxModelBulkEditForm),
        ))

    @staticmethod
    def get_import_form_base_class(model):
        """
        Return the base form class for importing the given model.
        """
        return FormClassesTestCase._resolve_base_class(model, (
            (PrimaryModel, PrimaryModelImportForm),
            (OrganizationalModel, OrganizationalModelImportForm),
            (NestedGroupModel, NestedGroupModelImportForm),
            (NetBoxModel, NetBoxModelImportForm),
        ))

    @staticmethod
    def get_filterset_form_base_class(model):
        """
        Return the base form class for the given model's FilterSet.
        """
        return FormClassesTestCase._resolve_base_class(model, (
            (PrimaryModel, PrimaryModelFilterSetForm),
            (OrganizationalModel, OrganizationalModelFilterSetForm),
            (NestedGroupModel, NestedGroupModelFilterSetForm),
            (NetBoxModel, NetBoxModelFilterSetForm),
        ))

    def test_model_form_base_classes(self):
        """
        Check that each model form inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_model_form_base_class(model):
                form_class = self.get_form_for_model(model)
                self.assertTrue(issubclass(form_class, base_class), f"{form_class} does not inherit from {base_class}")

    def test_bulk_edit_form_base_classes(self):
        """
        Check that each bulk edit form inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_bulk_edit_form_base_class(model):
                form_class = self.get_form_for_model(model, prefix='BulkEdit')
                self.assertTrue(issubclass(form_class, base_class), f"{form_class} does not inherit from {base_class}")

    def test_import_form_base_classes(self):
        """
        Check that each bulk import form inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_import_form_base_class(model):
                form_class = self.get_form_for_model(model, prefix='Import')
                self.assertTrue(issubclass(form_class, base_class), f"{form_class} does not inherit from {base_class}")

    def test_filterset_form_base_classes(self):
        """
        Check that each filterset form inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_filterset_form_base_class(model):
                form_class = self.get_form_for_model(model, prefix='Filter')
                self.assertTrue(issubclass(form_class, base_class), f"{form_class} does not inherit from {base_class}")
class FilterSetClassesTestCase(TestCase):

    @staticmethod
    def get_filterset_for_model(model):
        """
        Import and return the filterset class for a given model.
        """
        return import_string(f'{model._meta.app_label}.filtersets.{model.__name__}FilterSet')

    @staticmethod
    def get_model_filterset_base_class(model):
        """
        Return the base FilterSet class for the given model.
        """
        # Models from the dummy test plugin are exempt
        if model._meta.app_label == 'dummy_plugin':
            return None
        # Check the most specific model class first
        for model_class, base_class in (
            (PrimaryModel, PrimaryModelFilterSet),
            (OrganizationalModel, OrganizationalModelFilterSet),
            (NestedGroupModel, NestedGroupModelFilterSet),
            (NetBoxModel, NetBoxModelFilterSet),
        ):
            if issubclass(model, model_class):
                return base_class
        return None

    def test_model_filterset_base_classes(self):
        """
        Check that each FilterSet inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_model_filterset_base_class(model):
                filterset = self.get_filterset_for_model(model)
                self.assertTrue(
                    issubclass(filterset, base_class),
                    f"{filterset} does not inherit from {base_class}",
                )
class TableClassesTestCase(TestCase):

    @staticmethod
    def get_table_for_model(model):
        """
        Import and return the table class for a given model.
        """
        return import_string(f'{model._meta.app_label}.tables.{model.__name__}Table')

    @staticmethod
    def get_model_table_base_class(model):
        """
        Return the base table class for the given model.
        """
        # Models from the dummy test plugin are exempt
        if model._meta.app_label == 'dummy_plugin':
            return None
        # Check the most specific model class first
        for model_class, base_class in (
            (PrimaryModel, PrimaryModelTable),
            (OrganizationalModel, OrganizationalModelTable),
            (NestedGroupModel, NestedGroupModelTable),
            (NetBoxModel, NetBoxTable),
        ):
            if issubclass(model, model_class):
                return base_class
        return None

    def test_model_table_base_classes(self):
        """
        Check that each table inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_model_table_base_class(model):
                table = self.get_table_for_model(model)
                self.assertTrue(
                    issubclass(table, base_class),
                    f"{table} does not inherit from {base_class}",
                )
                # The table's Meta must also extend the base table's Meta
                self.assertTrue(
                    issubclass(table.Meta, base_class.Meta),
                    f"{table}.Meta does not inherit from {base_class}.Meta",
                )
class SerializerClassesTestCase(TestCase):

    @staticmethod
    def get_serializer_for_model(model):
        """
        Import and return the REST API serializer class for a given model.
        """
        return import_string(f'{model._meta.app_label}.api.serializers.{model.__name__}Serializer')

    @staticmethod
    def get_model_serializer_base_class(model):
        """
        Return the base serializer class for the given model.
        """
        # Models from the dummy test plugin are exempt
        if model._meta.app_label == 'dummy_plugin':
            return None
        # Check the most specific model class first
        for model_class, base_class in (
            (PrimaryModel, PrimaryModelSerializer),
            (OrganizationalModel, OrganizationalModelSerializer),
            (NestedGroupModel, NestedGroupModelSerializer),
            (NetBoxModel, NetBoxModelSerializer),
        ):
            if issubclass(model, model_class):
                return base_class
        return None

    def test_model_serializer_base_classes(self):
        """
        Check that each model serializer inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_model_serializer_base_class(model):
                serializer = self.get_serializer_for_model(model)
                self.assertTrue(
                    issubclass(serializer, base_class),
                    f"{serializer} does not inherit from {base_class}",
                )
class GraphQLTypeClassesTestCase(TestCase):

    @staticmethod
    def get_type_for_model(model):
        """
        Import and return the GraphQL type for a given model.
        """
        return import_string(f'{model._meta.app_label}.graphql.types.{model.__name__}Type')

    @staticmethod
    def get_model_type_base_class(model):
        """
        Return the base GraphQL type for the given model.
        """
        # Models from the dummy test plugin are exempt
        if model._meta.app_label == 'dummy_plugin':
            return None
        # Check the most specific model class first
        for model_class, base_class in (
            (PrimaryModel, PrimaryObjectType),
            (OrganizationalModel, OrganizationalObjectType),
            (NestedGroupModel, NestedGroupObjectType),
            (NetBoxModel, NetBoxObjectType),
        ):
            if issubclass(model, model_class):
                return base_class
        return None

    def test_model_type_base_classes(self):
        """
        Check that each GraphQL type inherits from the appropriate base class.
        """
        for model in apps.get_models():
            if base_class := self.get_model_type_base_class(model):
                graphql_type = self.get_type_for_model(model)
                self.assertTrue(
                    issubclass(graphql_type, base_class),
                    f"{graphql_type} does not inherit from {base_class}",
                )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/tests/test_base_classes.py",
"license": "Apache License 2.0",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/ui/actions.py | from urllib.parse import urlencode
from django.apps import apps
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from utilities.permissions import get_permission_for_model
from utilities.views import get_viewname
__all__ = (
'AddObject',
'CopyContent',
'LinkAction',
'PanelAction',
)
class PanelAction:
    """
    A link (typically a button) within a panel to perform some associated action, such as adding an object.

    Attributes:
        template_name (str): The name of the template to render

    Parameters:
        label (str): The human-friendly button text
        permissions (list): An iterable of permissions required to display the action
        button_class (str): Bootstrap CSS class for the button
        button_icon (str): Name of the button's MDI icon
    """
    template_name = None

    def __init__(self, label, permissions=None, button_class='primary', button_icon=None):
        self.label = label
        self.permissions = permissions
        self.button_class = button_class
        self.button_icon = button_icon

    def get_context(self, context):
        """
        Return the template context used to render the action element.

        Parameters:
            context (dict): The template context
        """
        return dict(
            label=self.label,
            button_class=self.button_class,
            button_icon=self.button_icon,
        )

    def render(self, context):
        """
        Render the action as HTML, or return an empty string if the requesting
        user lacks the required permissions.

        Parameters:
            context (dict): The template context
        """
        requesting_user = context['request'].user
        if requesting_user.has_perms(self.permissions):
            return render_to_string(self.template_name, self.get_context(context))
        return ''
class LinkAction(PanelAction):
    """
    A hyperlink (typically a button) within a panel to perform some associated action, such as adding an object.

    Parameters:
        view_name (str): Name of the view to which the action will link
        view_kwargs (dict): Additional keyword arguments to pass to `reverse()` when resolving the URL
        url_params (dict): A dictionary of arbitrary URL parameters to append to the action's URL. If the value
            of a key is a callable, it will be passed the current template context.
    """
    template_name = 'ui/actions/link.html'

    def __init__(self, view_name, view_kwargs=None, url_params=None, **kwargs):
        super().__init__(**kwargs)
        self.view_name = view_name
        self.view_kwargs = view_kwargs or {}
        self.url_params = url_params or {}

    def get_url(self, context):
        """
        Resolve the action's URL from its view name & kwargs, appending any configured URL parameters.

        Parameters:
            context (dict): The template context
        """
        url = reverse(self.view_name, kwargs=self.view_kwargs)
        if not self.url_params:
            return url
        # Callable parameter values are invoked with the current template context
        params = {
            key: value(context) if callable(value) else value
            for key, value in self.url_params.items()
        }
        # Default the return URL to the current object's detail view, if one is available
        if 'return_url' not in params and 'object' in context:
            params['return_url'] = context['object'].get_absolute_url()
        return f'{url}?{urlencode(params)}'

    def get_context(self, context):
        context_data = super().get_context(context)
        context_data['url'] = self.get_url(context)
        return context_data
class AddObject(LinkAction):
    """
    An action to add a new object.

    Parameters:
        model (str): The dotted label of the model to be added (e.g. "dcim.site")
        url_params (dict): A dictionary of arbitrary URL parameters to append to the resolved URL
    """
    def __init__(self, model, url_params=None, **kwargs):
        # Resolve the model class from its "app_label.model" dotted label
        try:
            app_label, model_name = model.split('.')
            model = apps.get_model(app_label, model_name)
        except (ValueError, LookupError):
            raise ValueError(f"Invalid model label: {model}")
        # Apply sensible defaults for an "add" action; explicit kwargs take precedence
        defaults = {
            'label': _('Add'),
            'button_icon': 'plus-thick',
            'permissions': [get_permission_for_model(model, 'add')],
        }
        kwargs = {**defaults, **kwargs}
        super().__init__(view_name=get_viewname(model, 'add'), url_params=url_params, **kwargs)
class CopyContent(PanelAction):
    """
    An action to copy the contents of a panel to the clipboard.
    Parameters:
        target_id (str): The ID of the target element containing the content to be copied
    """
    template_name = 'ui/actions/copy_content.html'
    def __init__(self, target_id, **kwargs):
        # Provide default label & icon; callers may override via kwargs
        kwargs.setdefault('label', _('Copy'))
        kwargs.setdefault('button_icon', 'content-copy')
        super().__init__(**kwargs)
        self.target_id = target_id
    def render(self, context):
        """
        Render the copy button as HTML.
        """
        # NOTE(review): render() is overridden rather than delegating to PanelAction.render(),
        # so no permission check is performed for this action. This looks intentional
        # (copying visible content requires no permission, and self.permissions defaults to
        # None here) — confirm before unifying with the base implementation.
        return render_to_string(self.template_name, {
            'target_id': self.target_id,
            'label': self.label,
            'button_class': self.button_class,
            'button_icon': self.button_icon,
        })
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/ui/actions.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/ui/attrs.py | from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from netbox.config import get_config
from utilities.data import resolve_attr_path
__all__ = (
'AddressAttr',
'BooleanAttr',
'ChoiceAttr',
'ColorAttr',
'GPSCoordinatesAttr',
'GenericForeignKeyAttr',
'ImageAttr',
'NestedObjectAttr',
'NumericAttr',
'ObjectAttribute',
'RelatedObjectAttr',
'TemplatedAttr',
'TextAttr',
'TimezoneAttr',
'UtilizationAttr',
)
PLACEHOLDER_HTML = '<span class="text-muted">—</span>'
IMAGE_DECODING_CHOICES = ('auto', 'async', 'sync')
#
# Attributes
#
class ObjectAttribute:
    """
    Base class for representing an attribute of an object.
    Attributes:
        template_name (str): The name of the template to render
        placeholder (str): HTML to render for empty/null values
    Parameters:
        accessor (str): The dotted path to the attribute being rendered (e.g. "site.region.name")
        label (str): Human-friendly label for the rendered attribute
    """
    # Subclasses must define the template used to render the attribute value
    template_name = None
    # Default label; may be overridden per-instance via __init__()
    label = None
    placeholder = mark_safe(PLACEHOLDER_HTML)
    def __init__(self, accessor, label=None):
        self.accessor = accessor
        # Only override the class-level label when one is explicitly provided
        if label is not None:
            self.label = label
    def get_value(self, obj):
        """
        Return the value of the attribute.
        Parameters:
            obj (object): The object for which the attribute is being rendered
        """
        return resolve_attr_path(obj, self.accessor)
    def get_context(self, obj, context):
        """
        Return any additional template context used to render the attribute value.
        Parameters:
            obj (object): The object for which the attribute is being rendered
            context (dict): The root template context
        """
        return {}
    def render(self, obj, context):
        """
        Render the attribute as HTML, or return the placeholder for empty values.
        Parameters:
            obj (object): The object for which the attribute is being rendered
            context (dict): The root template context (must provide 'name')
        """
        value = self.get_value(obj)
        # If the value is empty, render a placeholder
        # (equality-based check; matches both None and the empty string)
        if value in (None, ''):
            return self.placeholder
        return render_to_string(self.template_name, {
            **self.get_context(obj, context),
            'name': context['name'],
            'value': value,
        })
class TextAttr(ObjectAttribute):
    """
    A text attribute.

    Parameters:
        style (str): CSS class to apply to the rendered attribute
        format_string (str): If specified, the value will be formatted using this string when rendering
        copy_button (bool): Set to True to include a copy-to-clipboard button
    """
    template_name = 'ui/attrs/text.html'

    def __init__(self, *args, style=None, format_string=None, copy_button=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.style = style
        self.format_string = format_string
        self.copy_button = copy_button

    def get_value(self, obj):
        raw = resolve_attr_path(obj, self.accessor)
        # Leave empty/null values untouched; otherwise apply the format string (if any)
        if self.format_string and raw is not None and raw != '':
            return self.format_string.format(raw)
        return raw

    def get_context(self, obj, context):
        return dict(
            style=self.style,
            copy_button=self.copy_button,
        )
class NumericAttr(ObjectAttribute):
    """
    An integer or float attribute.

    Parameters:
        unit_accessor (str): Accessor for the unit of measurement to display alongside the value (if any)
        copy_button (bool): Set to True to include a copy-to-clipboard button
    """
    template_name = 'ui/attrs/numeric.html'

    def __init__(self, *args, unit_accessor=None, copy_button=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.unit_accessor = unit_accessor
        self.copy_button = copy_button

    def get_context(self, obj, context):
        if self.unit_accessor:
            unit = resolve_attr_path(obj, self.unit_accessor)
        else:
            unit = None
        return dict(
            unit=unit,
            copy_button=self.copy_button,
        )
class ChoiceAttr(ObjectAttribute):
    """
    A selection from a set of choices.

    Calls get_FOO_display() on the object to retrieve the human-friendly choice label. If a
    get_FOO_color() method exists on the object, it is used to render a background color for
    the attribute value.
    """
    template_name = 'ui/attrs/choice.html'

    def get_value(self, obj):
        try:
            display = getattr(obj, f'get_{self.accessor}_display')
            return display()
        except AttributeError:
            # No display method defined; fall back to the raw attribute value
            return resolve_attr_path(obj, self.accessor)

    def get_context(self, obj, context):
        try:
            bg_color = getattr(obj, f'get_{self.accessor}_color')()
        except AttributeError:
            # No color method defined for this attribute
            bg_color = None
        return {'bg_color': bg_color}
class BooleanAttr(ObjectAttribute):
    """
    A boolean attribute.

    Parameters:
        display_false (bool): If False, a placeholder will be rendered instead of the "False" indication
    """
    template_name = 'ui/attrs/boolean.html'

    def __init__(self, *args, display_false=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.display_false = display_false

    def get_value(self, obj):
        value = super().get_value(obj)
        # Optionally suppress False values so that the placeholder is rendered instead
        # (strict identity checks are deliberate here)
        if self.display_false is False and value is False:
            return None
        return value
class ColorAttr(ObjectAttribute):
    """
    Displays an RGB color value.
    """
    label = _('Color')
    template_name = 'ui/attrs/color.html'
class ImageAttr(ObjectAttribute):
    """
    An attribute representing an image field on the model. Displays the uploaded image.

    Parameters:
        load_lazy (bool): If True, the image will be loaded lazily (default: True)
        decoding (str): Image decoding option ('async', 'sync', 'auto', None)
    """
    template_name = 'ui/attrs/image.html'

    def __init__(self, *args, load_lazy=True, decoding=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.load_lazy = load_lazy
        # Reject anything other than None or a recognized decoding option
        if decoding not in (None, *IMAGE_DECODING_CHOICES):
            raise ValueError(
                _('Invalid decoding option: {decoding}! Must be one of {image_decoding_choices}').format(
                    decoding=decoding, image_decoding_choices=', '.join(IMAGE_DECODING_CHOICES)
                )
            )
        # Default decoding: lazily-loaded images hint async decoding (performance-friendly);
        # non-lazy images omit the attribute, deferring to the browser default
        self.decoding = 'async' if decoding is None and load_lazy else decoding

    def get_context(self, obj, context):
        return dict(
            decoding=self.decoding,
            load_lazy=self.load_lazy,
        )
class RelatedObjectAttr(ObjectAttribute):
    """
    An attribute representing a related object.

    Parameters:
        linkify (bool): If True, the rendered value will be hyperlinked to the related object's detail view
        grouped_by (str): A second-order object to annotate alongside the related object; for example, an
            attribute representing the dcim.Site model might specify grouped_by="region"
    """
    template_name = 'ui/attrs/object.html'

    def __init__(self, *args, linkify=None, grouped_by=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.linkify = linkify
        self.grouped_by = grouped_by

    def get_context(self, obj, context):
        related = self.get_value(obj)
        if self.grouped_by:
            group = getattr(related, self.grouped_by, None)
        else:
            group = None
        return {
            'linkify': self.linkify,
            'group': group,
        }
class NestedObjectAttr(ObjectAttribute):
    """
    An attribute representing a related nested object. Similar to `RelatedObjectAttr`, but includes the
    ancestors of the related object in the rendered output.

    Parameters:
        linkify (bool): If True, the rendered value will be hyperlinked to the related object's detail view
        max_depth (int): Maximum number of ancestors to display (default: all)
    """
    template_name = 'ui/attrs/nested_object.html'

    def __init__(self, *args, linkify=None, max_depth=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.linkify = linkify
        self.max_depth = max_depth

    def get_context(self, obj, context):
        related = self.get_value(obj)
        ancestry = related.get_ancestors(include_self=True)
        if self.max_depth:
            # Keep only the closest max_depth nodes (including the object itself)
            ancestry = list(ancestry)[-self.max_depth:]
        return {
            'nodes': ancestry,
            'linkify': self.linkify,
        }
class GenericForeignKeyAttr(ObjectAttribute):
    """
    An attribute representing an object reached via a generic relation.

    Similar to `RelatedObjectAttr`, but the related object's ContentType is displayed
    alongside the value.

    Parameters:
        linkify (bool): If True, the rendered value will be hyperlinked to the related object's detail view
    """
    template_name = 'ui/attrs/generic_object.html'

    def __init__(self, *args, linkify=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.linkify = linkify

    def get_context(self, obj, context):
        related = self.get_value(obj)
        # Only reached for non-empty values (render() short-circuits empties), so
        # the related object is safe to introspect here
        return {
            'content_type': related._meta.verbose_name,
            'linkify': self.linkify,
        }
class AddressAttr(ObjectAttribute):
    """
    A physical or mailing address.

    Parameters:
        map_url (bool): If true, the address will render as a hyperlink using settings.MAPS_URL
    """
    template_name = 'ui/attrs/address.html'

    def __init__(self, *args, map_url=True, **kwargs):
        super().__init__(*args, **kwargs)
        # True selects the globally configured maps URL; any other truthy value is used
        # directly; falsy values disable the link
        if map_url is True:
            self.map_url = get_config().MAPS_URL
        else:
            self.map_url = map_url or None

    def get_context(self, obj, context):
        return {'map_url': self.map_url}
class GPSCoordinatesAttr(ObjectAttribute):
    """
    A GPS coordinates pair comprising latitude and longitude values.

    Parameters:
        latitude_attr (float): The name of the field containing the latitude value
        longitude_attr (float): The name of the field containing the longitude value
        map_url (bool): If true, the address will render as a hyperlink using settings.MAPS_URL
    """
    template_name = 'ui/attrs/gps_coordinates.html'
    label = _('GPS coordinates')

    def __init__(self, latitude_attr='latitude', longitude_attr='longitude', map_url=True, **kwargs):
        # No single accessor applies; the two coordinate attributes are resolved in render()
        super().__init__(accessor=None, **kwargs)
        self.latitude_attr = latitude_attr
        self.longitude_attr = longitude_attr
        # True selects the globally configured maps URL; any other truthy value is used
        # directly; falsy values disable the link
        self.map_url = get_config().MAPS_URL if map_url is True else (map_url or None)

    def render(self, obj, context=None):
        lat = resolve_attr_path(obj, self.latitude_attr)
        lng = resolve_attr_path(obj, self.longitude_attr)
        # Both coordinates must be present to render anything meaningful
        if lat is None or lng is None:
            return self.placeholder
        return render_to_string(self.template_name, {
            **(context or {}),
            'latitude': lat,
            'longitude': lng,
            'map_url': self.map_url,
        })
class TimezoneAttr(ObjectAttribute):
    """
    A timezone value. Includes the numeric offset from UTC.
    """
    # All rendering logic (including the UTC offset display) lives in the template
    template_name = 'ui/attrs/timezone.html'
class TemplatedAttr(ObjectAttribute):
    """
    Renders an attribute using a custom template.

    Parameters:
        template_name (str): The name of the template to render
        context (dict): Additional context to pass to the template when rendering
    """
    def __init__(self, *args, template_name, context=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.template_name = template_name
        self.context = context or {}

    def get_context(self, obj, context):
        # Expose the object itself alongside any user-supplied context
        return {**self.context, 'object': obj}
class UtilizationAttr(ObjectAttribute):
    """
    Renders the value of an attribute as a utilization graph.
    """
    # The graph itself is produced by the template
    template_name = 'ui/attrs/utilization.html'
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/ui/attrs.py",
"license": "Apache License 2.0",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/netbox/ui/layout.py | from netbox.ui.panels import Panel, PluginContentPanel
__all__ = (
'Column',
'Layout',
'Row',
'SimpleLayout',
)
#
# Base classes
#
class Layout:
    """
    A collection of rows and columns comprising the layout of content within the user interface.

    Parameters:
        *rows: One or more Row instances
    """
    def __init__(self, *rows):
        # Strict type check: subclasses of Row are deliberately not accepted
        for i, row in enumerate(rows):
            if type(row) is Row:
                continue
            raise TypeError(f"Row {i} must be a Row instance, not {type(row)}.")
        self.rows = rows
class Row:
    """
    A collection of columns arranged horizontally.

    Parameters:
        *columns: One or more Column instances
    """
    def __init__(self, *columns):
        # Strict type check: subclasses of Column are deliberately not accepted
        for i, column in enumerate(columns):
            if type(column) is Column:
                continue
            raise TypeError(f"Column {i} must be a Column instance, not {type(column)}.")
        self.columns = columns
class Column:
    """
    A collection of panels arranged vertically.

    Parameters:
        *panels: One or more Panel instances
    """
    def __init__(self, *panels):
        # isinstance() check: Panel subclasses are permitted here (unlike Row/Column checks)
        for i, panel in enumerate(panels):
            if isinstance(panel, Panel):
                continue
            raise TypeError(f"Panel {i} must be an instance of a Panel, not {type(panel)}.")
        self.panels = panels
#
# Common layouts
#
class SimpleLayout(Layout):
    """
    A layout with one row of two columns and a second row with one column.

    Plugin content registered for `left_page`, `right_page`, or `full_width_page` is included automatically. Most
    object views in NetBox utilize this layout.
    ```
    +-------+-------+
    | Col 1 | Col 2 |
    +-------+-------+
    |     Col 3     |
    +---------------+
    ```
    Parameters:
        left_panels: Panel instances to be rendered in the top lefthand column
        right_panels: Panel instances to be rendered in the top righthand column
        bottom_panels: Panel instances to be rendered in the bottom row
    """
    def __init__(self, left_panels=None, right_panels=None, bottom_panels=None):
        # Normalize None to empty lists so they can be safely unpacked below
        left_panels = left_panels or []
        right_panels = right_panels or []
        bottom_panels = bottom_panels or []
        # Each column automatically appends the corresponding plugin content embed
        rows = [
            Row(
                Column(*left_panels, PluginContentPanel('left_page')),
                Column(*right_panels, PluginContentPanel('right_page')),
            ),
            Row(
                Column(*bottom_panels, PluginContentPanel('full_width_page'))
            )
        ]
        super().__init__(*rows)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/ui/layout.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
netbox-community/netbox:netbox/netbox/ui/panels.py | from django.apps import apps
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _
from netbox.ui import attrs
from netbox.ui.actions import CopyContent
from utilities.data import resolve_attr_path
from utilities.querydict import dict_to_querydict
from utilities.string import title
from utilities.templatetags.plugins import _get_registered_content
from utilities.views import get_viewname
__all__ = (
'CommentsPanel',
'ContextTablePanel',
'JSONPanel',
'NestedGroupObjectPanel',
'ObjectAttributesPanel',
'ObjectPanel',
'ObjectsTablePanel',
'OrganizationalObjectPanel',
'Panel',
'PluginContentPanel',
'RelatedObjectsPanel',
'TemplatePanel',
)
#
# Base classes
#
class Panel:
    """
    A block of content rendered within an HTML template.

    Panels are arranged within rows and columns, (generally) render as discrete "cards" within the user interface.
    Each panel has a title and may have one or more actions associated with it, which will be rendered as hyperlinks
    in the top right corner of the card.

    Attributes:
        template_name (str): The name of the template used to render the panel

    Parameters:
        title (str): The human-friendly title of the panel
        actions (list): An iterable of PanelActions to include in the panel header
        template_name (str): Overrides the default template name, if defined
    """
    template_name = None
    title = None
    actions = None

    def __init__(self, title=None, actions=None, template_name=None):
        if title is not None:
            self.title = title
        # Copy into a fresh list so that in-place mutation by subclasses (e.g.
        # JSONPanel appending a CopyContent action in __init__) cannot alter the
        # caller's list or a class-level `actions` attribute shared by all instances.
        self.actions = list(actions or self.actions or [])
        if template_name is not None:
            self.template_name = template_name

    def get_context(self, context):
        """
        Return the context data to be used when rendering the panel.

        Parameters:
            context (dict): The template context
        """
        return {
            'request': context.get('request'),
            'object': context.get('object'),
            'title': self.title,
            'actions': self.actions,
            'panel_class': self.__class__.__name__,
        }

    def render(self, context):
        """
        Render the panel as HTML.

        Parameters:
            context (dict): The template context
        """
        return render_to_string(self.template_name, self.get_context(context))
#
# Object-specific panels
#
class ObjectPanel(Panel):
    """
    Base class for object-specific panels.

    Parameters:
        accessor (str): The dotted path in context data to the object being rendered (default: "object")
    """
    # Dotted path within the template context at which the target object lives
    accessor = 'object'

    def __init__(self, accessor=None, **kwargs):
        super().__init__(**kwargs)
        # Override the class-level default only when explicitly provided
        if accessor is not None:
            self.accessor = accessor

    def get_context(self, context):
        # Resolve the target object from the context using the configured path
        obj = resolve_attr_path(context, self.accessor)
        ctx = super().get_context(context)
        # Fall back to a title derived from the model's verbose name
        ctx['title'] = self.title or title(obj._meta.verbose_name)
        ctx['object'] = obj
        return ctx
class ObjectAttributesPanelMeta(type):
    """
    Metaclass which collects ObjectAttribute declarations from a class body (and from
    any bases exposing `_attrs`) into an ordered `_attrs` mapping, then removes the
    individual declarations from the class namespace.
    """
    def __new__(mcls, name, bases, namespace, **kwargs):
        declared = {}
        # Walk MRO parents (excluding `object`) for declared attributes.
        # reversed() ensures earlier bases take precedence via the `not in` guard.
        for base in reversed([b for b in bases if hasattr(b, "_attrs")]):
            for key, attr in getattr(base, '_attrs', {}).items():
                if key not in declared:
                    declared[key] = attr
        # Add local declarations in the order they appear in the class body;
        # a local declaration overrides an inherited one with the same name.
        for key, attr in namespace.items():
            if isinstance(attr, attrs.ObjectAttribute):
                declared[key] = attr
        namespace['_attrs'] = declared
        # Remove Attrs from the class namespace to keep things tidy
        # (collect keys first; the namespace cannot be mutated while iterating)
        local_items = [key for key, attr in namespace.items() if isinstance(attr, attrs.ObjectAttribute)]
        for key in local_items:
            namespace.pop(key)
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        return cls
class ObjectAttributesPanel(ObjectPanel, metaclass=ObjectAttributesPanelMeta):
    """
    A panel which displays selected attributes of an object.

    Attributes are added to the panel by declaring ObjectAttribute instances in the class body (similar to fields on
    a Django form). Attributes are displayed in the order they are declared.

    Note that the `only` and `exclude` parameters are mutually exclusive.

    Parameters:
        only (list): If specified, only attributes in this list will be displayed
        exclude (list): If specified, attributes in this list will be excluded from display
    """
    template_name = 'ui/panels/object_attributes.html'

    def __init__(self, only=None, exclude=None, **kwargs):
        super().__init__(**kwargs)
        # Set included/excluded attributes
        if only is not None and exclude is not None:
            raise ValueError("only and exclude cannot both be specified.")
        self.only = only or []
        self.exclude = exclude or []

    @staticmethod
    def _name_to_label(name):
        """
        Format an attribute's name to be presented as a human-friendly label.
        """
        # Capitalize only the first character (str.title() would lowercase the rest)
        label = name[:1].upper() + name[1:]
        label = label.replace('_', ' ')
        return _(label)

    def get_context(self, context):
        # Determine which attributes to display in the panel based on only/exclude args
        attr_names = set(self._attrs.keys())
        if self.only:
            attr_names &= set(self.only)
        elif self.exclude:
            attr_names -= set(self.exclude)
        ctx = super().get_context(context)
        return {
            **ctx,
            # Iterating _attrs (not attr_names) preserves declaration order
            'attrs': [
                {
                    'label': attr.label or self._name_to_label(name),
                    'value': attr.render(ctx['object'], {'name': name}),
                } for name, attr in self._attrs.items() if name in attr_names
            ],
        }
class OrganizationalObjectPanel(ObjectAttributesPanel, metaclass=ObjectAttributesPanelMeta):
    """
    An ObjectPanel with attributes common to OrganizationalModels. Includes `name` and `description` attributes.
    """
    # These declarations are harvested into `_attrs` by ObjectAttributesPanelMeta
    name = attrs.TextAttr('name', label=_('Name'))
    description = attrs.TextAttr('description', label=_('Description'))
class NestedGroupObjectPanel(ObjectAttributesPanel, metaclass=ObjectAttributesPanelMeta):
    """
    An ObjectPanel with attributes common to NestedGroupObjects. Includes the `parent` attribute.
    """
    # Declared first so the parent link renders above the name/description rows
    parent = attrs.NestedObjectAttr('parent', label=_('Parent'), linkify=True)
    name = attrs.TextAttr('name', label=_('Name'))
    description = attrs.TextAttr('description', label=_('Description'))
class CommentsPanel(ObjectPanel):
    """
    A panel which displays comments associated with an object.

    Parameters:
        field_name (str): The name of the comment field on the object (default: "comments")
    """
    template_name = 'ui/panels/comments.html'
    title = _('Comments')

    def __init__(self, field_name='comments', **kwargs):
        super().__init__(**kwargs)
        self.field_name = field_name

    def get_context(self, context):
        return {
            **super().get_context(context),
            # Fetch the raw comments value from the configured field on the object
            'comments': getattr(context['object'], self.field_name),
        }
class JSONPanel(ObjectPanel):
    """
    A panel which renders formatted JSON data from an object's JSONField.

    Parameters:
        field_name (str): The name of the JSON field on the object
        copy_button (bool): Set to True (default) to include a copy-to-clipboard button
    """
    template_name = 'ui/panels/json.html'

    def __init__(self, field_name, copy_button=True, **kwargs):
        super().__init__(**kwargs)
        self.field_name = field_name
        if copy_button:
            # NOTE(review): appends to self.actions in place; assumes Panel.__init__
            # assigned an instance-local list — confirm no class-level list is shared
            self.actions.append(CopyContent(f'panel_{field_name}'))

    def get_context(self, context):
        return {
            **super().get_context(context),
            'data': getattr(context['object'], self.field_name),
            'field_name': self.field_name,
        }
#
# Miscellaneous panels
#
class RelatedObjectsPanel(Panel):
    """
    A panel which displays the types and counts of related objects.
    """
    template_name = 'ui/panels/related_objects.html'
    title = _('Related Objects')

    def get_context(self, context):
        return {
            **super().get_context(context),
            # Supplied by the view; None when absent from the context
            'related_models': context.get('related_models'),
        }
class ObjectsTablePanel(Panel):
    """
    A panel which displays a table of objects (rendered via HTMX).

    Parameters:
        model (str): The dotted label of the model to be added (e.g. "dcim.site")
        filters (dict): A dictionary of arbitrary URL parameters to append to the table's URL. If the value of a key
            is a callable, it will be passed the current template context.
    """
    template_name = 'ui/panels/objects_table.html'
    title = None

    def __init__(self, model, filters=None, **kwargs):
        super().__init__(**kwargs)
        # Resolve the model class from its app.name label
        try:
            app_label, model_name = model.split('.')
            self.model = apps.get_model(app_label, model_name)
        except (ValueError, LookupError):
            raise ValueError(f"Invalid model label: {model}")
        self.filters = filters or {}
        # If no title is specified, derive one from the model name
        if self.title is None:
            self.title = title(self.model._meta.verbose_name_plural)

    def get_context(self, context):
        # Evaluate callable filter values against the current template context
        url_params = {
            k: v(context) if callable(v) else v for k, v in self.filters.items()
        }
        # Default the return URL to the current object's page, if available
        if 'return_url' not in url_params and 'object' in context:
            url_params['return_url'] = context['object'].get_absolute_url()
        return {
            **super().get_context(context),
            'viewname': get_viewname(self.model, 'list'),
            'url_params': dict_to_querydict(url_params),
        }
class TemplatePanel(Panel):
    """
    A panel which renders custom content using an HTML template.

    Parameters:
        template_name (str): The name of the template to render
    """
    def __init__(self, template_name):
        # Intentionally accepts only template_name (no title/actions overrides)
        super().__init__(template_name=template_name)

    def render(self, context):
        # Pass the entire context to the template
        return render_to_string(self.template_name, context.flatten())
class PluginContentPanel(Panel):
    """
    A panel which displays embedded plugin content.

    Parameters:
        method (str): The name of the plugin method to render (e.g. "left_page")
    """
    def __init__(self, method, **kwargs):
        super().__init__(**kwargs)
        self.method = method

    def render(self, context):
        # Bypasses the template machinery entirely; delegates to registered plugin content
        obj = context.get('object')
        return _get_registered_content(obj, self.method, context)
class ContextTablePanel(ObjectPanel):
    """
    A panel which renders a django-tables2/NetBoxTable instance provided via the
    view's extra context.

    This is useful when you already have a fully constructed table (custom queryset,
    special columns, no list view) and just want to render it inside a declarative
    layout panel.

    Parameters:
        table (str | callable): Either the context key holding the table (e.g.
            "vlan_table") or a callable which accepts the template context and
            returns a table instance.
    """
    template_name = 'ui/panels/context_table.html'

    def __init__(self, table, **kwargs):
        super().__init__(**kwargs)
        self.table = table

    def _resolve_table(self, context):
        # Callables receive the context; strings are treated as context keys
        return self.table(context) if callable(self.table) else context.get(self.table)

    def get_context(self, context):
        return {
            **super().get_context(context),
            'table': self._resolve_table(context),
        }

    def render(self, context):
        # Render nothing at all when no table could be resolved from the context
        if self._resolve_table(context) is None:
            return ''
        return super().render(context)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/ui/panels.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/users/api/serializers_/mixins.py | from rest_framework import serializers
from users.api.serializers_.owners import OwnerSerializer
__all__ = (
'OwnerMixin',
)
class OwnerMixin(serializers.Serializer):
    """
    Adds an `owner` field for models which have a ForeignKey to users.Owner.
    """
    # Nested representation; optional and nullable so ownership can be unset
    owner = OwnerSerializer(
        nested=True,
        required=False,
        allow_null=True,
    )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/api/serializers_/mixins.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/api/serializers_/owners.py | from netbox.api.fields import RelatedObjectCountField, SerializedPKRelatedField
from netbox.api.serializers import ValidatedModelSerializer
from users.models import Group, Owner, OwnerGroup, User
from .users import GroupSerializer, UserSerializer
__all__ = (
'OwnerGroupSerializer',
'OwnerSerializer',
)
class OwnerGroupSerializer(ValidatedModelSerializer):
    """
    REST API serializer for OwnerGroup objects.
    """
    # Related object counts
    member_count = RelatedObjectCountField('members')

    class Meta:
        model = OwnerGroup
        fields = ('id', 'url', 'display_url', 'display', 'name', 'description', 'member_count')
        brief_fields = ('id', 'url', 'display', 'name', 'description')
class OwnerSerializer(ValidatedModelSerializer):
    """
    REST API serializer for Owner objects, including nested group and membership fields.
    """
    group = OwnerGroupSerializer(
        nested=True,
        allow_null=True,
    )
    # Membership may be defined via user groups and/or individual users
    user_groups = SerializedPKRelatedField(
        queryset=Group.objects.all(),
        serializer=GroupSerializer,
        nested=True,
        required=False,
        many=True
    )
    users = SerializedPKRelatedField(
        queryset=User.objects.all(),
        serializer=UserSerializer,
        nested=True,
        required=False,
        many=True
    )

    class Meta:
        model = Owner
        fields = ('id', 'url', 'display_url', 'display', 'name', 'group', 'description', 'user_groups', 'users')
        brief_fields = ('id', 'url', 'display', 'name', 'description')
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/api/serializers_/owners.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/choices.py | from django.utils.translation import gettext_lazy as _
from utilities.choices import ChoiceSet
__all__ = (
'TokenVersionChoices',
)
class TokenVersionChoices(ChoiceSet):
    """
    Recognized versions of the API token format.
    """
    V1 = 1
    V2 = 2

    CHOICES = [
        (V1, _('v1')),
        (V2, _('v2')),
    ]
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/choices.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/filterset_mixins.py | import django_filters
from django.utils.translation import gettext as _
from users.models import Owner, OwnerGroup
__all__ = (
'OwnerFilterMixin',
)
class OwnerFilterMixin(django_filters.FilterSet):
    """
    Adds owner & owner_id filters for models which inherit from OwnerMixin.
    """
    # Filter by owner group, by PK or by name
    owner_group_id = django_filters.ModelMultipleChoiceFilter(
        queryset=OwnerGroup.objects.all(),
        distinct=False,
        field_name='owner__group',
        label=_('Owner Group (ID)'),
    )
    owner_group = django_filters.ModelMultipleChoiceFilter(
        queryset=OwnerGroup.objects.all(),
        distinct=False,
        field_name='owner__group__name',
        to_field_name='name',
        label=_('Owner Group (name)'),
    )
    # Filter by owner, by PK or by name
    owner_id = django_filters.ModelMultipleChoiceFilter(
        queryset=Owner.objects.all(),
        distinct=False,
        label=_('Owner (ID)'),
    )
    owner = django_filters.ModelMultipleChoiceFilter(
        field_name='owner__name',
        queryset=Owner.objects.all(),
        distinct=False,
        to_field_name='name',
        label=_('Owner (name)'),
    )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/filterset_mixins.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/graphql/mixins.py | from typing import TYPE_CHECKING, Annotated
import strawberry
if TYPE_CHECKING:
from users.graphql.types import OwnerType
__all__ = (
'OwnerMixin',
)
@strawberry.type
class OwnerMixin:
    # Optional reference to the owning entity; the lazy annotation defers
    # resolution of OwnerType to avoid a circular import
    owner: Annotated['OwnerType', strawberry.lazy('users.graphql.types')] | None
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/graphql/mixins.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/models/owners.py | from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from netbox.models import AdminModel
from utilities.querysets import RestrictedQuerySet
__all__ = (
'Owner',
'OwnerGroup',
)
class OwnerGroup(AdminModel):
    """
    An arbitrary grouping of Owners.
    """
    name = models.CharField(
        verbose_name=_('name'),
        max_length=100,
        unique=True,
    )

    class Meta:
        ordering = ['name']
        verbose_name = _('owner group')
        verbose_name_plural = _('owner groups')

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('users:ownergroup', args=[self.pk])
class Owner(AdminModel):
    """
    A uniquely named entity whose membership is defined by assigned user groups
    and/or individual users, and which may optionally belong to an OwnerGroup.
    """
    name = models.CharField(
        verbose_name=_('name'),
        max_length=100,
        unique=True,
    )
    # PROTECT prevents deleting a group which still has member Owners
    group = models.ForeignKey(
        to='users.OwnerGroup',
        on_delete=models.PROTECT,
        related_name='members',
        verbose_name=_('group'),
        blank=True,
        null=True,
    )
    user_groups = models.ManyToManyField(
        to='users.Group',
        verbose_name=_('groups'),
        blank=True,
        related_name='owners',
        related_query_name='owner',
    )
    users = models.ManyToManyField(
        to='users.User',
        verbose_name=_('users'),
        blank=True,
        related_name='owners',
        related_query_name='owner',
    )

    objects = RestrictedQuerySet.as_manager()

    # M2M assignments are carried over when cloning an Owner
    clone_fields = ('user_groups', 'users')

    class Meta:
        ordering = ('name',)
        verbose_name = _('owner')
        verbose_name_plural = _('owners')

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('users:owner', args=[self.pk])
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/models/owners.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/users/ui/panels.py | from django.utils.translation import gettext_lazy as _
from netbox.ui import actions, attrs, panels
class TokenPanel(panels.ObjectAttributesPanel):
    """
    Displays the attributes of an API token.
    """
    version = attrs.NumericAttr('version')
    key = attrs.TextAttr('key')
    # Only the partial token value is exposed for display
    token = attrs.TextAttr('partial')
    pepper_id = attrs.NumericAttr('pepper_id')
    user = attrs.RelatedObjectAttr('user', linkify=True)
    description = attrs.TextAttr('description')
    enabled = attrs.BooleanAttr('enabled')
    write_enabled = attrs.BooleanAttr('write_enabled')
    expires = attrs.TextAttr('expires')
    last_used = attrs.TextAttr('last_used')
    allowed_ips = attrs.TextAttr('allowed_ips')
class TokenExamplePanel(panels.Panel):
    """
    Renders example usage of an API token via a static template, with a
    copy-to-clipboard action for the example content.
    """
    template_name = 'users/panels/token_example.html'
    title = _('Example Usage')
    # 'token-example' is the DOM id targeted by the copy action
    actions = [
        actions.CopyContent('token-example')
    ]
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/users/ui/panels.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/filtersets.py | from netbox.registry import registry
__all__ = (
'register_filterset',
)
def register_filterset(filterset_class):
    """
    Decorator for registering a FilterSet with the application registry.
    Uses model identifier as key to match search index pattern.
    """
    # Derive the "app_label.model_name" registry key from the FilterSet's model
    opts = filterset_class._meta.model._meta
    registry['filtersets'][f'{opts.app_label}.{opts.model_name}'] = filterset_class
    return filterset_class
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/filtersets.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/forms/widgets/modifiers.py | from django import forms
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from utilities.forms.widgets.apiselect import APISelect, APISelectMultiple
__all__ = (
'MODIFIER_EMPTY_FALSE',
'MODIFIER_EMPTY_TRUE',
'FilterModifierWidget',
)
# Modifier codes for empty/null checking
# These map to Django's 'empty' lookup: field__empty=true/false
MODIFIER_EMPTY_TRUE = 'empty_true'
MODIFIER_EMPTY_FALSE = 'empty_false'
class FilterModifierWidget(forms.Widget):
    """
    Wraps an existing widget to add a modifier dropdown for filter lookups.

    The original widget's semantics (name, id, attributes) are preserved.
    The modifier dropdown controls which lookup type is used (exact, contains, etc.).
    """
    template_name = 'widgets/filter_modifier.html'

    def __init__(self, widget, lookups, attrs=None):
        """
        Args:
            widget: The widget being wrapped (e.g., TextInput, NumberInput)
            lookups: List of (lookup_code, label) tuples (e.g., [('exact', 'Is'), ('ic', 'Contains')])
            attrs: Additional widget attributes
        """
        self.original_widget = widget
        self.lookups = lookups
        # Fall back to the wrapped widget's attrs when none are supplied
        super().__init__(attrs or getattr(widget, 'attrs', {}))

    def value_from_datadict(self, data, files, name):
        """
        Extract value from data, checking all possible lookup variants.

        When form redisplays after validation error, the data may contain
        serial__ic=test but the field is named serial. This method searches
        all lookup variants to find the value.

        Returns:
            Just the value string for form validation. The modifier is reconstructed
            during rendering from the query parameter names.
        """
        # Special handling for empty - check if field__empty exists
        empty_param = f"{name}__empty"
        if empty_param in data:
            # Return the boolean value for empty lookup
            return data.get(empty_param)
        # Try exact field name first
        value = self.original_widget.value_from_datadict(data, files, name)
        # If not found, check all modifier variants
        # Note: SelectMultiple returns [] (empty list) when not found, not None
        if value is None or (isinstance(value, list) and len(value) == 0):
            for lookup, _ in self.lookups:
                if lookup == 'exact':
                    continue  # Already checked above
                # Skip empty_true/false variants - they're handled above
                if lookup in (MODIFIER_EMPTY_TRUE, MODIFIER_EMPTY_FALSE):
                    continue
                lookup_name = f"{name}__{lookup}"
                test_value = self.original_widget.value_from_datadict(data, files, lookup_name)
                if test_value is not None:
                    # First matching variant wins
                    value = test_value
                    break
        # Return None if no value found (prevents field appearing in changed_data)
        # Handle all widget empty value representations
        if value is None:
            return None
        if isinstance(value, str) and not value.strip():
            return None
        if isinstance(value, (list, tuple)) and len(value) == 0:
            return None
        # Return just the value for form validation
        return value

    def get_context(self, name, value, attrs):
        """
        Build context for template rendering.

        Includes both the original widget's context and our modifier-specific data.

        Note: value is now just a simple value (string/int/etc), not a dict.
        The JavaScript initializeFromURL() will set the correct modifier dropdown
        value based on URL parameters.
        """
        # Propagate any attrs set on the wrapper (like data-url from get_bound_field)
        # to the original widget before rendering
        self.original_widget.attrs.update(self.attrs)
        # For APISelect/APISelectMultiple widgets, temporarily clear choices to prevent queryset evaluation
        original_choices = None
        if isinstance(self.original_widget, (APISelect, APISelectMultiple)):
            original_choices = self.original_widget.choices
            # Only keep selected choices to preserve the current selection in HTML
            if value:
                values = value if isinstance(value, (list, tuple)) else [value]
                if hasattr(original_choices, 'queryset'):
                    # Extract valid PKs (exclude special null choice string)
                    pk_values = [v for v in values if v != settings.FILTERS_NULL_CHOICE_VALUE]
                    # Build a minimal choice list with just the selected values
                    choices = []
                    if pk_values:
                        selected_objects = original_choices.queryset.filter(pk__in=pk_values)
                        choices = [(obj.pk, str(obj)) for obj in selected_objects]
                    # Re-add the "None" option if it was selected via the null choice value
                    if settings.FILTERS_NULL_CHOICE_VALUE in values:
                        choices.append((settings.FILTERS_NULL_CHOICE_VALUE, settings.FILTERS_NULL_CHOICE_LABEL))
                    self.original_widget.choices = choices
                else:
                    self.original_widget.choices = [choice for choice in original_choices if choice[0] in values]
            else:
                # No selection - render empty select element
                self.original_widget.choices = []
        # Get context from the original widget
        original_context = self.original_widget.get_context(name, value, attrs)
        # Restore original choices if we modified them
        if original_choices is not None:
            self.original_widget.choices = original_choices
        # Build our wrapper context
        context = super().get_context(name, value, attrs)
        context['widget']['original_widget'] = original_context['widget']
        context['widget']['lookups'] = self.lookups
        context['widget']['field_name'] = name
        # Default to 'exact' - JavaScript will update based on URL params
        context['widget']['current_modifier'] = 'exact'
        context['widget']['current_value'] = value or ''
        # Translatable placeholder for empty lookups
        context['widget']['empty_placeholder'] = _('(automatically set)')
        return context
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/forms/widgets/modifiers.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/utilities/security.py | from django.core.exceptions import ImproperlyConfigured
__all__ = (
'validate_peppers',
)
def validate_peppers(peppers):
    """
    Validate the given dictionary of cryptographic peppers for type & sufficient length.

    Raises:
        ImproperlyConfigured: If the value is not a dict, a key is not an int in
            [0, 32767], or a pepper is not a string of at least 50 characters.
    """
    if type(peppers) is not dict:
        raise ImproperlyConfigured("API_TOKEN_PEPPERS must be a dictionary.")
    for key, pepper in peppers.items():
        # Keys must be plain integers (bools are rejected by the exact-type check)
        if type(key) is not int:
            raise ImproperlyConfigured(f"Invalid API_TOKEN_PEPPERS key: {key}. All keys must be integers.")
        # Keys must fit within a 16-bit signed integer range
        if key < 0 or key > 32767:
            raise ImproperlyConfigured(
                f"Invalid API_TOKEN_PEPPERS key: {key}. Key values must be between 0 and 32767, inclusive."
            )
        if type(pepper) is not str:
            raise ImproperlyConfigured(f"Invalid pepper {key}: Pepper value must be a string.")
        if len(pepper) < 50:
            raise ImproperlyConfigured(f"Invalid pepper {key}: Pepper must be at least 50 characters in length.")
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/security.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/tests/test_filter_modifiers.py | from django import forms
from django.conf import settings
from django.db import models
from django.http import QueryDict
from django.template import Context
from django.test import RequestFactory, TestCase
import dcim.filtersets # noqa: F401 - Import to register Device filterset
from dcim.forms.filtersets import DeviceFilterForm
from dcim.models import Device
from netbox.filtersets import BaseFilterSet
from tenancy.models import Tenant
from users.models import User
from utilities.filtersets import register_filterset
from utilities.forms.fields import TagFilterField
from utilities.forms.mixins import FilterModifierMixin
from utilities.forms.widgets import FilterModifierWidget
from utilities.templatetags.helpers import applied_filters
# Test model for FilterModifierMixin tests
class TestModel(models.Model):
    """Dummy model for testing filter modifiers."""
    char_field = models.CharField(max_length=100, blank=True)
    integer_field = models.IntegerField(null=True, blank=True)
    decimal_field = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
    date_field = models.DateField(null=True, blank=True)
    boolean_field = models.BooleanField(default=False)

    class Meta:
        app_label = 'utilities'
        managed = False  # Don't create actual database table
# Test filterset using BaseFilterSet to automatically generate lookups
@register_filterset
class TestFilterSet(BaseFilterSet):
    """FilterSet for TestModel; registration makes it discoverable via the registry."""
    class Meta:
        model = TestModel
        fields = ['char_field', 'integer_field', 'decimal_field', 'date_field', 'boolean_field']
class FilterModifierWidgetTest(TestCase):
"""Tests for FilterModifierWidget value extraction and rendering."""
def test_value_from_datadict_finds_value_in_lookup_variant(self):
"""
Widget should find value from serial__ic when field is named serial.
This is critical for form redisplay after validation errors.
"""
widget = FilterModifierWidget(
widget=forms.TextInput(),
lookups=[('exact', 'Is'), ('ic', 'Contains'), ('isw', 'Starts With')]
)
data = QueryDict('serial__ic=test123')
result = widget.value_from_datadict(data, {}, 'serial')
self.assertEqual(result, 'test123')
def test_value_from_datadict_handles_exact_match(self):
"""Widget should detect exact match when field name has no modifier."""
widget = FilterModifierWidget(
widget=forms.TextInput(),
lookups=[('exact', 'Is'), ('ic', 'Contains')]
)
data = QueryDict('serial=test456')
result = widget.value_from_datadict(data, {}, 'serial')
self.assertEqual(result, 'test456')
def test_value_from_datadict_returns_none_when_no_value(self):
"""Widget should return None when no data present to avoid appearing in changed_data."""
widget = FilterModifierWidget(
widget=forms.TextInput(),
lookups=[('exact', 'Is'), ('ic', 'Contains')]
)
data = QueryDict('')
result = widget.value_from_datadict(data, {}, 'serial')
self.assertIsNone(result)
def test_get_context_includes_original_widget_and_lookups(self):
"""Widget context should include original widget context and lookup choices."""
widget = FilterModifierWidget(
widget=forms.TextInput(),
lookups=[('exact', 'Is'), ('ic', 'Contains'), ('isw', 'Starts With')]
)
value = 'test'
context = widget.get_context('serial', value, {})
self.assertIn('original_widget', context['widget'])
self.assertEqual(
context['widget']['lookups'],
[('exact', 'Is'), ('ic', 'Contains'), ('isw', 'Starts With')]
)
self.assertEqual(context['widget']['field_name'], 'serial')
self.assertEqual(context['widget']['current_modifier'], 'exact') # Defaults to exact, JS updates from URL
self.assertEqual(context['widget']['current_value'], 'test')
def test_get_context_handles_null_selection(self):
"""Widget should preserve the 'null' choice when rendering."""
null_value = settings.FILTERS_NULL_CHOICE_VALUE
null_label = settings.FILTERS_NULL_CHOICE_LABEL
# Simulate a query for objects with no tenant assigned (?tenant_id=null)
query_params = QueryDict(f'tenant_id={null_value}')
form = DeviceFilterForm(query_params)
# Rendering the field triggers FilterModifierWidget.get_context()
try:
html = form['tenant_id'].as_widget()
except ValueError as e:
# ValueError: Field 'id' expected a number but got 'null'
self.fail(f"FilterModifierWidget raised ValueError on 'null' selection: {e}")
# Verify the "None" option is rendered so user selection is preserved in the UI
self.assertIn(f'value="{null_value}"', html)
self.assertIn(null_label, html)
def test_get_context_handles_mixed_selection(self):
"""Widget should preserve both real objects and the 'null' choice together."""
null_value = settings.FILTERS_NULL_CHOICE_VALUE
# Create a tenant to simulate a real object
tenant = Tenant.objects.create(name='Tenant A', slug='tenant-a')
# Simulate a selection containing both a real PK and the null sentinel
query_params = QueryDict('', mutable=True)
query_params.setlist('tenant_id', [str(tenant.pk), null_value])
form = DeviceFilterForm(query_params)
# Rendering the field triggers FilterModifierWidget.get_context()
try:
html = form['tenant_id'].as_widget()
except ValueError as e:
# ValueError: Field 'id' expected a number but got 'null'
self.fail(f"FilterModifierWidget raised ValueError on 'null' selection: {e}")
# Verify both the real object and the null option are present in the output
self.assertIn(f'value="{tenant.pk}"', html)
self.assertIn(f'value="{null_value}"', html)
def test_widget_renders_modifier_dropdown_and_input(self):
    """The rendered widget must contain both the modifier dropdown and the wrapped input."""
    wrapped = FilterModifierWidget(
        widget=forms.TextInput(),
        lookups=[('exact', 'Is'), ('ic', 'Contains')]
    )
    output = wrapped.render('serial', 'test', {})
    expected_fragments = (
        # Modifier dropdown markup
        'class="form-select modifier-select"',
        'data-field="serial"',
        '<option value="exact" selected>Is</option>',
        '<option value="ic">Contains</option>',
        # The wrapped text input itself
        'type="text"',
        'name="serial"',
        'value="test"',
    )
    for fragment in expected_fragments:
        self.assertIn(fragment, output)
class FilterModifierMixinTest(TestCase):
    """Tests for FilterModifierMixin form field enhancement."""

    @staticmethod
    def _lookup_codes(form, field_name):
        # Convenience: extract just the lookup codes exposed by a field's enhanced widget.
        return [code for code, _label in form.fields[field_name].widget.lookups]

    def test_mixin_enhances_char_field_with_modifiers(self):
        """CharField should be enhanced with contains/starts/ends modifiers."""
        class _Form(FilterModifierMixin, forms.Form):
            char_field = forms.CharField(required=False)
            model = TestModel

        form = _Form()
        self.assertIsInstance(form.fields['char_field'].widget, FilterModifierWidget)
        self.assertEqual(
            self._lookup_codes(form, 'char_field'),
            ['exact', 'n', 'ic', 'isw', 'iew', 'ie', 'regex', 'iregex', 'empty_true', 'empty_false'],
        )

    def test_mixin_skips_boolean_fields(self):
        """Boolean fields should not be enhanced."""
        class _Form(FilterModifierMixin, forms.Form):
            boolean_field = forms.BooleanField(required=False)
            model = TestModel

        form = _Form()
        self.assertNotIsInstance(form.fields['boolean_field'].widget, FilterModifierWidget)

    def test_mixin_enhances_tag_filter_field(self):
        """TagFilterField should be enhanced even though it's a MultipleChoiceField."""
        class _Form(FilterModifierMixin, forms.Form):
            tag = TagFilterField(Device)
            model = Device

        form = _Form()
        self.assertIsInstance(form.fields['tag'].widget, FilterModifierWidget)
        # Device filterset has tag and tag__n but not tag__empty
        self.assertEqual(self._lookup_codes(form, 'tag'), ['exact', 'n'])

    def test_mixin_enhances_integer_field(self):
        """IntegerField should be enhanced with comparison modifiers."""
        class _Form(FilterModifierMixin, forms.Form):
            integer_field = forms.IntegerField(required=False)
            model = TestModel

        form = _Form()
        self.assertIsInstance(form.fields['integer_field'].widget, FilterModifierWidget)
        self.assertEqual(
            self._lookup_codes(form, 'integer_field'),
            ['exact', 'n', 'gt', 'gte', 'lt', 'lte', 'empty_true', 'empty_false'],
        )

    def test_mixin_enhances_decimal_field(self):
        """DecimalField should be enhanced with comparison modifiers."""
        class _Form(FilterModifierMixin, forms.Form):
            decimal_field = forms.DecimalField(required=False)
            model = TestModel

        form = _Form()
        self.assertIsInstance(form.fields['decimal_field'].widget, FilterModifierWidget)
        self.assertEqual(
            self._lookup_codes(form, 'decimal_field'),
            ['exact', 'n', 'gt', 'gte', 'lt', 'lte', 'empty_true', 'empty_false'],
        )

    def test_mixin_enhances_date_field(self):
        """DateField should be enhanced with date-appropriate modifiers."""
        class _Form(FilterModifierMixin, forms.Form):
            date_field = forms.DateField(required=False)
            model = TestModel

        form = _Form()
        self.assertIsInstance(form.fields['date_field'].widget, FilterModifierWidget)
        self.assertEqual(
            self._lookup_codes(form, 'date_field'),
            ['exact', 'n', 'gt', 'gte', 'lt', 'lte', 'empty_true', 'empty_false'],
        )
class ExtendedLookupFilterPillsTest(TestCase):
    """Tests for filter pill rendering of extended lookups."""

    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create(username='test_user')

    def _first_pill(self, query_string):
        # Run the applied_filters template tag against a query string and return
        # the first rendered pill, asserting that at least one pill was produced.
        query_params = QueryDict(query_string)
        form = DeviceFilterForm(query_params)
        request = RequestFactory().get('/', query_params)
        request.user = self.user
        context = Context({'request': request})
        result = applied_filters(context, Device, form, query_params)
        self.assertGreater(len(result['applied_filters']), 0)
        return result['applied_filters'][0]

    def test_negation_lookup_filter_pill(self):
        """Filter pill should show 'is not' for negation lookup."""
        pill = self._first_pill('serial__n=ABC123')
        self.assertIn('is not', pill['link_text'].lower())
        self.assertIn('ABC123', pill['link_text'])

    def test_regex_lookup_filter_pill(self):
        """Filter pill should show 'matches pattern' for regex lookup."""
        pill = self._first_pill('serial__regex=^ABC.*')
        self.assertIn('matches pattern', pill['link_text'].lower())

    def test_exact_lookup_filter_pill(self):
        """Filter pill should show field label and value without lookup modifier for exact match."""
        pill = self._first_pill('serial=ABC123')
        # No lookup-modifier wording should appear for an exact match
        for modifier_text in ('is not', 'matches pattern', 'contains'):
            self.assertNotIn(modifier_text, pill['link_text'].lower())
        # The field label and value remain present
        self.assertIn('Serial', pill['link_text'])
        self.assertIn('ABC123', pill['link_text'])
class EmptyLookupTest(TestCase):
    """Tests for empty (is empty/not empty) lookup support."""

    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create(username='test_user')

    def _first_pill(self, query_string):
        # Render the filter pills for the given query string and return the first
        # one, asserting that at least one pill was produced.
        query_params = QueryDict(query_string)
        form = DeviceFilterForm(query_params)
        request = RequestFactory().get('/', query_params)
        request.user = self.user
        context = Context({'request': request})
        result = applied_filters(context, Device, form, query_params)
        self.assertGreater(len(result['applied_filters']), 0)
        return result['applied_filters'][0]

    def test_empty_true_appears_in_filter_pills(self):
        """Filter pill should show 'Is Empty' for empty=true."""
        pill = self._first_pill('serial__empty=true')
        self.assertIn('empty', pill['link_text'].lower())

    def test_empty_false_appears_in_filter_pills(self):
        """Filter pill should show 'Is Not Empty' for empty=false."""
        pill = self._first_pill('serial__empty=false')
        self.assertIn('not empty', pill['link_text'].lower())
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/tests/test_filter_modifiers.py",
"license": "Apache License 2.0",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/core/tests/test_openapi_schema.py | """
Unit tests for OpenAPI schema generation.
Refs: #20638
"""
import json
from django.test import TestCase
class OpenAPISchemaTestCase(TestCase):
    """Tests for OpenAPI schema generation."""

    def setUp(self):
        """Fetch schema via API endpoint."""
        # The schema is fetched fresh for every test; parsing the JSON up front
        # lets each test traverse it as plain dicts.
        response = self.client.get('/api/schema/', {'format': 'json'})
        self.assertEqual(response.status_code, 200)
        self.schema = json.loads(response.content)

    def test_post_operation_documents_single_or_array(self):
        """
        POST operations on NetBoxModelViewSet endpoints should document
        support for both single objects and arrays via oneOf.
        Refs: #20638
        """
        # Test representative endpoints across different apps
        test_paths = [
            '/api/core/data-sources/',
            '/api/dcim/sites/',
            '/api/users/users/',
            '/api/ipam/ip-addresses/',
        ]
        for path in test_paths:
            with self.subTest(path=path):
                operation = self.schema['paths'][path]['post']
                # Get the request body schema
                request_schema = operation['requestBody']['content']['application/json']['schema']
                # Should have oneOf with two options
                self.assertIn('oneOf', request_schema, f"POST {path} should have oneOf schema")
                self.assertEqual(
                    len(request_schema['oneOf']), 2,
                    f"POST {path} oneOf should have exactly 2 options"
                )
                # First option: single object (has $ref or properties)
                single_schema = request_schema['oneOf'][0]
                self.assertTrue(
                    '$ref' in single_schema or 'properties' in single_schema,
                    f"POST {path} first oneOf option should be single object"
                )
                # Second option: array of objects
                array_schema = request_schema['oneOf'][1]
                self.assertEqual(
                    array_schema['type'], 'array',
                    f"POST {path} second oneOf option should be array"
                )
                self.assertIn('items', array_schema, f"POST {path} array should have items")

    def test_bulk_update_operations_require_array_only(self):
        """
        Bulk update/patch operations should require arrays only, not oneOf.
        They don't support single object input.
        Refs: #20638
        """
        test_paths = [
            '/api/dcim/sites/',
            '/api/users/users/',
        ]
        for path in test_paths:
            # PUT and PATCH at the list endpoint are bulk operations in NetBox's API
            for method in ['put', 'patch']:
                with self.subTest(path=path, method=method):
                    operation = self.schema['paths'][path][method]
                    request_schema = operation['requestBody']['content']['application/json']['schema']
                    # Should be array-only, not oneOf
                    self.assertNotIn(
                        'oneOf', request_schema,
                        f"{method.upper()} {path} should NOT have oneOf (array-only)"
                    )
                    self.assertEqual(
                        request_schema['type'], 'array',
                        f"{method.upper()} {path} should require array"
                    )
                    self.assertIn(
                        'items', request_schema,
                        f"{method.upper()} {path} array should have items"
                    )

    def test_bulk_delete_requires_array(self):
        """
        Bulk delete operations should require arrays.
        Refs: #20638
        """
        path = '/api/dcim/sites/'
        operation = self.schema['paths'][path]['delete']
        request_schema = operation['requestBody']['content']['application/json']['schema']
        # Should be array-only
        self.assertNotIn('oneOf', request_schema, "DELETE should NOT have oneOf")
        self.assertEqual(request_schema['type'], 'array', "DELETE should require array")
        self.assertIn('items', request_schema, "DELETE array should have items")
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/tests/test_openapi_schema.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/tests/test_forms.py | from django.test import TestCase
from dcim.choices import InterfaceTypeChoices
from dcim.forms import InterfaceImportForm
from dcim.models import Device, DeviceRole, DeviceType, Interface, Manufacturer, Site
class NetBoxModelImportFormCleanTest(TestCase):
    """
    Test the clean() method of NetBoxModelImportForm to ensure it properly converts
    empty strings to None for nullable fields during CSV import.
    Uses InterfaceImportForm as the concrete implementation to test.
    """

    @classmethod
    def setUpTestData(cls):
        # Create minimal test fixtures for Interface
        cls.site = Site.objects.create(name='Test Site', slug='test-site')
        cls.manufacturer = Manufacturer.objects.create(name='Test Manufacturer', slug='test-manufacturer')
        cls.device_type = DeviceType.objects.create(
            manufacturer=cls.manufacturer, model='Test Device Type', slug='test-device-type'
        )
        cls.device_role = DeviceRole.objects.create(name='Test Role', slug='test-role', color='ff0000')
        cls.device = Device.objects.create(
            name='Test Device', device_type=cls.device_type, role=cls.device_role, site=cls.site
        )
        # Create parent interfaces for ForeignKey testing
        cls.parent_interface = Interface.objects.create(
            device=cls.device, name='Parent Interface', type=InterfaceTypeChoices.TYPE_1GE_GBIC
        )
        cls.lag_interface = Interface.objects.create(
            device=cls.device, name='LAG Interface', type=InterfaceTypeChoices.TYPE_LAG
        )

    def _make_form(self, name, **extra):
        """
        Build an InterfaceImportForm with the common required fields (device, name,
        type) plus any extra field values under test. Factored out to eliminate the
        identical boilerplate previously repeated in every test.
        """
        return InterfaceImportForm(
            data={
                'device': self.device,
                'name': name,
                'type': InterfaceTypeChoices.TYPE_1GE_GBIC,
                **extra,
            }
        )

    def _assert_valid(self, form):
        # Assert the form validates, surfacing any validation errors in the failure message.
        self.assertTrue(form.is_valid(), f'Form errors: {form.errors}')

    def test_empty_string_to_none_nullable_charfield(self):
        """Empty strings should convert to None for nullable CharField"""
        form = self._make_form('Interface 1', duplex='')  # duplex: nullable CharField
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['duplex'])

    def test_empty_string_to_none_nullable_integerfield(self):
        """Empty strings should convert to None for nullable PositiveIntegerField"""
        form = self._make_form('Interface 2', speed='')  # speed: nullable PositiveIntegerField
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['speed'])

    def test_empty_string_to_none_nullable_smallintegerfield(self):
        """Empty strings should convert to None for nullable SmallIntegerField"""
        form = self._make_form('Interface 3', tx_power='')  # tx_power: nullable SmallIntegerField
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['tx_power'])

    def test_empty_string_to_none_nullable_decimalfield(self):
        """Empty strings should convert to None for nullable DecimalField"""
        form = self._make_form(
            'Interface 4',
            rf_channel_frequency='',  # nullable DecimalField
            rf_channel_width='',      # nullable DecimalField
        )
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['rf_channel_frequency'])
        self.assertIsNone(form.cleaned_data['rf_channel_width'])

    def test_empty_string_to_none_nullable_foreignkey(self):
        """Empty strings should convert to None for nullable ForeignKey"""
        form = self._make_form(
            'Interface 5',
            lag='',     # nullable ForeignKey
            parent='',  # nullable ForeignKey
            bridge='',  # nullable ForeignKey
            vrf='',     # nullable ForeignKey
        )
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['lag'])
        self.assertIsNone(form.cleaned_data['parent'])
        self.assertIsNone(form.cleaned_data['bridge'])
        self.assertIsNone(form.cleaned_data['vrf'])

    def test_empty_string_preserved_non_nullable_charfield(self):
        """Empty strings should be preserved for non-nullable CharField (blank=True only)"""
        form = self._make_form(
            'Interface 6',
            label='',        # CharField with blank=True (not null=True)
            description='',  # CharField with blank=True (not null=True)
        )
        self._assert_valid(form)
        # label and description are NOT nullable in the model, so empty string remains
        self.assertEqual(form.cleaned_data['label'], '')
        self.assertEqual(form.cleaned_data['description'], '')

    def test_empty_string_not_converted_for_required_fields(self):
        """Empty strings should NOT be converted for required fields"""
        # name is required; an empty string should remain and cause a validation error
        form = self._make_form('')
        self.assertFalse(form.is_valid())
        if form.errors:
            self.assertIn('name', form.errors)

    def test_non_string_none_value_preserved(self):
        """None values should be preserved (not modified)"""
        form = self._make_form('Interface 7', speed=None, tx_power=None)
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['speed'])
        self.assertIsNone(form.cleaned_data['tx_power'])

    def test_non_string_numeric_values_preserved(self):
        """Numeric values (including 0) should not be modified"""
        form = self._make_form('Interface 8', speed=0, tx_power=0)
        self._assert_valid(form)
        self.assertEqual(form.cleaned_data['speed'], 0)
        self.assertEqual(form.cleaned_data['tx_power'], 0)

    def test_manytomany_fields_skipped(self):
        """ManyToMany fields should be skipped and not cause errors"""
        # Interface has 'vdcs' and 'wireless_lans' as M2M fields; they are
        # omitted here and handled by the parent class.
        form = self._make_form('Interface 9')
        self._assert_valid(form)

    def test_fields_not_in_cleaned_data_skipped(self):
        """Fields not present in cleaned_data should be skipped gracefully"""
        # lag, parent, bridge, vrf, speed, etc. are not provided; clean() must
        # not raise KeyError when checking fields absent from the form data.
        form = self._make_form('Interface 10')
        self._assert_valid(form)

    def test_valid_string_values_preserved(self):
        """Non-empty string values should be properly converted to their target types"""
        form = self._make_form(
            'Interface 11',
            speed='1000000',  # valid speed (string converted to int)
            mtu='1500',       # valid mtu (string converted to int)
            description='Test description',
        )
        self._assert_valid(form)
        # speed and mtu are converted to int
        self.assertEqual(form.cleaned_data['speed'], 1000000)
        self.assertEqual(form.cleaned_data['mtu'], 1500)
        self.assertEqual(form.cleaned_data['description'], 'Test description')

    def test_multiple_nullable_fields_with_empty_strings(self):
        """Multiple nullable fields with empty strings should all convert to None"""
        nullable_fields = ('speed', 'duplex', 'tx_power', 'vrf', 'poe_mode', 'poe_type')
        form = self._make_form('Interface 12', **{field: '' for field in nullable_fields})
        self._assert_valid(form)
        # All nullable fields should convert to None
        for field in nullable_fields:
            self.assertIsNone(form.cleaned_data[field])

    def test_mixed_nullable_and_non_nullable_empty_strings(self):
        """Combination of nullable and non-nullable fields with empty strings"""
        form = self._make_form(
            'Interface 13',
            speed='',        # nullable, should become None
            label='',        # NOT nullable (blank=True only), should remain empty string
            duplex='',       # nullable, should become None
            description='',  # NOT nullable (blank=True only), should remain empty string
        )
        self._assert_valid(form)
        # Nullable fields convert to None
        self.assertIsNone(form.cleaned_data['speed'])
        self.assertIsNone(form.cleaned_data['duplex'])
        # Non-nullable fields remain empty strings
        self.assertEqual(form.cleaned_data['label'], '')
        self.assertEqual(form.cleaned_data['description'], '')

    def test_wireless_fields_nullable(self):
        """Wireless-specific nullable fields should convert empty strings to None"""
        form = self._make_form(
            'Interface 14',
            rf_role='',               # nullable CharField
            rf_channel='',            # nullable CharField
            rf_channel_frequency='',  # nullable DecimalField
            rf_channel_width='',      # nullable DecimalField
        )
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['rf_role'])
        self.assertIsNone(form.cleaned_data['rf_channel'])
        self.assertIsNone(form.cleaned_data['rf_channel_frequency'])
        self.assertIsNone(form.cleaned_data['rf_channel_width'])

    def test_poe_fields_nullable(self):
        """PoE-specific nullable fields should convert empty strings to None"""
        form = self._make_form(
            'Interface 15',
            poe_mode='',  # nullable CharField
            poe_type='',  # nullable CharField
        )
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['poe_mode'])
        self.assertIsNone(form.cleaned_data['poe_type'])

    def test_wwn_field_nullable(self):
        """WWN field (special field type) should convert empty string to None"""
        form = self._make_form('Interface 16', wwn='')  # nullable WWNField
        self._assert_valid(form)
        self.assertIsNone(form.cleaned_data['wwn'])
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/tests/test_forms.py",
"license": "Apache License 2.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/ipam/tests/test_lookups.py | from django.db.backends.postgresql.psycopg_any import NumericRange
from django.test import TestCase
from ipam.models import VLANGroup
class VLANGroupRangeContainsLookupTests(TestCase):
    """Exercise the custom `range_contains` lookup on VLANGroup.vid_ranges."""

    @classmethod
    def setUpTestData(cls):
        # Two ranges: [1,11) and [20,31)
        cls.g1 = VLANGroup.objects.create(
            name='VlanGroup-A',
            slug='VlanGroup-A',
            vid_ranges=[NumericRange(1, 11), NumericRange(20, 31)],
        )
        # One range: [100,201)
        cls.g2 = VLANGroup.objects.create(
            name='VlanGroup-B',
            slug='VlanGroup-B',
            vid_ranges=[NumericRange(100, 201)],
        )
        cls.g_empty = VLANGroup.objects.create(
            name='VlanGroup-empty',
            slug='VlanGroup-empty',
            vid_ranges=[],
        )

    @staticmethod
    def _names_containing(vid, **filters):
        # Sorted names of all VLANGroups whose vid_ranges contain the given VID.
        queryset = VLANGroup.objects.filter(vid_ranges__range_contains=vid, **filters)
        return list(queryset.values_list('name', flat=True).order_by('name'))

    def test_contains_value_in_first_range(self):
        """A VID inside a group's first range should match that group."""
        self.assertEqual(self._names_containing(10), ['VlanGroup-A'])

    def test_contains_value_in_second_range(self):
        """A VID inside a group's second range should also match that group."""
        self.assertEqual(self._names_containing(25), ['VlanGroup-A'])

    def test_upper_bound_is_exclusive(self):
        """The upper bound of a stored range is exclusive for the lookup."""
        # 11 is NOT in [1,11)
        self.assertEqual(self._names_containing(11), [])

    def test_no_match_far_outside(self):
        """A VID far outside every stored range should match no group."""
        self.assertEqual(self._names_containing(4095), [])

    def test_empty_array_never_matches(self):
        """A group with an empty vid_ranges array should never match any VID."""
        self.assertEqual(self._names_containing(1, pk=self.g_empty.pk), [])
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/ipam/tests/test_lookups.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/monkey.py | from django.db.models import UniqueConstraint
from rest_framework.utils.field_mapping import get_unique_error_message
from rest_framework.validators import UniqueValidator
__all__ = (
'get_unique_validators',
)
def get_unique_validators(field_name, model_field):
    """
    Extend Django REST Framework's get_unique_validators() function to attach a UniqueValidator to a field *only* if
    the associated UniqueConstraint does NOT have a condition which references another field. See bug #19302.
    """
    target_fields = {field_name}

    # Collect the conditions of every single-field UniqueConstraint covering this field...
    candidate_conditions = set()
    for constraint in model_field.model._meta.constraints:
        if isinstance(constraint, UniqueConstraint) and set(constraint.fields) == target_fields:
            candidate_conditions.add(constraint.condition)

    # START custom logic
    # ...then discard any condition that references a field other than this one.
    candidate_conditions = {
        condition for condition in candidate_conditions
        if condition is None or condition.referenced_base_fields == target_fields
    }
    # END custom logic

    # A plain unique=True field always gets an unconditional validator
    if getattr(model_field, 'unique', False):
        candidate_conditions.add(None)

    if not candidate_conditions:
        return

    error_message = get_unique_error_message(model_field)
    base_queryset = model_field.model._default_manager
    for condition in candidate_conditions:
        # A None condition means the uniqueness applies to the full table
        yield UniqueValidator(
            queryset=base_queryset if condition is None else base_queryset.filter(condition),
            message=error_message,
        )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/monkey.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/tests/test_templatetags.py | from unittest.mock import patch
from django.test import TestCase, override_settings
from utilities.templatetags.builtins.tags import static_with_params
class StaticWithParamsTest(TestCase):
    """
    Test the static_with_params template tag functionality.
    """

    def test_static_with_params_basic(self):
        """The tag should append the given parameter to the static asset URL."""
        url = static_with_params('test.js', v='1.0.0')
        # Both the asset path and the appended parameter must appear
        self.assertIn('test.js', url)
        self.assertIn('v=1.0.0', url)

    @override_settings(STATIC_URL='https://cdn.example.com/static/')
    def test_static_with_params_existing_query_params(self):
        """Parameters should merge into a URL that already carries a query string."""
        # Force static() to return a URL that already has a query parameter
        with patch('utilities.templatetags.builtins.tags.static') as mock_static:
            mock_static.return_value = 'https://cdn.example.com/static/test.js?existing=param'
            url = static_with_params('test.js', v='1.0.0')
        # Both the pre-existing and the new parameter survive...
        self.assertIn('existing=param', url)
        self.assertIn('v=1.0.0', url)
        # ...and the query string is merged rather than naively appended
        self.assertEqual(url.count('?'), 1)

    @override_settings(STATIC_URL='https://cdn.example.com/static/')
    def test_static_with_params_duplicate_parameter_warning(self):
        """A conflicting parameter should log a warning and the new value should win."""
        with patch('utilities.templatetags.builtins.tags.static') as mock_static:
            mock_static.return_value = 'https://cdn.example.com/static/test.js?v=old_version'
            with self.assertLogs('netbox.utilities.templatetags.tags', level='WARNING') as logs:
                url = static_with_params('test.js', v='new_version')
        # A conflict warning was emitted
        self.assertIn("Parameter 'v' already exists", logs.output[0])
        # The newly supplied value replaces the pre-existing one
        self.assertIn('v=new_version', url)
        self.assertNotIn('v=old_version', url)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/tests/test_templatetags.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/utilities/tests/test_serialization.py | from django.test import TestCase
from dcim.choices import SiteStatusChoices
from dcim.models import Site
from extras.models import Tag
from utilities.serialization import deserialize_object, serialize_object
class SerializationTestCase(TestCase):
    """
    Tests for serialize_object() / deserialize_object() round-trip behavior.
    """

    @classmethod
    def setUpTestData(cls):
        tags = (
            Tag(name='Tag 1', slug='tag-1'),
            Tag(name='Tag 2', slug='tag-2'),
            Tag(name='Tag 3', slug='tag-3'),
        )
        Tag.objects.bulk_create(tags)

    def test_serialize_object(self):
        """serialize_object() should capture fields, tags & extras while honoring exclusions."""
        site = Site.objects.create(
            name='Site 1',
            # Fixed fixture typo: was 'site=1' — '=' is not a valid slug character,
            # and the companion deserialize test uses 'site-1'.
            slug='site-1',
            description='Ignore me',
        )
        site.tags.set(Tag.objects.all())
        data = serialize_object(site, extra={'foo': 123}, exclude=['description'])
        self.assertEqual(data['name'], site.name)
        self.assertEqual(data['slug'], site.slug)
        self.assertEqual(data['tags'], [tag.name for tag in Tag.objects.all()])
        self.assertEqual(data['foo'], 123)  # Extra data is merged into the output
        self.assertNotIn('description', data)  # Excluded fields are omitted

    def test_deserialize_object(self):
        """deserialize_object() should rebuild an instance, applying defaults and extras."""
        data = {
            'name': 'Site 1',
            'slug': 'site-1',
            'tags': ['Tag 1', 'Tag 2', 'Tag 3'],
            'foo': 123,
        }
        instance = deserialize_object(Site, data, pk=123)
        self.assertEqual(instance.object.pk, 123)
        self.assertEqual(instance.object.name, data['name'])
        self.assertEqual(instance.object.slug, data['slug'])
        self.assertEqual(instance.object.status, SiteStatusChoices.STATUS_ACTIVE)  # Default field value
        self.assertEqual(instance.object.foo, data['foo'])  # Non-field attribute
        self.assertEqual(list(instance.m2m_data['tags']), list(Tag.objects.all()))
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/tests/test_serialization.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/core/api/serializers_/object_types.py | import inspect
from django.urls import NoReverseMatch
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema_field
from rest_framework import serializers
from core.models import ObjectType
from netbox.api.serializers import BaseModelSerializer
from utilities.views import get_action_url
__all__ = (
'ObjectTypeSerializer',
)
class ObjectTypeSerializer(BaseModelSerializer):
    """
    REST API serializer for ObjectType. Alongside the stored attributes, it exposes
    read-only metadata derived from the underlying model class: verbose names, the
    model's REST API list endpoint, and a description taken from its docstring.
    """
    url = serializers.HyperlinkedIdentityField(view_name='core-api:objecttype-detail')
    # Human-readable names sourced from the *_verbose_name attributes of ObjectType
    app_name = serializers.CharField(source='app_verbose_name', read_only=True)
    model_name = serializers.CharField(source='model_verbose_name', read_only=True)
    model_name_plural = serializers.CharField(source='model_verbose_name_plural', read_only=True)
    is_plugin_model = serializers.BooleanField(read_only=True)
    rest_api_endpoint = serializers.SerializerMethodField()
    description = serializers.SerializerMethodField()

    class Meta:
        model = ObjectType
        fields = [
            'id', 'url', 'display', 'app_label', 'app_name', 'model', 'model_name', 'model_name_plural', 'public',
            'features', 'is_plugin_model', 'rest_api_endpoint', 'description',
        ]
        read_only_fields = ['public', 'features']

    @extend_schema_field(OpenApiTypes.STR)
    def get_rest_api_endpoint(self, obj):
        """
        Return the list-view REST API URL for the model, or None if the model class
        cannot be resolved or no API route is registered for it.
        """
        if not (model := obj.model_class()):
            return None
        try:
            return get_action_url(model, action='list', rest_api=True)
        except NoReverseMatch:
            # Not every model exposes a REST API endpoint
            return None

    @extend_schema_field(OpenApiTypes.STR)
    def get_description(self, obj):
        """
        Return the model class's docstring (as cleaned by inspect.getdoc()), or None
        if the model class cannot be resolved.
        """
        if not (model := obj.model_class()):
            return None
        return inspect.getdoc(model)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/api/serializers_/object_types.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/core/dataclasses.py | import logging
from dataclasses import dataclass, field
from datetime import datetime
from django.utils import timezone
__all__ = (
'JobLogEntry',
)
@dataclass
class JobLogEntry:
    """
    A single log entry captured during the execution of a background job.
    """
    level: str  # Logging level name; lower-cased when built via from_logrecord()
    message: str  # The log message text
    timestamp: datetime = field(default_factory=timezone.now)

    @classmethod
    def from_logrecord(cls, record: logging.LogRecord) -> 'JobLogEntry':
        """
        Instantiate a JobLogEntry from a logging.LogRecord.
        """
        # Use getMessage() rather than the raw `msg` attribute so that any %-style
        # arguments passed to the logging call are interpolated into the message
        # instead of being silently dropped.
        return cls(record.levelname.lower(), record.getMessage())
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/dataclasses.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/core/models/object_types.py | import inspect
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection, models
from django.db.models import Q
from django.utils.translation import gettext as _
from netbox.context import query_cache
from netbox.plugins import PluginConfig
from netbox.registry import registry
from utilities.string import title
__all__ = (
'ObjectType',
'ObjectTypeManager',
'ObjectTypeQuerySet',
)
class ObjectTypeQuerySet(models.QuerySet):

    def create(self, **kwargs):
        """
        Create a new ObjectType. If a ContentType already exists for the given app_label & model,
        link the new ObjectType to it (via contenttype_ptr) instead of creating a duplicate row.
        """
        app_label = kwargs.get('app_label')
        model = kwargs.get('model')
        if app_label and model:
            try:
                kwargs['contenttype_ptr'] = ContentType.objects.get(app_label=app_label, model=model)
            except ObjectDoesNotExist:
                # No matching ContentType; let the parent create() provision one
                pass
        return super().create(**kwargs)
class ObjectTypeManager(models.Manager):
    """
    Custom manager for ObjectType, mirroring the API of Django's ContentTypeManager
    (get_by_natural_key, get_for_id, get_for_model, get_for_models) with request-level caching.
    """

    # TODO: Remove this in NetBox v5.0
    # Cache the result of introspection to avoid repeated queries.
    _table_exists = False

    def get_queryset(self):
        # Use the custom queryset so that create() links to an existing ContentType when possible
        return ObjectTypeQuerySet(self.model, using=self._db)

    def get_by_natural_key(self, app_label, model):
        """
        Retrieve an ObjectType by its application label & model name.

        This method exists to provide parity with ContentTypeManager.
        """
        return self.get(app_label=app_label, model=model)

    # TODO: Remove in NetBox v4.5
    def get_for_id(self, id):
        """
        Retrieve an ObjectType by its primary key (numeric ID).

        This method exists to provide parity with ContentTypeManager.
        """
        return self.get(pk=id)

    def _get_opts(self, model, for_concrete_model):
        # Return the model's _meta options, optionally resolving proxy models to their concrete model
        if for_concrete_model:
            model = model._meta.concrete_model
        return model._meta

    def get_for_model(self, model, for_concrete_model=True):
        """
        Retrieve or create and return the ObjectType for a model.
        """
        from netbox.models.features import get_model_features, model_is_public
        # Check the request cache before hitting the database
        cache = query_cache.get()
        if cache is not None:
            if ot := cache['object_types'].get((model._meta.model, for_concrete_model)):
                return ot
        # TODO: Remove this in NetBox v5.0
        # If the ObjectType table has not yet been provisioned (e.g. because we're in a pre-v4.4 migration),
        # fall back to ContentType. (The returned ContentType is annotated with the model's features so
        # callers can treat it like an ObjectType.)
        if not ObjectTypeManager._table_exists:
            if 'core_objecttype' not in connection.introspection.table_names():
                ct = ContentType.objects.get_for_model(model, for_concrete_model=for_concrete_model)
                ct.features = get_model_features(ct.model_class())
                return ct
            ObjectTypeManager._table_exists = True
        # An instance may be passed in place of a class; resolve it to its class
        if not inspect.isclass(model):
            model = model.__class__
        opts = self._get_opts(model, for_concrete_model)
        try:
            # Use .get() instead of .get_or_create() initially to ensure db_for_read is honored (Django bug #20401).
            ot = self.get(app_label=opts.app_label, model=opts.model_name)
        except self.model.DoesNotExist:
            # If the ObjectType doesn't exist, create it. (Use .get_or_create() to avoid race conditions.)
            ot = self.get_or_create(
                app_label=opts.app_label,
                model=opts.model_name,
                public=model_is_public(model),
                features=get_model_features(model),
            )[0]
        # Populate the request cache to avoid redundant lookups
        if cache is not None:
            cache['object_types'][(model._meta.model, for_concrete_model)] = ot
        return ot

    def get_for_models(self, *models, for_concrete_models=True):
        """
        Retrieve or create the ObjectTypes for multiple models, returning a mapping {model: ObjectType}.

        This method exists to provide parity with ContentTypeManager.
        """
        from netbox.models.features import get_model_features, model_is_public
        results = {}
        # Compile the model and options mappings
        needed_models = defaultdict(set)    # app_label -> {model_name, ...}
        needed_opts = defaultdict(list)     # (app_label, model_name) -> [model class, ...]
        for model in models:
            # An instance may be passed in place of a class; resolve it to its class
            if not inspect.isclass(model):
                model = model.__class__
            opts = self._get_opts(model, for_concrete_models)
            needed_models[opts.app_label].add(opts.model_name)
            needed_opts[(opts.app_label, opts.model_name)].append(model)
        # Fetch existing ObjectType from the database (one OR'ed query across all app_labels)
        condition = Q(
            *(
                Q(('app_label', app_label), ('model__in', model_names))
                for app_label, model_names in needed_models.items()
            ),
            _connector=Q.OR,
        )
        for ot in self.filter(condition):
            opts_models = needed_opts.pop((ot.app_label, ot.model), [])
            for model in opts_models:
                results[model] = ot
        # Create any missing ObjectTypes (those left in needed_opts after the query above)
        for (app_label, model_name), opts_models in needed_opts.items():
            for model in opts_models:
                results[model] = self.create(
                    app_label=app_label,
                    model=model_name,
                    public=model_is_public(model),
                    features=get_model_features(model),
                )
        return results

    def public(self):
        """
        Includes only ObjectTypes for "public" models.

        Filter the base queryset to return only ObjectTypes corresponding to public models; those which are intended
        for reference by other objects within the application.
        """
        return self.get_queryset().filter(public=True)

    def with_feature(self, feature):
        """
        Return ObjectTypes only for models which support the given feature.

        Only ObjectTypes which list the specified feature will be included. Supported features are declared in the
        application registry under `registry["model_features"]`. For example, we can find all ObjectTypes for models
        which support event rules with:

            ObjectType.objects.with_feature('event_rules')

        Raises:
            KeyError: If the named feature is not registered.
        """
        if feature not in registry['model_features']:
            raise KeyError(
                f"{feature} is not a registered model feature! Valid features are: {registry['model_features'].keys()}"
            )
        return self.get_queryset().filter(features__contains=[feature])
class ObjectType(ContentType):
    """
    Wrap Django's native ContentType model to use our custom manager.
    """
    contenttype_ptr = models.OneToOneField(
        to='contenttypes.ContentType',
        on_delete=models.CASCADE,
        parent_link=True,
        primary_key=True,
        serialize=False,
        related_name='object_type',
    )
    # True if the model may be referenced by other objects within the application
    public = models.BooleanField(default=False)
    # The set of NetBox feature names this model supports
    features = ArrayField(
        base_field=models.CharField(max_length=50),
        default=list,
    )

    objects = ObjectTypeManager()

    class Meta:
        verbose_name = _('object type')
        verbose_name_plural = _('object types')
        ordering = ('app_label', 'model')
        indexes = [
            GinIndex(fields=['features']),
        ]

    @property
    def app_labeled_name(self):
        # Override ContentType's "app | model" representation style.
        return f"{self.app_verbose_name} > {title(self.model_verbose_name)}"

    @property
    def app_verbose_name(self):
        """The verbose name of the app to which the model belongs, or None if the model is invalid."""
        model = self.model_class()
        return model._meta.app_config.verbose_name if model else None

    @property
    def model_verbose_name(self):
        """The model's singular verbose name, or None if the model is invalid."""
        model = self.model_class()
        return model._meta.verbose_name if model else None

    @property
    def model_verbose_name_plural(self):
        """The model's plural verbose name, or None if the model is invalid."""
        model = self.model_class()
        return model._meta.verbose_name_plural if model else None

    @property
    def is_plugin_model(self):
        """True if the model is provided by a plugin; None if the model class is invalid."""
        model = self.model_class()
        if model is None:
            return None
        return isinstance(model._meta.app_config, PluginConfig)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/models/object_types.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/core/object_actions.py | from django.utils.translation import gettext_lazy as _
from netbox.object_actions import ObjectAction
__all__ = (
'BulkSync',
)
class BulkSync(ObjectAction):
    """
    Synchronize multiple objects at once.
    """
    name = 'bulk_sync'                              # Resolves the model's "bulk_sync" view
    label = _('Sync Data')
    multi = True                                    # Operates on a table selection of objects
    permissions_required = {'sync'}                 # Requires the model-level "sync" permission
    template_name = 'core/buttons/bulk_sync.html'
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/core/object_actions.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/dcim/object_actions.py | from django.utils.translation import gettext_lazy as _
from netbox.object_actions import ObjectAction
__all__ = (
'BulkAddComponents',
'BulkDisconnect',
)
class BulkAddComponents(ObjectAction):
    """
    Add components to the selected devices.
    """
    label = _('Add Components')
    multi = True
    permissions_required = {'change'}
    template_name = 'dcim/buttons/bulk_add_components.html'

    @classmethod
    def get_context(cls, context, obj):
        """Pass through the form action URL supplied by the including template."""
        formaction = context.get('formaction')
        return {'formaction': formaction}
class BulkDisconnect(ObjectAction):
    """
    Disconnect each of a set of objects to which a cable is connected.
    """
    name = 'bulk_disconnect'                            # Resolves the model's "bulk_disconnect" view
    label = _('Disconnect Selected')
    multi = True                                        # Operates on a table selection of objects
    permissions_required = {'change'}
    template_name = 'dcim/buttons/bulk_disconnect.html'
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/dcim/object_actions.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/metrics.py | from django_prometheus import middleware
from django_prometheus.conf import NAMESPACE
from prometheus_client import Counter
__all__ = (
'Metrics',
)
class Metrics(middleware.Metrics):
    """
    Expand the stock Metrics class from django_prometheus with NetBox-specific counters
    for REST API and GraphQL API traffic.
    """
    def register(self):
        # Register the stock django_prometheus metrics first
        super().register()

        register = self.register_metric

        # REST API metrics
        self.rest_api_requests = register(
            Counter,
            "rest_api_requests_total_by_method",
            "Count of total REST API requests by method",
            ["method"],
            namespace=NAMESPACE,
        )
        self.rest_api_requests_by_view_method = register(
            Counter,
            "rest_api_requests_total_by_view_method",
            "Count of REST API requests by view & method",
            ["view", "method"],
            namespace=NAMESPACE,
        )

        # GraphQL API metrics
        self.graphql_api_requests = register(
            Counter,
            "graphql_api_requests_total",
            "Count of total GraphQL API requests",
            namespace=NAMESPACE,
        )
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/metrics.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/netbox/object_actions.py | from django.db.models import ForeignKey
from django.template import loader
from django.urls.exceptions import NoReverseMatch
from django.utils.translation import gettext_lazy as _
from core.models import ObjectType
from extras.models import ExportTemplate
from utilities.querydict import prepare_cloned_fields
from utilities.views import get_action_url
__all__ = (
'AddObject',
'BulkDelete',
'BulkEdit',
'BulkExport',
'BulkImport',
'BulkRename',
'CloneObject',
'DeleteObject',
'EditObject',
'ObjectAction',
)
class ObjectAction:
    """
    Base class for single- and multi-object operations.

    Params:
        name: The action name appended to the module for view resolution
        label: Human-friendly label for the rendered button
        template_name: Name of the HTML template which renders the button
        multi: Set to True if this action is performed by selecting multiple objects (i.e. using a table)
        permissions_required: The set of permissions a user must have to perform the action
        url_kwargs: The set of URL keyword arguments to pass when resolving the view's URL
    """
    name = ''
    label = None
    template_name = None
    multi = False
    permissions_required = set()
    url_kwargs = []

    @classmethod
    def get_url(cls, obj):
        """Resolve the URL of the action's view for the given object, or None if no route exists."""
        kwargs = {}
        for kwarg in cls.url_kwargs:
            kwargs[kwarg] = getattr(obj, kwarg)
        try:
            return get_action_url(obj, action=cls.name, kwargs=kwargs)
        except NoReverseMatch:
            return None

    @classmethod
    def get_url_params(cls, context):
        """Return the query parameters to append to the action URL (request GET + return_url)."""
        params = context['request'].GET.copy()
        if 'return_url' in context:
            params['return_url'] = context['return_url']
        return params

    @classmethod
    def get_context(cls, context, obj):
        """
        Return any additional context data needed to render the button.
        """
        return {}

    @classmethod
    def render(cls, context, obj, **kwargs):
        """Render the action's button template to an HTML string."""
        ctx = {
            'perms': context['perms'],
            'request': context['request'],
            'url': cls.get_url(obj),
            'url_params': cls.get_url_params(context),
            'label': cls.label,
        }
        # Subclass-provided context and caller kwargs override the defaults above
        ctx.update(cls.get_context(context, obj))
        ctx.update(kwargs)
        return loader.render_to_string(cls.template_name, ctx)
class AddObject(ObjectAction):
    """
    Create a new object.
    """
    name = 'add'                            # Resolves the model's "add" view
    label = _('Add')
    permissions_required = {'add'}          # Requires the model-level "add" permission
    template_name = 'buttons/add.html'
class CloneObject(ObjectAction):
    """
    Populate the new object form with select details from an existing object.
    """
    name = 'add'
    label = _('Clone')
    permissions_required = {'add'}
    template_name = 'buttons/clone.html'

    @classmethod
    def get_url(cls, obj):
        """Return the "add" view URL with the object's cloneable field values as query parameters."""
        base_url = super().get_url(obj)
        params = prepare_cloned_fields(obj).urlencode()
        # If the object has no cloneable fields, suppress the button entirely
        if not params:
            return None
        return f'{base_url}?{params}'
class EditObject(ObjectAction):
    """
    Edit a single object.
    """
    name = 'edit'                           # Resolves the model's "edit" view
    label = _('Edit')
    permissions_required = {'change'}
    url_kwargs = ['pk']                     # The edit view is addressed by the object's PK
    template_name = 'buttons/edit.html'
class DeleteObject(ObjectAction):
    """
    Delete a single object.
    """
    name = 'delete'                         # Resolves the model's "delete" view
    label = _('Delete')
    permissions_required = {'delete'}
    url_kwargs = ['pk']                     # The delete view is addressed by the object's PK
    template_name = 'buttons/delete.html'
class BulkImport(ObjectAction):
    """
    Import multiple objects at once.
    """
    name = 'bulk_import'                    # Resolves the model's "bulk_import" view
    label = _('Import')
    permissions_required = {'add'}          # Importing creates objects, so "add" is required
    template_name = 'buttons/import.html'
class BulkExport(ObjectAction):
    """
    Export multiple objects at once.
    """
    name = 'export'
    label = _('Export')
    permissions_required = {'view'}
    template_name = 'buttons/export.html'

    @classmethod
    def get_context(cls, context, model):
        """Supply the object type, export templates, and default data format for the export menu."""
        request = context['request']
        object_type = ObjectType.objects.get_for_model(model)

        # Determine if the "all data" export returns CSV or YAML
        if hasattr(object_type.model_class(), 'to_yaml'):
            data_format = 'YAML'
        else:
            data_format = 'CSV'

        # Retrieve all export templates for this model (restricted to those the user may view)
        export_templates = ExportTemplate.objects.restrict(request.user, 'view').filter(object_types=object_type)

        return {
            'object_type': object_type,
            'url_params': request.GET.urlencode() if request.GET else '',
            'export_templates': export_templates,
            'data_format': data_format,
        }
class BulkEdit(ObjectAction):
    """
    Change the value of one or more fields on a set of objects.
    """
    name = 'bulk_edit'
    label = _('Edit Selected')
    multi = True
    permissions_required = {'change'}
    template_name = 'buttons/bulk_edit.html'

    @classmethod
    def get_context(cls, context, model):
        """Extend the URL parameters with the parent object's PK when editing child objects."""
        url_params = super().get_url_params(context)

        # If this is a child object, pass the parent's PK as a URL parameter
        parent = context.get('object')
        if parent:
            parent_fks = (
                f for f in model._meta.get_fields()
                if isinstance(f, ForeignKey) and f.remote_field.model == parent.__class__
            )
            if (fk_field := next(parent_fks, None)) is not None:
                url_params[fk_field.name] = parent.pk

        return {'url_params': url_params}
class BulkRename(ObjectAction):
    """
    Rename multiple objects at once.
    """
    name = 'bulk_rename'                        # Resolves the model's "bulk_rename" view
    label = _('Rename Selected')
    multi = True                                # Operates on a table selection of objects
    permissions_required = {'change'}
    template_name = 'buttons/bulk_rename.html'
class BulkDelete(ObjectAction):
    """
    Delete each of a set of objects.
    """
    name = 'bulk_delete'                        # Resolves the model's "bulk_delete" view
    label = _('Delete Selected')
    multi = True                                # Operates on a table selection of objects
    permissions_required = {'delete'}
    template_name = 'buttons/bulk_delete.html'
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/object_actions.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/netbox/tests/dummy_plugin/webhook_callbacks.py | from extras.webhooks import register_webhook_callback
@register_webhook_callback
def set_context(object_type, event_type, data, request):
    """Dummy webhook callback which contributes a static value to the webhook context."""
    return dict(foo=123)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/tests/dummy_plugin/webhook_callbacks.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/tests/test_model_features.py | from unittest import skipIf
from django.conf import settings
from django.test import TestCase
from taggit.models import Tag
from core.models import AutoSyncRecord, DataSource
from dcim.models import Site
from extras.models import CustomLink
from ipam.models import Prefix
from netbox.models.features import get_model_features, has_feature, model_is_public
class ModelFeaturesTestCase(TestCase):
    """
    A test case class for verifying model features and utility functions.
    """

    @skipIf('netbox.tests.dummy_plugin' not in settings.PLUGINS, 'dummy_plugin not in settings.PLUGINS')
    def test_model_is_public(self):
        """
        Test that the is_public() utility function returns True for public models only.
        """
        # Imported here because the dummy plugin is only installed when enabled in settings
        from netbox.tests.dummy_plugin.models import DummyModel
        # Public model: no _netbox_private attribute
        self.assertFalse(hasattr(DataSource, '_netbox_private'))
        self.assertTrue(model_is_public(DataSource))
        # Private model: _netbox_private is set
        self.assertTrue(getattr(AutoSyncRecord, '_netbox_private'))
        self.assertFalse(model_is_public(AutoSyncRecord))
        # Plugin model: public unless marked private
        self.assertFalse(hasattr(DummyModel, '_netbox_private'))
        self.assertTrue(model_is_public(DummyModel))
        # Non-core model (third-party app): never public
        self.assertFalse(hasattr(Tag, '_netbox_private'))
        self.assertFalse(model_is_public(Tag))

    def test_has_feature(self):
        """
        Test the functionality of the has_feature() utility function.
        """
        # Sanity checking: confirm the fixture models actually differ in the "bookmarks" attribute
        self.assertTrue(hasattr(DataSource, 'bookmarks'), "Invalid test?")
        self.assertFalse(hasattr(AutoSyncRecord, 'bookmarks'), "Invalid test?")
        self.assertTrue(has_feature(DataSource, 'bookmarks'))
        self.assertFalse(has_feature(AutoSyncRecord, 'bookmarks'))

    def test_get_model_features(self):
        """
        Check that get_model_features() returns the expected features for a model.
        """
        # Sanity checking: CustomLink supports cloning but not bookmarks
        self.assertTrue(hasattr(CustomLink, 'clone'), "Invalid test?")
        self.assertFalse(hasattr(CustomLink, 'bookmarks'), "Invalid test?")
        features = get_model_features(CustomLink)
        self.assertIn('cloning', features)
        self.assertNotIn('bookmarks', features)

    def test_cloningmixin_injects_gfk_attribute(self):
        """
        Tests the cloning mixin with GFK attribute injection in the `clone` method.

        This test validates that the `clone` method correctly handles
        and retains the General Foreign Key (GFK) attributes on an
        object when the cloning fields are explicitly defined.
        """
        site = Site.objects.create(name='Test Site', slug='test-site')
        prefix = Prefix.objects.create(prefix='10.0.0.0/24', scope=site)
        # Temporarily override Prefix.clone_fields; restored in the finally block so other
        # tests are unaffected
        original_clone_fields = getattr(Prefix, 'clone_fields', None)
        try:
            Prefix.clone_fields = ('scope_type', 'scope_id')
            attrs = prefix.clone()
            self.assertEqual(attrs['scope_type'], prefix.scope_type_id)
            self.assertEqual(attrs['scope_id'], prefix.scope_id)
            # When both GFK components are cloned, the 'scope' attribute is injected as well
            self.assertEqual(attrs['scope'], prefix.scope_id)
        finally:
            if original_clone_fields is None:
                delattr(Prefix, 'clone_fields')
            else:
                Prefix.clone_fields = original_clone_fields

    def test_cloningmixin_does_not_inject_gfk_attribute_if_incomplete(self):
        """
        Tests the cloning mixin with incomplete cloning fields does not inject the GFK attribute.

        This test validates that the `clone` method correctly handles
        the case where the cloning fields are incomplete, ensuring that
        the generic foreign key (GFK) attribute is not injected during
        the cloning process.
        """
        site = Site.objects.create(name='Test Site', slug='test-site')
        prefix = Prefix.objects.create(prefix='10.0.0.0/24', scope=site)
        # Temporarily override Prefix.clone_fields; restored in the finally block
        original_clone_fields = getattr(Prefix, 'clone_fields', None)
        try:
            # Only scope_type is cloned (scope_id omitted), so 'scope' must NOT be injected
            Prefix.clone_fields = ('scope_type',)
            attrs = prefix.clone()
            self.assertIn('scope_type', attrs)
            self.assertNotIn('scope', attrs)
        finally:
            if original_clone_fields is None:
                delattr(Prefix, 'clone_fields')
            else:
                Prefix.clone_fields = original_clone_fields
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/tests/test_model_features.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/tests/test_object_actions.py | from unittest import skipIf
from django.conf import settings
from django.test import RequestFactory, TestCase
from dcim.models import Device, DeviceType, Manufacturer
from netbox.object_actions import AddObject, BulkEdit, BulkImport
class ObjectActionTest(TestCase):
    """Tests for ObjectAction URL resolution and context generation."""

    def test_get_url_core_model(self):
        """Test URL generation for core NetBox models"""
        # An unsaved instance suffices: list-level actions do not reference the PK
        obj = Device()
        url = AddObject.get_url(obj)
        self.assertEqual(url, '/dcim/devices/add/')
        url = BulkImport.get_url(obj)
        self.assertEqual(url, '/dcim/devices/import/')

    @skipIf('netbox.tests.dummy_plugin' not in settings.PLUGINS, 'dummy_plugin not in settings.PLUGINS')
    def test_get_url_plugin_model(self):
        """Test URL generation for plugin models includes plugins: namespace"""
        # Imported here because the dummy plugin is only installed when enabled in settings
        from netbox.tests.dummy_plugin.models import DummyNetBoxModel
        obj = DummyNetBoxModel()
        url = AddObject.get_url(obj)
        self.assertEqual(url, '/plugins/dummy-plugin/netboxmodel/add/')
        url = BulkImport.get_url(obj)
        self.assertEqual(url, '/plugins/dummy-plugin/netboxmodel/import/')

    def test_bulk_edit_get_context_child_object(self):
        """
        Test that the parent object's PK is included in the context for child objects.

        Ensure that BulkEdit.get_context() correctly identifies and
        includes the parent object's PK when rendering a child object's
        action button.
        """
        manufacturer = Manufacturer.objects.create(name='Manufacturer 1', slug='manufacturer-1')
        device_type = DeviceType.objects.create(manufacturer=manufacturer, model='Device Type 1', slug='device-type-1')
        # Mock context containing the parent object (DeviceType)
        request = RequestFactory().get('/')
        context = {
            'request': request,
            'object': device_type,
        }
        # Get context for the child model (Device)
        action_context = BulkEdit.get_context(context, Device)
        # Verify that 'device_type' (the FK field name) is present in
        # url_params with the parent's PK
        self.assertIn('url_params', action_context)
        self.assertEqual(action_context['url_params'].get('device_type'), device_type.pk)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/tests/test_object_actions.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/utilities/apps.py | from django.apps import apps
def get_installed_apps():
    """
    Return the name and version number for each installed Django app.

    Returns:
        A dict mapping each app's name to its version string, sorted alphabetically by
        app name. Apps which declare neither a VERSION nor a __version__ attribute on
        their module are omitted.
    """
    installed_apps = {}
    for app_config in apps.get_app_configs():
        app = app_config.module
        if version := getattr(app, 'VERSION', getattr(app, '__version__', None)):
            # Some apps declare their version as a tuple of components, e.g. (1, 2, 3).
            # Use isinstance() rather than an exact type check so tuple subclasses
            # (e.g. namedtuples) are handled as well.
            if isinstance(version, tuple):
                version = '.'.join(str(n) for n in version)
            installed_apps[app_config.name] = version

    # Sort the results alphabetically by app name
    return dict(sorted(installed_apps.items()))
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/apps.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/export.py | from django.utils.translation import gettext_lazy as _
from django_tables2.export import TableExport as TableExport_
from utilities.constants import CSV_DELIMITERS
__all__ = (
'TableExport',
)
class TableExport(TableExport_):
    """
    A subclass of django-tables2's TableExport class which allows us to specify a delimiting
    character for CSV exports.
    """
    def __init__(self, *args, delimiter=None, **kwargs):
        """
        :param delimiter: Name of the delimiter to use (a key of CSV_DELIMITERS); defaults to 'comma'
        :raises ValueError: If an unrecognized delimiter name is given
        """
        # Test membership against the dict directly; calling .keys() is redundant
        if delimiter and delimiter not in CSV_DELIMITERS:
            raise ValueError(_("Invalid delimiter name: {name}").format(name=delimiter))
        self.delimiter = delimiter or 'comma'
        super().__init__(*args, **kwargs)

    def export(self):
        # Apply the configured delimiter for CSV exports only; defer to the stock
        # implementation for all other formats
        if self.format == self.CSV and self.delimiter is not None:
            delimiter = CSV_DELIMITERS[self.delimiter]
            return self.dataset.export(self.format, delimiter=delimiter)
        return super().export()
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/export.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/jobs.py | from django.contrib import messages
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from netbox.jobs import AsyncViewJob
from utilities.request import copy_safe_request
__all__ = (
'is_background_request',
'process_request_as_job',
)
def is_background_request(request):
    """
    Return True if the request is being processed as a background job.
    """
    # A `job` attribute on the request marks it as a background replay — presumably
    # attached by the background worker when executing an AsyncViewJob; confirm against
    # the job runner. Interactive requests lack this attribute.
    return hasattr(request, 'job')
def process_request_as_job(view, request, name=None):
    """
    Process a request using a view as a background job.

    Args:
        view: The view class to be executed by the background worker
        request: The current request, to be replayed in the background
        name: Optional name to assign to the background job

    Returns:
        The enqueued job, or None if the request is already being processed in the background.
    """
    # Guard against recursion: if this request is already being processed as a background
    # job, enqueueing it again would create a loop
    if is_background_request(request):
        return None

    # Create a serializable copy of the original request
    request_copy = copy_safe_request(request)

    # Enqueue a job to perform the work in the background
    job = AsyncViewJob.enqueue(
        name=name,
        user=request.user,
        view_cls=view,
        request=request_copy,
    )

    # Record a message on the original request indicating deferral to a background job
    msg = _('Created background job {id}: <a href="{url}">{name}</a>').format(
        id=job.pk,
        url=job.get_absolute_url(),
        name=job.name
    )
    messages.info(request, mark_safe(msg))

    return job
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/jobs.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/virtualization/object_actions.py | from django.utils.translation import gettext_lazy as _
from netbox.object_actions import ObjectAction
__all__ = (
'BulkAddComponents',
)
class BulkAddComponents(ObjectAction):
    """
    Add components to the selected virtual machines.
    """
    label = _('Add Components')
    multi = True
    permissions_required = {'change'}
    template_name = 'virtualization/buttons/bulk_add_components.html'

    @classmethod
    def get_context(cls, context, obj):
        """Pass through the form action URL supplied by the including template."""
        formaction = context.get('formaction')
        return {'formaction': formaction}
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/virtualization/object_actions.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/prefetch.py | from django.contrib.contenttypes.fields import GenericRelation
from django.db.models import ManyToManyField
from django.db.models.fields.related import ForeignObjectRel
from taggit.managers import TaggableManager
__all__ = (
'get_prefetchable_fields',
)
def get_prefetchable_fields(model):
    """
    Return a list containing the names of all fields on the given model which support prefetching.
    """
    # Forward M2M fields, generic relations, and tag managers are all addressed by field name
    NAMED_FIELD_TYPES = (ManyToManyField, GenericRelation, TaggableManager)

    field_names = []
    for field in model._meta.get_fields():
        # Reverse relations (e.g. reverse ForeignKeys, reverse M2M) use their accessor name
        if isinstance(field, ForeignObjectRel):
            field_names.append(field.get_accessor_name())
        elif isinstance(field, NAMED_FIELD_TYPES):
            field_names.append(field.name)

    return field_names
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/prefetch.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
netbox-community/netbox:netbox/utilities/tests/test_prefetch.py | from circuits.models import Circuit, Provider
from utilities.prefetch import get_prefetchable_fields
from utilities.testing.base import TestCase
class GetPrefetchableFieldsTest(TestCase):
    """
    Verify the operation of get_prefetchable_fields()
    """
    def test_get_prefetchable_fields(self):
        # Provider exercises three field categories on one model
        field_names = get_prefetchable_fields(Provider)
        self.assertIn('asns', field_names)                  # ManyToManyField
        self.assertIn('circuits', field_names)              # Reverse relation
        self.assertIn('tags', field_names)                  # Tags
        # Circuit exercises the remaining category
        field_names = get_prefetchable_fields(Circuit)
        self.assertIn('group_assignments', field_names)     # Generic relation
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/utilities/tests/test_prefetch.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/netbox/models/deletion.py | import logging
from django.contrib.contenttypes.fields import GenericRelation
from django.db import router
from django.db.models.deletion import CASCADE, Collector
logger = logging.getLogger("netbox.models.deletion")
class CustomCollector(Collector):
    """
    Override Django's stock Collector to handle GenericRelations and ensure proper ordering of cascading deletions.
    """
    def collect(
        self,
        objs,
        source=None,
        nullable=False,
        collect_related=True,
        source_attr=None,
        reverse_dependency=False,
        keep_parents=False,
        fail_on_restricted=True,
    ):
        """
        Collect objs and their dependents for deletion. Signature and semantics match
        Django's Collector.collect(), with two adjustments described inline below.
        """
        # By default, Django will force the deletion of dependent objects before the parent only if the ForeignKey
        # field is not nullable. We want to ensure proper ordering regardless, so if the ForeignKey has
        # `on_delete=CASCADE` applied, we set `nullable` to False when calling `collect()`.
        if objs and source and source_attr:
            model = objs[0].__class__
            field = model._meta.get_field(source_attr)
            if field.remote_field.on_delete == CASCADE:
                nullable = False
        super().collect(
            objs,
            source=source,
            nullable=nullable,
            collect_related=collect_related,
            source_attr=source_attr,
            reverse_dependency=reverse_dependency,
            keep_parents=keep_parents,
            fail_on_restricted=fail_on_restricted,
        )
        # Add GenericRelations to the dependency graph
        # (self.data maps model -> collected instances; snapshot with list() since
        # add_dependency() may mutate it during iteration)
        processed_relations = set()
        for _, instances in list(self.data.items()):
            for instance in instances:
                # Get all GenericRelations for this model (GenericRelation lives in
                # _meta.private_fields, not regular fields)
                for field in instance._meta.private_fields:
                    if isinstance(field, GenericRelation):
                        # Create a unique key for this relation so each (model, relation)
                        # pair is processed at most once
                        relation_key = f"{instance._meta.model_name}.{field.name}"
                        if relation_key in processed_relations:
                            continue
                        processed_relations.add(relation_key)
                        # Add the model that the generic relation points to as a dependency
                        self.add_dependency(field.related_model, instance, reverse_dependency=True)
class DeleteMixin:
    """
    Mixin to override the model delete function to use our custom collector.
    """
    def delete(self, using=None, keep_parents=False):
        """
        Override delete to use our custom collector.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self._get_pk_val() is not None, (
            f"{self._meta.object_name} object can't be deleted because its "
            f"{self._meta.pk.attname} attribute is set to None."
        )
        collector = CustomCollector(using=using)
        collector.collect([self], keep_parents=keep_parents)
        return collector.delete()

    # Mirror Django's convention: mark delete() as modifying the database
    delete.alters_data = True

    @classmethod
    def verify_mro(cls, instance):
        """
        Verify that this mixin is first in the MRO.
        """
        # NOTE(review): __mro__[0] is always the instance's own concrete class, so
        # mro.index(cls) can only equal 0 when instance is of this mixin class itself;
        # for any subclass this check raises unconditionally. Confirm whether the
        # intended condition is index 1 (i.e. the mixin is the first base class).
        mro = instance.__class__.__mro__
        if mro.index(cls) != 0:
            raise RuntimeError(f"{cls.__name__} must be first in the MRO. Current MRO: {mro}")
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/netbox/models/deletion.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
netbox-community/netbox:netbox/vpn/tests/test_tables.py | from django.test import RequestFactory, TestCase, tag
from vpn.models import TunnelTermination
from vpn.tables import TunnelTerminationTable
@tag('regression')
class TunnelTerminationTableTest(TestCase):
    def test_every_orderable_field_does_not_throw_exception(self):
        """Render the table ordered by every orderable column, both directions."""
        queryset = TunnelTermination.objects.all()
        request = RequestFactory().get("/")
        skip_columns = {'actions'}
        sortable = []
        for column in TunnelTerminationTable(queryset).columns:
            if column.orderable and column.name not in skip_columns:
                sortable.append(column.name)
        for name in sortable:
            for prefix in ('-', ''):
                table = TunnelTerminationTable(queryset)
                table.order_by = f'{prefix}{name}'
                table.as_html(request)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/vpn/tests/test_tables.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
netbox-community/netbox:netbox/circuits/tests/test_tables.py | from django.test import RequestFactory, TestCase, tag
from circuits.models import CircuitTermination
from circuits.tables import CircuitTerminationTable
@tag('regression')
class CircuitTerminationTableTest(TestCase):
    def test_every_orderable_field_does_not_throw_exception(self):
        """Render the table ordered by every orderable column, both directions."""
        queryset = CircuitTermination.objects.all()
        excluded = {'actions', }
        sortable = []
        for column in CircuitTerminationTable(queryset).columns:
            if column.orderable and column.name not in excluded:
                sortable.append(column.name)
        request = RequestFactory().get("/")
        for name in sortable:
            for direction in ('-', ''):
                table = CircuitTerminationTable(queryset)
                table.order_by = f'{direction}{name}'
                table.as_html(request)
| {
"repo_id": "netbox-community/netbox",
"file_path": "netbox/circuits/tests/test_tables.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
nginx-proxy/nginx-proxy:test/test_dockergen/test_dockergen_network_segregation-custom-label.py | import pytest
@pytest.mark.flaky
def test_unknown_virtual_host_is_503(docker_compose, nginxproxy):
    """An unconfigured virtual host must be answered with 503."""
    response = nginxproxy.get("http://unknown.nginx-proxy.tld/")
    assert 503 == response.status_code
@pytest.mark.flaky
def test_forwards_to_whoami(docker_compose, nginxproxy):
    """Requests for the configured host reach the whoami2 backend container."""
    container = docker_compose.containers.get("whoami2")
    response = nginxproxy.get("http://whoami2.nginx-proxy.tld/")
    assert response.status_code == 200
    assert response.text == f"I'm {container.id[:12]}\n"
| {
"repo_id": "nginx-proxy/nginx-proxy",
"file_path": "test/test_dockergen/test_dockergen_network_segregation-custom-label.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:bin/bump_version.py | #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2017-2019 Joe Rickerby and contributors
# SPDX-License-Identifier: BSD-2-Clause
"""Bump the version number in all the right places."""
from __future__ import annotations
import glob
import os
import subprocess
import sys
import time
import urllib.parse
from pathlib import Path
import cyclopts
from packaging.version import InvalidVersion, Version
try:
from github import Github, GithubException
except ImportError:
Github = None # type: ignore
GithubException = Exception # type: ignore
import ocrmypdf
# Every file that embeds the release version and must be rewritten on a bump.
config = [
    # file path, version find/replace format
    ("src/ocrmypdf/_version.py", '__version__ = "{}"'),
    ("pyproject.toml", 'version = "{}"'),
]

# ANSI terminal color escape codes
RED = "\u001b[31m"
GREEN = "\u001b[32m"
YELLOW = "\u001b[33m"
OFF = "\u001b[0m"  # reset attributes

# GitHub repository slug ("owner/name")
REPO_NAME = "ocrmypdf/OCRmyPDF"
def validate_release_notes(new_version: str) -> bool:
    """Check that the version appears in the release notes.

    Looks for a "## v<version>" header in the per-major-version notes file.
    Returns True if the version is found, False otherwise.
    """
    major = Version(new_version).major
    notes_path = Path(f"docs/releasenotes/version{major:02d}.md")
    if not notes_path.exists():
        print(f"{RED}error:{OFF} Release notes file not found: {notes_path}")
        return False
    expected_header = f"## v{new_version}"
    if expected_header not in notes_path.read_text(encoding="utf8"):
        print(
            f"{RED}error:{OFF} Version v{new_version} not found in {notes_path}"
        )
        print(f" Expected to find: {expected_header}")
        return False
    print(f"{GREEN}Found v{new_version} in {notes_path}{OFF}")
    return True
def get_github_client():
    """Get an authenticated GitHub client.

    Tries the GITHUB_TOKEN environment variable first, then falls back to
    the token stored by the gh CLI. Returns None (after printing guidance)
    if no usable credentials are found or authentication fails.
    """
    if Github is None:
        print(f"{RED}error:{OFF} PyGithub is not installed")
        print(" Install with: pip install PyGithub")
        return None
    token = os.environ.get("GITHUB_TOKEN")
    if not token:
        # No env var — ask the gh CLI for its stored token.
        try:
            proc = subprocess.run(
                ["gh", "auth", "token"],
                capture_output=True,
                encoding="utf8",
                check=True,
            )
        except (FileNotFoundError, subprocess.CalledProcessError):
            print(f"{RED}error:{OFF} No GitHub authentication found")
            print(" Set GITHUB_TOKEN env var or run: gh auth login")
            return None
        token = proc.stdout.strip()
    try:
        return Github(token)
    except GithubException as e:
        print(f"{RED}error:{OFF} Failed to authenticate with GitHub: {e}")
        return None
def wait_for_ci_completion(commit_sha: str, timeout_minutes: int = 30) -> bool:
    """Wait for CI to complete on the given commit.

    Polls the GitHub Actions API every 30 seconds in two phases: first until
    the "Test and deploy" workflow run appears for the commit, then until that
    run completes. Both phases share the same overall timeout budget.

    Returns True if CI passed, False otherwise.
    """
    gh = get_github_client()
    if gh is None:
        return False
    try:
        repo = gh.get_repo(REPO_NAME)
    except GithubException as e:
        print(f"{RED}error:{OFF} Failed to access repository: {e}")
        return False
    workflow_name = "Test and deploy"
    start_time = time.time()
    timeout_seconds = timeout_minutes * 60
    poll_interval = 30  # seconds
    print(f"Waiting for CI workflow '{workflow_name}' on commit {commit_sha[:8]}...")
    # Phase 1: wait for the workflow run to appear — GitHub may take a moment
    # after the push before the run is registered.
    run = None
    while time.time() - start_time < timeout_seconds:
        try:
            runs = repo.get_workflow_runs(head_sha=commit_sha)
            for r in runs:
                if r.name == workflow_name:
                    run = r
                    break
            if run:
                break
        except GithubException as e:
            # Transient API errors are tolerated; retry on the next poll.
            print(f"{YELLOW}Warning:{OFF} Error checking workflow runs: {e}")
        elapsed = int(time.time() - start_time)
        print(f" Waiting for workflow to start... ({elapsed}s)")
        time.sleep(poll_interval)
    if not run:
        print(
            f"{RED}error:{OFF} Workflow run not found within {timeout_minutes} minutes"
        )
        return False
    print(f" Found workflow run #{run.run_number} (ID: {run.id})")
    # Phase 2: poll the run until it reports a terminal status.
    while time.time() - start_time < timeout_seconds:
        try:
            run = repo.get_workflow_run(run.id)  # Refresh the run
        except GithubException as e:
            print(f"{YELLOW}Warning:{OFF} Error refreshing workflow run: {e}")
            time.sleep(poll_interval)
            continue
        status = run.status
        conclusion = run.conclusion
        elapsed = int(time.time() - start_time)
        if status == "completed":
            if conclusion == "success":
                print(f"{GREEN}CI passed!{OFF} (took {elapsed}s)")
                return True
            else:
                # Any non-success conclusion (failure, cancelled, ...) fails the wait.
                print(f"{RED}CI failed!{OFF} Conclusion: {conclusion}")
                print(f" View details: {run.html_url}")
                return False
        else:
            print(f" Status: {status} ({elapsed}s elapsed)")
            time.sleep(poll_interval)
    print(f"{RED}error:{OFF} CI did not complete within {timeout_minutes} minutes")
    return False
def push_and_wait_for_ci(branch: str) -> bool:
    """Push to remote and wait for CI tests to pass.

    Returns False if the push itself fails or CI does not pass.
    """
    print("Pushing to GitHub...")
    result = subprocess.run(
        ["git", "push", "origin", branch],
        capture_output=True,
        encoding="utf8",
    )
    if result.returncode != 0:
        print(f"{RED}error:{OFF} Failed to push: {result.stderr}")
        return False
    # Resolve the SHA that was just pushed so we can watch CI for it.
    head = subprocess.run(
        ["git", "rev-parse", "HEAD"],
        capture_output=True,
        encoding="utf8",
        check=True,
    ).stdout.strip()
    print(f"Pushed commit {head[:8]}")
    return wait_for_ci_completion(head)
def push_tag(tag: str) -> bool:
    """Push the tag to trigger release workflow.

    Returns True when the push succeeds, False otherwise.
    """
    print(f"Pushing tag {tag} to trigger release...")
    proc = subprocess.run(
        ["git", "push", "origin", tag],
        capture_output=True,
        encoding="utf8",
    )
    if proc.returncode == 0:
        print(f"{GREEN}Tag {tag} pushed successfully!{OFF}")
        return True
    print(f"{RED}error:{OFF} Failed to push tag: {proc.stderr}")
    return False
def bump_version() -> None:
    """Bump the version number in all the right places.

    Interactive release driver: shows PRs merged since the last tag, prompts
    for the new version, validates it (PEP 440 and release notes), rewrites
    the files listed in ``config``, commits, tags, pushes and waits for CI,
    and finally pushes the tag to trigger the release workflow.
    """
    current_version = ocrmypdf.__version__  # type: ignore
    try:
        commit_date_str = subprocess.run(
            [
                "git",
                "show",
                "--no-patch",
                "--pretty=format:%ci",
                f"v{current_version}^{{commit}}",
            ],
            check=True,
            capture_output=True,
            encoding="utf8",
        ).stdout
        cd_date, cd_time, cd_tz = commit_date_str.split(" ")
        url_opts = urllib.parse.urlencode(
            {"q": f"is:pr merged:>{cd_date}T{cd_time}{cd_tz}"}
        )
        url = f"https://github.com/{REPO_NAME}/pulls?{url_opts}"
        print(f"PRs merged since last release:\n {url}")
        print()
    except subprocess.CalledProcessError as e:
        print(e)
        print("Failed to get previous version tag information.")
        print("Is the virtual environment active?")
        sys.exit(1)
    # Fix: the original passed a single-string list together with shell=True,
    # which works only by accident on POSIX (the string becomes the shell
    # command; further elements would be $0, $1, ...) and misbehaves on
    # Windows. Run git directly with an argument vector instead; the return
    # code semantics (non-zero => dirty tree) are unchanged.
    git_changes_result = subprocess.run(["git", "diff-index", "--quiet", "HEAD", "--"])
    repo_has_uncommitted_changes = git_changes_result.returncode != 0
    if repo_has_uncommitted_changes:
        print("error: Uncommitted changes detected.")
        sys.exit(1)
    # fmt: off
    print( 'Current version:', current_version)
    new_version = input(' New version: ').strip()
    # fmt: on
    try:
        Version(new_version)
    except InvalidVersion:
        print("error: This version doesn't conform to PEP440")
        print(" https://www.python.org/dev/peps/pep-0440/")
        sys.exit(1)
    # Validate release notes contain this version
    if not validate_release_notes(new_version):
        print()
        print("Please add release notes for this version before proceeding.")
        print(f"Edit: docs/releasenotes/version{Version(new_version).major:02d}.md")
        sys.exit(1)
    # Build the (path, find, replace) edit list, failing fast if any
    # configured pattern no longer matches a file's contents.
    actions = []
    for path_pattern, version_pattern in config:
        paths = [Path(p) for p in glob.glob(path_pattern)]
        if not paths:
            print(f"error: Pattern {path_pattern} didn't match any files")
            sys.exit(1)
        find_pattern = version_pattern.format(current_version)
        replace_pattern = version_pattern.format(new_version)
        found_at_least_one_file_needing_update = False
        for path in paths:
            contents = path.read_text(encoding="utf8")
            if find_pattern in contents:
                found_at_least_one_file_needing_update = True
                actions.append(
                    (
                        path,
                        find_pattern,
                        replace_pattern,
                    )
                )
        if not found_at_least_one_file_needing_update:
            print(
                f'''error: Didn't find any occurrences of "{find_pattern}" in "{path_pattern}"'''
            )
            sys.exit(1)
    print()
    print("Here's the plan:")
    print()
    for action in actions:
        path, find, replace = action
        print(f"{path} {RED}{find}{OFF} → {GREEN}{replace}{OFF}")
    print(f"Then commit, and tag as v{new_version}")
    answer = input("Proceed? [y/N] ").strip()
    if answer != "y":
        print("Aborted")
        sys.exit(1)
    # Apply the planned replacements.
    for path, find, replace in actions:
        contents = path.read_text(encoding="utf8")
        contents = contents.replace(find, replace)
        path.write_text(contents, encoding="utf8")
    print("Files updated.")
    print()
    # Give the operator a chance to review/adjust files before committing.
    while input('Type "done" to continue: ').strip().lower() != "done":
        pass
    subprocess.run(
        [
            "git",
            "commit",
            "--all",
            f"--message=Bump version: v{new_version}",
        ],
        check=True,
    )
    subprocess.run(
        [
            "git",
            "tag",
            "--annotate",
            f"--message=v{new_version}",
            f"v{new_version}",
        ],
        check=True,
    )
    print("Commit and tag created locally.")
    print()
    # Get current branch
    branch_result = subprocess.run(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"],
        capture_output=True,
        encoding="utf8",
        check=True,
    )
    branch = branch_result.stdout.strip()
    # Push commit and wait for CI; the tag is only pushed when CI passes.
    if not push_and_wait_for_ci(branch):
        print()
        print(f"{RED}CI failed. The tag was NOT pushed.{OFF}")
        print("Fix the issues, then manually push the tag:")
        print(f" git push origin v{new_version}")
        sys.exit(1)
    # Push tag to trigger release
    if not push_tag(f"v{new_version}"):
        print(f"{RED}Failed to push tag.{OFF} Push manually:")
        print(f" git push origin v{new_version}")
        sys.exit(1)
    print()
    print(f"{GREEN}Done! Release workflow has been triggered.{OFF}")
    print()
    release_url = f"https://github.com/{REPO_NAME}/releases/tag/v{new_version}"
    print("Monitor the release at:")
    print(f" {release_url}")
if __name__ == "__main__":
    # Run from the repository root so relative paths (docs/, src/) resolve.
    os.chdir(Path(__file__).parent.parent.resolve())
    cyclopts.run(bump_version)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "bin/bump_version.py",
"license": "Mozilla Public License 2.0",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:scripts/generate_glyphless_font.py | #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2024 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Generate the Occulta glyphless font for OCRmyPDF.
Occulta (Latin for "hidden") is a glyphless font designed for invisible text layers
in searchable PDFs. It has proper Unicode cmap coverage using format 13 (many-to-one)
for efficient mapping of all BMP codepoints to a small set of width-specific glyphs.
Features:
- Full BMP coverage (U+0000 to U+FFFF)
- Width-aware glyphs for proper text selection:
- Zero-width for combining marks and invisible characters
- Regular width (500 units) for Latin, Greek, Cyrillic, Arabic, Hebrew, etc.
- Double width (1000 units) for CJK and fullwidth characters
- Uses cmap format 13 (many-to-one) for ~12KB size vs ~780KB with format 12
- Compatible with fpdf2 and other modern PDF libraries
Usage:
python scripts/generate_glyphless_font.py
Output:
src/ocrmypdf/data/Occulta.ttf
"""
from __future__ import annotations
import unicodedata
from pathlib import Path
from fontTools.fontBuilder import FontBuilder
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._c_m_a_p import CmapSubtable
from fontTools.ttLib.tables._g_l_y_f import Glyph
# Output path relative to this script
OUTPUT_PATH = Path(__file__).parent.parent / "src" / "ocrmypdf" / "data" / "Occulta.ttf"

# Font metrics (units per em = 1000)
UNITS_PER_EM = 1000
ASCENT = 800
DESCENT = -200  # negative, per TrueType convention

# Glyph definitions: (name, advance_width, left_side_bearing)
# All glyphs are empty outlines; only the advance width matters.
GLYPHS = [
    (".notdef", 500, 0),  # Required, used for unmapped characters
    ("space", 500, 0),  # U+0020 SPACE
    ("nbspace", 500, 0),  # U+00A0 NO-BREAK SPACE
    ("blank0", 0, 0),  # Zero-width (combining marks, ZWNJ, ZWJ, BOM)
    ("blank1", 500, 0),  # Regular width (most scripts)
    ("blank2", 1000, 0),  # Double width (CJK, fullwidth)
]

# Explicit zero-width character codepoints, checked before the Unicode
# category lookup in classify_codepoint()
ZERO_WIDTH_CHARS = frozenset(
    [
        0x200B,  # ZERO WIDTH SPACE
        0x200C,  # ZERO WIDTH NON-JOINER
        0x200D,  # ZERO WIDTH JOINER
        0xFEFF,  # ZERO WIDTH NO-BREAK SPACE (BOM)
        0x200E,  # LEFT-TO-RIGHT MARK
        0x200F,  # RIGHT-TO-LEFT MARK
        0x202A,  # LEFT-TO-RIGHT EMBEDDING
        0x202B,  # RIGHT-TO-LEFT EMBEDDING
        0x202C,  # POP DIRECTIONAL FORMATTING
        0x202D,  # LEFT-TO-RIGHT OVERRIDE
        0x202E,  # RIGHT-TO-LEFT OVERRIDE
        0x2060,  # WORD JOINER
        0x2061,  # FUNCTION APPLICATION
        0x2062,  # INVISIBLE TIMES
        0x2063,  # INVISIBLE SEPARATOR
        0x2064,  # INVISIBLE PLUS
    ]
)
def classify_codepoint(codepoint: int) -> str:
    """Classify a Unicode codepoint into one of our glyph categories.

    Args:
        codepoint: Unicode codepoint (0x0000 to 0xFFFF)

    Returns:
        Glyph name to map this codepoint to
    """
    # A handful of codepoints have dedicated glyphs or explicit widths.
    if codepoint == 0x0020:
        return "space"
    if codepoint == 0x00A0:
        return "nbspace"
    if codepoint in ZERO_WIDTH_CHARS:
        return "blank0"
    ch = chr(codepoint)
    try:
        if unicodedata.category(ch).startswith("M"):
            # Combining marks are zero-width
            return "blank0"
        if unicodedata.east_asian_width(ch) in ("W", "F"):
            # Wide and Fullwidth characters are double-width
            return "blank2"
    except (ValueError, TypeError):
        # Fallback for any edge cases
        pass
    # Everything else is regular width
    return "blank1"
def build_cmap() -> dict[int, str]:
    """Build the Unicode to glyph name mapping for the entire BMP.

    Returns:
        Dictionary mapping codepoints to glyph names
    """
    mapping: dict[int, str] = {}
    for codepoint in range(0x10000):
        mapping[codepoint] = classify_codepoint(codepoint)
    return mapping
def create_font() -> TTFont:
    """Create the Occulta glyphless font.

    Returns:
        TTFont object ready to be saved
    """
    import time

    builder = FontBuilder(UNITS_PER_EM, isTTF=True)
    builder.setupGlyphOrder([name for name, _, _ in GLYPHS])

    # Every glyph is an empty outline — the font draws nothing visible.
    empty_glyphs = {}
    for name, _, _ in GLYPHS:
        glyph = Glyph()
        glyph.numberOfContours = 0
        empty_glyphs[name] = glyph
    builder.setupGlyf(empty_glyphs)

    # Advance width and left side bearing per glyph
    builder.setupHorizontalMetrics({name: (width, lsb) for name, width, lsb in GLYPHS})

    # Minimal cmap to satisfy FontBuilder (we'll replace it later)
    builder.setupCharacterMap({0x0020: "space", 0x00A0: "nbspace"})

    # Required top-level tables
    builder.setupHorizontalHeader(ascent=ASCENT, descent=DESCENT)
    builder.setupOS2(
        sTypoAscender=ASCENT,
        sTypoDescender=DESCENT,
        sTypoLineGap=0,
        usWinAscent=UNITS_PER_EM,
        usWinDescent=abs(DESCENT),
        sxHeight=500,
        sCapHeight=700,
    )
    # Use current time for font timestamps
    timestamp = int(time.time())
    builder.setupHead(unitsPerEm=UNITS_PER_EM, created=timestamp, modified=timestamp)
    builder.setupPost()
    builder.setupNameTable(
        {
            "familyName": "Occulta",
            "styleName": "Regular",
            "uniqueFontIdentifier": "OCRmyPDF;Occulta-Regular;2026",
            "fullName": "Occulta Regular",
            "version": "Version 2.0",
            "psName": "Occulta-Regular",
        }
    )

    font = builder.font

    # Replace the placeholder cmap with format 13 (many-to-one) covering the BMP
    subtable = CmapSubtable.newSubtable(13)
    subtable.platformID = 3  # Windows
    subtable.platEncID = 10  # Unicode full repertoire
    subtable.language = 0
    subtable.cmap = build_cmap()
    font["cmap"].tables = [subtable]
    return font
def main() -> None:
    """Generate the Occulta font and save it."""
    print("Generating Occulta glyphless font...")
    font = create_font()
    # Ensure the destination directory exists before saving
    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    font.save(str(OUTPUT_PATH))
    font.close()
    print(f"Saved to: {OUTPUT_PATH}")
    print(f"Size: {OUTPUT_PATH.stat().st_size:,} bytes")
    # Re-open the saved file and report its cmap subtables as a sanity check
    font = TTFont(str(OUTPUT_PATH))
    for table in font["cmap"].tables:
        print(
            f"cmap: Platform {table.platformID}, "
            f"Encoding {table.platEncID}, "
            f"Format {table.format}, "
            f"{len(table.cmap)} mappings"
        )
    font.close()
    print("Done!")
if __name__ == "__main__":
    # Script entry point: regenerate the font in place.
    main()
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "scripts/generate_glyphless_font.py",
"license": "Mozilla Public License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/_exec/verapdf.py | # SPDX-FileCopyrightText: 2024 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Interface to verapdf executable."""
from __future__ import annotations
import json
import logging
from pathlib import Path
from subprocess import PIPE
from typing import NamedTuple
from packaging.version import Version
from ocrmypdf.exceptions import MissingDependencyError
from ocrmypdf.subprocess import get_version, run
log = logging.getLogger(__name__)
class ValidationResult(NamedTuple):
    """Result of PDF/A validation."""

    valid: bool  # True when validation passed with zero failed rules
    failed_rules: int  # number of failed rules; -1 when output could not be parsed
    message: str  # human-readable summary of the outcome
def version() -> Version:
    """Get verapdf version."""
    version_string = get_version('verapdf', regex=r'veraPDF (\d+(\.\d+)*)')
    return Version(version_string)
def available() -> bool:
    """Check if verapdf is available."""
    try:
        version()
        return True
    except MissingDependencyError:
        return False
def output_type_to_flavour(output_type: str) -> str:
    """Map OCRmyPDF output_type to verapdf flavour.

    Args:
        output_type: One of 'pdfa', 'pdfa-1', 'pdfa-2', 'pdfa-3'

    Returns:
        verapdf flavour string like '1b', '2b', '3b'
    """
    if output_type == 'pdfa-1':
        return '1b'
    if output_type == 'pdfa-3':
        return '3b'
    # 'pdfa', 'pdfa-2', and anything unrecognized default to PDF/A-2b
    return '2b'
def validate(input_file: Path, flavour: str) -> ValidationResult:
    """Validate a PDF against a PDF/A profile.

    Args:
        input_file: Path to PDF file to validate
        flavour: verapdf flavour (1a, 1b, 2a, 2b, 2u, 3a, 3b, 3u)

    Returns:
        ValidationResult with validation status
    """
    cmdline = [
        'verapdf',
        '--format',
        'json',
        '--flavour',
        flavour,
        str(input_file),
    ]
    try:
        proc = run(cmdline, stdout=PIPE, stderr=PIPE, check=False)
    except FileNotFoundError as e:
        raise MissingDependencyError('verapdf') from e
    try:
        report = json.loads(proc.stdout)
        jobs = report.get('report', {}).get('jobs', [])
        if not jobs:
            return ValidationResult(False, -1, 'No validation jobs in result')
        results = jobs[0].get('validationResult', [])
        if not results:
            return ValidationResult(False, -1, 'No validation result in output')
        failed_rules = results[0].get('details', {}).get('failedRules', 0)
    except (json.JSONDecodeError, KeyError, TypeError) as e:
        log.debug('Failed to parse verapdf output: %s', e)
        return ValidationResult(False, -1, f'Failed to parse verapdf output: {e}')
    if failed_rules == 0:
        return ValidationResult(True, 0, 'PDF/A validation passed')
    return ValidationResult(
        False,
        failed_rules,
        f'PDF/A validation failed with {failed_rules} rule violations',
    )
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/_exec/verapdf.py",
"license": "Mozilla Public License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/_options.py | # SPDX-FileCopyrightText: 2024 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Internal options model for OCRmyPDF."""
from __future__ import annotations
import json
import logging
import os
import shlex
import unicodedata
from collections.abc import Sequence
from enum import StrEnum
from io import IOBase
from pathlib import Path
from typing import Any, BinaryIO
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from ocrmypdf._defaults import DEFAULT_LANGUAGE, DEFAULT_ROTATE_PAGES_THRESHOLD
from ocrmypdf.exceptions import BadArgsError
from ocrmypdf.helpers import monotonic
# Import plugin option models - these will be available after plugins are loaded
# We'll use forward references and handle imports dynamically
log = logging.getLogger(__name__)
# Module-level registry for plugin option models
# This is populated by setup_plugin_infrastructure() after plugins are loaded
_plugin_option_models: dict[str, type] = {}
PathOrIO = BinaryIO | IOBase | Path | str | bytes
class ProcessingMode(StrEnum):
    """OCR processing mode for handling pages with existing text.

    This enum controls how OCRmyPDF handles pages that already contain text:

    - ``default``: Error if text is found (standard OCR behavior)
    - ``force``: Rasterize all content and run OCR regardless of existing text
    - ``skip``: Skip OCR on pages that already have text
    - ``redo``: Re-OCR pages, stripping old invisible text layer
    """

    default = 'default'  # error out when text is found
    force = 'force'  # rasterize everything and OCR unconditionally
    skip = 'skip'  # leave pages that already have text untouched
    redo = 'redo'  # strip the prior invisible text layer and re-OCR
class TaggedPdfMode(StrEnum):
    """Control behavior when encountering a Tagged PDF.

    Tagged PDFs often indicate documents generated from office applications
    that may not need OCR. This enum controls how OCRmyPDF handles them:

    - ``default``: Error if ProcessingMode is default, otherwise warn
    - ``ignore``: Always warn but continue processing (never error)
    """

    default = 'default'  # error or warn depending on the ProcessingMode
    ignore = 'ignore'  # warn but always continue
def _pages_from_ranges(ranges: str) -> set[int]:
    """Convert a page range string to a set of 0-based page numbers.

    Args:
        ranges: Comma-separated 1-based page numbers and inclusive subranges,
            e.g. ``"1-3,5"`` selects pages 0, 1, 2 and 4.

    Returns:
        Set of 0-based page indices.

    Raises:
        BadArgsError: If the string contains no recognizable page ranges, an
            invalid token or subrange, or references a page number less than 1.
    """
    pages: list[int] = []
    page_groups = ranges.replace(' ', '').split(',')
    for group in page_groups:
        if not group:
            continue
        try:
            start, end = group.split('-')
        except ValueError:
            # Not of the form "start-end": treat as a single page number.
            # Fix: wrap int() so malformed tokens like "abc" or "1-2-3" raise
            # BadArgsError, consistent with the subrange branch below, instead
            # of leaking a bare ValueError to the caller.
            try:
                pages.append(int(group) - 1)
            except ValueError:
                raise BadArgsError(f"invalid page subrange '{group}'") from None
        else:
            try:
                new_pages = list(range(int(start) - 1, int(end)))
                if not new_pages:
                    raise BadArgsError(
                        f"invalid page subrange '{start}-{end}'"
                    ) from None
                pages.extend(new_pages)
            except ValueError:
                raise BadArgsError(f"invalid page subrange '{group}'") from None
    if not pages:
        raise BadArgsError(
            f"The string of page ranges '{ranges}' did not contain any recognizable "
            f"page ranges."
        )
    if not monotonic(pages):
        log.warning(
            "List of pages to process contains duplicate pages, or pages that are "
            "out of order"
        )
    if any(page < 0 for page in pages):
        raise BadArgsError("pages refers to a page number less than 1")
    log.debug("OCRing only these pages: %s", pages)
    return set(pages)
class OcrOptions(BaseModel):
"""Internal options model that can masquerade as argparse.Namespace.
This model provides proper typing and validation while maintaining
compatibility with existing code that expects argparse.Namespace behavior.
"""
# I/O options
input_file: PathOrIO
output_file: PathOrIO
sidecar: PathOrIO | None = None
output_folder: Path | None = None
work_folder: Path | None = None
# Core OCR options
languages: list[str] = Field(default_factory=lambda: [DEFAULT_LANGUAGE])
output_type: str = 'auto'
mode: ProcessingMode = ProcessingMode.default
# Backward compatibility properties for force_ocr, skip_text, redo_ocr:
# legacy boolean flags are now derived from the single `mode` field.
@property
def force_ocr(self) -> bool:
    """Backward compatibility alias: True when mode == ProcessingMode.force."""
    return self.mode == ProcessingMode.force

@property
def skip_text(self) -> bool:
    """Backward compatibility alias: True when mode == ProcessingMode.skip."""
    return self.mode == ProcessingMode.skip

@property
def redo_ocr(self) -> bool:
    """Backward compatibility alias: True when mode == ProcessingMode.redo."""
    return self.mode == ProcessingMode.redo
# Job control
jobs: int | None = None
use_threads: bool = True
progress_bar: bool = True
quiet: bool = False
verbose: int = 0
keep_temporary_files: bool = False
# Image processing
image_dpi: int | None = None
deskew: bool = False
clean: bool = False
clean_final: bool = False
rotate_pages: bool = False
remove_background: bool = False
remove_vectors: bool = False
oversample: int = 0
unpaper_args: list[str] | None = None
# OCR behavior
skip_big: float | None = None
pages: str | set[int] | None = None # Can be string or set after validation
invalidate_digital_signatures: bool = False
tagged_pdf_mode: TaggedPdfMode = TaggedPdfMode.default
# Metadata
title: str | None = None
author: str | None = None
subject: str | None = None
keywords: str | None = None
# Optimization
optimize: int = 1
jpg_quality: int | None = None
png_quality: int | None = None
jbig2_threshold: float = 0.85
# Compatibility alias for plugins that expect jpeg_quality
@property
def jpeg_quality(self):
    """Read-through compatibility alias for jpg_quality."""
    return self.jpg_quality

@jpeg_quality.setter
def jpeg_quality(self, value):
    """Write-through compatibility alias for jpg_quality."""
    self.jpg_quality = value
# Advanced options
max_image_mpixels: float = 250.0
pdf_renderer: str = 'auto'
ocr_engine: str = 'auto'
rasterizer: str = 'auto'
rotate_pages_threshold: float = DEFAULT_ROTATE_PAGES_THRESHOLD
user_words: os.PathLike | None = None
user_patterns: os.PathLike | None = None
fast_web_view: float = 1.0
continue_on_soft_render_error: bool | None = None
# Tesseract options - also accessible via options.tesseract.<field>
tesseract_config: list[str] = []
tesseract_pagesegmode: int | None = None
tesseract_oem: int | None = None
tesseract_thresholding: int | None = None
tesseract_timeout: float | None = None
tesseract_non_ocr_timeout: float | None = None
tesseract_downsample_above: int = 32767
tesseract_downsample_large_images: bool | None = None
# Ghostscript options - also accessible via options.ghostscript.<field>
pdfa_image_compression: str | None = None
color_conversion_strategy: str = "LeaveColorUnchanged"
# Optimize/JBIG2 options - also accessible via options.optimize.<field>
# Fix: jbig2_threshold was declared here a second time with the same
# default (0.85); the duplicate is removed — the declaration above in the
# optimization section is authoritative.
# Plugin system
plugins: Sequence[Path | str] | None = None
# Store any extra attributes (for plugins and dynamic options)
extra_attrs: dict[str, Any] = Field(
default_factory=dict, exclude=True, alias='_extra_attrs'
)
@field_validator('languages')
@classmethod
def validate_languages(cls, v):
    """Ensure languages list is not empty; fall back to the default language."""
    return v if v else [DEFAULT_LANGUAGE]
@field_validator('output_type')
@classmethod
def validate_output_type(cls, v):
    """Validate output type is one of the allowed values."""
    valid_types = {'auto', 'pdfa', 'pdf', 'pdfa-1', 'pdfa-2', 'pdfa-3', 'none'}
    if v in valid_types:
        return v
    raise ValueError(f"output_type must be one of {valid_types}")
@field_validator('pdf_renderer')
@classmethod
def validate_pdf_renderer(cls, v):
    """Validate PDF renderer is one of the allowed values."""
    # Legacy hocr/hocrdebug are accepted but redirected to fpdf2
    all_accepted = {'auto', 'sandwich', 'fpdf2'} | {'hocr', 'hocrdebug'}
    if v in all_accepted:
        return v
    raise ValueError(f"pdf_renderer must be one of {all_accepted}")
@field_validator('rasterizer')
@classmethod
def validate_rasterizer(cls, v):
    """Validate rasterizer is one of the allowed values."""
    valid_rasterizers = {'auto', 'ghostscript', 'pypdfium'}
    if v in valid_rasterizers:
        return v
    raise ValueError(f"rasterizer must be one of {valid_rasterizers}")
@field_validator('clean_final')
@classmethod
def validate_clean_final(cls, v, info):
    """If clean_final is True, also set clean to True."""
    should_propagate = v and hasattr(info, 'data') and 'clean' in info.data
    if should_propagate:
        info.data['clean'] = True
    return v
@field_validator('jobs')
@classmethod
def validate_jobs(cls, v):
    """Validate jobs (worker count) is a reasonable number, when given."""
    if v is not None and not 0 <= v <= 256:
        raise ValueError("jobs must be between 0 and 256")
    return v
@field_validator('verbose')
@classmethod
def validate_verbose(cls, v):
    """Validate verbosity level is 0, 1 or 2."""
    if not 0 <= v <= 2:
        raise ValueError("verbose must be between 0 and 2")
    return v
@field_validator('oversample')
@classmethod
def validate_oversample(cls, v):
    """Validate oversample DPI is within the supported range."""
    if not 0 <= v <= 5000:
        raise ValueError("oversample must be between 0 and 5000")
    return v
@field_validator('max_image_mpixels')
@classmethod
def validate_max_image_mpixels(cls, v):
    """Validate the megapixel limit is non-negative (0 disables the limit check)."""
    is_negative = v < 0
    if is_negative:
        raise ValueError("max_image_mpixels must be non-negative")
    return v
@field_validator('rotate_pages_threshold')
@classmethod
def validate_rotate_pages_threshold(cls, v):
    """Validate the page-rotation confidence threshold is in range."""
    out_of_range = v < 0 or v > 1000
    if out_of_range:
        raise ValueError("rotate_pages_threshold must be between 0 and 1000")
    return v
@field_validator('title', 'author', 'keywords', 'subject')
@classmethod
def validate_metadata_unicode(cls, v):
    """Reject metadata containing private-use or non-BMP Unicode characters."""
    if v is None:
        return v
    for char in v:
        is_private_use = unicodedata.category(char) == 'Co'
        if is_private_use or ord(char) >= 0x10000:
            hexchar = hex(ord(char))[2:].upper()
            raise ValueError(
                f"Metadata string contains unsupported Unicode character: "
                f"{char} (U+{hexchar})"
            )
    return v
@field_validator('pages')
@classmethod
def validate_pages_format(cls, v):
    """Convert a page-ranges string to a set of page numbers (idempotent)."""
    if v is None or isinstance(v, set):
        # Nothing given, or already converted on a previous pass.
        return v
    return _pages_from_ranges(v)
@field_validator('unpaper_args', mode='before')
@classmethod
def validate_unpaper_args(cls, v):
    """Normalize unpaper_args from string to list and validate security."""
    if v is None:
        return v
    if isinstance(v, str):
        v = shlex.split(v)
    if not isinstance(v, list):
        raise ValueError(f'unpaper_args must be a string or list, got {type(v)}')
    # Path-like arguments could smuggle filenames into the unpaper command line.
    for arg in v:
        if '/' in arg or arg in ('.', '..'):
            raise ValueError('No filenames allowed in --unpaper-args')
    return v
@model_validator(mode='before')
@classmethod
def handle_special_cases(cls, data):
    """Handle special cases for API compatibility and legacy options.

    - Supplies a placeholder output_file when only output_folder is given
      (the hOCR API path).
    - Translates the mutually exclusive legacy boolean flags force_ocr /
      skip_text / redo_ocr into the single ``mode`` field, rejecting
      conflicting combinations.
    """
    if isinstance(data, dict):
        # For hOCR API, output_file might not be present
        if 'output_folder' in data and 'output_file' not in data:
            data['output_file'] = '/dev/null'  # Placeholder
        # Convert legacy boolean options (force_ocr, skip_text, redo_ocr)
        # to mode; pop() removes them so they never reach field validation.
        force = data.pop('force_ocr', None)
        skip = data.pop('skip_text', None)
        redo = data.pop('redo_ocr', None)
        # Count how many legacy options are set to True
        legacy_set = [
            (force, ProcessingMode.force),
            (skip, ProcessingMode.skip),
            (redo, ProcessingMode.redo),
        ]
        legacy_true = [(val, mode) for val, mode in legacy_set if val]
        legacy_count = len(legacy_true)
        # Get current mode value (may be string or enum)
        current_mode = data.get('mode', ProcessingMode.default)
        if isinstance(current_mode, str):
            current_mode = ProcessingMode(current_mode)
        mode_is_set = current_mode != ProcessingMode.default
        if legacy_count > 1:
            raise ValueError(
                "Choose only one of --force-ocr, --skip-text, --redo-ocr."
            )
        if legacy_count == 1:
            expected_mode = legacy_true[0][1]
            if mode_is_set and current_mode != expected_mode:
                # An explicit non-default --mode that disagrees with the
                # legacy flag is a user error, not something to guess at.
                legacy_flag = f"--{expected_mode.value.replace('_', '-')}-ocr"
                raise ValueError(
                    f"Conflicting options: --mode {current_mode.value} "
                    f"cannot be used with {legacy_flag} or similar legacy flag."
                )
            # Set mode from legacy option
            data['mode'] = expected_mode
    return data
@model_validator(mode='after')
def validate_redo_ocr_options(self):
    """Reject image-altering options that cannot be combined with redo mode."""
    if self.mode == ProcessingMode.redo:
        # Redo mode reuses existing page images; these options would alter them.
        if self.deskew or self.clean_final or self.remove_background:
            raise ValueError(
                "--redo-ocr (or --mode redo) is not currently compatible with "
                "--deskew, --clean-final, and --remove-background"
            )
    return self
@model_validator(mode='after')
def validate_output_type_compatibility(self):
    """Ensure a real output file is not requested alongside --output-type none."""
    if self.output_type == 'none':
        destination = str(self.output_file)
        # Only the null device or stdout ('-') make sense with no output.
        if destination not in (os.devnull, '-'):
            raise ValueError(
                "Since you specified `--output-type none`, the output file "
                f"{self.output_file} cannot be produced. Set the output file to "
                f"`-` to suppress this message."
            )
    return self
@property
def lossless_reconstruction(self):
    """Whether original PDF pages can be reconstructed without image loss.

    True unless an option that rewrites page images is active
    (deskew, clean-final, remove-background) or force mode is selected.
    """
    destructive = (
        self.deskew
        or self.clean_final
        or self.mode == ProcessingMode.force
        or self.remove_background
    )
    return not destructive
def model_dump_json_safe(self) -> str:
    """Serialize to JSON with special handling for non-serializable types.

    Path values are tagged as ``{'__type__': 'Path', 'value': ...}`` and
    stream-like objects are replaced with a placeholder so the output can
    round-trip through :meth:`model_validate_json_safe`. Plugin cache
    entries in ``extra_attrs`` are excluded; they are recreated lazily.

    Returns:
        A JSON string representation of this options object.
    """
    # Create a copy of the model data for serialization
    data = self.model_dump()

    # Handle special types that don't serialize to JSON directly
    def _serialize_value(value):
        if isinstance(value, Path):
            return {'__type__': 'Path', 'value': str(value)}
        elif (
            isinstance(value, BinaryIO | IOBase)
            or hasattr(value, 'read')
            or hasattr(value, 'write')
        ):
            # Stream object - replace with placeholder
            return {'__type__': 'Stream', 'value': 'stream'}
        elif hasattr(value, '__class__') and 'Iterator' in value.__class__.__name__:
            # Handle Pydantic serialization iterators
            return {'__type__': 'Stream', 'value': 'stream'}
        elif isinstance(value, property):
            # Handle property objects that shouldn't be serialized
            return None
        elif isinstance(value, list | tuple):
            # Tuples are flattened to lists; JSON has no tuple type.
            return [_serialize_value(item) for item in value]
        elif isinstance(value, dict):
            return {k: _serialize_value(v) for k, v in value.items()}
        else:
            return value

    # Process all fields
    serializable_data = {}
    for key, value in data.items():
        serialized_value = _serialize_value(value)
        # NOTE(review): this skips any field whose value is None, not just
        # properties — confirm dropping None-valued fields is intended.
        if serialized_value is not None:  # Skip None values from properties
            serializable_data[key] = serialized_value

    # Add extra_attrs, excluding plugin cache entries (they'll be recreated lazily)
    if self.extra_attrs:
        filtered_extra = {
            k: v
            for k, v in self.extra_attrs.items()
            if not k.startswith('_plugin_cache_')
        }
        if filtered_extra:
            serializable_data['_extra_attrs'] = _serialize_value(filtered_extra)

    return json.dumps(serializable_data)
@classmethod
def model_validate_json_safe(cls, json_str: str) -> OcrOptions:
    """Reconstruct from JSON with special handling for non-serializable types.

    Inverse of :meth:`model_dump_json_safe`: tagged dicts of the form
    ``{'__type__': 'Path'|'Stream', 'value': ...}`` are converted back to
    Path objects or placeholder strings, and the ``_extra_attrs`` entry is
    restored onto the instance after construction.

    Args:
        json_str: JSON produced by model_dump_json_safe().

    Returns:
        A new instance of this options class.
    """
    data = json.loads(json_str)

    # Handle special types during deserialization
    def _deserialize_value(value):
        if isinstance(value, dict) and '__type__' in value:
            if value['__type__'] == 'Path':
                return Path(value['value'])
            elif value['__type__'] == 'Stream':
                # For streams, we'll use a placeholder string
                return value['value']
            else:
                # Unknown tag: fall back to the raw stored value.
                return value['value']
        elif isinstance(value, list):
            return [_deserialize_value(item) for item in value]
        elif isinstance(value, dict):
            return {k: _deserialize_value(v) for k, v in value.items()}
        else:
            return value

    # Process all fields
    deserialized_data = {}
    extra_attrs = {}
    for key, value in data.items():
        if key == '_extra_attrs':
            extra_attrs = _deserialize_value(value)
        else:
            deserialized_data[key] = _deserialize_value(value)

    # Create instance, then reattach extras (they are not model fields).
    instance = cls(**deserialized_data)
    instance.extra_attrs = extra_attrs
    return instance
# Pydantic model configuration for OcrOptions.
model_config = ConfigDict(
    extra="forbid",  # Force use of extra_attrs for unknown fields
    arbitrary_types_allowed=True,  # Allow BinaryIO, Path, etc.
    validate_assignment=True,  # Validate on attribute assignment
)
@classmethod
def register_plugin_models(cls, models: dict[str, type]) -> None:
    """Register plugin option model classes for nested access.

    Args:
        models: Dictionary mapping namespace to model class
    """
    # Merge entry by entry into the module-level registry; a later
    # registration overwrites an earlier one for the same namespace.
    for namespace, model_class in models.items():
        _plugin_option_models[namespace] = model_class
def _get_plugin_options(self, namespace: str) -> Any:
    """Get or create a plugin options instance for the given namespace.

    Plugin option instances are created lazily from this model's flat
    fields (e.g. ``tesseract_timeout`` populates the plugin model's
    ``timeout``) and cached in ``extra_attrs`` under
    ``_plugin_cache_<namespace>``.

    Args:
        namespace: The plugin namespace (e.g., 'tesseract', 'optimize')

    Returns:
        An instance of the plugin's option model.

    Raises:
        AttributeError: If the namespace was never registered via
            register_plugin_models().
    """
    # Use extra_attrs to cache plugin option instances
    cache_key = f'_plugin_cache_{namespace}'
    if cache_key in self.extra_attrs:
        return self.extra_attrs[cache_key]
    if namespace not in _plugin_option_models:
        raise AttributeError(
            f"Plugin namespace '{namespace}' is not registered. "
            f"Ensure setup_plugin_infrastructure() was called."
        )
    model_class = _plugin_option_models[namespace]

    def _convert_value(value):
        """Convert value to be compatible with plugin model fields."""
        # Plugin models expect plain strings rather than Path-like objects.
        if isinstance(value, os.PathLike):
            return os.fspath(value)
        return value

    # Build kwargs from flat fields
    kwargs = {}
    for field_name in model_class.model_fields:
        # Try namespace_field pattern first (e.g., tesseract_timeout)
        flat_name = f"{namespace}_{field_name}"
        if flat_name in OcrOptions.model_fields:
            value = getattr(self, flat_name)
            if value is not None:
                kwargs[field_name] = _convert_value(value)
        # Also check direct field name (for fields like jbig2_lossy)
        elif field_name in OcrOptions.model_fields:
            value = getattr(self, field_name)
            if value is not None:
                kwargs[field_name] = _convert_value(value)
        # Check for special mappings
        elif namespace == 'optimize' and field_name == 'level':
            # 'optimize' field maps to 'level' in OptimizeOptions
            if 'optimize' in OcrOptions.model_fields:
                value = self.optimize
                if value is not None:
                    kwargs[field_name] = _convert_value(value)
        elif namespace == 'optimize' and field_name == 'jpeg_quality':
            # jpg_quality maps to jpeg_quality
            if 'jpg_quality' in OcrOptions.model_fields:
                value = self.jpg_quality
                if value is not None:
                    kwargs[field_name] = _convert_value(value)

    # Create and cache the plugin options instance
    instance = model_class(**kwargs)
    self.extra_attrs[cache_key] = instance
    return instance
def __getattr__(self, name: str) -> Any:
    """Resolve plugin namespaces and extra attributes dynamically.

    Called only when normal attribute lookup fails. Supports nested
    plugin-option access such as ``options.tesseract.timeout`` or
    ``options.optimize.level``; plugin models must have been registered
    via register_plugin_models() (built-in plugins register theirs during
    initialization). Entries in ``extra_attrs`` are consulted as a last
    resort.

    Args:
        name: Attribute name being looked up.

    Returns:
        The plugin options instance or extra-attribute value.

    Raises:
        AttributeError: For private names and anything not resolvable.
    """
    # Private names never resolve to plugin namespaces; failing fast here
    # keeps internal attribute probes from triggering plugin lookup.
    if not name.startswith('_'):
        if name in _plugin_option_models:
            return self._get_plugin_options(name)
        if 'extra_attrs' in self.__dict__ and name in self.extra_attrs:
            return self.extra_attrs[name]
    raise AttributeError(
        f"'{type(self).__name__}' object has no attribute '{name}'"
    )
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/_options.py",
"license": "Mozilla Public License 2.0",
"lines": 540,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/_plugin_registry.py | # SPDX-FileCopyrightText: 2024 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Plugin option registry for dynamic model composition."""
from __future__ import annotations
import logging
from pydantic import BaseModel
log = logging.getLogger(__name__)
class PluginOptionRegistry:
    """Registry for plugin option models.

    Collects option models contributed by plugins during initialization.
    Registered options become reachable on OcrOptions both via nested
    namespaces (e.g. options.tesseract.timeout) and via flat field names
    for backward compatibility (e.g. options.tesseract_timeout).
    """

    def __init__(self):
        # Maps namespace -> pydantic model class for that plugin's options.
        self._option_models: dict[str, type[BaseModel]] = {}

    def register_option_model(
        self, namespace: str, model_class: type[BaseModel]
    ) -> None:
        """Register a plugin's option model.

        Args:
            namespace: The namespace for the plugin options (e.g., 'tesseract')
            model_class: The Pydantic model class for the plugin options
        """
        # A duplicate namespace silently wins; log it to aid debugging.
        if namespace in self._option_models:
            log.warning(
                f"Plugin option namespace '{namespace}' already registered, overriding"
            )
        self._option_models[namespace] = model_class
        log.debug(
            f"Registered plugin option model for namespace '{namespace}': "
            f"{model_class.__name__}"
        )

    def get_registered_models(self) -> dict[str, type[BaseModel]]:
        """Return a shallow copy of all registered plugin option models."""
        return dict(self._option_models)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/_plugin_registry.py",
"license": "Mozilla Public License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/_validation_coordinator.py | # SPDX-FileCopyrightText: 2024 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Validation coordinator for plugin options and cross-cutting concerns."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import pluggy
from ocrmypdf._options import OcrOptions
log = logging.getLogger(__name__)
class ValidationCoordinator:
    """Coordinates validation across plugin models and core options."""

    def __init__(self, plugin_manager: pluggy.PluginManager):
        # The registry is attached to the plugin manager during setup;
        # it may be absent (None) if setup was skipped.
        self.plugin_manager = plugin_manager
        self.registry = getattr(plugin_manager, '_option_registry', None)

    def validate_all_options(self, options: OcrOptions) -> None:
        """Run comprehensive validation on all options.

        This runs validation in the correct order:
        1. Plugin self-validation (already done by Pydantic)
        2. Plugin context validation (requires external context)
        3. Cross-cutting validation (between plugins and core)

        Args:
            options: The options to validate

        Raises:
            ValueError: If a cross-cutting constraint is violated.
        """
        # Step 1: Plugin context validation
        self._validate_plugin_contexts(options)
        # Step 2: Cross-cutting validation
        self._validate_cross_cutting_concerns(options)

    def _validate_plugin_contexts(self, options: OcrOptions) -> None:
        """Validate plugin options that require external context."""
        # For now, we'll run the plugin validation directly since the models
        # are still being integrated. This ensures the validation warnings
        # and checks still work as expected.
        # Run Tesseract validation
        self._validate_tesseract_options(options)
        # Run Optimize validation
        self._validate_optimize_options(options)

    def _validate_tesseract_options(self, options: OcrOptions) -> None:
        """Validate Tesseract options (warnings only; nothing raised here)."""
        # Page segmentation modes 0 (OSD only) and 2 (no OCR) disable text output.
        if options.tesseract.pagesegmode in (0, 2):
            log.warning(
                "The tesseract-pagesegmode you selected will disable OCR. "
                "This may cause processing to fail."
            )
        # Check downsample consistency: the threshold only matters when the
        # downsampling feature itself is enabled.
        if (
            options.tesseract.downsample_above != 32767
            and not options.tesseract.downsample_large_images
        ):
            log.warning(
                "The --tesseract-downsample-above argument will have no effect unless "
                "--tesseract-downsample-large-images is also given."
            )
        # Note: blocked languages (equ, osd) are checked earlier in
        # check_options_languages() to ensure the check runs before
        # the missing language check.

    def _validate_optimize_options(self, options: OcrOptions) -> None:
        """Validate optimization options (warnings only)."""
        # Quality knobs are meaningless when optimization is disabled.
        # NOTE(review): reads options.png_quality / options.jpeg_quality;
        # OcrOptions appears to declare the flat field as jpg_quality
        # (see _get_plugin_options) — confirm 'jpeg_quality' resolves
        # (e.g. via __getattr__) rather than raising AttributeError.
        if options.optimize == 0 and any(
            [
                options.png_quality and options.png_quality > 0,
                options.jpeg_quality and options.jpeg_quality > 0,
            ]
        ):
            log.warning(
                "The arguments --png-quality and --jpeg-quality "
                "will be ignored because --optimize=0."
            )

    def _validate_cross_cutting_concerns(self, options: OcrOptions) -> None:
        """Validate cross-cutting concerns that span multiple plugins.

        Raises:
            ValueError: For incompatible mode/output-type combinations.
        """
        # Local import avoids a circular import with ocrmypdf._options.
        from ocrmypdf._options import ProcessingMode

        # Handle deprecated pdf_renderer values
        self._handle_deprecated_pdf_renderer(options)
        # Note: Mutual exclusivity of force_ocr/skip_text/redo_ocr is now enforced
        # by the ProcessingMode enum - only one mode can be active at a time.
        # Validate redo mode compatibility
        if options.mode == ProcessingMode.redo and (
            options.deskew or options.clean_final or options.remove_background
        ):
            raise ValueError(
                "--redo-ocr (or --mode redo) is not currently compatible with "
                "--deskew, --clean-final, and --remove-background"
            )
        # Validate output type compatibility: only the null device or stdout
        # make sense when no output is requested.
        if options.output_type == 'none' and str(options.output_file) not in (
            os.devnull,
            '-',
        ):
            raise ValueError(
                "Since you specified `--output-type none`, the output file "
                f"{options.output_file} cannot be produced. Set the output file to "
                "`-` to suppress this message."
            )
        # Validate PDF/A image compression compatibility
        if (
            options.ghostscript.pdfa_image_compression
            and options.ghostscript.pdfa_image_compression != 'auto'
            and not options.output_type.startswith('pdfa')
        ):
            log.warning(
                "--pdfa-image-compression argument only applies when "
                "--output-type is one of 'pdfa', 'pdfa-1', or 'pdfa-2'"
            )

    def _handle_deprecated_pdf_renderer(self, options: OcrOptions) -> None:
        """Handle deprecated pdf_renderer values by redirecting to fpdf2."""
        if options.pdf_renderer in ('hocr', 'hocrdebug'):
            log.info(
                "The '%s' PDF renderer has been removed. Using 'fpdf2' instead, "
                "which provides full international language support, proper RTL "
                "rendering, and improved text positioning.",
                options.pdf_renderer,
            )
            # Modify the options object to use fpdf2. object.__setattr__
            # bypasses pydantic's validate_assignment machinery.
            object.__setattr__(options, 'pdf_renderer', 'fpdf2')
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/_validation_coordinator.py",
"license": "Mozilla Public License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/builtin_plugins/null_ocr.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Built-in plugin implementing a null OCR engine (no OCR).
This plugin provides an OCR engine that produces no text output. It is useful
when users want OCRmyPDF's image processing, PDF/A conversion, or optimization
features without performing actual OCR.
Usage:
ocrmypdf --ocr-engine none input.pdf output.pdf
"""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from PIL import Image
from ocrmypdf import hookimpl
from ocrmypdf.hocrtransform import BoundingBox, OcrClass, OcrElement
from ocrmypdf.pluginspec import OcrEngine, OrientationConfidence
if TYPE_CHECKING:
from ocrmypdf._options import OcrOptions
class NullOcrEngine(OcrEngine):
    """A no-op OCR engine that produces no text output.

    Use this when you want OCRmyPDF's image processing, PDF/A conversion,
    or optimization features without performing actual OCR.
    """

    @staticmethod
    def version() -> str:
        """Return version string."""
        return "none"

    @staticmethod
    def creator_tag(options: OcrOptions) -> str:
        """Return creator tag for PDF metadata."""
        return "OCRmyPDF (no OCR)"

    def __str__(self) -> str:
        """Return human-readable engine name."""
        return "No OCR engine"

    @staticmethod
    def languages(options: OcrOptions) -> set[str]:
        """Return supported languages (empty set for null engine)."""
        return set()

    @staticmethod
    def get_orientation(input_file: Path, options: OcrOptions) -> OrientationConfidence:
        """Return neutral orientation (no rotation detected)."""
        # Zero confidence ensures no page is ever auto-rotated.
        return OrientationConfidence(angle=0, confidence=0.0)

    @staticmethod
    def get_deskew(input_file: Path, options: OcrOptions) -> float:
        """Return zero deskew angle."""
        return 0.0

    @staticmethod
    def supports_generate_ocr() -> bool:
        """Return True - this engine supports the generate_ocr() API."""
        return True

    @staticmethod
    def generate_ocr(
        input_file: Path,
        options: OcrOptions,
        page_number: int = 0,
    ) -> tuple[OcrElement, str]:
        """Generate empty OCR results.

        Args:
            input_file: The image file (used to get dimensions).
            options: OCR options (ignored).
            page_number: Page number (stored in result).

        Returns:
            A tuple of (empty OcrElement page, empty string).
        """
        # Get image dimensions
        with Image.open(input_file) as img:
            width, height = img.size
            # DPI may be a (x, y) tuple or a scalar depending on the format;
            # default to 72 when the image carries no DPI metadata.
            dpi_info = img.info.get('dpi', (72, 72))
            dpi = dpi_info[0] if isinstance(dpi_info, tuple) else dpi_info
        # Create empty page element with correct dimensions
        page = OcrElement(
            ocr_class=OcrClass.PAGE,
            bbox=BoundingBox(left=0, top=0, right=width, bottom=height),
            dpi=float(dpi),
            page_number=page_number,
        )
        return page, ""

    @staticmethod
    def generate_hocr(
        input_file: Path,
        output_hocr: Path,
        output_text: Path,
        options: OcrOptions,
    ) -> None:
        """Generate empty hOCR file.

        Creates minimal valid hOCR output with no text content.
        """
        # Get image dimensions for hOCR bbox
        with Image.open(input_file) as img:
            width, height = img.size
        hocr_content = f'''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>OCRmyPDF - No OCR</title>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
<meta name='ocr-system' content='OCRmyPDF null engine'/>
</head>
<body>
<div class='ocr_page' title='bbox 0 0 {width} {height}'>
</div>
</body>
</html>
'''
        output_hocr.write_text(hocr_content, encoding='utf-8')
        # The sidecar text file is intentionally empty: no OCR was performed.
        output_text.write_text('', encoding='utf-8')

    @staticmethod
    def generate_pdf(
        input_file: Path,
        output_pdf: Path,
        output_text: Path,
        options: OcrOptions,
    ) -> None:
        """NullOcrEngine cannot generate PDFs directly.

        Use pdf_renderer='fpdf2' instead of 'sandwich'.

        Raises:
            NotImplementedError: Always.
        """
        raise NotImplementedError(
            "NullOcrEngine cannot generate PDFs directly. "
            "Use --pdf-renderer fpdf2 instead of sandwich mode."
        )
@hookimpl
def get_ocr_engine(options):
    """Return NullOcrEngine when --ocr-engine none is selected.

    Yields to other engine providers (returns None) whenever an engine
    other than 'none' was requested.
    """
    requested = getattr(options, 'ocr_engine', 'auto') if options is not None else 'none'
    if requested != 'none':
        return None
    return NullOcrEngine()
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/builtin_plugins/null_ocr.py",
"license": "Mozilla Public License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/builtin_plugins/pypdfium.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Built-in plugin to implement PDF page rasterization using pypdfium2."""
from __future__ import annotations
import logging
import threading
from contextlib import closing
from pathlib import Path
from typing import TYPE_CHECKING, Literal
if TYPE_CHECKING:
import pypdfium2 as pdfium
else:
try:
import pypdfium2 as pdfium
except ImportError:
pdfium = None
from PIL import Image
from ocrmypdf import hookimpl
from ocrmypdf.exceptions import MissingDependencyError
from ocrmypdf.helpers import Resolution
# Module-level logger for this plugin.
log = logging.getLogger(__name__)

# pypdfium2/PDFium is not thread-safe. All calls to the library must be serialized.
# See: https://pypdfium2.readthedocs.io/en/stable/python_api.html#incompatibility-with-threading
# When using process-based parallelism (use_threads=False), each process has its own
# pdfium instance, so locking is not needed across processes.
_pdfium_lock = threading.Lock()
@hookimpl
def check_options(options):
    """Fail early if --rasterizer pypdfium was requested without pypdfium2 installed."""
    explicitly_requested = options.rasterizer == 'pypdfium'
    if explicitly_requested and pdfium is None:
        raise MissingDependencyError(
            "The --rasterizer pypdfium option requires the pypdfium2 package. "
            "Install it with: pip install pypdfium2"
        )
def _open_pdf_document(input_file: Path):
    """Open a PDF document using pypdfium2.

    Args:
        input_file: Path to the PDF file to open.

    Returns:
        A ``pdfium.PdfDocument`` for *input_file*.
    """
    # Callers (rasterize_pdf_page) return early when pdfium is None, so this
    # assert documents an internal invariant rather than validating input.
    assert pdfium is not None, "pypdfium2 must be available to call this function"
    return pdfium.PdfDocument(input_file)
def _calculate_mediabox_crop(page) -> tuple[float, float, float, float]:
"""Calculate crop values to expand rendering from CropBox to MediaBox.
By default pypdfium2 renders to the CropBox. To render the full MediaBox,
we need negative crop values to expand the rendering area.
Returns:
Tuple of (left, bottom, right, top) crop values. Negative values
expand the rendering area beyond the CropBox to the MediaBox.
"""
mediabox = page.get_mediabox() # (left, bottom, right, top)
cropbox = page.get_cropbox() # (left, bottom, right, top), defaults to mediabox
# Calculate how much to expand from cropbox to mediabox
# Negative values = expand, positive = shrink
return (
mediabox[0] - cropbox[0], # Expand left
mediabox[1] - cropbox[1], # Expand bottom
cropbox[2] - mediabox[2], # Expand right
cropbox[3] - mediabox[3], # Expand top
)
def _render_page_to_bitmap(
    page: pdfium.PdfPage,
    raster_device: str,
    raster_dpi: Resolution,
    rotation: int | None,
    use_cropbox: bool,
) -> tuple[pdfium.PdfBitmap, int, int]:
    """Render a PDF page to a bitmap.

    Args:
        page: Open pypdfium2 page; its rotation may be mutated here.
        raster_device: Ghostscript-style device name; grayscale devices
            (pngmono, pnggray, jpeggray) are rendered in grayscale.
        raster_dpi: Target rendering resolution.
        rotation: Counterclockwise rotation in degrees, or None for none.
        use_cropbox: If True render the CropBox only; otherwise expand to
            the MediaBox for consistency with Ghostscript.

    Returns:
        Tuple of (bitmap, expected_width_px, expected_height_px) where the
        expected dimensions are computed from page size and DPI so callers
        can correct for off-by-a-pixel rendering differences.
    """
    # Round DPI to match Ghostscript's precision
    raster_dpi = raster_dpi.round(6)
    # Get page dimensions BEFORE applying rotation
    page_width_pts, page_height_pts = page.get_size()
    # Calculate expected output dimensions using separate x/y DPI
    expected_width = int(round(page_width_pts * raster_dpi.x / 72.0))
    expected_height = int(round(page_height_pts * raster_dpi.y / 72.0))
    # Calculate the scale factor based on DPI
    # pypdfium2 uses points (72 DPI) as base unit
    scale = raster_dpi.to_scalar() / 72.0
    # Apply rotation if specified
    if rotation:
        # pypdfium2 rotation is in degrees, same as our input
        # we track rotation in CCW, and pypdfium2 expects CW, so negate
        page.set_rotation(-rotation % 360)
        # When rotation is 90 or 270, dimensions are swapped in output
        if rotation % 180 == 90:
            expected_width, expected_height = expected_height, expected_width
    # Render the page to a bitmap
    # The scale parameter controls the resolution
    # Render in grayscale for mono and gray devices (better input for 1-bit conversion)
    grayscale = raster_device.lower() in ('pngmono', 'pnggray', 'jpeggray')
    # Calculate crop to render the appropriate box
    # Default (use_cropbox=False) renders MediaBox for consistency with Ghostscript
    crop = (0, 0, 0, 0) if use_cropbox else _calculate_mediabox_crop(page)
    bitmap = page.render(
        scale=scale,
        rotation=0,  # We already set rotation on the page
        crop=crop,
        may_draw_forms=True,
        draw_annots=True,
        grayscale=grayscale,
        # Note: pypdfium2 doesn't have a direct equivalent to filter_vector
        # This would require more complex implementation if needed
    )
    return bitmap, expected_width, expected_height
def _process_image_for_output(
    pil_image: Image.Image,
    raster_device: str,
    raster_dpi: Resolution,
    page_dpi: Resolution | None,
    stop_on_soft_error: bool,
    expected_width: int | None = None,
    expected_height: int | None = None,
) -> tuple[Image.Image, Literal['PNG', 'TIFF', 'JPEG']]:
    """Process PIL image for output format and set DPI metadata.

    Converts the image mode to match the Ghostscript-style raster device,
    embeds DPI metadata, optionally nudges dimensions to the expected size,
    and selects the output file format.

    Args:
        pil_image: Image rendered by pypdfium2.
        raster_device: Ghostscript-style device name controlling mode/format.
        raster_dpi: DPI used for rendering (fallback DPI metadata).
        page_dpi: DPI to stamp on the output, overriding raster_dpi.
        stop_on_soft_error: If True, raise on an unknown raster device
            instead of falling back to PNG with a warning.
        expected_width: Expected pixel width, or None to skip correction.
        expected_height: Expected pixel height, or None to skip correction.

    Returns:
        Tuple of (processed image, output format name).

    Raises:
        ValueError: Unknown raster_device when stop_on_soft_error is True.
    """
    # Correct dimensions if slightly off (within 2 pixels tolerance)
    if expected_width and expected_height:
        actual_width, actual_height = pil_image.width, pil_image.height
        width_diff = abs(actual_width - expected_width)
        height_diff = abs(actual_height - expected_height)
        # Only resize if off by small amount (1-2 pixels)
        # NOTE(review): the `or` means a 1-2 px diff on ONE axis triggers a
        # resize even if the other axis is far off — confirm `and` was not
        # intended here.
        if (width_diff <= 2 or height_diff <= 2) and (
            width_diff > 0 or height_diff > 0
        ):
            log.debug(
                f"Adjusting rendered dimensions from "
                f"{actual_width}x{actual_height} to expected "
                f"{expected_width}x{expected_height}"
            )
            pil_image = pil_image.resize(
                (expected_width, expected_height), Image.Resampling.LANCZOS
            )
    # Set the DPI metadata if page_dpi is specified
    if page_dpi:
        # PIL expects DPI as a tuple
        dpi_tuple = (float(page_dpi.x), float(page_dpi.y))
        pil_image.info['dpi'] = dpi_tuple
    else:
        # Use the raster DPI
        dpi_tuple = (float(raster_dpi.x), float(raster_dpi.y))
        pil_image.info['dpi'] = dpi_tuple
    # Convert image mode to match raster_device
    # This ensures pypdfium output matches Ghostscript's native device output
    raster_device_lower = raster_device.lower()
    if raster_device_lower == 'pngmono':
        # Convert to 1-bit black and white (matches Ghostscript pngmono device)
        if pil_image.mode != '1':
            # Go through grayscale first for non-gray sources
            if pil_image.mode not in ('L', '1'):
                pil_image = pil_image.convert('L')
            pil_image = pil_image.convert('1')
    elif raster_device_lower in ('pnggray', 'jpeggray'):
        # Convert to 8-bit grayscale
        if pil_image.mode not in ('L', '1'):
            pil_image = pil_image.convert('L')
    elif raster_device_lower == 'png256':
        # Convert to 8-bit indexed color (256 colors)
        if pil_image.mode != 'P':
            if pil_image.mode not in ('RGB', 'RGBA'):
                pil_image = pil_image.convert('RGB')
            pil_image = pil_image.quantize(colors=256)
    elif raster_device_lower in ('png16m', 'jpeg'):
        # Convert to RGB; flatten any alpha channel onto white
        if pil_image.mode == 'RGBA':
            background = Image.new('RGB', pil_image.size, (255, 255, 255))
            background.paste(pil_image, mask=pil_image.split()[-1])
            pil_image = background
        elif pil_image.mode not in ('RGB',):
            pil_image = pil_image.convert('RGB')
    # pngalpha: keep RGBA as-is
    # Determine output format based on raster_device
    png_devices = ('png', 'pngmono', 'pnggray', 'png256', 'png16m', 'pngalpha')
    if raster_device_lower in png_devices:
        format_name = 'PNG'
    elif raster_device_lower in ('jpeg', 'jpeggray', 'jpg'):
        format_name = 'JPEG'
    elif raster_device_lower in ('tiff', 'tif'):
        format_name = 'TIFF'
    else:
        # Default to PNG for unknown formats (raise instead in strict mode)
        format_name = 'PNG'
        if stop_on_soft_error:
            raise ValueError(f"Unsupported raster device: {raster_device}")
        else:
            log.warning(f"Unsupported raster device {raster_device}, using PNG")
    return pil_image, format_name
def _save_image(pil_image: Image.Image, output_file: Path, format_name: str) -> None:
"""Save PIL image to file with appropriate DPI metadata."""
save_kwargs = {}
if (
format_name in ('PNG', 'TIFF')
and 'dpi' in pil_image.info
or format_name == 'JPEG'
and 'dpi' in pil_image.info
):
save_kwargs['dpi'] = pil_image.info['dpi']
pil_image.save(output_file, format=format_name, **save_kwargs)
@hookimpl
def rasterize_pdf_page(
    input_file: Path,
    output_file: Path,
    raster_device: str,
    raster_dpi: Resolution,
    pageno: int,
    page_dpi: Resolution | None,
    rotation: int | None,
    filter_vector: bool,
    stop_on_soft_error: bool,
    options,
    use_cropbox: bool,
) -> Path | None:
    """Rasterize a single page of a PDF file using pypdfium2.

    Returns None if pypdfium2 is not available or if the user has selected
    a different rasterizer, allowing Ghostscript to be used.

    Note: filter_vector is accepted for hook compatibility but has no
    pypdfium2 equivalent (see _render_page_to_bitmap).
    """
    # Check if user explicitly requested a different rasterizer
    if options is not None and options.rasterizer == 'ghostscript':
        return None  # Let Ghostscript handle it
    if pdfium is None:
        return None  # Fall back to Ghostscript
    # Acquire lock to ensure thread-safe access to pypdfium2; closing()
    # guarantees document and page handles are released even on error.
    with (
        _pdfium_lock,
        closing(_open_pdf_document(input_file)) as pdf,
        closing(pdf[pageno - 1]) as page,  # pageno is 1-based; pdfium is 0-based
    ):
        # Render the page to a bitmap
        bitmap, expected_width, expected_height = _render_page_to_bitmap(
            page, raster_device, raster_dpi, rotation, use_cropbox
        )
        with closing(bitmap):
            # Convert to PIL Image
            pil_image = bitmap.to_pil()
    # Process and save image outside the lock (PIL operations are thread-safe)
    pil_image, format_name = _process_image_for_output(
        pil_image,
        raster_device,
        raster_dpi,
        page_dpi,
        stop_on_soft_error,
        expected_width,
        expected_height,
    )
    _save_image(pil_image, output_file, format_name)
    return output_file
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/builtin_plugins/pypdfium.py",
"license": "Mozilla Public License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/font/font_manager.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Base font management for PDF rendering.
This module provides the base FontManager class that handles font loading
and glyph checking using uharfbuzz.
"""
from __future__ import annotations
from pathlib import Path
import uharfbuzz as hb
class FontManager:
    """Manages font loading and glyph checking for PDF rendering.

    Loads a font with uharfbuzz so renderers can check glyph coverage and
    measure text. Renderer-specific subclasses extend this with their own
    font objects.

    Attributes:
        font_path: Path to the font file
        font_data: Raw font file bytes
        font_index: Index within TTC collection (0 for single-font files)
        hb_face: uharfbuzz Face object
        hb_font: uharfbuzz Font object
    """

    def __init__(self, font_path: Path, font_index: int = 0):
        """Initialize font manager.

        Args:
            font_path: Path to TrueType/OpenType font file
            font_index: Index of font within a TTC collection (default 0).
                For single-font files (.ttf, .otf), use 0.
        """
        self.font_path = font_path
        self.font_index = font_index
        self.font_data = font_path.read_bytes()
        # uharfbuzz handles both glyph checking and text measurement;
        # its Face constructor accepts a font_index for TTC collections.
        self.hb_face = hb.Face(self.font_data, font_index)
        self.hb_font = hb.Font(self.hb_face)

    def get_hb_font(self) -> hb.Font:
        """Return the uharfbuzz Font object used for text measurement."""
        return self.hb_font

    def has_glyph(self, codepoint: int) -> bool:
        """Report whether the font maps codepoint to a real glyph (not .notdef).

        Args:
            codepoint: Unicode codepoint

        Returns:
            True if the font has a glyph other than glyph id 0.
        """
        gid = self.hb_font.get_nominal_glyph(codepoint)
        return gid is not None and gid != 0

    def get_font_metrics(self) -> tuple[float, float, float]:
        """Return (ascent, descent, units_per_em) in font units.

        Ascent is positive (above baseline); descent is typically negative
        (below baseline).
        """
        extents = self.hb_font.get_font_extents('ltr')
        return (extents.ascender, extents.descender, self.hb_face.upem)

    def get_left_side_bearing(self, char: str, font_size: float) -> float:
        """Return the left side bearing of *char* in points at *font_size*.

        The left side bearing is the horizontal distance from the glyph
        origin (x=0) to the glyph's leftmost pixel; a positive value means
        whitespace precedes the glyph. Returns 0.0 for the empty string,
        unmapped characters, and glyphs without extents.

        Args:
            char: Single character to get lsb for
            font_size: Font size in points
        """
        if not char:
            return 0.0
        gid = self.hb_font.get_nominal_glyph(ord(char))
        if gid is None or gid == 0:
            return 0.0
        extents = self.hb_font.get_glyph_extents(gid)
        if extents is None:
            return 0.0
        # x_bearing is in font units; scale to points via units-per-em.
        return extents.x_bearing * font_size / self.hb_face.upem
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/font/font_manager.py",
"license": "Mozilla Public License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/font/font_provider.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Font provider protocol and implementations for PDF rendering."""
from __future__ import annotations
import logging
from pathlib import Path
from typing import Protocol
from ocrmypdf.font.font_manager import FontManager
log = logging.getLogger(__name__)
class FontProvider(Protocol):
    """Protocol describing a source of fonts for MultiFontManager.

    An implementation knows where its fonts live and how to load them;
    MultiFontManager asks for fonts by logical name and uses them for
    glyph-coverage checking.
    """
    def get_font(self, font_name: str) -> FontManager | None:
        """Return a FontManager for *font_name*, or None if unavailable.

        Args:
            font_name: Logical font name (e.g., 'NotoSans-Regular')
        """
        ...
    def get_available_fonts(self) -> list[str]:
        """Return the names of fonts retrievable via get_font()."""
        ...
    def get_fallback_font(self) -> FontManager:
        """Return the glyphless fallback font.

        The fallback must always be available and handles any codepoint
        (Occulta.ttf).
        """
        ...
class BuiltinFontProvider:
    """Provides the fonts bundled in the ocrmypdf/data directory."""
    # Logical font name -> bundled filename.
    # Only Latin (NotoSans) and the glyphless fallback (Occulta.ttf) are bundled.
    # All other scripts (Arabic, Devanagari, CJK, etc.) are discovered from
    # system fonts by SystemFontProvider to reduce package size.
    FONT_FILES = {
        'NotoSans-Regular': 'NotoSans-Regular.ttf',
        'Occulta': 'Occulta.ttf',
    }
    def __init__(self, font_dir: Path | None = None):
        """Create the provider and eagerly load the bundled fonts.

        Args:
            font_dir: Directory containing font files; defaults to the
                ocrmypdf/data directory when None.
        """
        self.font_dir = (
            font_dir if font_dir is not None else Path(__file__).parent.parent / "data"
        )
        self._fonts: dict[str, FontManager] = {}
        self._load_fonts()
    def _load_fonts(self) -> None:
        """Load each bundled font; warn on failure, except the mandatory fallback."""
        for name, filename in self.FONT_FILES.items():
            path = self.font_dir / filename
            if not path.exists():
                if name == 'Occulta':
                    # The glyphless fallback is required for rendering to work.
                    raise FileNotFoundError(
                        f"Required fallback font not found: {path}"
                    )
                log.warning(
                    "Font %s not found at %s - OCR output quality for some "
                    "scripts may be affected",
                    name,
                    path,
                )
                continue
            try:
                self._fonts[name] = FontManager(path)
            except Exception as e:
                if name == 'Occulta':
                    raise ValueError(
                        f"Failed to load required fallback font {filename}: {e}"
                    ) from e
                log.warning(
                    "Failed to load font %s: %s - OCR output quality may be affected",
                    name,
                    e,
                )
    def get_font(self, font_name: str) -> FontManager | None:
        """Return the loaded FontManager for *font_name*, or None."""
        return self._fonts.get(font_name)
    def get_available_fonts(self) -> list[str]:
        """Return the names of the fonts that loaded successfully."""
        return [*self._fonts]
    def get_fallback_font(self) -> FontManager:
        """Return the glyphless Occulta fallback font."""
        return self._fonts['Occulta']
class ChainedFontProvider:
    """Font provider that consults several providers in priority order.

    Typically used to combine builtin fonts with system fonts: the builtin
    provider is tried first, and system fonts cover anything not bundled
    with the package.
    """
    def __init__(self, providers: list[FontProvider]):
        """Store the provider chain.

        Args:
            providers: Providers to consult in order; the first provider
                that returns a font wins.

        Raises:
            ValueError: If *providers* is empty.
        """
        if not providers:
            raise ValueError("At least one provider is required")
        self.providers = providers
    def get_font(self, font_name: str) -> FontManager | None:
        """Return the first provider's FontManager for *font_name*.

        Args:
            font_name: Logical font name (e.g., 'NotoSans-Regular')

        Returns:
            FontManager from the first provider that has the font, else None.
        """
        for candidate in self.providers:
            found = candidate.get_font(font_name)
            if found:
                return found
        return None
    def get_available_fonts(self) -> list[str]:
        """Return all providers' font names, deduplicated, order preserved."""
        # A dict preserves first-seen insertion order, giving dedup for free.
        names: dict[str, None] = {}
        for candidate in self.providers:
            for name in candidate.get_available_fonts():
                names.setdefault(name, None)
        return list(names)
    def get_fallback_font(self) -> FontManager:
        """Return the first fallback font any provider can supply.

        Raises:
            RuntimeError: If no provider can provide a fallback font.
        """
        for candidate in self.providers:
            try:
                return candidate.get_fallback_font()
            except (NotImplementedError, AttributeError, KeyError):
                continue
        raise RuntimeError("No fallback font available from any provider")
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/font/font_provider.py",
"license": "Mozilla Public License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/font/multi_font_manager.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Multi-font management for PDF rendering.
Provides automatic font selection for multilingual documents based on
language hints and glyph coverage analysis.
"""
from __future__ import annotations
import logging
from pathlib import Path
from ocrmypdf.font.font_manager import FontManager
from ocrmypdf.font.font_provider import (
BuiltinFontProvider,
ChainedFontProvider,
FontProvider,
)
from ocrmypdf.font.system_font_provider import SystemFontProvider
log = logging.getLogger(__name__)
class MultiFontManager:
    """Manages multiple fonts with automatic selection and fallback.

    This class orchestrates multiple FontManager instances to provide
    word-level font selection for multilingual documents. It uses a hybrid
    approach combining language hints from hOCR with glyph coverage analysis.

    Font selection strategy:
    1. Try language-preferred font (if language hint available)
    2. Try fallback fonts in order by glyph coverage
    3. Fall back to Occulta.ttf (glyphless fallback)
    """
    # Language to font mapping
    # Keys are ISO 639-2/3 codes or Tesseract language codes
    LANGUAGE_FONT_MAP = {
        # Arabic script
        'ara': 'NotoSansArabic-Regular',  # Arabic
        'per': 'NotoSansArabic-Regular',  # Persian (uses Arabic script)
        'fas': 'NotoSansArabic-Regular',  # Farsi (alternative code for Persian)
        'urd': 'NotoSansArabic-Regular',  # Urdu (uses Arabic script)
        'pus': 'NotoSansArabic-Regular',  # Pashto
        'kur': 'NotoSansArabic-Regular',  # Kurdish (Arabic script variant)
        # Devanagari script
        'hin': 'NotoSansDevanagari-Regular',  # Hindi
        'san': 'NotoSansDevanagari-Regular',  # Sanskrit
        'mar': 'NotoSansDevanagari-Regular',  # Marathi
        'nep': 'NotoSansDevanagari-Regular',  # Nepali
        'kok': 'NotoSansDevanagari-Regular',  # Konkani
        'bho': 'NotoSansDevanagari-Regular',  # Bhojpuri
        'mai': 'NotoSansDevanagari-Regular',  # Maithili
        # CJK
        'chi': 'NotoSansCJK-Regular',  # Chinese (generic)
        'zho': 'NotoSansCJK-Regular',  # Chinese (ISO 639-3)
        'chi_sim': 'NotoSansCJK-Regular',  # Chinese Simplified (Tesseract)
        'chi_tra': 'NotoSansCJK-Regular',  # Chinese Traditional (Tesseract)
        'jpn': 'NotoSansCJK-Regular',  # Japanese
        'kor': 'NotoSansCJK-Regular',  # Korean
        # Thai
        'tha': 'NotoSansThai-Regular',  # Thai
        # Hebrew
        'heb': 'NotoSansHebrew-Regular',  # Hebrew
        'yid': 'NotoSansHebrew-Regular',  # Yiddish (uses Hebrew script)
        # Bengali script
        'ben': 'NotoSansBengali-Regular',  # Bengali
        'asm': 'NotoSansBengali-Regular',  # Assamese (uses Bengali script)
        # Tamil
        'tam': 'NotoSansTamil-Regular',  # Tamil
        # Gujarati
        'guj': 'NotoSansGujarati-Regular',  # Gujarati
        # Telugu
        'tel': 'NotoSansTelugu-Regular',  # Telugu
        # Kannada
        'kan': 'NotoSansKannada-Regular',  # Kannada
        # Malayalam
        'mal': 'NotoSansMalayalam-Regular',  # Malayalam
        # Myanmar (Burmese)
        'mya': 'NotoSansMyanmar-Regular',  # Myanmar
        # Khmer (Cambodian)
        'khm': 'NotoSansKhmer-Regular',  # Khmer
        # Lao
        'lao': 'NotoSansLao-Regular',  # Lao
        # Georgian
        'kat': 'NotoSansGeorgian-Regular',  # Georgian
        'geo': 'NotoSansGeorgian-Regular',  # Georgian (alternative)
        # Armenian
        'hye': 'NotoSansArmenian-Regular',  # Armenian
        'arm': 'NotoSansArmenian-Regular',  # Armenian (alternative)
        # Ethiopic
        'amh': 'NotoSansEthiopic-Regular',  # Amharic
        'tir': 'NotoSansEthiopic-Regular',  # Tigrinya
        # Sinhala
        'sin': 'NotoSansSinhala-Regular',  # Sinhala
        # Gurmukhi (Punjabi)
        'pan': 'NotoSansGurmukhi-Regular',  # Punjabi
        'pnb': 'NotoSansGurmukhi-Regular',  # Western Punjabi
        # Oriya
        'ori': 'NotoSansOriya-Regular',  # Oriya
        'ory': 'NotoSansOriya-Regular',  # Oriya (alternative)
        # Tibetan
        'bod': 'NotoSansTibetan-Regular',  # Tibetan
        'tib': 'NotoSansTibetan-Regular',  # Tibetan (alternative)
    }
    # Ordered fallback chain for fonts (after language-preferred font)
    # Order matters: most common scripts first for faster matching
    FALLBACK_FONTS = [
        'NotoSans-Regular',  # Latin, Greek, Cyrillic
        'NotoSansArabic-Regular',
        'NotoSansDevanagari-Regular',
        'NotoSansCJK-Regular',
        'NotoSansThai-Regular',
        'NotoSansHebrew-Regular',
        'NotoSansBengali-Regular',
        'NotoSansTamil-Regular',
        'NotoSansGujarati-Regular',
        'NotoSansTelugu-Regular',
        'NotoSansKannada-Regular',
        'NotoSansMalayalam-Regular',
        'NotoSansMyanmar-Regular',
        'NotoSansKhmer-Regular',
        'NotoSansLao-Regular',
        'NotoSansGeorgian-Regular',
        'NotoSansArmenian-Regular',
        'NotoSansEthiopic-Regular',
        'NotoSansSinhala-Regular',
        'NotoSansGurmukhi-Regular',
        'NotoSansOriya-Regular',
        'NotoSansTibetan-Regular',
    ]
    def __init__(
        self,
        font_dir: Path | None = None,
        *,
        font_provider: FontProvider | None = None,
    ):
        """Initialize multi-font manager.

        Args:
            font_dir: Directory containing font files. If font_provider is
                not specified, this is passed to BuiltinFontProvider.
            font_provider: Provider for loading fonts. If None, uses a
                ChainedFontProvider that tries builtin fonts first,
                then searches system fonts.
        """
        if font_provider is not None:
            self.font_provider = font_provider
        else:
            # Use chained provider: try builtin fonts first, then system fonts
            self.font_provider = ChainedFontProvider(
                [
                    BuiltinFontProvider(font_dir),
                    SystemFontProvider(),
                ]
            )
        # Font selection cache: (word_text, language) -> font_name
        self._selection_cache: dict[tuple[str, str | None], str] = {}
        # Track whether we've warned about missing fonts (warn once per script)
        self._warned_scripts: set[str] = set()
    @property
    def fonts(self) -> dict[str, FontManager]:
        """Get all loaded fonts (backward compatibility)."""
        # NOTE: enumerating all fonts may trigger lazy system-font scans
        # when the provider chain includes SystemFontProvider.
        return self.get_all_fonts()
    def _try_font(
        self, font_name: str, word_text: str, cache_key: tuple[str, str | None]
    ) -> FontManager | None:
        """Try to use a font for the given word.

        Args:
            font_name: Name of font to try
            word_text: Text content to check
            cache_key: Cache key for storing successful result

        Returns:
            FontManager if font exists and has all glyphs, None otherwise
        """
        font = self.font_provider.get_font(font_name)
        if font is None:
            return None
        if self._has_all_glyphs(font, word_text):
            # Cache only successful selections; failures are retried next time.
            self._selection_cache[cache_key] = font_name
            return font
        return None
    def select_font_for_word(
        self, word_text: str, line_language: str | None
    ) -> FontManager:
        """Select appropriate font for a word.

        Uses a hybrid approach:
        1. Language-based selection (if language hint available)
        2. Ordered fallback through available fonts by glyph coverage
        3. Final fallback to Occulta.ttf (glyphless)

        Args:
            word_text: The text content of the word
            line_language: Language code from hOCR (e.g., 'ara', 'eng')

        Returns:
            FontManager instance to use for rendering this word
        """
        cache_key = (word_text, line_language)
        if cache_key in self._selection_cache:
            cached_name = self._selection_cache[cache_key]
            font = self.font_provider.get_font(cached_name)
            if font:
                return font
            # A stale cache entry (font no longer available) falls through
            # to full re-selection below.
        tried_fonts: set[str] = set()
        # Phase 1: Try language-preferred font
        if line_language and line_language in self.LANGUAGE_FONT_MAP:
            preferred = self.LANGUAGE_FONT_MAP[line_language]
            tried_fonts.add(preferred)
            if result := self._try_font(preferred, word_text, cache_key):
                return result
        # Phase 2: Try fallback fonts in order
        for font_name in self.FALLBACK_FONTS:
            if font_name in tried_fonts:
                continue
            if result := self._try_font(font_name, word_text, cache_key):
                return result
        # Phase 3: Glyphless fallback (always succeeds)
        # Warn if we're falling back for non-ASCII text (likely missing font)
        self._warn_missing_font(word_text, line_language)
        self._selection_cache[cache_key] = 'Occulta'
        return self.font_provider.get_fallback_font()
    def _warn_missing_font(self, word_text: str, line_language: str | None) -> None:
        """Warn user about missing font for non-Latin text.

        Only warns once per language/script to avoid log spam.
        """
        # Determine a key for deduplication (language or 'non-ascii')
        warn_key = line_language if line_language else 'unknown'
        # Only warn for non-ASCII text and only once per key
        if warn_key in self._warned_scripts:
            return
        # Check if text contains non-ASCII characters
        if not any(ord(c) > 127 for c in word_text):
            return
        self._warned_scripts.add(warn_key)
        if line_language and line_language in self.LANGUAGE_FONT_MAP:
            font_name = self.LANGUAGE_FONT_MAP[line_language]
            log.warning(
                "No font found with glyphs for '%s' text. "
                "Install %s for better rendering. "
                "See https://fonts.google.com/noto",
                line_language,
                font_name,
            )
        else:
            log.warning(
                "No font found with glyphs for some text. "
                "Install Noto fonts for better rendering. "
                "See https://fonts.google.com/noto"
            )
    def _has_all_glyphs(self, font: FontManager, text: str) -> bool:
        """Check if a font has glyphs for all characters in text.

        Args:
            font: FontManager instance to check
            text: Text to verify coverage for

        Returns:
            True if font has real glyphs for all characters (not .notdef)
        """
        if not text:
            return True
        hb_font = font.get_hb_font()
        for char in text:
            codepoint = ord(char)
            glyph_id = hb_font.get_nominal_glyph(codepoint)
            if glyph_id is None or glyph_id == 0:  # 0 = .notdef glyph
                return False
        return True
    def has_font(self, font_name: str) -> bool:
        """Check if a named font is available.

        Args:
            font_name: Name of font to check

        Returns:
            True if font is available
        """
        return self.font_provider.get_font(font_name) is not None
    def has_all_glyphs(self, font_name: str, text: str) -> bool:
        """Check if a named font has glyphs for all characters in text.

        Args:
            font_name: Name of font to check
            text: Text to verify coverage for

        Returns:
            True if font has real glyphs for all characters (not .notdef)
        """
        font = self.font_provider.get_font(font_name)
        if font is None:
            return False
        return self._has_all_glyphs(font, text)
    def get_all_fonts(self) -> dict[str, FontManager]:
        """Get all loaded font managers.

        Returns:
            Dictionary mapping font names to FontManager instances
        """
        result = {}
        for name in self.font_provider.get_available_fonts():
            font = self.font_provider.get_font(name)
            if font is not None:
                result[name] = font
        return result
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/font/multi_font_manager.py",
"license": "Mozilla Public License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/font/system_font_provider.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""System font discovery for PDF rendering.
Provides lazy discovery of Noto fonts installed on the system across
Linux, macOS, and Windows platforms.
"""
from __future__ import annotations
import logging
import os
import sys
from pathlib import Path
from ocrmypdf.font.font_manager import FontManager
log = logging.getLogger(__name__)
class SystemFontProvider:
    """Discovers and provides system-installed Noto fonts with lazy scanning.

    This provider searches standard system font directories for Noto fonts.
    Scanning is performed lazily - only when a font is actually requested
    and not found in the builtin fonts. Results are cached for the lifetime
    of the provider instance.
    """
    # System font directories by platform
    SYSTEM_FONT_DIRS: dict[str, list[Path]] = {
        'linux': [
            Path('/usr/share/fonts'),
            Path('/usr/local/share/fonts'),
            Path.home() / '.fonts',
            Path.home() / '.local/share/fonts',
        ],
        'freebsd': [
            Path('/usr/local/share/fonts'),
            Path.home() / '.fonts',
        ],
        'darwin': [
            Path('/Library/Fonts'),
            Path('/System/Library/Fonts'),
            Path.home() / 'Library/Fonts',
        ],
        # Windows is handled dynamically in _get_font_dirs()
    }
    # Noto font logical names → possible filenames (priority order)
    # The first match found will be used
    NOTO_FONT_PATTERNS: dict[str, list[str]] = {
        'NotoSans-Regular': [
            'NotoSans-Regular.ttf',
            'NotoSans-Regular.otf',
        ],
        'NotoSansArabic-Regular': [
            'NotoSansArabic-Regular.ttf',
            'NotoSansArabic-Regular.otf',
        ],
        'NotoSansDevanagari-Regular': [
            'NotoSansDevanagari-Regular.ttf',
            'NotoSansDevanagari-Regular.otf',
        ],
        'NotoSansCJK-Regular': [
            # Language-specific variants (any will work for CJK)
            'NotoSansCJKsc-Regular.otf',  # Simplified Chinese
            'NotoSansCJKtc-Regular.otf',  # Traditional Chinese
            'NotoSansCJKjp-Regular.otf',  # Japanese
            'NotoSansCJKkr-Regular.otf',  # Korean
            # TTC collections (common on Linux distros)
            'NotoSansCJK-Regular.ttc',
            'NotoSansCJKsc-Regular.ttc',
            # Variable fonts
            'NotoSansCJKsc-VF.otf',
        ],
        'NotoSansThai-Regular': [
            'NotoSansThai-Regular.ttf',
            'NotoSansThai-Regular.otf',
        ],
        'NotoSansHebrew-Regular': [
            'NotoSansHebrew-Regular.ttf',
            'NotoSansHebrew-Regular.otf',
        ],
        'NotoSansBengali-Regular': [
            'NotoSansBengali-Regular.ttf',
            'NotoSansBengali-Regular.otf',
        ],
        'NotoSansTamil-Regular': [
            'NotoSansTamil-Regular.ttf',
            'NotoSansTamil-Regular.otf',
        ],
        'NotoSansGujarati-Regular': [
            'NotoSansGujarati-Regular.ttf',
            'NotoSansGujarati-Regular.otf',
        ],
        'NotoSansTelugu-Regular': [
            'NotoSansTelugu-Regular.ttf',
            'NotoSansTelugu-Regular.otf',
        ],
        'NotoSansKannada-Regular': [
            'NotoSansKannada-Regular.ttf',
            'NotoSansKannada-Regular.otf',
        ],
        'NotoSansMalayalam-Regular': [
            'NotoSansMalayalam-Regular.ttf',
            'NotoSansMalayalam-Regular.otf',
        ],
        'NotoSansMyanmar-Regular': [
            'NotoSansMyanmar-Regular.ttf',
            'NotoSansMyanmar-Regular.otf',
        ],
        'NotoSansKhmer-Regular': [
            'NotoSansKhmer-Regular.ttf',
            'NotoSansKhmer-Regular.otf',
        ],
        'NotoSansLao-Regular': [
            'NotoSansLao-Regular.ttf',
            'NotoSansLao-Regular.otf',
        ],
        'NotoSansGeorgian-Regular': [
            'NotoSansGeorgian-Regular.ttf',
            'NotoSansGeorgian-Regular.otf',
        ],
        'NotoSansArmenian-Regular': [
            'NotoSansArmenian-Regular.ttf',
            'NotoSansArmenian-Regular.otf',
        ],
        'NotoSansEthiopic-Regular': [
            'NotoSansEthiopic-Regular.ttf',
            'NotoSansEthiopic-Regular.otf',
        ],
        'NotoSansSinhala-Regular': [
            'NotoSansSinhala-Regular.ttf',
            'NotoSansSinhala-Regular.otf',
        ],
        'NotoSansGurmukhi-Regular': [
            'NotoSansGurmukhi-Regular.ttf',
            'NotoSansGurmukhi-Regular.otf',
        ],
        'NotoSansOriya-Regular': [
            'NotoSansOriya-Regular.ttf',
            'NotoSansOriya-Regular.otf',
        ],
        'NotoSansTibetan-Regular': [
            'NotoSansTibetan-Regular.ttf',
            'NotoSansTibetan-Regular.otf',
        ],
    }
    def __init__(self) -> None:
        """Initialize system font provider with empty caches."""
        # Cache: font_name -> FontManager (successfully loaded fonts)
        self._font_cache: dict[str, FontManager] = {}
        # Negative cache: font names we've searched for but not found
        self._not_found: set[str] = set()
        # Cached font directories (computed lazily)
        self._font_dirs: list[Path] | None = None
    def _get_platform(self) -> str:
        """Get the current platform identifier.

        Returns:
            Platform string: 'linux', 'darwin', 'windows', or 'freebsd'
        """
        if sys.platform == 'win32':
            return 'windows'
        elif sys.platform == 'darwin':
            return 'darwin'
        elif 'freebsd' in sys.platform:
            return 'freebsd'
        else:
            # Default: treat any other POSIX platform like Linux.
            return 'linux'
    def _get_font_dirs(self) -> list[Path]:
        """Get font directories for the current platform.

        Returns:
            List of paths to search for fonts (may include non-existent paths)
        """
        if self._font_dirs is not None:
            return self._font_dirs
        platform = self._get_platform()
        if platform == 'windows':
            # Get Windows font directories from environment
            windir = os.environ.get('WINDIR', r'C:\Windows')
            self._font_dirs = [Path(windir) / 'Fonts']
            # User-installed fonts (Windows 10+)
            localappdata = os.environ.get('LOCALAPPDATA')
            if localappdata:
                self._font_dirs.append(
                    Path(localappdata) / 'Microsoft' / 'Windows' / 'Fonts'
                )
        else:
            self._font_dirs = list(self.SYSTEM_FONT_DIRS.get(platform, []))
        return self._font_dirs
    def _find_font_file(self, font_name: str) -> Path | None:
        """Search system directories for a font file.

        Args:
            font_name: Logical font name (e.g., 'NotoSansCJK-Regular')

        Returns:
            Path to font file if found, None otherwise
        """
        if font_name not in self.NOTO_FONT_PATTERNS:
            return None
        patterns = self.NOTO_FONT_PATTERNS[font_name]
        # NOTE: directory order takes precedence over pattern order - a match
        # in an earlier directory wins even for a lower-priority filename.
        for font_dir in self._get_font_dirs():
            if not font_dir.exists():
                continue
            for pattern in patterns:
                # Search recursively for the font file
                try:
                    matches = list(font_dir.rglob(pattern))
                    if matches:
                        log.debug(
                            "Found system font %s at %s", font_name, matches[0]
                        )
                        return matches[0]
                except PermissionError:
                    # Skip directories we can't read
                    continue
        return None
    def get_font(self, font_name: str) -> FontManager | None:
        """Get a FontManager for the named font (lazy loading).

        This method implements lazy scanning: fonts are only searched for
        when first requested. Results (both positive and negative) are
        cached for subsequent calls.

        Args:
            font_name: Logical font name (e.g., 'NotoSansCJK-Regular')

        Returns:
            FontManager if font is found and loadable, None otherwise
        """
        # Check positive cache first
        if font_name in self._font_cache:
            return self._font_cache[font_name]
        # Check negative cache (already searched, not found)
        if font_name in self._not_found:
            return None
        # Lazy scan for this specific font
        font_path = self._find_font_file(font_name)
        if font_path is not None:
            try:
                fm = FontManager(font_path)
                self._font_cache[font_name] = fm
                return fm
            except Exception as e:
                log.warning(
                    "Found font %s at %s but failed to load: %s",
                    font_name,
                    font_path,
                    e,
                )
        # Cache negative result (covers both "not found" and "failed to load",
        # so a broken font file is not retried on every request)
        self._not_found.add(font_name)
        return None
    def get_available_fonts(self) -> list[str]:
        """Get list of font names this provider can potentially find.

        Note: This returns all font names we know patterns for, not
        necessarily fonts that are actually installed. Use get_font()
        to check if a specific font is available.

        Returns:
            List of logical font names
        """
        return list(self.NOTO_FONT_PATTERNS.keys())
    def get_fallback_font(self) -> FontManager:
        """Get the glyphless fallback font.

        Raises:
            NotImplementedError: System provider doesn't provide fallback.
                Use BuiltinFontProvider for the fallback font.
        """
        raise NotImplementedError(
            "SystemFontProvider does not provide a fallback font. "
            "Use BuiltinFontProvider for Occulta.ttf fallback."
        )
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/font/system_font_provider.py",
"license": "Mozilla Public License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/fpdf_renderer/renderer.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""fpdf2-based PDF renderer for OCR text layers.
This module provides PDF rendering using fpdf2 for creating searchable
OCR text layers.
"""
from __future__ import annotations
import logging
from dataclasses import dataclass
from math import atan, cos, degrees, radians, sin, sqrt
from pathlib import Path
from fpdf import FPDF
from fpdf.enums import PDFResourceType, TextMode
from pikepdf import Matrix, Rectangle
from ocrmypdf.font import FontManager, MultiFontManager
from ocrmypdf.models.ocr_element import OcrClass, OcrElement
log = logging.getLogger(__name__)
def transform_point(matrix: Matrix, x: float, y: float) -> tuple[float, float]:
    """Apply *matrix* to the point (x, y).

    Args:
        matrix: pikepdf Matrix to apply
        x: X coordinate
        y: Y coordinate

    Returns:
        Tuple of (transformed_x, transformed_y)
    """
    # pikepdf transforms rectangles, so map a zero-area rectangle located at
    # (x, y) and read back its lower-left corner.
    degenerate = matrix.transform(Rectangle(x, y, x, y))
    return (degenerate.llx, degenerate.lly)
def transform_box(
    matrix: Matrix, left: float, top: float, right: float, bottom: float
) -> tuple[float, float, float, float]:
    """Apply *matrix* to a bounding box.

    Args:
        matrix: pikepdf Matrix to apply
        left: Left edge of box
        top: Top edge of box
        right: Right edge of box
        bottom: Bottom edge of box

    Returns:
        Tuple of (llx, lly, width, height) of the transformed box
    """
    out = matrix.transform(Rectangle(left, top, right, bottom))
    return (out.llx, out.lly, out.width, out.height)
@dataclass
class DebugRenderOptions:
    """Toggles for debug overlays drawn during rendering.

    Each flag enables a colored visualization of one piece of OCR structure
    on the output page.
    """
    render_baseline: bool = False  # magenta lines along text baselines
    render_line_bbox: bool = False  # blue rectangles around lines
    render_word_bbox: bool = False  # green rectangles around words
class CoordinateTransform:
    """Converts OCR pixel coordinates into PDF points for fpdf2 output.

    hOCR and fpdf2 both use a top-left origin, so conversion is a pure
    scale by 72/dpi with no Y-axis flip.
    """
    def __init__(self, dpi: float, page_width_px: float, page_height_px: float):
        """Record the source DPI and the page dimensions in pixels."""
        self.dpi = dpi
        self.page_width_px = page_width_px
        self.page_height_px = page_height_px
    @property
    def page_width_pt(self) -> float:
        """Page width in PDF points."""
        return self.px_to_pt(self.page_width_px)
    @property
    def page_height_pt(self) -> float:
        """Page height in PDF points."""
        return self.px_to_pt(self.page_height_px)
    def px_to_pt(self, value: float) -> float:
        """Convert a pixel measurement to PDF points (72 pt per inch)."""
        return value * 72.0 / self.dpi
    def bbox_to_pt(self, bbox) -> tuple[float, float, float, float]:
        """Convert a BoundingBox from pixels to (left, top, right, bottom) points."""
        convert = self.px_to_pt
        return (
            convert(bbox.left),
            convert(bbox.top),
            convert(bbox.right),
            convert(bbox.bottom),
        )
class Fpdf2PdfRenderer:
"""Renders OcrElement trees to PDF using fpdf2.
This class provides the core rendering logic for converting OCR output
into PDF text layers using fpdf2's text drawing capabilities.
"""
def __init__(
self,
page: OcrElement,
dpi: float,
multi_font_manager: MultiFontManager,
invisible_text: bool = True,
image: Path | None = None,
debug_render_options: DebugRenderOptions | None = None,
):
"""Initialize renderer.
Args:
page: Root OcrElement (must be ocr_page)
dpi: Source image DPI
multi_font_manager: MultiFontManager instance
invisible_text: If True, render text as invisible (text mode 3)
image: Optional path to image to overlay on top of the text layer,
creating a sandwich PDF (text underneath, image on top)
debug_render_options: Options for debug visualization
Raises:
ValueError: If page is not an ocr_page or lacks a bounding box
"""
if page.ocr_class != OcrClass.PAGE:
raise ValueError("Root element must be ocr_page")
if page.bbox is None:
raise ValueError("Page must have bounding box")
self.page = page
self.dpi = dpi
self.multi_font_manager = multi_font_manager
self.invisible_text = invisible_text
self.image = image
self.debug_options = debug_render_options or DebugRenderOptions()
# Setup coordinate transform
self.coord_transform = CoordinateTransform(
dpi=dpi,
page_width_px=page.bbox.width,
page_height_px=page.bbox.height,
)
# Registered fonts: font_path -> fpdf_family_name
self._registered_fonts: dict[str, str] = {}
# Track whether we've already logged the info-level suppression message
self._logged_aspect_ratio_suppression = False
def render(self, output_path: Path) -> None:
"""Render page to PDF file.
Args:
output_path: Output PDF file path
"""
# Create PDF with custom page size
pdf = FPDF(
unit="pt",
format=(
self.coord_transform.page_width_pt,
self.coord_transform.page_height_pt,
),
)
pdf.set_auto_page_break(auto=False)
# Enable text shaping for complex scripts
pdf.set_text_shaping(True)
# Disable cell margin to ensure precise text positioning
# fpdf2's cell() adds c_margin padding by default, which shifts text
pdf.c_margin = 0
# Set text mode for invisible text
if self.invisible_text:
pdf.text_mode = TextMode.INVISIBLE
else:
pdf.text_mode = TextMode.FILL
# Render content to PDF
self.render_to_pdf(pdf)
# Write PDF
pdf.output(str(output_path))
def render_to_pdf(self, pdf: FPDF) -> None:
"""Render page content to an existing FPDF instance.
This method adds a page and renders all content. Used by both
single-page rendering and multi-page rendering.
Args:
pdf: FPDF instance to render into
"""
# Add page with correct dimensions
pdf.add_page(
format=(
self.coord_transform.page_width_pt,
self.coord_transform.page_height_pt,
)
)
# Render all paragraphs
for para in self.page.paragraphs:
self._render_paragraph(pdf, para)
# If no paragraphs, render lines directly
if not self.page.paragraphs:
for line in self.page.lines:
self._render_line(pdf, line)
# Place image on top of text layer (sandwich mode)
if self.image is not None:
pdf.image(
str(self.image),
x=0,
y=0,
w=self.coord_transform.page_width_pt,
h=self.coord_transform.page_height_pt,
)
def _register_font(self, pdf: FPDF, font_manager: FontManager) -> str:
"""Register font with fpdf2 if not already registered.
Args:
pdf: FPDF instance
font_manager: FontManager containing the font
Returns:
Font family name to use with pdf.set_font()
"""
font_path_str = str(font_manager.font_path)
if font_path_str not in self._registered_fonts:
# Use the font filename stem as the family name
family_name = font_manager.font_path.stem
pdf.add_font(family=family_name, fname=font_path_str)
self._registered_fonts[font_path_str] = family_name
return self._registered_fonts[font_path_str]
def _render_paragraph(self, pdf: FPDF, para: OcrElement) -> None:
"""Render a paragraph element.
Args:
pdf: FPDF instance
para: Paragraph OCR element
"""
for line in para.children:
if line.ocr_class in OcrClass.LINE_TYPES:
self._render_line(pdf, line)
def _render_line(self, pdf: FPDF, line: OcrElement) -> None:
    """Render a line element with baseline support.

    Strategy (following pikepdf reference implementation):

    1. Create a baseline_matrix that transforms from hOCR coordinates to
       a coordinate system aligned with the text baseline
    2. For each word, transform its hOCR bbox using baseline_matrix.inverse()
       to get its position in the baseline coordinate system
    3. Render words along the baseline with horizontal scaling

    Args:
        pdf: FPDF instance
        line: Line OCR element
    """
    if line.bbox is None:
        return
    # Validate line bbox: a non-positive height cannot anchor a baseline
    if line.bbox.height <= 0:
        log.error(
            "line box is invalid so we cannot render it: box=%s text=%s",
            line.bbox,
            line.text if hasattr(line, 'text') else '',
        )
        return
    # Convert line bbox to PDF points
    line_left_pt = self.coord_transform.px_to_pt(line.bbox.left)
    line_top_pt = self.coord_transform.px_to_pt(line.bbox.top)
    line_right_pt = self.coord_transform.px_to_pt(line.bbox.right)
    line_bottom_pt = self.coord_transform.px_to_pt(line.bbox.bottom)
    # Note: line_width_pt and line_height_pt not needed since we compute
    # dimensions in the un-rotated coordinate system via matrix transform
    # Debug rendering: draw line bbox (in page coordinates)
    if self.debug_options.render_line_bbox:
        self._render_debug_line_bbox(
            pdf, line_left_pt, line_top_pt, line_right_pt, line_bottom_pt
        )
    # Get textangle (rotation of the entire line)
    textangle = line.textangle or 0.0
    # Read baseline early so we can detect rotation from steep slopes.
    # When Tesseract doesn't report textangle for rotated text, the
    # rotation gets encoded as a very steep baseline slope instead.
    slope = 0.0
    intercept_pt = 0.0
    has_meaningful_baseline = False
    if line.baseline is not None:
        slope = line.baseline.slope
        intercept_pt = self.coord_transform.px_to_pt(line.baseline.intercept)
        if abs(slope) < 0.005:
            # Snap near-zero slopes to exactly horizontal
            slope = 0.0
        has_meaningful_baseline = True
    # Detect text rotation from steep baseline slope.
    # A slope magnitude > 1.0 corresponds to > 45° from horizontal,
    # which indicates the line is rotated, not merely skewed.
    if textangle == 0.0 and abs(slope) > 1.0:
        textangle = degrees(atan(slope))
        # The original baseline slope and intercept are not meaningful
        # after extracting rotation; recalculate intercept from font
        # metrics below.
        slope = 0.0
        has_meaningful_baseline = False
    # Build line_size_aabb_matrix: transforms from page coords to un-rotated
    # line coords. The hOCR bbox is the minimum axis-aligned bounding box
    # enclosing the rotated text.
    # Start at top-left corner of line bbox, then rotate by -textangle
    line_size_aabb_matrix = (
        Matrix()
        .translated(line_left_pt, line_top_pt)
        .rotated(-textangle)  # textangle is counter-clockwise per hOCR spec
    )
    # Get the line dimensions in the un-rotated coordinate system
    # Transform line bbox corners to get the un-rotated dimensions
    inv_line_matrix = line_size_aabb_matrix.inverse()
    # Transform bottom-right corner to get line dimensions in rotated space
    _, _, line_size_width, line_size_height = transform_box(
        inv_line_matrix, line_left_pt, line_top_pt, line_right_pt, line_bottom_pt
    )
    # Get baseline intercept
    if not has_meaningful_baseline:
        # No baseline provided or baseline was used for rotation detection:
        # calculate intercept from font metrics
        default_font_manager = self.multi_font_manager.fonts['NotoSans-Regular']
        ascent, descent, units_per_em = default_font_manager.get_font_metrics()
        ascent_norm = ascent / units_per_em
        descent_norm = descent / units_per_em
        # Baseline intercept based on font metrics: place the baseline so
        # descender space below it is proportional to the font's descent
        intercept_pt = (
            -abs(descent_norm)
            * line_size_height
            / (ascent_norm + abs(descent_norm))
        )
    slope_angle_deg = degrees(atan(slope)) if slope != 0.0 else 0.0
    # Build baseline_matrix: transforms from page coords to baseline coords
    # 1. Start with line_size_aabb_matrix (translates to line corner, rotates)
    # 2. Translate down to bottom of un-rotated line (line_size_height)
    # 3. Apply baseline intercept offset
    # 4. Rotate by baseline slope
    baseline_matrix = (
        line_size_aabb_matrix.translated(
            0, line_size_height
        )  # Move to bottom of line
        .translated(0, intercept_pt)  # Apply baseline intercept
        .rotated(slope_angle_deg)  # Rotate by baseline slope
    )
    # Calculate font size: height from baseline to top of line
    # (intercept_pt is negative or zero, so this is <= line height)
    font_size = line_size_height + intercept_pt
    if font_size < 1.0:
        # Degenerate intercept produced a tiny/negative size; fall back
        # to a fraction of the line height
        font_size = line_size_height * 0.8
    # Total rotation for rendering (textangle + slope)
    total_rotation_deg = -textangle + slope_angle_deg
    # Debug rendering: draw baseline
    if self.debug_options.render_baseline:
        # Baseline starts at origin in baseline coords, extends line width
        baseline_start = transform_point(baseline_matrix, 0, 0)
        baseline_end = transform_point(baseline_matrix, line_size_width, 0)
        pdf.set_draw_color(255, 0, 255)  # Magenta
        pdf.set_line_width(0.75)
        pdf.line(
            baseline_start[0], baseline_start[1], baseline_end[0], baseline_end[1]
        )
    # Extract line language for font selection
    line_language = line.language
    # Get inverse of baseline_matrix for transforming word bboxes
    inv_baseline_matrix = baseline_matrix.inverse()
    # Collect words to render (only WORD children with non-empty text)
    words: list[OcrElement | None] = [
        w for w in line.children if w.ocr_class == OcrClass.WORD and w.text
    ]
    # Suppress lines where the text aspect ratio is implausible.
    # This catches cases where Tesseract failed to detect rotation
    # entirely (slope=0, no textangle) and produced garbage text in a
    # bounding box whose shape doesn't match the text content at all.
    if not self._check_aspect_ratio_plausible(
        pdf, words, font_size, slope_angle_deg,
        line_size_width, line_size_height, line_language,
    ):
        return
    # Collect word rendering data: (text, x_baseline, font_family, word_tz)
    word_render_data: list[tuple[str, float, str, float]] = []
    for word in words:
        if word is None or not word.text or word.bbox is None:
            continue
        word_left_pt = self.coord_transform.px_to_pt(word.bbox.left)
        word_top_pt = self.coord_transform.px_to_pt(word.bbox.top)
        word_right_pt = self.coord_transform.px_to_pt(word.bbox.right)
        word_bottom_pt = self.coord_transform.px_to_pt(word.bbox.bottom)
        word_width_pt = word_right_pt - word_left_pt
        # Debug rendering: draw word bbox (in page coordinates)
        if self.debug_options.render_word_bbox:
            self._render_debug_word_bbox(
                pdf, word_left_pt, word_top_pt, word_right_pt, word_bottom_pt
            )
        # Get x position in baseline coordinate system
        box_llx, _, _, _ = transform_box(
            inv_baseline_matrix,
            word_left_pt,
            word_top_pt,
            word_right_pt,
            word_bottom_pt,
        )
        # Select font and compute word-only Tz (horizontal scaling so the
        # rendered word spans exactly its hOCR bbox width)
        font_manager = self.multi_font_manager.select_font_for_word(
            word.text, line_language
        )
        font_family = self._register_font(pdf, font_manager)
        pdf.set_font(font_family, size=font_size)
        natural_width = pdf.get_string_width(word.text)
        if natural_width > 0 and word_width_pt > 0:
            word_tz = (word_width_pt / natural_width) * 100
        else:
            word_tz = 100.0
        word_render_data.append((word.text, box_llx, font_family, word_tz))
    if not word_render_data:
        return
    # Emit single BT block for the entire line using raw PDF operators.
    # This avoids a poppler bug where Tz (horizontal scaling) is not
    # carried across BT/ET boundaries, affecting all poppler-based tools
    # and viewers (Evince, pdftotext, etc.). By keeping all words in a
    # single BT block with relative Td positioning and per-word Tz, we
    # ensure correct inter-word spacing.
    self._emit_line_bt_block(
        pdf,
        word_render_data,
        baseline_matrix,
        font_size,
        total_rotation_deg,
    )
def _check_aspect_ratio_plausible(
    self,
    pdf: FPDF,
    words: list[OcrElement | None],
    font_size: float,
    slope_angle_deg: float,
    line_size_width: float,
    line_size_height: float,
    line_language: str | None,
) -> bool:
    """Check whether the line's aspect ratio is plausible for its text.

    Compares the aspect ratio of the OCR bounding box to the aspect ratio
    the text would have if rendered normally (accounting for baseline
    slope). A large mismatch indicates Tesseract misread rotated text
    without detecting the rotation.

    Side effect: calls ``pdf.set_font`` to measure text, which changes
    the active font on the FPDF instance.

    Returns:
        True if plausible (rendering should proceed), False to suppress.
    """
    # Degenerate geometry: nothing to check, let rendering proceed
    if line_size_width <= 0 or line_size_height <= 0 or font_size <= 0:
        return True
    # Fast path: most lines are wider than they are tall, which is
    # the normal shape for horizontal text. Only tall-narrow boxes
    # (height > width) need the expensive font measurement check.
    if line_size_width >= line_size_height:
        return True
    line_text = ' '.join(
        w.text for w in words if w is not None and w.text
    )
    if not line_text:
        return True
    # Measure the natural rendered width of the line text
    font_manager = self.multi_font_manager.select_font_for_word(
        line_text, line_language
    )
    font_family = self._register_font(pdf, font_manager)
    # NOTE(review): size is rounded here but font_size is used unrounded
    # below for expected_w/expected_h — presumably close enough for a
    # 10x-threshold heuristic; confirm intent.
    pdf.set_font(font_family, size=round(font_size))
    natural_width = pdf.get_string_width(line_text)
    if natural_width <= 0:
        return True
    # Compute the AABB the text would occupy considering baseline slope
    theta = radians(abs(slope_angle_deg))
    expected_w = natural_width * cos(theta) + font_size * sin(theta)
    expected_h = natural_width * sin(theta) + font_size * cos(theta)
    if expected_h <= 0:
        return True
    actual_aspect = line_size_width / line_size_height
    expected_aspect = expected_w / expected_h
    ratio = actual_aspect / expected_aspect
    # Accept anything within a factor of 10 of the expected shape
    if ratio >= 0.1:
        return True
    # Implausible aspect ratio — suppress this line
    log.debug(
        "Suppressing text with improbable aspect ratio: "
        "actual=%.3f expected=%.3f ratio=%.4f text=%r",
        actual_aspect,
        expected_aspect,
        ratio,
        line_text[:80],
    )
    # Log the user-facing warning only once per renderer instance
    if not self._logged_aspect_ratio_suppression:
        log.info(
            "Suppressing OCR output text with improbable aspect ratio"
        )
        self._logged_aspect_ratio_suppression = True
    return False
def _emit_line_bt_block(
    self,
    pdf: FPDF,
    word_render_data: list[tuple[str, float, str, float]],
    baseline_matrix: Matrix,
    font_size: float,
    total_rotation_deg: float,
) -> None:
    """Emit a single BT block for the entire line using raw PDF operators.

    Writes all words in a single BT..ET block with relative Td positioning
    and per-word Tz. Each non-last word gets a trailing space appended, with
    Tz calculated so the rendered width of "word " spans from the current
    word's start to the next word's start. This works around a poppler bug
    where Tz is not carried across BT/ET boundaries, which affects all
    poppler-based viewers and tools (Evince, pdftotext, etc.).

    Args:
        pdf: FPDF instance
        word_render_data: List of (text, x_baseline, font_family, word_tz)
            tuples, one per word on this line
        baseline_matrix: Transform from baseline coords to page coords
        font_size: Font size in points
        total_rotation_deg: Total rotation angle (textangle + slope)
    """
    page_height = self.coord_transform.page_height_pt
    # Compute baseline direction in PDF coordinates for rotation
    has_rotation = abs(total_rotation_deg) > 0.01
    bx0, by0_fpdf = transform_point(baseline_matrix, 0, 0)
    # fpdf2 uses a y-down coordinate system; PDF content streams are
    # y-up, so flip against the page height
    by0_pdf = page_height - by0_fpdf
    ops: list[str] = []
    if has_rotation:
        # Compute direction vector along the baseline in PDF coordinates
        bx1, by1_fpdf = transform_point(baseline_matrix, 100, 0)
        by1_pdf = page_height - by1_fpdf
        dx = bx1 - bx0
        dy = by1_pdf - by0_pdf
        length = sqrt(dx * dx + dy * dy)
        if length > 0:
            cos_a = dx / length
            sin_a = dy / length
        else:
            cos_a = 1.0
            sin_a = 0.0
        # Save graphics state, apply rotation+translation via cm.
        # The cm maps local coordinates (baseline-aligned, x along text)
        # to PDF page coordinates.
        ops.append('q')
        ops.append(
            f'{cos_a:.6f} {sin_a:.6f} {-sin_a:.6f} {cos_a:.6f} '
            f'{bx0:.2f} {by0_pdf:.2f} cm'
        )
    # Begin text object
    ops.append('BT')
    # Text render mode: 3 = invisible, 0 = fill
    tr = 3 if self.invisible_text else 0
    ops.append(f'{tr} Tr')
    # Initial text position
    first_x_baseline = word_render_data[0][1]
    if has_rotation:
        # In the cm-transformed space, origin is at the baseline start
        ops.append(f'{first_x_baseline:.2f} 0 Td')
    else:
        # Direct PDF coordinates
        page_x, page_y_fpdf = transform_point(
            baseline_matrix, first_x_baseline, 0
        )
        page_y_pdf = page_height - page_y_fpdf
        ops.append(f'{page_x:.2f} {page_y_pdf:.2f} Td')
    prev_font_family: str | None = None
    prev_x_baseline = first_x_baseline
    for i, (text, x_baseline, font_family, word_tz) in enumerate(
        word_render_data
    ):
        is_last = i == len(word_render_data) - 1
        # Set font if changed (Tf operator plus resource registration)
        if font_family != prev_font_family:
            pdf.set_font(font_family, size=font_size)
            # Register font resource on this page so the /F<i> name in
            # our raw Tf operator resolves in the page's /Resources
            pdf._resource_catalog.add(
                PDFResourceType.FONT, pdf.current_font.i, pdf.page
            )
            ops.append(
                f'/F{pdf.current_font.i} {pdf.font_size_pt:.2f} Tf'
            )
            prev_font_family = font_family
        # Relative positioning (for words after the first)
        if i > 0:
            if has_rotation:
                # In rotated space, advance is purely along x-axis
                dx_baseline = x_baseline - prev_x_baseline
                ops.append(f'{dx_baseline:.2f} 0 Td')
            else:
                # Non-rotated: compute delta in PDF coordinates
                px_prev, py_prev_f = transform_point(
                    baseline_matrix, prev_x_baseline, 0
                )
                px_curr, py_curr_f = transform_point(
                    baseline_matrix, x_baseline, 0
                )
                dx_pdf = px_curr - px_prev
                # Flip y delta for PDF coordinates (y-up)
                dy_pdf = -(py_curr_f - py_prev_f)
                ops.append(f'{dx_pdf:.2f} {dy_pdf:.2f} Td')
        # Determine text to render and compute Tz
        if not is_last:
            next_text, next_x_baseline, _, _ = word_render_data[i + 1]
            advance = next_x_baseline - x_baseline
            # Add trailing space unless both words are CJK-only
            # (CJK scripts do not separate words with spaces)
            if (
                advance > 0
                and not (
                    self._is_cjk_only(text)
                    and self._is_cjk_only(next_text)
                )
            ):
                text_to_render = text + ' '
                natural_w = pdf.get_string_width(text_to_render)
                # Stretch "word " so it spans exactly to the next
                # word's start position
                render_tz = (
                    (advance / natural_w) * 100
                    if natural_w > 0
                    else word_tz
                )
            else:
                text_to_render = text
                render_tz = word_tz
        else:
            text_to_render = text
            render_tz = word_tz
        ops.append(f'{render_tz:.2f} Tz')
        ops.append(self._encode_shaped_text(pdf, text_to_render))
        prev_x_baseline = x_baseline
    # End text object
    ops.append('ET')
    if has_rotation:
        ops.append('Q')
    pdf._out('\n'.join(ops))
    # Reset fpdf2's internal stretching tracking so subsequent API calls
    # don't think Tz is still set from our raw operators
    pdf.font_stretching = 100
def _encode_shaped_text(self, pdf: FPDF, text: str) -> str:
    """Encode text using HarfBuzz text shaping for complex script support.

    Unlike font.encode_text() which maps unicode characters one-by-one to
    glyph IDs, this uses HarfBuzz to handle BiDi reordering, Arabic joining
    forms, Devanagari conjuncts, and other complex script shaping. Falls
    back to encode_text() when text shaping is not enabled.
    """
    font = pdf.current_font
    shaping = pdf.text_shaping
    if shaping and shaping.get("use_shaping_engine"):
        glyph_infos = font.shape_text(text, pdf.font_size_pt, shaping)
        if glyph_infos:
            chars = [
                chr(info["mapped_char"])
                for info in glyph_infos
                if info["mapped_char"] is not None
            ]
            if chars:
                return f"({font.escape_text(''.join(chars))}) Tj"
    # Shaping disabled or produced nothing usable: plain per-char encoding
    return font.encode_text(text)
def _is_cjk_only(self, text: str) -> bool:
"""Check if text contains only CJK characters.
CJK scripts don't use spaces between words, so we should not insert
spaces between adjacent CJK words.
Args:
text: Text to check
Returns:
True if text contains only CJK characters
"""
for char in text:
cp = ord(char)
# Check if character is in CJK ranges
if not (
0x4E00 <= cp <= 0x9FFF # CJK Unified Ideographs
or 0x3400 <= cp <= 0x4DBF # CJK Extension A
or 0x20000 <= cp <= 0x2A6DF # CJK Extension B
or 0x2A700 <= cp <= 0x2B73F # CJK Extension C
or 0x2B740 <= cp <= 0x2B81F # CJK Extension D
or 0x2B820 <= cp <= 0x2CEAF # CJK Extension E
or 0x2CEB0 <= cp <= 0x2EBEF # CJK Extension F
or 0x30000 <= cp <= 0x3134F # CJK Extension G
or 0x3040 <= cp <= 0x309F # Hiragana
or 0x30A0 <= cp <= 0x30FF # Katakana
or 0x31F0 <= cp <= 0x31FF # Katakana Phonetic Extensions
or 0xAC00 <= cp <= 0xD7AF # Hangul Syllables
or 0x1100 <= cp <= 0x11FF # Hangul Jamo
or 0x3130 <= cp <= 0x318F # Hangul Compatibility Jamo
or 0xA960 <= cp <= 0xA97F # Hangul Jamo Extended-A
or 0xD7B0 <= cp <= 0xD7FF # Hangul Jamo Extended-B
or 0x3000 <= cp <= 0x303F # CJK Symbols and Punctuation
or 0xFF00 <= cp <= 0xFFEF # Halfwidth and Fullwidth Forms
):
return False
return True
def _render_debug_line_bbox(
    self,
    pdf: FPDF,
    left: float,
    top: float,
    right: float,
    bottom: float,
) -> None:
    """Outline the line's bounding box in blue for debug output."""
    pdf.set_draw_color(0, 0, 255)  # Blue
    pdf.set_line_width(0.5)
    box_w = right - left
    box_h = bottom - top
    pdf.rect(left, top, box_w, box_h)
def _render_debug_baseline(
    self,
    pdf: FPDF,
    x: float,
    y: float,
    width: float,
    rotation_deg: float,
) -> None:
    """Draw a magenta debug line along the text baseline."""
    pdf.set_draw_color(255, 0, 255)  # Magenta
    pdf.set_line_width(0.75)
    x_end = x + width
    if abs(rotation_deg) <= 0.1:
        # Effectively horizontal: draw directly
        pdf.line(x, y, x_end, y)
    else:
        # Rotate about the baseline start point before drawing
        with pdf.rotation(rotation_deg, x=x, y=y):
            pdf.line(x, y, x_end, y)
def _render_debug_word_bbox(
    self,
    pdf: FPDF,
    left: float,
    top: float,
    right: float,
    bottom: float,
) -> None:
    """Outline the word's bounding box in green for debug output."""
    pdf.set_draw_color(0, 255, 0)  # Green
    pdf.set_line_width(0.3)
    box_w = right - left
    box_h = bottom - top
    pdf.rect(left, top, box_w, box_h)
class Fpdf2MultiPageRenderer:
    """Render several OcrElement pages into one PDF document.

    Delegates the per-page work to Fpdf2PdfRenderer while sharing a
    single FPDF instance and one font-registration table across pages.
    """

    def __init__(
        self,
        pages_data: list[tuple[int, OcrElement, float]],
        multi_font_manager: MultiFontManager,
        invisible_text: bool = True,
        debug_render_options: DebugRenderOptions | None = None,
    ):
        """Set up the multi-page renderer.

        Args:
            pages_data: List of (pageno, ocr_tree, dpi) tuples
            multi_font_manager: Font manager shared by every page
            invisible_text: Whether to render invisible text
            debug_render_options: Options for debug visualization
        """
        self.pages_data = pages_data
        self.multi_font_manager = multi_font_manager
        self.invisible_text = invisible_text
        self.debug_options = debug_render_options or DebugRenderOptions()

    def render(self, output_path: Path) -> None:
        """Write every page into a single multi-page PDF.

        Args:
            output_path: Output PDF file path

        Raises:
            ValueError: If there are no pages to render.
        """
        if not self.pages_data:
            raise ValueError("No pages to render")
        # One FPDF instance for the whole document; page size is chosen
        # per-page by each Fpdf2PdfRenderer.
        pdf = FPDF(unit="pt")
        pdf.set_auto_page_break(auto=False)
        pdf.set_text_shaping(True)
        # Zero out cell margin: fpdf2's default c_margin padding would
        # shift text away from its measured position.
        pdf.c_margin = 0
        pdf.text_mode = (
            TextMode.INVISIBLE if self.invisible_text else TextMode.FILL
        )
        # Shared registration table so each font file is registered with
        # fpdf2 at most once across all pages.
        font_registry: dict[str, str] = {}
        for _pageno, ocr_page, page_dpi in self.pages_data:
            # Pages without geometry cannot be rendered
            if ocr_page.bbox is None:
                continue
            page_renderer = Fpdf2PdfRenderer(
                page=ocr_page,
                dpi=page_dpi,
                multi_font_manager=self.multi_font_manager,
                invisible_text=self.invisible_text,
                debug_render_options=self.debug_options,
            )
            # Hand the shared registry to the per-page renderer
            page_renderer._registered_fonts = font_registry
            page_renderer.render_to_pdf(pdf)
        pdf.output(str(output_path))
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/fpdf_renderer/renderer.py",
"license": "Mozilla Public License 2.0",
"lines": 758,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/hocrtransform/hocr_parser.py | # SPDX-FileCopyrightText: 2010 Jonathan Brinley
# SPDX-FileCopyrightText: 2013-2014 Julien Pfefferkorn
# SPDX-FileCopyrightText: 2023-2025 James R. Barlow
# SPDX-License-Identifier: MIT
"""Parser for hOCR format files.
This module provides functionality to parse hOCR files (HTML-based OCR format)
and convert them to the engine-agnostic OcrElement tree structure.
For details of the hOCR format, see:
http://kba.github.io/hocr-spec/1.2/
"""
from __future__ import annotations
import logging
import os
import re
import unicodedata
from pathlib import Path
from typing import Literal, cast
from xml.etree import ElementTree as ET
from ocrmypdf.models.ocr_element import (
Baseline,
BoundingBox,
FontInfo,
OcrClass,
OcrElement,
)
TextDirection = Literal["ltr", "rtl"]
log = logging.getLogger(__name__)
Element = ET.Element
class HocrParseError(Exception):
    """Error while parsing hOCR file.

    Raised when the input is not well-formed XML or a required hOCR
    element/property (e.g. the ocr_page element or its bbox) is missing.
    """
class HocrParser:
    """Parser for hOCR format files.

    Converts hOCR XML/HTML files into OcrElement trees.

    The hOCR format uses HTML with special class attributes (ocr_page, ocr_line,
    ocrx_word, etc.) and a title attribute containing properties like bbox,
    baseline, and confidence scores. All regexes below search within that
    title attribute string.
    """

    # Regex patterns for parsing hOCR title attributes
    _bbox_pattern = re.compile(
        r'''
        bbox \s+
        (\d+) \s+ # left: uint
        (\d+) \s+ # top: uint
        (\d+) \s+ # right: uint
        (\d+) # bottom: uint
        ''',
        re.VERBOSE,
    )
    _baseline_pattern = re.compile(
        r'''
        baseline \s+
        ([\-\+]?\d*\.?\d*) \s+ # slope: +/- decimal float
        ([\-\+]?\d+) # intercept: +/- int
        ''',
        re.VERBOSE,
    )
    _textangle_pattern = re.compile(
        r'''
        textangle \s+
        ([\-\+]?\d*\.?\d*) # angle: +/- decimal float
        ''',
        re.VERBOSE,
    )
    _x_wconf_pattern = re.compile(
        r'''
        x_wconf \s+
        (\d+) # confidence: uint (0-100)
        ''',
        re.VERBOSE,
    )
    _x_fsize_pattern = re.compile(
        r'''
        x_fsize \s+
        (\d*\.?\d+) # font size: float
        ''',
        re.VERBOSE,
    )
    _x_font_pattern = re.compile(
        r'''
        x_font \s+
        ([^\s;]+) # font name: non-whitespace, non-semicolon string
        ''',
        re.VERBOSE,
    )
    _ppageno_pattern = re.compile(
        r'''
        ppageno \s+
        (\d+) # page number: uint
        ''',
        re.VERBOSE,
    )
    _scan_res_pattern = re.compile(
        r'''
        scan_res \s+
        (\d+) \s+ # x resolution
        (\d+) # y resolution
        ''',
        re.VERBOSE,
    )

    def __init__(self, hocr_file: str | Path):
        """Initialize the parser with an hOCR file.

        Args:
            hocr_file: Path to the hOCR file to parse

        Raises:
            HocrParseError: If the file cannot be parsed
        """
        self._hocr_path = Path(hocr_file)
        try:
            self._tree = ET.parse(os.fspath(hocr_file))
        except ET.ParseError as e:
            raise HocrParseError(f"Failed to parse hOCR file: {e}") from e
        # Detect XML namespace from the root tag; hOCR emitted as XHTML
        # carries a '{...}' namespace prefix that all XPath queries need
        root_tag = self._tree.getroot().tag
        matches = re.match(r'({.*})html', root_tag)
        self._xmlns = matches.group(1) if matches else ''

    def parse(self) -> OcrElement:
        """Parse the hOCR file and return an OcrElement tree.

        Returns:
            The root OcrElement (ocr_page) containing the document structure

        Raises:
            HocrParseError: If no ocr_page element is found
        """
        # Find the first ocr_page element (only one page per file is read)
        page_div = self._tree.find(self._xpath('div', 'ocr_page'))
        if page_div is None:
            raise HocrParseError("No ocr_page element found in hOCR file")
        return self._parse_page(page_div)

    def _xpath(self, html_tag: str, html_class: str | None = None) -> str:
        """Build an XPath expression for finding elements.

        Args:
            html_tag: HTML tag name (e.g., 'div', 'span', 'p')
            html_class: Optional class attribute to match

        Returns:
            XPath expression string (descendant search, namespace-prefixed)
        """
        xpath = f".//{self._xmlns}{html_tag}"
        if html_class:
            xpath += f"[@class='{html_class}']"
        return xpath

    def _parse_page(self, page_elem: Element) -> OcrElement:
        """Parse an ocr_page element.

        Args:
            page_elem: The XML element with class="ocr_page"

        Returns:
            OcrElement representing the page

        Raises:
            HocrParseError: If the page has no bbox property
        """
        title = page_elem.attrib.get('title', '')
        bbox = self._parse_bbox(title)
        if bbox is None:
            raise HocrParseError("ocr_page missing bbox")
        # Parse page-level properties
        page_number = self._parse_ppageno(title)
        dpi = self._parse_scan_res(title)
        page = OcrElement(
            ocr_class=OcrClass.PAGE,
            bbox=bbox,
            page_number=page_number,
            dpi=dpi,
        )
        # Parse child paragraphs
        for par_elem in page_elem.iterfind(self._xpath('p', 'ocr_par')):
            paragraph = self._parse_paragraph(par_elem)
            if paragraph is not None:
                page.children.append(paragraph)
        # If no paragraphs found, check for words directly under page
        # (some Tesseract output structures)
        if not page.children:
            for word_elem in page_elem.iterfind(self._xpath('span', 'ocrx_word')):
                word = self._parse_word(word_elem)
                if word is not None:
                    page.children.append(word)
        return page

    def _parse_paragraph(self, par_elem: Element) -> OcrElement | None:
        """Parse an ocr_par element.

        Args:
            par_elem: The XML element with class="ocr_par"

        Returns:
            OcrElement representing the paragraph, or None if empty
        """
        title = par_elem.attrib.get('title', '')
        bbox = self._parse_bbox(title)
        # Get direction and language from attributes; anything other
        # than 'ltr'/'rtl' is treated as unspecified
        dir_attr = par_elem.attrib.get('dir')
        direction: TextDirection | None = (
            cast(TextDirection, dir_attr) if dir_attr in ('ltr', 'rtl') else None
        )
        language = par_elem.attrib.get('lang')
        paragraph = OcrElement(
            ocr_class=OcrClass.PARAGRAPH,
            bbox=bbox,
            direction=direction,
            language=language,
        )
        # Parse child lines: all hOCR line-level classes are accepted
        line_classes = {
            'ocr_line',
            'ocr_header',
            'ocr_footer',
            'ocr_caption',
            'ocr_textfloat',
        }
        for span_elem in par_elem.iterfind(self._xpath('span')):
            elem_class = span_elem.attrib.get('class', '')
            if elem_class in line_classes:
                line = self._parse_line(span_elem, elem_class, direction, language)
                if line is not None:
                    paragraph.children.append(line)
        # Return None if paragraph is empty
        if not paragraph.children:
            return None
        return paragraph

    def _parse_line(
        self,
        line_elem: Element,
        ocr_class: str,
        parent_direction: TextDirection | None,
        parent_language: str | None,
    ) -> OcrElement | None:
        """Parse a line element (ocr_line, ocr_header, etc.).

        Args:
            line_elem: The XML element representing the line
            ocr_class: The hOCR class of the line
            parent_direction: Text direction inherited from parent
            parent_language: Language inherited from parent

        Returns:
            OcrElement representing the line, or None if empty or
            lacking a bbox
        """
        title = line_elem.attrib.get('title', '')
        bbox = self._parse_bbox(title)
        if bbox is None:
            return None
        baseline = self._parse_baseline(title)
        textangle = self._parse_textangle(title)
        # Inherit direction and language from parent if not specified
        dir_attr = line_elem.attrib.get('dir')
        if dir_attr in ('ltr', 'rtl'):
            direction: TextDirection | None = cast(TextDirection, dir_attr)
        else:
            direction = parent_direction
        language = line_elem.attrib.get('lang') or parent_language
        line = OcrElement(
            ocr_class=ocr_class,
            bbox=bbox,
            baseline=baseline,
            textangle=textangle,
            direction=direction,
            language=language,
        )
        # Parse child words
        for word_elem in line_elem.iterfind(self._xpath('span', 'ocrx_word')):
            word = self._parse_word(word_elem)
            if word is not None:
                line.children.append(word)
        # Return None if line has no words
        if not line.children:
            return None
        return line

    def _parse_word(self, word_elem: Element) -> OcrElement | None:
        """Parse an ocrx_word element.

        Args:
            word_elem: The XML element with class="ocrx_word"

        Returns:
            OcrElement representing the word, or None if empty
        """
        title = word_elem.attrib.get('title', '')
        bbox = self._parse_bbox(title)
        # Get the text content (including nested markup like <em>)
        text = self._get_element_text(word_elem)
        text = self._normalize_text(text)
        if not text:
            return None
        # Parse confidence (x_wconf is 0-100, convert to 0.0-1.0)
        confidence = self._parse_x_wconf(title)
        if confidence is not None:
            confidence = confidence / 100.0
        # Parse font info
        font = self._parse_font_info(title)
        return OcrElement(
            ocr_class=OcrClass.WORD,
            bbox=bbox,
            text=text,
            confidence=confidence,
            font=font,
        )

    def _get_element_text(self, element: Element) -> str:
        """Get the full text content of an element including children.

        Recursively concatenates element.text, each child's full text,
        and element.tail. NOTE(review): the top-level element's tail
        (text following the element) is included too — presumably only
        whitespace in practice, which _normalize_text strips; confirm.

        Args:
            element: XML element

        Returns:
            Combined text content
        """
        text = element.text if element.text is not None else ''
        for child in element:
            text += self._get_element_text(child)
        text += element.tail if element.tail is not None else ''
        return text

    @staticmethod
    def _normalize_text(text: str) -> str:
        """Normalize text using NFKC normalization.

        This splits ligatures and combines diacritics.

        Args:
            text: Raw text

        Returns:
            Normalized text, stripped of leading/trailing whitespace
        """
        return unicodedata.normalize("NFKC", text).strip()

    def _parse_bbox(self, title: str) -> BoundingBox | None:
        """Parse a bbox from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            BoundingBox or None if not found
        """
        match = self._bbox_pattern.search(title)
        if not match:
            return None
        try:
            return BoundingBox(
                left=float(match.group(1)),
                top=float(match.group(2)),
                right=float(match.group(3)),
                bottom=float(match.group(4)),
            )
        except ValueError:
            return None

    def _parse_baseline(self, title: str) -> Baseline | None:
        """Parse baseline from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            Baseline or None if not found
        """
        match = self._baseline_pattern.search(title)
        if not match:
            return None
        try:
            return Baseline(
                # Group 1 can match an empty string; treat as zero slope
                slope=float(match.group(1)) if match.group(1) else 0.0,
                intercept=float(match.group(2)),
            )
        except ValueError:
            return None

    def _parse_textangle(self, title: str) -> float | None:
        """Parse textangle from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            Angle in degrees or None if not found
        """
        match = self._textangle_pattern.search(title)
        if not match:
            return None
        try:
            return float(match.group(1))
        except ValueError:
            return None

    def _parse_x_wconf(self, title: str) -> float | None:
        """Parse word confidence from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            Confidence (0-100) or None if not found
        """
        match = self._x_wconf_pattern.search(title)
        if not match:
            return None
        try:
            return float(match.group(1))
        except ValueError:
            return None

    def _parse_ppageno(self, title: str) -> int | None:
        """Parse physical page number from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            Page number or None if not found
        """
        match = self._ppageno_pattern.search(title)
        if not match:
            return None
        try:
            return int(match.group(1))
        except ValueError:
            return None

    def _parse_scan_res(self, title: str) -> float | None:
        """Parse scan resolution (DPI) from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            DPI (using first value if x and y differ) or None if not found
        """
        match = self._scan_res_pattern.search(title)
        if not match:
            return None
        try:
            # Use the first (x) resolution value
            return float(match.group(1))
        except ValueError:
            return None

    def _parse_font_info(self, title: str) -> FontInfo | None:
        """Parse font information from an hOCR title attribute.

        Args:
            title: The title attribute value

        Returns:
            FontInfo or None if no font info found
        """
        font_match = self._x_font_pattern.search(title)
        size_match = self._x_fsize_pattern.search(title)
        if not font_match and not size_match:
            return None
        # Either field may be present on its own
        return FontInfo(
            name=font_match.group(1) if font_match else None,
            size=float(size_match.group(1)) if size_match else None,
        )
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/hocrtransform/hocr_parser.py",
"license": "Mozilla Public License 2.0",
"lines": 414,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/pdfinfo/_contentstream.py | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""PDF content stream interpretation."""
from __future__ import annotations
import re
from collections import defaultdict
from collections.abc import Mapping
from math import hypot, inf, isclose
from typing import NamedTuple
from warnings import warn
from pikepdf import Matrix, Object, PdfInlineImage, parse_content_stream
from ocrmypdf.exceptions import InputFileError
from ocrmypdf.helpers import Resolution
from ocrmypdf.pdfinfo._types import UNIT_SQUARE
class XobjectSettings(NamedTuple):
    """Info about an XObject found in a PDF."""

    # Resource name under which the XObject was invoked
    name: str
    # Transformation matrix (a, b, c, d, e, f) in effect at the draw
    shorthand: tuple[float, float, float, float, float, float]
    # Graphics-state stack depth at the time of the draw operation
    stack_depth: int
class InlineSettings(NamedTuple):
    """Info about an inline image found in a PDF."""

    # The parsed inline image object itself
    iimage: PdfInlineImage
    # Transformation matrix (a, b, c, d, e, f) in effect at the draw
    shorthand: tuple[float, float, float, float, float, float]
    # Graphics-state stack depth at the time of the draw operation
    stack_depth: int
class ContentsInfo(NamedTuple):
    """Info about various objects found in a PDF."""

    # All XObject draws encountered, in stream order
    xobject_settings: list[XobjectSettings]
    # All inline images encountered, in stream order
    inline_images: list[InlineSettings]
    # True if any vector drawing operations were found
    found_vector: bool
    # True if any text drawing operations were found
    found_text: bool
    # XObject draws grouped by resource name
    name_index: Mapping[str, list[XobjectSettings]]
class TextboxInfo(NamedTuple):
    """Info about a text box found in a PDF."""

    # Bounding box (x0, y0, x1, y1) of the text box
    bbox: tuple[float, float, float, float]
    # Whether the text is rendered visibly (vs. invisible render mode)
    is_visible: bool
    # Whether the text appears corrupt/unusable
    is_corrupt: bool
class VectorMarker:
    """Sentinel indicating vector drawing operations were found on a page.

    Stateless marker class; only its presence matters.
    """
class TextMarker:
    """Sentinel indicating text drawing operations were found on a page.

    Stateless marker class; only its presence matters.
    """
def _is_unit_square(shorthand):
    """Return True if *shorthand* is approximately the identity matrix.

    Each of the six matrix entries is compared against UNIT_SQUARE with a
    relative tolerance of 1e-3.
    """
    for actual, expected in zip(map(float, shorthand), UNIT_SQUARE, strict=False):
        if not isclose(actual, expected, rel_tol=1e-3):
            return False
    return True
def _normalize_stack(graphobjs):
"""Convert runs of qQ's in the stack into single graphobjs."""
for operands, operator in graphobjs:
operator = str(operator)
if re.match(r'Q*q+$', operator): # Zero or more Q, one or more q
for char in operator: # Split into individual
yield ([], char) # Yield individual
else:
yield (operands, operator)
def _interpret_contents(contentstream: Object, initial_shorthand=UNIT_SQUARE):
    """Interpret the PDF content stream.

    The stack represents the state of the PDF graphics stack. We are only
    interested in the current transformation matrix (CTM) so we only track
    this object; a full implementation would need to track many other items.

    The CTM is initialized to the mapping from user space to device space.
    PDF units are 1/72". In a PDF viewer or printer this matrix is initialized
    to the transformation to device space. For example if set to
    (1/72, 0, 0, 1/72, 0, 0) then all units would be calculated in inches.

    Images are always considered to be (0, 0) -> (1, 1). Before drawing an
    image there should be a 'cm' that sets up an image coordinate system
    where drawing from (0, 0) -> (1, 1) will draw on the desired area of the
    page.

    PDF units suit our needs so we initialize ctm to the identity matrix.

    According to the PDF specification, the maximum stack depth is 32. Other
    viewers tolerate some amount beyond this. We issue a warning if the
    stack depth exceeds the spec limit and set a hard limit beyond this to
    bound our memory requirements. If the stack underflows behavior is
    undefined in the spec, but we just pretend nothing happened and leave the
    CTM unchanged.

    Returns:
        ContentsInfo summarizing XObject drawings, inline images, and
        whether vector or text painting operators were seen.
    """
    stack = []
    ctm = Matrix(initial_shorthand)
    xobject_settings: list[XobjectSettings] = []
    inline_images: list[InlineSettings] = []
    name_index = defaultdict(lambda: [])
    found_vector = False
    found_text = False
    # Operators that paint vector paths.
    vector_ops = set('S s f F f* B B* b b*'.split())
    # Operators that show text.
    text_showing_ops = set("""TJ Tj " '""".split())
    # Operators needed to track images and the graphics state.
    image_ops = set('BI ID EI q Q Do cm'.split())
    # Restrict parsing to the operators of interest.
    operator_whitelist = ' '.join(vector_ops | text_showing_ops | image_ops)
    for n, graphobj in enumerate(
        _normalize_stack(parse_content_stream(contentstream, operator_whitelist))
    ):
        operands, operator = graphobj
        if operator == 'q':
            stack.append(ctm)
            if len(stack) > 32:  # See docstring
                if len(stack) > 128:
                    raise RuntimeError(
                        f"PDF graphics stack overflowed hard limit at operator {n}"
                    )
                warn("PDF graphics stack overflowed spec limit")
        elif operator == 'Q':
            try:
                ctm = stack.pop()
            except IndexError:
                # Keeping the ctm the same seems to be the only sensible thing
                # to do. Just pretend nothing happened, keep calm and carry on.
                warn("PDF graphics stack underflowed - PDF may be malformed")
        elif operator == 'cm':
            try:
                # Concatenate the new matrix onto the CTM.
                ctm = Matrix(operands) @ ctm
            except ValueError as e:
                raise InputFileError(
                    "PDF content stream is corrupt - this PDF is malformed. "
                    "Use a PDF editor that is capable of visually inspecting the PDF."
                ) from e
        elif operator == 'Do':
            # Drawing of a named XObject; record the CTM and stack depth.
            image_name = operands[0]
            settings = XobjectSettings(
                name=image_name, shorthand=ctm.shorthand, stack_depth=len(stack)
            )
            xobject_settings.append(settings)
            name_index[str(image_name)].append(settings)
        elif operator == 'INLINE IMAGE':  # BI/ID/EI are grouped into this
            iimage = operands[0]
            inline = InlineSettings(
                iimage=iimage, shorthand=ctm.shorthand, stack_depth=len(stack)
            )
            inline_images.append(inline)
        elif operator in vector_ops:
            found_vector = True
        elif operator in text_showing_ops:
            found_text = True
    return ContentsInfo(
        xobject_settings=xobject_settings,
        inline_images=inline_images,
        found_vector=found_vector,
        found_text=found_text,
        name_index=name_index,
    )
def _get_dpi(ctm_shorthand, image_size) -> Resolution:
    """Compute the effective DPI at which an image is drawn.

    PDF image data carries no resolution; the effective DPI is the ratio
    of the image's pixel dimensions to the physical area the content
    stream maps it onto. For a transformation matrix (a, b, c, d, e, f)
    the drawn width is the magnitude of (a, b) and the drawn height is
    the magnitude of (c, d), both in PDF units (1/72 inch). Translation
    (e, f) cancels out of the magnitudes, and rotation/skew are handled
    because vector magnitudes (not individual coefficients) are used.
    Cropping, even beyond the /MediaBox, does not affect the result.

    Args:
        ctm_shorthand: six-tuple transformation matrix in effect when the
            image was drawn.
        image_size: (width_px, height_px) of the image data.

    Returns:
        Resolution in dots per inch for each axis; an axis drawn with
        zero extent yields infinite DPI.
    """
    a, b, c, d, _, _ = ctm_shorthand  # pylint: disable=invalid-name

    def axis_dpi(drawn_pts, pixels, inches_per_pt=72.0):
        # pixels per PDF unit, converted to pixels per inch (72 units/inch)
        if drawn_pts == 0:
            return inf
        return (pixels / drawn_pts) * inches_per_pt

    # Width and height of the drawn image in PDF units.
    drawn_width = hypot(a, b)
    drawn_height = hypot(c, d)
    return Resolution(
        axis_dpi(drawn_width, image_size[0]),
        axis_dpi(drawn_height, image_size[1]),
    )
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/pdfinfo/_contentstream.py",
"license": "Mozilla Public License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/pdfinfo/_image.py | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""PDF image analysis."""
from __future__ import annotations
import logging
from collections.abc import Iterator
from decimal import Decimal
from pikepdf import (
Dictionary,
Matrix,
Name,
Object,
Pdf,
PdfImage,
PdfInlineImage,
Stream,
UnsupportedImageTypeError,
)
from ocrmypdf.helpers import Resolution
from ocrmypdf.pdfinfo._contentstream import (
ContentsInfo,
TextMarker,
VectorMarker,
_get_dpi,
_interpret_contents,
_is_unit_square,
)
from ocrmypdf.pdfinfo._types import (
FRIENDLY_COLORSPACE,
FRIENDLY_COMP,
FRIENDLY_ENCODING,
UNIT_SQUARE,
Colorspace,
Encoding,
)
logger = logging.getLogger()
class ImageInfo:
    """Information about an image found in a PDF.

    This gathers information from pikepdf and pdfminer.six, and is pickle-able
    so that it can be passed to a worker process, unlike objects from those
    libraries.
    """

    # Quantization pattern for presenting DPI values as Decimals.
    # NOTE(review): not referenced in this module — presumably used by
    # consumers; confirm before removing.
    DPI_PREC = Decimal('1.000')

    _comp: int | None  # number of color components, None if undetermined
    _name: str  # resource name of the image within the PDF

    def __init__(
        self,
        *,
        name='',
        pdfimage: Object | None = None,
        inline: PdfInlineImage | None = None,
        shorthand=None,
    ):
        """Initialize an ImageInfo.

        Args:
            name: Resource name of the image as it appears in the PDF.
            pdfimage: XObject image stream; mutually exclusive with *inline*.
            inline: Inline image; mutually exclusive with *pdfimage*.
            shorthand: CTM shorthand in effect when the image is drawn.

        Raises:
            ValueError: If neither *pdfimage* nor *inline* is provided.
        """
        self._name = str(name)
        self._shorthand = shorthand
        pim: PdfInlineImage | PdfImage
        if inline is not None:
            self._origin = 'inline'
            pim = inline
        elif pdfimage is not None and isinstance(pdfimage, Stream):
            self._origin = 'xobject'
            pim = PdfImage(pdfimage)
        else:
            raise ValueError("Either pdfimage or inline must be set")
        self._width = pim.width
        self._height = pim.height

        if (smask := pim.obj.get(Name.SMask, None)) is not None and isinstance(
            smask, Stream | Dictionary
        ):
            # SMask is pretty much an alpha channel, but in PDF it's possible
            # for channel to have different dimensions than the image
            # itself. Some PDF writers use this to create a grayscale stencil
            # mask. For our purposes, the effective size is the size of the
            # larger component (image or smask).
            self._width = max(smask.get(Name.Width, 0), self._width)
            self._height = max(smask.get(Name.Height, 0), self._height)
        if (mask := pim.obj.get(Name.Mask, None)) is not None and isinstance(
            mask, Stream | Dictionary
        ):
            # If the image has a /Mask entry, it has an explicit mask.
            # /Mask can be a Stream or an Array. If it's a Stream,
            # use its /Width and /Height if they are larger than the main
            # image's.
            self._width = max(mask.get(Name.Width, 0), self._width)
            self._height = max(mask.get(Name.Height, 0), self._height)

        # If /ImageMask is true, then this image is a stencil mask
        # (Images that draw with this stencil mask will have a reference to
        # it in their /Mask, but we don't actually need that information)
        if pim.image_mask:
            self._type = 'stencil'
        else:
            self._type = 'image'

        self._bpc = int(pim.bits_per_component)
        if (
            len(pim.filters) == 2
            and pim.filters[0] == '/FlateDecode'
            and pim.filters[1] == '/DCTDecode'
        ):
            # Special case: FlateDecode followed by DCTDecode
            self._enc = Encoding.flate_jpeg
        else:
            # An image with no filters raises IndexError; report None.
            try:
                self._enc = FRIENDLY_ENCODING.get(pim.filters[0])
            except IndexError:
                self._enc = None

        try:
            self._color = FRIENDLY_COLORSPACE.get(pim.colorspace or '')
        except NotImplementedError:
            # pikepdf could not determine the colorspace.
            self._color = None
        if self._enc == Encoding.jpeg2000:
            self._color = Colorspace.jpeg2000

        self._comp = None
        if self._color == Colorspace.icc and isinstance(pim, PdfImage):
            # Component count must be read from the ICC profile itself.
            self._comp = self._init_icc(pim)
        else:
            if isinstance(self._color, Colorspace):
                self._comp = FRIENDLY_COMP.get(self._color)
            # Bit of a hack... infer grayscale if component count is uncertain
            # but encoding only supports monochrome.
            if self._comp is None and self._enc in (Encoding.ccitt, Encoding.jbig2):
                self._comp = FRIENDLY_COMP[Colorspace.gray]

    def _init_icc(self, pim: PdfImage):
        """Return the component count implied by the image's ICC profile.

        Returns None when the profile is missing, corrupt, or its color
        space attribute cannot be read; a warning is logged for the first
        two cases.
        """
        try:
            icc = pim.icc
        except UnsupportedImageTypeError as e:
            logger.warning(
                f"An image with a corrupt or unreadable ICC profile was found. "
                f"Output PDF may not match the input PDF visually: {e}. {self}"
            )
            return None
        # Check the ICC profile to determine actual colorspace
        if icc is None or not hasattr(icc, 'profile'):
            logger.warning(
                f"An image with an ICC profile but no ICC profile data was found. "
                f"The output PDF may not match the input PDF visually. {self}"
            )
            return None
        try:
            if icc.profile.xcolor_space == 'GRAY':
                return 1
            elif icc.profile.xcolor_space == 'CMYK':
                return 4
            else:
                return 3
        except AttributeError:
            return None

    @property
    def name(self):
        """Name of the image as it appears in the PDF."""
        return self._name

    @property
    def type_(self):
        """Type of image, either 'image' or 'stencil'."""
        return self._type

    @property
    def width(self) -> int:
        """Width of the image in pixels."""
        return self._width

    @property
    def height(self) -> int:
        """Height of the image in pixels."""
        return self._height

    @property
    def bpc(self):
        """Bits per component."""
        return self._bpc

    @property
    def color(self):
        """Colorspace of the image, or '?' if it could not be determined."""
        return self._color if self._color is not None else '?'

    @property
    def comp(self):
        """Number of components/channels, or '?' if it could not be determined."""
        return self._comp if self._comp is not None else '?'

    @property
    def enc(self):
        """Encoding of the image, or 'image' if it could not be determined."""
        return self._enc if self._enc is not None else 'image'

    @property
    def renderable(self) -> bool:
        """Whether the image is renderable.

        Some PDFs in the wild have invalid images that are not renderable,
        due to unusual dimensions.

        Stencil masks are also not renderable, since they are not
        drawn, but rather they control how rendering happens.
        """
        return (
            self.dpi.is_finite
            and self.width >= 0
            and self.height >= 0
            and self.type_ != 'stencil'
        )

    @property
    def dpi(self) -> Resolution:
        """Dots per inch of the image.

        Calculated based on where and how the image is drawn in the PDF.
        """
        return _get_dpi(self._shorthand, (self._width, self._height))

    @property
    def printed_area(self) -> float:
        """Physical area of the image in square inches."""
        if not self.renderable:
            return 0.0
        return float((self.width / self.dpi.x) * (self.height / self.dpi.y))

    def __repr__(self):
        """Return a string representation of the image."""
        return (
            f"<ImageInfo '{self.name}' {self.type_} {self.width}×{self.height} "
            f"{self.color} {self.comp} {self.bpc} {self.enc} {self.dpi}>"
        )
def _find_inline_images(contentsinfo: ContentsInfo) -> Iterator[ImageInfo]:
    """Yield an ImageInfo for every inline image drawn in the content stream.

    Inline images carry no resource name, so each one is assigned a
    synthetic name based on its drawing order ('inline-00', 'inline-01', ...).
    """
    for index, settings in enumerate(contentsinfo.inline_images):
        yield ImageInfo(
            name=f'inline-{index:02d}',
            shorthand=settings.shorthand,
            inline=settings.iimage,
        )
def _image_xobjects(container) -> Iterator[tuple[Object, str]]:
    """Yield (image object, resource name) for image XObjects in *container*.

    The container is usually a page, but may also be a Form XObject that
    contains images. Entries that are not /Image subtypes (notably Form
    XObjects, which are traversed elsewhere) are skipped. The resource
    name is yielded alongside the object because the object itself does
    not know its own name.
    """
    if Name.Resources not in container:
        return
    res = container[Name.Resources]
    if Name.XObject not in res:
        return
    for resource_name, xobj in res[Name.XObject].items():
        if xobj is None:
            continue
        if Name.Subtype in xobj and xobj[Name.Subtype] == Name.Image:
            yield (xobj, resource_name)
def _find_regular_images(
    container: Object, contentsinfo: ContentsInfo
) -> Iterator[ImageInfo]:
    """Yield an ImageInfo for each drawing of an XObject image.

    Usually the container is a page, but it could also be a Form XObject
    that contains images. An image that is drawn several times yields one
    ImageInfo per drawing, each with the CTM (and hence DPI) in effect at
    that drawing.
    """
    for pdfimage, resource_name in _image_xobjects(container):
        # .get avoids mutating the defaultdict-backed name index.
        for draw in contentsinfo.name_index.get(resource_name, []):
            if draw.stack_depth == 0 and _is_unit_square(draw.shorthand):
                # At least one PDF in the wild (and test suite) draws an image
                # when the graphics stack depth is 0, meaning that the image
                # gets drawn into a square of 1x1 PDF units (or 1/72",
                # or 0.35 mm). The equivalent DPI will be >100,000. Exclude
                # these from our DPI calculation for the page.
                continue
            yield ImageInfo(name=draw.name, pdfimage=pdfimage, shorthand=draw.shorthand)
def _find_form_xobject_images(pdf: Pdf, container: Object, contentsinfo: ContentsInfo):
    """Find any images that are in Form XObjects in the container.

    The container may be a page, or a parent Form XObject. Recurses into
    each Form XObject (via _process_content_streams) once per time it is
    drawn, using the CTM in effect at that drawing.
    """
    if Name.Resources not in container:
        return
    resources = container[Name.Resources]
    if Name.XObject not in resources:
        return
    xobjs = resources[Name.XObject].as_dict()
    for xobj in xobjs:
        candidate = xobjs[xobj]
        # Only Form XObjects are of interest here; images are handled by
        # _find_regular_images.
        if candidate is None or candidate.get(Name.Subtype) != Name.Form:
            continue
        form_xobject = candidate
        for settings in contentsinfo.xobject_settings:
            if settings.name != xobj:
                continue
            # Find images once for each time this Form XObject is drawn.
            # This could be optimized to cache the multiple drawing events
            # but in practice both Form XObjects and multiple drawing of the
            # same object are both very rare.
            ctm_shorthand = settings.shorthand
            yield from _process_content_streams(
                pdf=pdf, container=form_xobject, shorthand=ctm_shorthand
            )
def _process_content_streams(
    *, pdf: Pdf, container: Object, shorthand=None
) -> Iterator[VectorMarker | TextMarker | ImageInfo]:
    """Find all individual instances of images drawn in the container.

    Usually the container is a page, but it may also be a Form XObject.
    On a typical page images are stored inline or as regular images
    in an XObject.

    Form XObjects may include inline images, XObject images,
    and recursively, other Form XObjects; and also vector graphic objects.

    Every instance of an image being drawn somewhere is flattened and
    treated as a unique image, since if the same image is drawn multiple times
    on one page it may be drawn at differing resolutions, and our objective
    is to find the resolution at which the page can be rastered without
    downsampling.

    Also yields a VectorMarker and/or TextMarker if vector or text
    painting operators were found in the container's content stream.
    """
    if container.get(Name.Type) == Name.Page and Name.Contents in container:
        # A page: start from the caller-provided CTM, or the identity.
        initial_shorthand = shorthand or UNIT_SQUARE
    elif (
        container.get(Name.Type) == Name.XObject
        and container[Name.Subtype] == Name.Form
    ):
        # Set the CTM to the state it was when the "Do" operator was
        # encountered that is drawing this instance of the Form XObject
        ctm = Matrix(shorthand) if shorthand else Matrix()
        # A Form XObject may provide its own matrix to map form space into
        # user space. Get this if one exists
        form_shorthand = container.get(Name.Matrix, Matrix())
        form_matrix = Matrix(form_shorthand)
        # Concatenate form matrix with CTM to ensure CTM is correct for
        # drawing this instance of the XObject
        ctm = form_matrix @ ctm
        initial_shorthand = ctm.shorthand
    else:
        # Not a drawable container; nothing to yield.
        return
    contentsinfo = _interpret_contents(container, initial_shorthand)
    if contentsinfo.found_vector:
        yield VectorMarker()
    if contentsinfo.found_text:
        yield TextMarker()
    yield from _find_inline_images(contentsinfo)
    yield from _find_regular_images(container, contentsinfo)
    yield from _find_form_xobject_images(pdf, container, contentsinfo)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/pdfinfo/_image.py",
"license": "Mozilla Public License 2.0",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/pdfinfo/_types.py | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""PDF type definitions and constants."""
from __future__ import annotations
from enum import Enum, auto
class Colorspace(Enum):
    """Description of common image colorspaces in a PDF."""

    # pylint: disable=invalid-name
    gray = auto()  # /DeviceGray, /CalGray (or /G inline abbreviation)
    rgb = auto()  # /DeviceRGB, /CalRGB (or /RGB)
    cmyk = auto()  # /DeviceCMYK (or /CMYK)
    lab = auto()  # /Lab
    icc = auto()  # /ICCBased - actual colorspace is in the ICC profile
    index = auto()  # /Indexed (or /I) - palette lookup
    sep = auto()  # /Separation
    devn = auto()  # /DeviceN
    pattern = auto()  # /Pattern
    jpeg2000 = auto()  # colorspace determined by the JPEG 2000 codestream
class Encoding(Enum):
    """Description of common image encodings in a PDF."""

    # pylint: disable=invalid-name
    ccitt = auto()  # /CCITTFaxDecode (or /CCF inline abbreviation)
    jpeg = auto()  # /DCTDecode (or /DCT)
    jpeg2000 = auto()  # /JPXDecode
    jbig2 = auto()  # /JBIG2Decode
    asciihex = auto()  # /AHx
    ascii85 = auto()  # /A85
    lzw = auto()  # /LZW
    flate = auto()  # /Fl
    runlength = auto()  # /RL
    flate_jpeg = auto()  # /FlateDecode followed by /DCTDecode
# Rectangle as four floats; presumably (left, bottom, right, top) in PDF
# units — TODO confirm ordering against callers.
FloatRect = tuple[float, float, float, float]

# Map PDF colorspace names (including inline-image abbreviations) to enums.
FRIENDLY_COLORSPACE: dict[str, Colorspace] = {
    '/DeviceGray': Colorspace.gray,
    '/CalGray': Colorspace.gray,
    '/DeviceRGB': Colorspace.rgb,
    '/CalRGB': Colorspace.rgb,
    '/DeviceCMYK': Colorspace.cmyk,
    '/Lab': Colorspace.lab,
    '/ICCBased': Colorspace.icc,
    '/Indexed': Colorspace.index,
    '/Separation': Colorspace.sep,
    '/DeviceN': Colorspace.devn,
    '/Pattern': Colorspace.pattern,
    '/G': Colorspace.gray,  # Abbreviations permitted in inline images
    '/RGB': Colorspace.rgb,
    '/CMYK': Colorspace.cmyk,
    '/I': Colorspace.index,
}

# Map PDF filter names (including inline-image abbreviations) to enums.
FRIENDLY_ENCODING: dict[str, Encoding] = {
    '/CCITTFaxDecode': Encoding.ccitt,
    '/DCTDecode': Encoding.jpeg,
    '/JPXDecode': Encoding.jpeg2000,
    '/JBIG2Decode': Encoding.jbig2,
    '/CCF': Encoding.ccitt,  # Abbreviations permitted in inline images
    '/DCT': Encoding.jpeg,
    '/AHx': Encoding.asciihex,
    '/A85': Encoding.ascii85,
    '/LZW': Encoding.lzw,
    '/Fl': Encoding.flate,
    '/RL': Encoding.runlength,
}

# Number of color components for colorspaces where it is fixed.
FRIENDLY_COMP: dict[Colorspace, int] = {
    Colorspace.gray: 1,
    Colorspace.rgb: 3,
    Colorspace.cmyk: 4,
    Colorspace.lab: 3,
    Colorspace.index: 1,
}

# Identity transformation matrix shorthand (a, b, c, d, e, f).
UNIT_SQUARE = (1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/pdfinfo/_types.py",
"license": "Mozilla Public License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:src/ocrmypdf/pdfinfo/_worker.py | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""PDF page info worker process handling."""
from __future__ import annotations
import atexit
import logging
from collections.abc import Container, Sequence
from contextlib import contextmanager
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING
from pikepdf import Pdf
from ocrmypdf._concurrent import Executor
from ocrmypdf._progressbar import ProgressBar
from ocrmypdf.exceptions import InputFileError
from ocrmypdf.helpers import available_cpu_count, pikepdf_enable_mmap
if TYPE_CHECKING:
from ocrmypdf.pdfinfo.info import PageInfo
from ocrmypdf.pdfinfo.layout import PdfMinerState
logger = logging.getLogger()
worker_pdf = None # pylint: disable=invalid-name
def _pdf_pageinfo_sync_init(pdf: Pdf, infile: Path, pdfminer_loglevel):
    """Initialize a worker for gathering page info.

    When ``pdf`` is None (process workers cannot share an already-open
    Pdf), open a private copy of *infile* into the module-global
    ``worker_pdf`` and arrange for it to be closed at worker exit.
    """
    global worker_pdf  # pylint: disable=global-statement,invalid-name
    pikepdf_enable_mmap()
    # Propagate the parent's pdfminer log level into this worker.
    logging.getLogger('pdfminer').setLevel(pdfminer_loglevel)

    # If the pdf is not opened, open a copy for our worker process to use
    if pdf is None:
        worker_pdf = Pdf.open(infile)

        def on_process_close():
            worker_pdf.close()

        # Close when this process exits
        atexit.register(on_process_close)
@contextmanager
def _pdf_pageinfo_sync_pdf(thread_pdf: Pdf | None, infile: Path):
if thread_pdf is not None:
yield thread_pdf
elif worker_pdf is not None:
yield worker_pdf
else:
with Pdf.open(infile) as pdf:
yield pdf
def _pdf_pageinfo_sync(
    pageno: int,
    thread_pdf: Pdf | None,
    infile: Path,
    check_pages: Container[int],
    detailed_analysis: bool,
    miner_state: PdfMinerState | None,
) -> PageInfo:
    """Build the PageInfo for one page; runs inside a worker."""
    # Import here to avoid circular import - info.py imports this module,
    # but PageInfo is defined in info.py
    from ocrmypdf.pdfinfo.info import PageInfo

    with _pdf_pageinfo_sync_pdf(thread_pdf, infile) as pdf:
        return PageInfo(
            pdf, pageno, infile, check_pages, detailed_analysis, miner_state
        )
def _pdf_pageinfo_concurrent(
    pdf,
    executor: Executor,
    max_workers: int,
    use_threads: bool,
    infile,
    progbar,
    check_pages,
    detailed_analysis: bool = False,
    miner_state: PdfMinerState | None = None,
) -> Sequence[PageInfo | None]:
    """Gather PageInfo for every page of *pdf*, fanning out to workers.

    Args:
        pdf: Open pikepdf.Pdf. Shared with workers only when threads are
            used; process workers reopen *infile* themselves.
        executor: Concurrency strategy used to run the per-page tasks.
        max_workers: Upper bound on worker count; None means the number
            of available CPUs.
        use_threads: Use threads rather than processes.
        infile: Path to the PDF on disk, for workers that must reopen it.
        progbar: Whether to display a progress bar.
        check_pages: Pages for which detailed checks are requested.
        detailed_analysis: Whether to perform detailed text analysis.
        miner_state: Shared pdfminer state, if any.

    Returns:
        A list indexed by page number with one PageInfo per page.

    Raises:
        InputFileError: If a page could not be read.
    """
    pages: list[PageInfo | None] = [None] * len(pdf.pages)

    def update_pageinfo(page: PageInfo, pbar: ProgressBar):
        # Runs in the parent as each worker task finishes.
        if not page:
            # Fixed message: previously read "Could read a page in the PDF".
            raise InputFileError("Could not read a page in the PDF")
        pages[page.pageno] = page
        pbar.update()

    if max_workers is None:
        max_workers = available_cpu_count()

    total = len(pdf.pages)
    n_workers = min(1 + len(pages) // 4, max_workers)
    if n_workers == 1:
        # If we decided on only one worker, there is no point in using
        # a separate process.
        use_threads = True
    if use_threads and n_workers > 1:
        # If we are using threads, there is no point in using more than one
        # worker thread - they will just fight over the GIL.
        n_workers = 1

    # If we use a thread, we can pass the already-open Pdf for them to use
    # If we use processes, we pass a None which tells the init function to open its
    # own
    initial_pdf = pdf if use_threads else None

    contexts = (
        (n, initial_pdf, infile, check_pages, detailed_analysis, miner_state)
        for n in range(total)
    )
    assert n_workers == 1 if use_threads else n_workers >= 1, "Not multithreadable"
    logger.debug(
        f"Gathering info with {n_workers} "
        + ('thread' if use_threads else 'process')
        + " workers"
    )
    executor(
        use_threads=use_threads,
        max_workers=n_workers,
        progress_kwargs=dict(
            total=total, desc="Scanning contents", unit='page', disable=not progbar
        ),
        worker_initializer=partial(
            _pdf_pageinfo_sync_init,
            initial_pdf,
            infile,
            logging.getLogger('pdfminer').level,
        ),
        task=_pdf_pageinfo_sync,
        task_arguments=contexts,
        task_finished=update_pageinfo,
    )
    return pages
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "src/ocrmypdf/pdfinfo/_worker.py",
"license": "Mozilla Public License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
ocrmypdf/OCRmyPDF:tests/test_fpdf_renderer.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Tests for fpdf2-based PDF renderer."""
from __future__ import annotations
from pathlib import Path
import pytest
from ocrmypdf.font import MultiFontManager
from ocrmypdf.fpdf_renderer import (
DebugRenderOptions,
Fpdf2MultiPageRenderer,
Fpdf2PdfRenderer,
)
from ocrmypdf.hocrtransform.hocr_parser import HocrParser
from ocrmypdf.models.ocr_element import OcrClass
@pytest.fixture
def font_dir():
    """Path to the bundled font data directory (src/ocrmypdf/data)."""
    repo_root = Path(__file__).parent.parent
    return repo_root / "src" / "ocrmypdf" / "data"
@pytest.fixture
def multi_font_manager(font_dir):
    """Provide a MultiFontManager backed by the bundled fonts."""
    manager = MultiFontManager(font_dir)
    return manager
@pytest.fixture
def resources():
    """Path to the directory holding the test resource files."""
    here = Path(__file__).parent
    return here / "resources"
class TestFpdf2RendererImports:
    """Test that all fpdf2 renderer modules can be imported."""

    def test_imports(self):
        """Test that all fpdf_renderer modules can be imported."""
        # Re-import inside the test so an ImportError shows up as a test
        # failure rather than a collection error.
        from ocrmypdf.fpdf_renderer import (
            DebugRenderOptions,
            Fpdf2MultiPageRenderer,
            Fpdf2PdfRenderer,
        )

        assert DebugRenderOptions is not None
        assert Fpdf2PdfRenderer is not None
        assert Fpdf2MultiPageRenderer is not None
class TestDebugRenderOptions:
    """Test DebugRenderOptions dataclass."""

    def test_defaults(self):
        """Test default values."""
        opts = DebugRenderOptions()
        # All debug overlays should be off by default.
        assert opts.render_baseline is False
        assert opts.render_line_bbox is False
        assert opts.render_word_bbox is False

    def test_custom_values(self):
        """Test custom values."""
        opts = DebugRenderOptions(
            render_baseline=True,
            render_line_bbox=True,
            render_word_bbox=True,
        )
        assert opts.render_baseline is True
        assert opts.render_line_bbox is True
        assert opts.render_word_bbox is True
class TestFpdf2PdfRenderer:
    """Test Fpdf2PdfRenderer.

    Rendering tests are smoke tests: they assert only that a non-empty
    PDF file is produced.
    """

    def test_requires_page_element(self, multi_font_manager):
        """Test that renderer requires ocr_page element."""
        from ocrmypdf.models.ocr_element import BoundingBox, OcrElement

        # Create a non-page element
        word = OcrElement(
            ocr_class=OcrClass.WORD,
            text="test",
            bbox=BoundingBox(left=0, top=0, right=100, bottom=20),
        )

        with pytest.raises(ValueError, match="Root element must be ocr_page"):
            Fpdf2PdfRenderer(
                page=word,
                dpi=300,
                multi_font_manager=multi_font_manager,
            )

    def test_requires_bbox(self, multi_font_manager):
        """Test that renderer requires page with bounding box."""
        from ocrmypdf.models.ocr_element import OcrElement

        page = OcrElement(ocr_class=OcrClass.PAGE)

        with pytest.raises(ValueError, match="Page must have bounding box"):
            Fpdf2PdfRenderer(
                page=page,
                dpi=300,
                multi_font_manager=multi_font_manager,
            )

    def test_render_simple_page(self, multi_font_manager, tmp_path):
        """Test rendering a simple page with one word."""
        from ocrmypdf.models.ocr_element import BoundingBox, OcrElement

        # Create a simple page with one word
        word = OcrElement(
            ocr_class=OcrClass.WORD,
            text="Hello",
            bbox=BoundingBox(left=100, top=100, right=200, bottom=130),
        )
        line = OcrElement(
            ocr_class=OcrClass.LINE,
            bbox=BoundingBox(left=100, top=100, right=200, bottom=130),
            children=[word],
        )
        page = OcrElement(
            ocr_class=OcrClass.PAGE,
            bbox=BoundingBox(left=0, top=0, right=612, bottom=792),
            children=[line],
        )

        renderer = Fpdf2PdfRenderer(
            page=page,
            dpi=72,  # 1:1 mapping to PDF points
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "test_simple.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0

    def test_render_invisible_text(self, multi_font_manager, tmp_path):
        """Test rendering invisible text (OCR layer)."""
        from ocrmypdf.models.ocr_element import BoundingBox, OcrElement

        word = OcrElement(
            ocr_class=OcrClass.WORD,
            text="Invisible",
            bbox=BoundingBox(left=100, top=100, right=250, bottom=130),
        )
        line = OcrElement(
            ocr_class=OcrClass.LINE,
            bbox=BoundingBox(left=100, top=100, right=250, bottom=130),
            children=[word],
        )
        page = OcrElement(
            ocr_class=OcrClass.PAGE,
            bbox=BoundingBox(left=0, top=0, right=612, bottom=792),
            children=[line],
        )

        renderer = Fpdf2PdfRenderer(
            page=page,
            dpi=72,
            multi_font_manager=multi_font_manager,
            invisible_text=True,  # This is the default
        )

        output_path = tmp_path / "test_invisible.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0
class TestFpdf2MultiPageRenderer:
    """Test Fpdf2MultiPageRenderer."""

    def test_requires_pages(self, multi_font_manager):
        """Test that renderer requires at least one page."""
        with pytest.raises(ValueError, match="No pages to render"):
            renderer = Fpdf2MultiPageRenderer(
                pages_data=[],
                multi_font_manager=multi_font_manager,
            )
            renderer.render(Path("/tmp/test.pdf"))

    def test_render_multiple_pages(self, multi_font_manager, tmp_path):
        """Test rendering multiple pages."""
        from ocrmypdf.models.ocr_element import BoundingBox, OcrElement

        pages_data = []
        for i in range(3):
            word = OcrElement(
                ocr_class=OcrClass.WORD,
                text=f"Page{i+1}",
                bbox=BoundingBox(left=100, top=100, right=200, bottom=130),
            )
            line = OcrElement(
                ocr_class=OcrClass.LINE,
                bbox=BoundingBox(left=100, top=100, right=200, bottom=130),
                children=[word],
            )
            page = OcrElement(
                ocr_class=OcrClass.PAGE,
                bbox=BoundingBox(left=0, top=0, right=612, bottom=792),
                children=[line],
            )
            # Entries are (page_number, page_element, dpi).
            pages_data.append((i + 1, page, 72))

        renderer = Fpdf2MultiPageRenderer(
            pages_data=pages_data,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "test_multipage.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0
class TestFpdf2RendererWithHocr:
    """Test fpdf2 renderer with actual hOCR files.

    Each test is skipped when its hOCR resource file is absent.
    """

    def test_render_latin_hocr(self, resources, multi_font_manager, tmp_path):
        """Test rendering Latin text from hOCR."""
        hocr_path = resources / "latin.hocr"
        if not hocr_path.exists():
            pytest.skip("latin.hocr not found")

        parser = HocrParser(hocr_path)
        page = parser.parse()

        # Ensure we got a page
        assert page.ocr_class == OcrClass.PAGE
        assert page.bbox is not None

        renderer = Fpdf2PdfRenderer(
            page=page,
            dpi=300,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "latin_fpdf2.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0

    def test_render_cjk_hocr(self, resources, multi_font_manager, tmp_path):
        """Test rendering CJK text from hOCR."""
        hocr_path = resources / "cjk.hocr"
        if not hocr_path.exists():
            pytest.skip("cjk.hocr not found")

        parser = HocrParser(hocr_path)
        page = parser.parse()

        renderer = Fpdf2PdfRenderer(
            page=page,
            dpi=300,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "cjk_fpdf2.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0

    def test_render_arabic_hocr(self, resources, multi_font_manager, tmp_path):
        """Test rendering Arabic text from hOCR."""
        hocr_path = resources / "arabic.hocr"
        if not hocr_path.exists():
            pytest.skip("arabic.hocr not found")

        parser = HocrParser(hocr_path)
        page = parser.parse()

        renderer = Fpdf2PdfRenderer(
            page=page,
            dpi=300,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "arabic_fpdf2.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0

    def test_render_hello_world_scripts_hocr(
        self, resources, multi_font_manager, tmp_path
    ):
        """Test rendering comprehensive multilingual 'Hello!' hOCR file.

        This tests all major scripts including:
        - Latin (English, Spanish, French, German, Italian, Polish, Portuguese, Turkish)
        - Cyrillic (Russian)
        - Greek
        - CJK (Chinese Simplified, Chinese Traditional, Japanese, Korean)
        - Devanagari (Hindi)
        - Arabic (RTL)
        - Hebrew (RTL)

        Also includes rotated baselines to exercise skew handling.
        """
        hocr_path = resources / "hello_world_scripts.hocr"
        if not hocr_path.exists():
            pytest.skip("hello_world_scripts.hocr not found")

        parser = HocrParser(hocr_path)
        page = parser.parse()

        # Verify we parsed the page correctly
        assert page.ocr_class == OcrClass.PAGE
        assert page.bbox is not None
        # Should have 2550x3300 at 300 DPI
        assert page.bbox.right == 2550
        assert page.bbox.bottom == 3300

        # Test with visible text for visual inspection
        renderer = Fpdf2PdfRenderer(
            page=page,
            dpi=300,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "hello_world_scripts_fpdf2.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0

    def test_render_hello_world_scripts_multipage(
        self, resources, multi_font_manager, tmp_path
    ):
        """Test rendering hello_world_scripts.hocr using MultiPageRenderer.

        Uses Fpdf2MultiPageRenderer to render the multilingual test file,
        demonstrating font handling across all major writing systems.
        """
        hocr_path = resources / "hello_world_scripts.hocr"
        if not hocr_path.exists():
            pytest.skip("hello_world_scripts.hocr not found")

        parser = HocrParser(hocr_path)
        page = parser.parse()

        # Build pages_data list as expected by MultiPageRenderer
        pages_data = [(1, page, 300)]  # (page_number, page_element, dpi)

        renderer = Fpdf2MultiPageRenderer(
            pages_data=pages_data,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        )

        output_path = tmp_path / "hello_world_scripts_multipage.pdf"
        renderer.render(output_path)

        assert output_path.exists()
        assert output_path.stat().st_size > 0
class TestWordSegmentation:
    """Verify that rendered PDFs segment words properly for pdfminer.six."""

    def test_word_segmentation_with_pdfminer(self, multi_font_manager, tmp_path):
        """Explicit spaces between Latin words survive text extraction.

        pdfminer.six (and similar PDF readers) rely on space characters to
        segment words; without them adjacent words run together.
        """
        from pdfminer.high_level import extract_text

        from ocrmypdf.models.ocr_element import BoundingBox, OcrElement

        # One line holding three horizontally separated Latin words.
        words = [
            OcrElement(
                ocr_class=OcrClass.WORD,
                text=text,
                bbox=BoundingBox(left=left, top=100, right=right, bottom=130),
            )
            for text, left, right in [
                ("Hello", 100, 200),
                ("World", 220, 320),
                ("Test", 340, 420),
            ]
        ]
        line = OcrElement(
            ocr_class=OcrClass.LINE,
            bbox=BoundingBox(left=100, top=100, right=420, bottom=130),
            children=words,
        )
        page = OcrElement(
            ocr_class=OcrClass.PAGE,
            bbox=BoundingBox(left=0, top=0, right=612, bottom=792),
            children=[line],
        )
        pdf = tmp_path / "test_word_segmentation.pdf"
        Fpdf2PdfRenderer(
            page=page,
            dpi=72,  # 1:1 mapping from pixels to PDF points
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        ).render(pdf)
        extracted = extract_text(str(pdf))
        # Every word is present...
        assert "Hello" in extracted
        assert "World" in extracted
        assert "Test" in extracted
        # ...and no word is fused with its neighbour.
        assert "HelloWorld" not in extracted
        assert "WorldTest" not in extracted
        # Splitting on whitespace recovers each word individually.
        tokens = extracted.split()
        assert "Hello" in tokens
        assert "World" in tokens
        assert "Test" in tokens

    def test_cjk_no_spurious_spaces(self, multi_font_manager, tmp_path):
        """Adjacent CJK words must not get spaces inserted between them.

        CJK scripts do not separate words with spaces, so the renderer
        should not insert any between neighbouring CJK words.
        """
        from pdfminer.high_level import extract_text

        from ocrmypdf.models.ocr_element import BoundingBox, OcrElement

        # 你好 = "Hello", 世界 = "World" (Chinese).
        words = [
            OcrElement(
                ocr_class=OcrClass.WORD,
                text=text,
                bbox=BoundingBox(left=left, top=100, right=right, bottom=130),
            )
            for text, left, right in [("你好", 100, 160), ("世界", 170, 230)]
        ]
        line = OcrElement(
            ocr_class=OcrClass.LINE,
            bbox=BoundingBox(left=100, top=100, right=230, bottom=130),
            children=words,
        )
        page = OcrElement(
            ocr_class=OcrClass.PAGE,
            bbox=BoundingBox(left=0, top=0, right=612, bottom=792),
            children=[line],
        )
        pdf = tmp_path / "test_cjk_segmentation.pdf"
        Fpdf2PdfRenderer(
            page=page,
            dpi=72,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        ).render(pdf)
        extracted = extract_text(str(pdf))
        assert "你好" in extracted
        assert "世界" in extracted
        # pdfminer may emit layout whitespace of its own, so strip it before
        # checking that nothing was injected between the CJK characters.
        squeezed = extracted.replace(" ", "").replace("\n", "")
        assert "你好世界" in squeezed or ("你好" in squeezed and "世界" in squeezed)

    def test_latin_hocr_word_segmentation(
        self, resources, multi_font_manager, tmp_path
    ):
        """Word segmentation holds for a real Latin hOCR resource."""
        from pdfminer.high_level import extract_text

        source = resources / "latin.hocr"
        if not source.exists():
            pytest.skip("latin.hocr not found")
        page = HocrParser(source).parse()
        pdf = tmp_path / "latin_segmentation.pdf"
        Fpdf2PdfRenderer(
            page=page,
            dpi=300,
            multi_font_manager=multi_font_manager,
            invisible_text=False,
        ).render(pdf)
        extracted = extract_text(str(pdf))
        # At least one word must be extractable...
        assert len(extracted.split()) > 0
        # ...and the flattened text must contain separating spaces.
        assert " " in extracted.replace("\n", " ")
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_fpdf_renderer.py",
"license": "Mozilla Public License 2.0",
"lines": 432,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_hocr_parser.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for HocrParser class."""
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
import pytest
from ocrmypdf.hocrtransform import (
HocrParseError,
HocrParser,
OcrClass,
)
@pytest.fixture
def simple_hocr(tmp_path) -> Path:
    """Write a minimal valid hOCR document (one line, two words) to disk."""
    markup = dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
         "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
            <title>Test</title>
        </head>
        <body>
            <div class='ocr_page' title='bbox 0 0 1000 500; ppageno 0'>
                <p class='ocr_par' lang='eng' dir='ltr'>
                    <span class='ocr_line' title='bbox 100 100 900 150; baseline 0.01 -5'>
                        <span class='ocrx_word' title='bbox 100 100 200 150; x_wconf 95'>Hello</span>
                        <span class='ocrx_word' title='bbox 250 100 350 150; x_wconf 90'>World</span>
                    </span>
                </p>
            </div>
        </body>
        </html>
    """)
    path = tmp_path / "simple.hocr"
    path.write_text(markup, encoding='utf-8')
    return path
@pytest.fixture
def multiline_hocr(tmp_path) -> Path:
    """Write an hOCR document with two paragraphs (eng + deu) and three lines."""
    markup = dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <html>
        <body>
        <div class='ocr_page' title='bbox 0 0 1000 1000'>
            <p class='ocr_par' lang='eng'>
                <span class='ocr_line' title='bbox 100 100 900 150'>
                    <span class='ocrx_word' title='bbox 100 100 200 150'>Line</span>
                    <span class='ocrx_word' title='bbox 210 100 280 150'>one</span>
                </span>
                <span class='ocr_line' title='bbox 100 200 900 250'>
                    <span class='ocrx_word' title='bbox 100 200 200 250'>Line</span>
                    <span class='ocrx_word' title='bbox 210 200 280 250'>two</span>
                </span>
            </p>
            <p class='ocr_par' lang='deu'>
                <span class='ocr_line' title='bbox 100 400 900 450'>
                    <span class='ocrx_word' title='bbox 100 400 200 450'>German</span>
                    <span class='ocrx_word' title='bbox 210 400 280 450'>text</span>
                </span>
            </p>
        </div>
        </body>
        </html>
    """)
    path = tmp_path / "multiline.hocr"
    path.write_text(markup, encoding='utf-8')
    return path
@pytest.fixture
def rtl_hocr(tmp_path) -> Path:
    """Write an hOCR document containing right-to-left Arabic text."""
    markup = dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <html>
        <body>
        <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par' lang='ara' dir='rtl'>
                <span class='ocr_line' title='bbox 100 100 900 150'>
                    <span class='ocrx_word' title='bbox 100 100 200 150'>مرحبا</span>
                </span>
            </p>
        </div>
        </body>
        </html>
    """)
    path = tmp_path / "rtl.hocr"
    path.write_text(markup, encoding='utf-8')
    return path
@pytest.fixture
def rotated_hocr(tmp_path) -> Path:
    """Write an hOCR document whose line carries a textangle (rotation)."""
    markup = dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <html>
        <body>
        <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par' lang='eng'>
                <span class='ocr_line' title='bbox 100 100 900 150; textangle 5.5'>
                    <span class='ocrx_word' title='bbox 100 100 200 150'>Rotated</span>
                </span>
            </p>
        </div>
        </body>
        </html>
    """)
    path = tmp_path / "rotated.hocr"
    path.write_text(markup, encoding='utf-8')
    return path
@pytest.fixture
def header_hocr(tmp_path) -> Path:
    """Write an hOCR document mixing header, body-line, and caption spans."""
    markup = dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <html>
        <body>
        <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par' lang='eng'>
                <span class='ocr_header' title='bbox 100 50 900 100'>
                    <span class='ocrx_word' title='bbox 100 50 300 100'>Chapter</span>
                    <span class='ocrx_word' title='bbox 310 50 400 100'>One</span>
                </span>
                <span class='ocr_line' title='bbox 100 150 900 200'>
                    <span class='ocrx_word' title='bbox 100 150 200 200'>Body</span>
                    <span class='ocrx_word' title='bbox 210 150 280 200'>text</span>
                </span>
                <span class='ocr_caption' title='bbox 100 300 900 350'>
                    <span class='ocrx_word' title='bbox 100 300 200 350'>Figure</span>
                    <span class='ocrx_word' title='bbox 210 300 250 350'>1</span>
                </span>
            </p>
        </div>
        </body>
        </html>
    """)
    path = tmp_path / "header.hocr"
    path.write_text(markup, encoding='utf-8')
    return path
@pytest.fixture
def font_info_hocr(tmp_path) -> Path:
    """Write an hOCR document whose word carries x_font / x_fsize metadata."""
    markup = dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <html>
        <body>
        <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par' lang='eng'>
                <span class='ocr_line' title='bbox 100 100 900 150'>
                    <span class='ocrx_word' title='bbox 100 100 200 150; x_font Arial; x_fsize 12.5'>Styled</span>
                </span>
            </p>
        </div>
        </body>
        </html>
    """)
    path = tmp_path / "font_info.hocr"
    path.write_text(markup, encoding='utf-8')
    return path
class TestHocrParserBasic:
    """Core parsing behaviour against the simple single-line fixture."""

    def test_parse_simple_hocr(self, simple_hocr):
        page = HocrParser(simple_hocr).parse()
        assert page.ocr_class == OcrClass.PAGE
        assert page.bbox is not None
        # Fixture declares 'bbox 0 0 1000 500'.
        assert page.bbox.width == 1000
        assert page.bbox.height == 500

    def test_parse_page_number(self, simple_hocr):
        # Fixture title carries 'ppageno 0'.
        page = HocrParser(simple_hocr).parse()
        assert page.page_number == 0

    def test_parse_paragraphs(self, simple_hocr):
        page = HocrParser(simple_hocr).parse()
        assert len(page.paragraphs) == 1
        par = page.paragraphs[0]
        assert par.ocr_class == OcrClass.PARAGRAPH
        assert par.language == "eng"
        assert par.direction == "ltr"

    def test_parse_lines(self, simple_hocr):
        page = HocrParser(simple_hocr).parse()
        assert len(page.lines) == 1
        line = page.lines[0]
        assert line.ocr_class == OcrClass.LINE
        assert line.bbox is not None
        # Fixture declares 'baseline 0.01 -5'.
        assert line.baseline is not None
        assert line.baseline.slope == pytest.approx(0.01)
        assert line.baseline.intercept == -5

    def test_parse_words(self, simple_hocr):
        page = HocrParser(simple_hocr).parse()
        assert len(page.words) == 2
        assert [w.text for w in page.words] == ["Hello", "World"]

    def test_parse_word_confidence(self, simple_hocr):
        # x_wconf values are percentages, exposed as 0..1 fractions.
        page = HocrParser(simple_hocr).parse()
        assert page.words[0].confidence == pytest.approx(0.95)
        assert page.words[1].confidence == pytest.approx(0.90)

    def test_parse_word_bbox(self, simple_hocr):
        page = HocrParser(simple_hocr).parse()
        box = page.words[0].bbox
        assert box is not None
        assert (box.left, box.top, box.right, box.bottom) == (100, 100, 200, 150)
class TestHocrParserMultiline:
    """Parsing documents with several lines and paragraphs."""

    def test_multiple_lines(self, multiline_hocr):
        page = HocrParser(multiline_hocr).parse()
        assert len(page.paragraphs) == 2
        # Two lines in the English paragraph plus one in the German one.
        assert len(page.lines) == 3

    def test_multiple_paragraphs_languages(self, multiline_hocr):
        page = HocrParser(multiline_hocr).parse()
        assert [p.language for p in page.paragraphs] == ["eng", "deu"]

    def test_word_count(self, multiline_hocr):
        page = HocrParser(multiline_hocr).parse()
        assert len(page.words) == 6  # two words on each of the three lines
class TestHocrParserRTL:
    """Right-to-left text handling."""

    def test_rtl_direction(self, rtl_hocr):
        par = HocrParser(rtl_hocr).parse().paragraphs[0]
        assert par.direction == "rtl"
        assert par.language == "ara"

    def test_rtl_line_inherits_direction(self, rtl_hocr):
        # The line has no dir attribute of its own, so it inherits rtl
        # from the enclosing paragraph.
        line = HocrParser(rtl_hocr).parse().lines[0]
        assert line.direction == "rtl"
class TestHocrParserRotation:
    """Rotated-text (textangle) handling."""

    def test_textangle(self, rotated_hocr):
        # Fixture declares 'textangle 5.5' on the line.
        line = HocrParser(rotated_hocr).parse().lines[0]
        assert line.textangle == pytest.approx(5.5)
class TestHocrParserLineTypes:
    """hOCR line variants: headers, body lines, and captions."""

    def test_header_line(self, header_hocr):
        page = HocrParser(header_hocr).parse()
        assert len(page.lines) == 3
        # All three line flavours must be represented.
        seen = {line.ocr_class for line in page.lines}
        assert OcrClass.HEADER in seen
        assert OcrClass.LINE in seen
        assert OcrClass.CAPTION in seen

    def test_all_line_types_have_words(self, header_hocr):
        page = HocrParser(header_hocr).parse()
        for line in page.lines:
            assert len(line.children) > 0
class TestHocrParserFontInfo:
    """x_font / x_fsize metadata parsing."""

    def test_font_name_and_size(self, font_info_hocr):
        word = HocrParser(font_info_hocr).parse().words[0]
        assert word.font is not None
        assert word.font.name == "Arial"
        assert word.font.size == pytest.approx(12.5)
class TestHocrParserErrors:
    """Error reporting for missing or malformed input."""

    def test_missing_file(self, tmp_path):
        # Nonexistent paths are rejected at construction time.
        with pytest.raises(FileNotFoundError):
            HocrParser(tmp_path / "nonexistent.hocr")

    def test_invalid_xml(self, tmp_path):
        path = tmp_path / "invalid.hocr"
        path.write_text("<html><body>not closed", encoding='utf-8')
        # Malformed XML is also rejected at construction time.
        with pytest.raises(HocrParseError):
            HocrParser(path)

    def test_missing_ocr_page(self, tmp_path):
        path = tmp_path / "no_page.hocr"
        path.write_text(
            "<html><body><p>No ocr_page</p></body></html>", encoding='utf-8'
        )
        # Construction succeeds; the missing page is detected by parse().
        parser = HocrParser(path)
        with pytest.raises(HocrParseError, match="No ocr_page"):
            parser.parse()

    def test_missing_page_bbox(self, tmp_path):
        path = tmp_path / "no_bbox.hocr"
        path.write_text(
            "<html><body><div class='ocr_page'>No bbox</div></body></html>",
            encoding='utf-8',
        )
        parser = HocrParser(path)
        with pytest.raises(HocrParseError, match="bbox"):
            parser.parse()
class TestHocrParserEdgeCases:
    """Test edge cases in HocrParser."""
    def test_empty_word_text(self, tmp_path):
        """Words with empty text should be skipped."""
        content = dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <html>
            <body>
            <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par'>
            <span class='ocr_line' title='bbox 100 100 900 150'>
            <span class='ocrx_word' title='bbox 100 100 200 150'></span>
            <span class='ocrx_word' title='bbox 210 100 300 150'>Valid</span>
            </span>
            </p>
            </div>
            </body>
            </html>
        """)
        hocr_file = tmp_path / "empty_word.hocr"
        hocr_file.write_text(content, encoding='utf-8')
        parser = HocrParser(hocr_file)
        page = parser.parse()
        # Only the non-empty word should be parsed
        assert len(page.words) == 1
        assert page.words[0].text == "Valid"
    def test_whitespace_only_word(self, tmp_path):
        """Words with only whitespace should be skipped."""
        content = dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <html>
            <body>
            <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par'>
            <span class='ocr_line' title='bbox 100 100 900 150'>
            <span class='ocrx_word' title='bbox 100 100 200 150'>   </span>
            <span class='ocrx_word' title='bbox 210 100 300 150'>Valid</span>
            </span>
            </p>
            </div>
            </body>
            </html>
        """)
        hocr_file = tmp_path / "whitespace_word.hocr"
        hocr_file.write_text(content, encoding='utf-8')
        parser = HocrParser(hocr_file)
        page = parser.parse()
        # The whitespace-only word is dropped just like an empty one.
        assert len(page.words) == 1
        assert page.words[0].text == "Valid"
    def test_line_without_bbox(self, tmp_path):
        """Lines without bbox should be skipped."""
        content = dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <html>
            <body>
            <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par'>
            <span class='ocr_line'>
            <span class='ocrx_word' title='bbox 100 100 200 150'>Word</span>
            </span>
            <span class='ocr_line' title='bbox 100 200 900 250'>
            <span class='ocrx_word' title='bbox 100 200 200 250'>Valid</span>
            </span>
            </p>
            </div>
            </body>
            </html>
        """)
        hocr_file = tmp_path / "no_line_bbox.hocr"
        hocr_file.write_text(content, encoding='utf-8')
        parser = HocrParser(hocr_file)
        page = parser.parse()
        # Only line with bbox should be parsed
        assert len(page.lines) == 1
        # The word inside the bbox-less line is dropped along with its line.
        assert page.words[0].text == "Valid"
    def test_unicode_normalization(self, tmp_path):
        """Text should be NFKC normalized."""
        # Use a string with combining characters
        # NOTE(review): the word below should be the single U+FB01 ligature
        # character so that NFKC normalization has something to do — verify
        # the encoding survived any copy/paste of this file.
        content = dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <html>
            <body>
            <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par'>
            <span class='ocr_line' title='bbox 100 100 900 150'>
            <span class='ocrx_word' title='bbox 100 100 200 150'>fi</span>
            </span>
            </p>
            </div>
            </body>
            </html>
        """)
        hocr_file = tmp_path / "unicode.hocr"
        hocr_file.write_text(content, encoding='utf-8')
        parser = HocrParser(hocr_file)
        page = parser.parse()
        # fi ligature should be normalized to "fi"
        assert page.words[0].text == "fi"
    def test_words_directly_under_page(self, tmp_path):
        """Test fallback for words directly under page (no paragraph structure)."""
        content = dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <html>
            <body>
            <div class='ocr_page' title='bbox 0 0 1000 500'>
            <span class='ocrx_word' title='bbox 100 100 200 150'>Direct</span>
            <span class='ocrx_word' title='bbox 210 100 300 150'>Word</span>
            </div>
            </body>
            </html>
        """)
        hocr_file = tmp_path / "direct_words.hocr"
        hocr_file.write_text(content, encoding='utf-8')
        parser = HocrParser(hocr_file)
        page = parser.parse()
        # Words should be parsed as direct children
        assert len(page.children) == 2
        assert page.children[0].text == "Direct"
        assert page.children[1].text == "Word"
    def test_no_namespace(self, tmp_path):
        """Test parsing hOCR without XHTML namespace."""
        # Unlike the other fixtures, this document has no xmlns declaration;
        # the parser must still find the hOCR elements.
        content = dedent("""\
            <html>
            <body>
            <div class='ocr_page' title='bbox 0 0 1000 500'>
            <p class='ocr_par'>
            <span class='ocr_line' title='bbox 100 100 900 150'>
            <span class='ocrx_word' title='bbox 100 100 200 150'>NoNS</span>
            </span>
            </p>
            </div>
            </body>
            </html>
        """)
        hocr_file = tmp_path / "no_namespace.hocr"
        hocr_file.write_text(content, encoding='utf-8')
        parser = HocrParser(hocr_file)
        page = parser.parse()
        assert len(page.words) == 1
        assert page.words[0].text == "NoNS"
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_hocr_parser.py",
"license": "Mozilla Public License 2.0",
"lines": 442,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_json_serialization.py | """Test JSON serialization of OcrOptions for multiprocessing compatibility."""
from __future__ import annotations
import multiprocessing
from io import BytesIO
from pathlib import Path, PurePath
import pytest
from ocrmypdf._options import OcrOptions
from ocrmypdf.builtin_plugins.tesseract_ocr import TesseractOptions
@pytest.fixture(autouse=True)
def register_plugin_models():
    """Ensure the tesseract plugin model is registered for every test."""
    OcrOptions.register_plugin_models({'tesseract': TesseractOptions})
    yield
    # NOTE: no explicit teardown is performed after the test.
def worker_function(options_json: str) -> str:
    """Rebuild OcrOptions from JSON inside a worker and report key fields.

    Runs in a separate process, so plugin models must be re-registered
    before deserialization. Returns the observed values as a JSON string.
    """
    import json

    from ocrmypdf._options import OcrOptions
    from ocrmypdf.builtin_plugins.tesseract_ocr import TesseractOptions

    OcrOptions.register_plugin_models({'tesseract': TesseractOptions})
    options = OcrOptions.model_validate_json_safe(options_json)
    # Count only user-added extra_attrs; plugin cache keys start with '_'.
    user_attrs = [k for k in options.extra_attrs.keys() if not k.startswith('_')]
    return json.dumps(
        {
            'input_file': str(options.input_file),
            'output_file': str(options.output_file),
            'languages': options.languages,
            'optimize': options.optimize,
            'tesseract_timeout': options.tesseract.timeout,
            'fast_web_view': options.fast_web_view,
            'extra_attrs_count': len(user_attrs),
        }
    )
def test_json_serialization_multiprocessing():
    """OcrOptions survives a JSON round trip into worker processes."""
    import json

    options = OcrOptions(
        input_file=Path('/test/input.pdf'),
        output_file=Path('/test/output.pdf'),
        languages=['eng', 'deu'],
        optimize=2,
        tesseract_timeout=120.0,
        fast_web_view=2.5,
        deskew=True,
        clean=False,
    )
    options.extra_attrs['custom_field'] = 'test_value'
    options.extra_attrs['numeric_field'] = 42

    payload = options.model_dump_json_safe()

    # First, round trip within the current process.
    restored = OcrOptions.model_validate_json_safe(payload)
    assert restored.input_file == options.input_file
    assert restored.output_file == options.output_file
    assert restored.languages == options.languages
    assert restored.optimize == options.optimize
    assert restored.tesseract_timeout == options.tesseract.timeout
    assert restored.fast_web_view == options.fast_web_view
    assert restored.deskew == options.deskew
    assert restored.clean == options.clean

    # Plugin cache keys (leading '_') are excluded from the comparison.
    def user_attrs(attrs):
        return {k: v for k, v in attrs.items() if not k.startswith('_')}

    assert user_attrs(restored.extra_attrs) == user_attrs(options.extra_attrs)

    # Now round trip through real worker processes.
    with multiprocessing.Pool(processes=2) as pool:
        results = pool.map(worker_function, [payload, payload])
    for raw in results:
        seen = json.loads(raw)
        assert PurePath(seen['input_file']) == PurePath('/test/input.pdf')
        assert PurePath(seen['output_file']) == PurePath('/test/output.pdf')
        assert seen['languages'] == ['eng', 'deu']
        assert seen['optimize'] == 2
        assert seen['tesseract_timeout'] == 120.0
        assert seen['fast_web_view'] == 2.5
        assert seen['extra_attrs_count'] == 2  # custom_field and numeric_field
def test_json_serialization_with_streams():
    """Stream inputs/outputs degrade to 'stream' placeholders in JSON."""
    options = OcrOptions(
        input_file=BytesIO(b'fake pdf data'),
        output_file=BytesIO(),
        languages=['eng'],
        optimize=1,
    )
    # Streams cannot be serialized, so placeholders come back instead.
    restored = OcrOptions.model_validate_json_safe(options.model_dump_json_safe())
    assert restored.input_file == 'stream'
    assert restored.output_file == 'stream'
    assert restored.languages == ['eng']
    assert restored.optimize == 1
def test_json_serialization_with_none_values():
    """Default and None field values survive the JSON round trip."""
    options = OcrOptions(
        input_file=Path('/test/input.pdf'),
        output_file=Path('/test/output.pdf'),
        languages=['eng'],
        # Every remaining field keeps its model default.
    )
    restored = OcrOptions.model_validate_json_safe(options.model_dump_json_safe())
    # Defaults — including genuine Nones — come back unchanged.
    assert restored.tesseract_timeout is None  # default
    assert restored.fast_web_view == 1.0  # default is 1.0, not None
    assert (
        restored.color_conversion_strategy == "LeaveColorUnchanged"
    )  # default
    assert restored.pdfa_image_compression is None  # actually None by default
    # Explicitly supplied values are preserved too.
    assert restored.input_file == options.input_file
    assert restored.output_file == options.output_file
    assert restored.languages == options.languages
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_json_serialization.py",
"license": "Mozilla Public License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_multi_font_manager.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for MultiFontManager and FontProvider."""
from __future__ import annotations
import logging
from pathlib import Path
import pytest
from ocrmypdf.font import BuiltinFontProvider, FontManager, MultiFontManager
@pytest.fixture
def font_dir():
    """Path to the bundled font data directory inside the source tree."""
    return Path(__file__).parents[1] / "src" / "ocrmypdf" / "data"
@pytest.fixture
def multi_font_manager(font_dir):
    """A fresh MultiFontManager backed by the bundled font directory."""
    manager = MultiFontManager(font_dir)
    return manager
def has_cjk_font(manager: MultiFontManager) -> bool:
    """True when the optional system-provided CJK Noto font was loaded."""
    loaded = manager.fonts
    return 'NotoSansCJK-Regular' in loaded
def has_arabic_font(manager: MultiFontManager) -> bool:
    """True when the optional system-provided Arabic Noto font was loaded."""
    loaded = manager.fonts
    return 'NotoSansArabic-Regular' in loaded
def has_devanagari_font(manager: MultiFontManager) -> bool:
    """True when the optional system-provided Devanagari Noto font was loaded."""
    loaded = manager.fonts
    return 'NotoSansDevanagari-Regular' in loaded
# Marker for tests that require CJK fonts.
# The condition is a string, so pytest evaluates it lazily in this module's
# namespace; it constructs a throwaway MultiFontManager() with its default
# font directory to probe for the font.
# NOTE(review): no test visible in this section of the file applies this
# marker (they call pytest.skip directly) — confirm it is used later in the
# file before removing it.
requires_cjk = pytest.mark.skipif(
    "not has_cjk_font(MultiFontManager())",
    reason="CJK font not available (not installed on system)"
)
# --- MultiFontManager Initialization Tests ---
def test_init_loads_builtin_fonts(multi_font_manager):
    """The bundled fonts are always present after initialization."""
    loaded = multi_font_manager.fonts
    # Only NotoSans-Regular and Occulta ship with ocrmypdf itself;
    # Arabic, Devanagari and CJK fonts are optional system-provided extras.
    assert 'NotoSans-Regular' in loaded
    assert 'Occulta' in loaded
    assert len(loaded) >= 2
def test_missing_font_directory():
    """A nonexistent font directory raises while loading the fallback font."""
    missing = Path("/nonexistent/path")
    with pytest.raises(FileNotFoundError):
        MultiFontManager(missing)
# --- Arabic Script Language Tests ---
# These tests require Arabic fonts to be installed on the system
def test_select_font_for_arabic_language(multi_font_manager):
    """The 'ara' language hint selects the Arabic Noto font."""
    if not has_arabic_font(multi_font_manager):
        pytest.skip("Arabic font not available")
    chosen = multi_font_manager.select_font_for_word("مرحبا", "ara")
    assert chosen == multi_font_manager.fonts['NotoSansArabic-Regular']
def test_select_font_for_persian_language(multi_font_manager):
    """The 'per' (Persian) hint maps to the Arabic-script Noto font."""
    if not has_arabic_font(multi_font_manager):
        pytest.skip("Arabic font not available")
    chosen = multi_font_manager.select_font_for_word("سلام", "per")
    assert chosen == multi_font_manager.fonts['NotoSansArabic-Regular']
def test_select_font_for_urdu_language(multi_font_manager):
    """The 'urd' (Urdu) hint maps to the Arabic-script Noto font."""
    if not has_arabic_font(multi_font_manager):
        pytest.skip("Arabic font not available")
    chosen = multi_font_manager.select_font_for_word("ہیلو", "urd")
    assert chosen == multi_font_manager.fonts['NotoSansArabic-Regular']
def test_farsi_language_code(multi_font_manager):
    """'fas' (alternative Farsi code) also maps to the Arabic font."""
    if not has_arabic_font(multi_font_manager):
        pytest.skip("Arabic font not available")
    chosen = multi_font_manager.select_font_for_word("سلام", "fas")
    assert chosen == multi_font_manager.fonts['NotoSansArabic-Regular']
# --- Devanagari Script Language Tests ---
# These tests require Devanagari fonts to be installed on the system
def test_select_font_for_hindi_language(multi_font_manager):
    """The 'hin' (Hindi) hint selects the Devanagari Noto font."""
    if not has_devanagari_font(multi_font_manager):
        pytest.skip("Devanagari font not available")
    chosen = multi_font_manager.select_font_for_word("नमस्ते", "hin")
    assert chosen == multi_font_manager.fonts['NotoSansDevanagari-Regular']
def test_select_font_for_sanskrit_language(multi_font_manager):
    """The 'san' (Sanskrit) hint selects the Devanagari Noto font."""
    if not has_devanagari_font(multi_font_manager):
        pytest.skip("Devanagari font not available")
    chosen = multi_font_manager.select_font_for_word("संस्कृतम्", "san")
    assert chosen == multi_font_manager.fonts['NotoSansDevanagari-Regular']
def test_select_font_for_marathi_language(multi_font_manager):
    """The 'mar' (Marathi) hint selects the Devanagari Noto font."""
    if not has_devanagari_font(multi_font_manager):
        pytest.skip("Devanagari font not available")
    chosen = multi_font_manager.select_font_for_word("मराठी", "mar")
    assert chosen == multi_font_manager.fonts['NotoSansDevanagari-Regular']
def test_select_font_for_nepali_language(multi_font_manager):
    """The 'nep' (Nepali) hint selects the Devanagari Noto font."""
    if not has_devanagari_font(multi_font_manager):
        pytest.skip("Devanagari font not available")
    chosen = multi_font_manager.select_font_for_word("नेपाली", "nep")
    assert chosen == multi_font_manager.fonts['NotoSansDevanagari-Regular']
# --- CJK Language Tests ---
# These tests require CJK fonts to be installed on the system
def test_select_font_for_chinese_language(multi_font_manager):
    """The ISO 639-3 'zho' hint selects the CJK Noto font."""
    if not has_cjk_font(multi_font_manager):
        pytest.skip("CJK font not available")
    chosen = multi_font_manager.select_font_for_word("你好", "zho")
    assert chosen == multi_font_manager.fonts['NotoSansCJK-Regular']
def test_select_font_for_chinese_generic(multi_font_manager):
    """The generic 'chi' code selects the CJK Noto font."""
    if not has_cjk_font(multi_font_manager):
        pytest.skip("CJK font not available")
    chosen = multi_font_manager.select_font_for_word("中文", "chi")
    assert chosen == multi_font_manager.fonts['NotoSansCJK-Regular']
def test_select_font_for_chinese_simplified(multi_font_manager):
    """Tesseract's 'chi_sim' code selects the CJK Noto font."""
    if not has_cjk_font(multi_font_manager):
        pytest.skip("CJK font not available")
    chosen = multi_font_manager.select_font_for_word("简体字", "chi_sim")
    assert chosen == multi_font_manager.fonts['NotoSansCJK-Regular']
def test_select_font_for_chinese_traditional(multi_font_manager):
    """Tesseract's 'chi_tra' code selects the CJK Noto font."""
    if not has_cjk_font(multi_font_manager):
        pytest.skip("CJK font not available")
    chosen = multi_font_manager.select_font_for_word("漢字", "chi_tra")
    assert chosen == multi_font_manager.fonts['NotoSansCJK-Regular']
def test_select_font_for_japanese_language(multi_font_manager):
"""Test font selection with Japanese language hint."""
if not has_cjk_font(multi_font_manager):
pytest.skip("CJK font not available")
font_manager = multi_font_manager.select_font_for_word("こんにちは", "jpn")
assert font_manager == multi_font_manager.fonts['NotoSansCJK-Regular']
def test_select_font_for_korean_language(multi_font_manager):
"""Test font selection with Korean language hint."""
if not has_cjk_font(multi_font_manager):
pytest.skip("CJK font not available")
font_manager = multi_font_manager.select_font_for_word("안녕하세요", "kor")
assert font_manager == multi_font_manager.fonts['NotoSansCJK-Regular']
# --- Latin/English Tests ---
def test_select_font_for_english_text(multi_font_manager):
"""Test font selection for English text."""
font_manager = multi_font_manager.select_font_for_word("Hello World", "eng")
assert font_manager == multi_font_manager.fonts['NotoSans-Regular']
def test_select_font_without_language_hint(multi_font_manager):
"""Test font selection without language hint falls back to glyph checking."""
font_manager = multi_font_manager.select_font_for_word("Hello", None)
assert font_manager == multi_font_manager.fonts['NotoSans-Regular']
# --- Fallback Behavior Tests ---
def test_select_font_arabic_text_without_language_hint(multi_font_manager):
"""Test that Arabic text is handled via fallback without language hint."""
if not has_arabic_font(multi_font_manager):
pytest.skip("Arabic font not available")
font_manager = multi_font_manager.select_font_for_word("مرحبا", None)
# Should get NotoSansArabic-Regular via fallback chain glyph checking
assert font_manager == multi_font_manager.fonts['NotoSansArabic-Regular']
def test_devanagari_text_without_language_hint(multi_font_manager):
"""Test that Devanagari text is handled via fallback without language hint."""
# NotoSans-Regular includes Devanagari glyphs, so it's selected first in fallback
font_manager = multi_font_manager.select_font_for_word("नमस्ते", None)
# Could be NotoSans-Regular or NotoSansDevanagari-Regular depending on availability
assert font_manager is not None
def test_cjk_text_without_language_hint(multi_font_manager):
"""Test that CJK text is handled via fallback without language hint."""
if not has_cjk_font(multi_font_manager):
pytest.skip("CJK font not available")
font_manager = multi_font_manager.select_font_for_word("你好", None)
assert font_manager == multi_font_manager.fonts['NotoSansCJK-Regular']
def test_fallback_to_occulta_font(multi_font_manager):
"""Test that unsupported characters fall back to Occulta.ttf."""
# Use a character unlikely to be in any standard font
font_manager = multi_font_manager.select_font_for_word("test", "xyz")
# Should return some valid font
assert font_manager in multi_font_manager.fonts.values()
def test_fallback_fonts_constant(multi_font_manager):
"""Test that FALLBACK_FONTS contains expected fonts."""
# Check that core fonts are in fallback list
assert 'NotoSans-Regular' in MultiFontManager.FALLBACK_FONTS
assert 'NotoSansArabic-Regular' in MultiFontManager.FALLBACK_FONTS
assert 'NotoSansDevanagari-Regular' in MultiFontManager.FALLBACK_FONTS
assert 'NotoSansCJK-Regular' in MultiFontManager.FALLBACK_FONTS
# Only NotoSans-Regular is bundled; other scripts are system fonts
assert 'NotoSans-Regular' in multi_font_manager.fonts
# --- Glyph Coverage Tests ---
def test_has_all_glyphs_for_english(multi_font_manager):
"""Test glyph coverage checking for English text."""
assert multi_font_manager.has_all_glyphs('NotoSans-Regular', "Hello World")
assert multi_font_manager.has_all_glyphs('NotoSans-Regular', "café")
def test_has_all_glyphs_for_arabic(multi_font_manager):
"""Test glyph coverage checking for Arabic text."""
if not has_arabic_font(multi_font_manager):
pytest.skip("Arabic font not available")
assert multi_font_manager.has_all_glyphs('NotoSansArabic-Regular', "مرحبا")
def test_has_all_glyphs_for_devanagari(multi_font_manager):
"""Test glyph coverage checking for Devanagari text."""
if not has_devanagari_font(multi_font_manager):
pytest.skip("Devanagari font not available")
assert multi_font_manager.has_all_glyphs('NotoSansDevanagari-Regular', "नमस्ते")
def test_has_all_glyphs_for_cjk(multi_font_manager):
"""Test glyph coverage checking for CJK text."""
if not has_cjk_font(multi_font_manager):
pytest.skip("CJK font not available")
assert multi_font_manager.has_all_glyphs('NotoSansCJK-Regular', "你好")
def test_empty_text_has_all_glyphs(multi_font_manager):
"""Test that empty text returns True for glyph coverage."""
assert multi_font_manager.has_all_glyphs('NotoSans-Regular', "")
def test_has_all_glyphs_missing_font(multi_font_manager):
"""Test that has_all_glyphs returns False for non-existent font."""
assert not multi_font_manager.has_all_glyphs('NonExistentFont', "test")
# --- Caching Tests ---
def test_font_selection_caching(multi_font_manager):
"""Test that font selection results are cached."""
font1 = multi_font_manager.select_font_for_word("Hello", "eng")
cache_key = ("Hello", "eng")
assert cache_key in multi_font_manager._selection_cache
font2 = multi_font_manager.select_font_for_word("Hello", "eng")
assert font1 == font2
# --- Language Font Map Tests ---
def test_language_font_map_coverage():
"""Test that LANGUAGE_FONT_MAP has valid structure."""
# Only NotoSans-Regular is bundled now
# This test just verifies the structure is valid
for font_name in MultiFontManager.LANGUAGE_FONT_MAP.values():
# All font names should be valid strings
assert isinstance(font_name, str)
assert font_name.startswith('NotoSans')
# --- get_all_fonts Tests ---
def test_get_all_fonts(multi_font_manager):
"""Test get_all_fonts returns all loaded fonts."""
all_fonts = multi_font_manager.get_all_fonts()
assert isinstance(all_fonts, dict)
# At least 2 builtin fonts should be loaded (NotoSans-Regular and Occulta)
assert len(all_fonts) >= 2
assert 'NotoSans-Regular' in all_fonts
assert 'Occulta' in all_fonts
# Arabic, Devanagari, CJK are optional (system fonts)
# --- FontProvider Tests ---
class MockFontProvider:
"""Mock FontProvider for testing missing fonts."""
def __init__(
self, available_fonts: dict[str, FontManager], fallback: FontManager
):
"""Initialize mock font provider with given fonts."""
self._fonts = available_fonts
self._fallback = fallback
def get_font(self, font_name: str) -> FontManager | None:
return self._fonts.get(font_name)
def get_available_fonts(self) -> list[str]:
return list(self._fonts.keys())
def get_fallback_font(self) -> FontManager:
return self._fallback
def test_custom_font_provider(font_dir):
"""Test that custom FontProvider can be injected."""
fonts = {
'NotoSans-Regular': FontManager(font_dir / 'NotoSans-Regular.ttf'),
'Occulta': FontManager(font_dir / 'Occulta.ttf'),
}
provider = MockFontProvider(fonts, fonts['Occulta'])
manager = MultiFontManager(font_provider=provider)
# Should only have the fonts we provided
assert len(manager.fonts) == 2
assert 'NotoSans-Regular' in manager.fonts
assert 'Occulta' in manager.fonts
def test_missing_font_uses_fallback(font_dir):
"""Test that missing fonts gracefully fall back."""
fonts = {
'NotoSans-Regular': FontManager(font_dir / 'NotoSans-Regular.ttf'),
'Occulta': FontManager(font_dir / 'Occulta.ttf'),
}
provider = MockFontProvider(fonts, fonts['Occulta'])
manager = MultiFontManager(font_provider=provider)
# Arabic text should fall back to Occulta since NotoSansArabic is missing
font = manager.select_font_for_word("مرحبا", "ara")
assert font == fonts['Occulta']
def test_builtin_font_provider_loads_expected_fonts(font_dir):
"""Test BuiltinFontProvider loads all expected builtin fonts."""
provider = BuiltinFontProvider(font_dir)
available = provider.get_available_fonts()
assert 'NotoSans-Regular' in available
assert 'Occulta' in available
# Only Latin (NotoSans) and glyphless fallback (Occulta) are bundled.
# All other scripts (Arabic, Devanagari, CJK, etc.) are discovered
# from system fonts by SystemFontProvider to reduce package size.
assert len(available) == 2
def test_builtin_font_provider_get_font(font_dir):
"""Test BuiltinFontProvider.get_font returns correct fonts."""
provider = BuiltinFontProvider(font_dir)
font = provider.get_font('NotoSans-Regular')
assert font is not None
assert isinstance(font, FontManager)
missing = provider.get_font('NonExistent')
assert missing is None
def test_builtin_font_provider_get_fallback(font_dir):
"""Test BuiltinFontProvider.get_fallback_font returns Occulta font."""
provider = BuiltinFontProvider(font_dir)
fallback = provider.get_fallback_font()
assert fallback is not None
assert fallback == provider.get_font('Occulta')
def test_builtin_font_provider_missing_font_logs_warning(tmp_path, font_dir, caplog):
"""Test that missing expected fonts log a warning."""
# Create minimal font directory with only Occulta.ttf
(tmp_path / 'Occulta.ttf').write_bytes((font_dir / 'Occulta.ttf').read_bytes())
with caplog.at_level(logging.WARNING):
provider = BuiltinFontProvider(tmp_path)
# Should have logged warnings for missing fonts
assert 'NotoSans-Regular' in caplog.text
assert 'not found' in caplog.text
# But Occulta should be loaded
assert provider.get_fallback_font() is not None
def test_builtin_font_provider_missing_occulta_raises(tmp_path):
"""Test that missing Occulta.ttf raises FileNotFoundError."""
with pytest.raises(FileNotFoundError, match="Required fallback font"):
BuiltinFontProvider(tmp_path)
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_multi_font_manager.py",
"license": "Mozilla Public License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_null_ocr_engine.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for NullOcrEngine (--ocr-engine none).
Tests verify that the Null OCR engine exists and functions correctly
for scenarios where users want PDF processing without OCR.
"""
from __future__ import annotations
from pathlib import Path
from unittest.mock import MagicMock
import pytest
class TestNullOcrEngineExists:
"""Test that NullOcrEngine plugin exists and is loadable."""
def test_null_ocr_module_importable(self):
"""null_ocr module should be importable."""
from ocrmypdf.builtin_plugins import null_ocr
assert null_ocr is not None
def test_null_ocr_engine_class_exists(self):
"""NullOcrEngine class should exist."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
assert NullOcrEngine is not None
class TestNullOcrEngineInterface:
"""Test NullOcrEngine implements OcrEngine interface."""
def test_version_returns_none(self):
"""NullOcrEngine.version() should return 'none'."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
assert NullOcrEngine.version() == "none"
def test_creator_tag(self):
"""NullOcrEngine.creator_tag() should indicate no OCR."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
tag = NullOcrEngine.creator_tag(MagicMock())
tag_lower = tag.lower()
assert "no ocr" in tag_lower or "null" in tag_lower or "none" in tag_lower
def test_languages_returns_empty_set(self):
"""NullOcrEngine.languages() should return empty set."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
langs = NullOcrEngine.languages(MagicMock())
assert langs == set()
def test_supports_generate_ocr_returns_true(self):
"""NullOcrEngine should support generate_ocr()."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
assert NullOcrEngine.supports_generate_ocr() is True
def test_get_orientation_returns_zero(self):
"""NullOcrEngine.get_orientation() should return angle=0."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
result = NullOcrEngine.get_orientation(Path("test.png"), MagicMock())
assert result.angle == 0
def test_get_deskew_returns_zero(self):
"""NullOcrEngine.get_deskew() should return 0.0."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
result = NullOcrEngine.get_deskew(Path("test.png"), MagicMock())
assert result == 0.0
class TestNullOcrEngineGenerateOcr:
"""Test NullOcrEngine.generate_ocr() output."""
@pytest.fixture
def sample_image(self, tmp_path):
"""Create a simple test image."""
from PIL import Image
img_path = tmp_path / "test.png"
img = Image.new('RGB', (100, 100), color='white')
img.save(img_path, dpi=(300, 300))
return img_path
def test_generate_ocr_returns_tuple(self, sample_image):
"""generate_ocr() should return (OcrElement, str) tuple."""
from ocrmypdf import OcrElement
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
result = NullOcrEngine.generate_ocr(sample_image, MagicMock(), 0)
assert isinstance(result, tuple)
assert len(result) == 2
assert isinstance(result[0], OcrElement)
assert isinstance(result[1], str)
def test_generate_ocr_returns_empty_text(self, sample_image):
"""generate_ocr() should return empty text string."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
_, text = NullOcrEngine.generate_ocr(sample_image, MagicMock(), 0)
assert text == ""
def test_generate_ocr_returns_page_element(self, sample_image):
"""generate_ocr() should return OcrElement with ocr_class PAGE."""
from ocrmypdf import OcrClass
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
ocr_tree, _ = NullOcrEngine.generate_ocr(sample_image, MagicMock(), 0)
assert ocr_tree.ocr_class == OcrClass.PAGE
def test_generate_ocr_page_has_correct_dimensions(self, sample_image):
"""generate_ocr() page element should have image dimensions."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
ocr_tree, _ = NullOcrEngine.generate_ocr(sample_image, MagicMock(), 0)
# Image is 100x100
assert ocr_tree.bbox.right == 100
assert ocr_tree.bbox.bottom == 100
class TestOcrEngineOption:
"""Test --ocr-engine CLI option."""
def test_ocr_engine_option_accepted(self):
"""CLI should accept --ocr-engine option."""
from ocrmypdf.cli import get_parser
parser = get_parser()
# Should not raise
args = parser.parse_args(['--ocr-engine', 'none', 'in.pdf', 'out.pdf'])
assert args.ocr_engine == 'none'
def test_ocr_engine_choices_include_none(self):
"""--ocr-engine should include 'none' as a choice."""
from ocrmypdf.cli import get_parser
parser = get_parser()
# Find the --ocr-engine action
for action in parser._actions:
if '--ocr-engine' in action.option_strings:
assert 'none' in action.choices
break
else:
pytest.fail("--ocr-engine option not found")
def test_ocr_engine_choices_include_auto(self):
"""--ocr-engine should include 'auto' as default."""
from ocrmypdf.cli import get_parser
parser = get_parser()
for action in parser._actions:
if '--ocr-engine' in action.option_strings:
assert 'auto' in action.choices
assert action.default == 'auto'
break
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_null_ocr_engine.py",
"license": "Mozilla Public License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
ocrmypdf/OCRmyPDF:tests/test_ocr_element.py | # SPDX-FileCopyrightText: 2025 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Unit tests for OcrElement dataclass and related classes."""
from __future__ import annotations
import pytest
from ocrmypdf.hocrtransform import (
Baseline,
BoundingBox,
FontInfo,
OcrClass,
OcrElement,
)
class TestBoundingBox:
"""Tests for BoundingBox dataclass."""
def test_basic_creation(self):
bbox = BoundingBox(left=10, top=20, right=100, bottom=50)
assert bbox.left == 10
assert bbox.top == 20
assert bbox.right == 100
assert bbox.bottom == 50
def test_width_height(self):
bbox = BoundingBox(left=10, top=20, right=110, bottom=70)
assert bbox.width == 100
assert bbox.height == 50
def test_zero_size_box(self):
bbox = BoundingBox(left=10, top=20, right=10, bottom=20)
assert bbox.width == 0
assert bbox.height == 0
def test_invalid_left_right(self):
with pytest.raises(ValueError, match="right.*left"):
BoundingBox(left=100, top=20, right=10, bottom=50)
def test_invalid_top_bottom(self):
with pytest.raises(ValueError, match="bottom.*top"):
BoundingBox(left=10, top=50, right=100, bottom=20)
class TestBaseline:
"""Tests for Baseline dataclass."""
def test_defaults(self):
baseline = Baseline()
assert baseline.slope == 0.0
assert baseline.intercept == 0.0
def test_with_values(self):
baseline = Baseline(slope=0.01, intercept=-5)
assert baseline.slope == 0.01
assert baseline.intercept == -5
class TestFontInfo:
"""Tests for FontInfo dataclass."""
def test_defaults(self):
font = FontInfo()
assert font.name is None
assert font.size is None
assert font.bold is False
assert font.italic is False
def test_with_values(self):
font = FontInfo(name="Arial", size=12.0, bold=True)
assert font.name == "Arial"
assert font.size == 12.0
assert font.bold is True
assert font.italic is False
class TestOcrElement:
"""Tests for OcrElement dataclass."""
def test_minimal_element(self):
elem = OcrElement(ocr_class=OcrClass.WORD, text="hello")
assert elem.ocr_class == "ocrx_word"
assert elem.text == "hello"
assert elem.bbox is None
assert elem.children == []
def test_element_with_bbox(self):
bbox = BoundingBox(left=0, top=0, right=100, bottom=50)
elem = OcrElement(ocr_class=OcrClass.LINE, bbox=bbox)
assert elem.bbox == bbox
assert elem.bbox.width == 100
def test_element_hierarchy(self):
word1 = OcrElement(ocr_class=OcrClass.WORD, text="Hello")
word2 = OcrElement(ocr_class=OcrClass.WORD, text="World")
line = OcrElement(ocr_class=OcrClass.LINE, children=[word1, word2])
paragraph = OcrElement(ocr_class=OcrClass.PARAGRAPH, children=[line])
page = OcrElement(ocr_class=OcrClass.PAGE, children=[paragraph])
assert len(page.children) == 1
assert len(page.children[0].children) == 1
assert len(page.children[0].children[0].children) == 2
def test_iter_by_class_single(self):
word = OcrElement(ocr_class=OcrClass.WORD, text="test")
line = OcrElement(ocr_class=OcrClass.LINE, children=[word])
page = OcrElement(ocr_class=OcrClass.PAGE, children=[line])
words = page.iter_by_class(OcrClass.WORD)
assert len(words) == 1
assert words[0].text == "test"
def test_iter_by_class_multiple(self):
words = [
OcrElement(ocr_class=OcrClass.WORD, text="one"),
OcrElement(ocr_class=OcrClass.WORD, text="two"),
OcrElement(ocr_class=OcrClass.WORD, text="three"),
]
line = OcrElement(ocr_class=OcrClass.LINE, children=words)
page = OcrElement(ocr_class=OcrClass.PAGE, children=[line])
result = page.iter_by_class(OcrClass.WORD)
assert len(result) == 3
assert [w.text for w in result] == ["one", "two", "three"]
def test_iter_by_class_multiple_types(self):
line = OcrElement(ocr_class=OcrClass.LINE)
header = OcrElement(ocr_class=OcrClass.HEADER)
caption = OcrElement(ocr_class=OcrClass.CAPTION)
page = OcrElement(ocr_class=OcrClass.PAGE, children=[line, header, caption])
result = page.iter_by_class(OcrClass.LINE, OcrClass.HEADER)
assert len(result) == 2
def test_find_by_class(self):
word = OcrElement(ocr_class=OcrClass.WORD, text="found")
line = OcrElement(ocr_class=OcrClass.LINE, children=[word])
page = OcrElement(ocr_class=OcrClass.PAGE, children=[line])
result = page.find_by_class(OcrClass.WORD)
assert result is not None
assert result.text == "found"
def test_find_by_class_not_found(self):
line = OcrElement(ocr_class=OcrClass.LINE)
page = OcrElement(ocr_class=OcrClass.PAGE, children=[line])
result = page.find_by_class(OcrClass.WORD)
assert result is None
def test_get_text_recursive_leaf(self):
word = OcrElement(ocr_class=OcrClass.WORD, text="hello")
assert word.get_text_recursive() == "hello"
def test_get_text_recursive_nested(self):
word1 = OcrElement(ocr_class=OcrClass.WORD, text="Hello")
word2 = OcrElement(ocr_class=OcrClass.WORD, text="World")
line = OcrElement(ocr_class=OcrClass.LINE, children=[word1, word2])
assert line.get_text_recursive() == "Hello World"
def test_words_property(self):
words = [
OcrElement(ocr_class=OcrClass.WORD, text="a"),
OcrElement(ocr_class=OcrClass.WORD, text="b"),
]
line = OcrElement(ocr_class=OcrClass.LINE, children=words)
page = OcrElement(ocr_class=OcrClass.PAGE, children=[line])
assert len(page.words) == 2
assert page.words[0].text == "a"
def test_lines_property(self):
line1 = OcrElement(ocr_class=OcrClass.LINE)
line2 = OcrElement(ocr_class=OcrClass.HEADER) # Also a line type
par = OcrElement(ocr_class=OcrClass.PARAGRAPH, children=[line1, line2])
page = OcrElement(ocr_class=OcrClass.PAGE, children=[par])
assert len(page.lines) == 2
def test_paragraphs_property(self):
par1 = OcrElement(ocr_class=OcrClass.PARAGRAPH)
par2 = OcrElement(ocr_class=OcrClass.PARAGRAPH)
page = OcrElement(ocr_class=OcrClass.PAGE, children=[par1, par2])
assert len(page.paragraphs) == 2
def test_direction_ltr(self):
elem = OcrElement(ocr_class=OcrClass.PARAGRAPH, direction="ltr")
assert elem.direction == "ltr"
def test_direction_rtl(self):
elem = OcrElement(ocr_class=OcrClass.PARAGRAPH, direction="rtl")
assert elem.direction == "rtl"
def test_language(self):
elem = OcrElement(ocr_class=OcrClass.PARAGRAPH, language="eng")
assert elem.language == "eng"
def test_baseline(self):
baseline = Baseline(slope=0.01, intercept=-3)
elem = OcrElement(ocr_class=OcrClass.LINE, baseline=baseline)
assert elem.baseline.slope == 0.01
assert elem.baseline.intercept == -3
def test_textangle(self):
elem = OcrElement(ocr_class=OcrClass.LINE, textangle=5.0)
assert elem.textangle == 5.0
def test_confidence(self):
elem = OcrElement(ocr_class=OcrClass.WORD, confidence=0.95)
assert elem.confidence == 0.95
def test_page_properties(self):
elem = OcrElement(
ocr_class=OcrClass.PAGE,
dpi=300.0,
page_number=0,
logical_page_number=1,
)
assert elem.dpi == 300.0
assert elem.page_number == 0
assert elem.logical_page_number == 1
class TestOcrClass:
"""Tests for OcrClass constants."""
def test_class_values(self):
assert OcrClass.PAGE == "ocr_page"
assert OcrClass.PARAGRAPH == "ocr_par"
assert OcrClass.LINE == "ocr_line"
assert OcrClass.WORD == "ocrx_word"
assert OcrClass.HEADER == "ocr_header"
assert OcrClass.CAPTION == "ocr_caption"
def test_line_types_frozenset(self):
assert OcrClass.LINE in OcrClass.LINE_TYPES
assert OcrClass.HEADER in OcrClass.LINE_TYPES
assert OcrClass.CAPTION in OcrClass.LINE_TYPES
assert OcrClass.WORD not in OcrClass.LINE_TYPES
| {
"repo_id": "ocrmypdf/OCRmyPDF",
"file_path": "tests/test_ocr_element.py",
"license": "Mozilla Public License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.