repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wandb/client | wandb/sdk/launch/runner/kubernetes.py | 1 | 18347 | import base64
import json
import time
from typing import Any, Dict, List, Optional
from kubernetes import client # type: ignore
from kubernetes.client.api.batch_v1_api import BatchV1Api # type: ignore
from kubernetes.client.api.core_v1_api import CoreV1Api # type: ignore
from kubernetes.client.models.v1_job import V1Job # type: ignore
from kubernetes.client.models.v1_secret import V1Secret # type: ignore
import wandb
from wandb.errors import LaunchError
from wandb.sdk.launch.builder.abstract import AbstractBuilder
from wandb.util import get_module, load_json_yaml_dict
from .abstract import AbstractRun, AbstractRunner, Status
from .._project_spec import get_entry_point_command, LaunchProject
from ..builder.build import get_env_vars_dict
from ..utils import (
get_kube_context_and_api_client,
PROJECT_DOCKER_ARGS,
PROJECT_SYNCHRONOUS,
)
# Seconds to poll before giving up when waiting on a Kubernetes state change
# (used by both suspend() and wait_job_launch()).
TIMEOUT = 5
# Upper bound on consecutive failed pod-status reads before aborting.
MAX_KUBERNETES_RETRIES = (
    60  # default 10 second loop time on the agent, this is 10 minutes
)
class KubernetesSubmittedRun(AbstractRun):
    """Handle for a W&B launch run submitted as a Kubernetes batch Job.

    Wraps the Job's name/namespace together with the API clients needed to
    poll status, suspend, or cancel it. If an imagePullSecret was created for
    the Job, it is deleted once the Job reaches a terminal state.
    """

    def __init__(
        self,
        batch_api: "BatchV1Api",
        core_api: "CoreV1Api",
        name: str,
        pod_names: List[str],
        namespace: Optional[str] = "default",
        secret: Optional["V1Secret"] = None,
    ) -> None:
        """Store API clients and identifying info for the submitted Job.

        Arguments:
            batch_api: client for the batch/v1 API (Job operations).
            core_api: client for the core/v1 API (pods, secrets).
            name: name of the Job object.
            pod_names: names of the pods backing the Job (only the first is
                used for log/status probing).
            namespace: namespace the Job lives in.
            secret: imagePullSecret created for this run, if any; cleaned up
                when the Job terminates.
        """
        self.batch_api = batch_api
        self.core_api = core_api
        self.name = name
        self.namespace = namespace
        # NOTE(review): this snapshot is reused by suspend(); it may be stale
        # by the time suspend() patches it — confirm against the k8s API
        # semantics for patch_namespaced_job.
        self.job = self.batch_api.read_namespaced_job(
            name=self.name, namespace=self.namespace
        )
        self._fail_count = 0
        self.pod_names = pod_names
        self.secret = secret

    @property
    def id(self) -> str:
        """Unique identifier for this run (the Job name)."""
        return self.name

    def get_job(self) -> "V1Job":
        """Fetch a fresh copy of the Job object from the cluster."""
        return self.batch_api.read_namespaced_job(
            name=self.name, namespace=self.namespace
        )

    def wait(self) -> bool:
        """Block until the Job leaves the "running" state.

        Returns True iff the Job finished successfully.
        """
        while True:
            status = self.get_status()
            wandb.termlog(f"Job {self.name} status: {status}")
            if status.state != "running":
                break
            time.sleep(5)
        return (
            status.state == "finished"
        )  # todo: not sure if this (copied from aws runner) is the right approach? should we return false on failure

    def get_status(self) -> Status:
        """Map the Job's Kubernetes status onto a launch Status.

        Also probes the first pod's logs as a liveness check: repeated
        failures (pod not scheduled yet, etc.) are tolerated up to
        MAX_KUBERNETES_RETRIES before raising. Deletes the run's
        imagePullSecret once the Job is terminal.
        """
        job_response = self.batch_api.read_namespaced_job_status(
            name=self.name, namespace=self.namespace
        )
        status = job_response.status
        try:
            self.core_api.read_namespaced_pod_log(
                name=self.pod_names[0], namespace=self.namespace
            )
        except Exception as e:
            # Warn once (on the second consecutive failure) so users know
            # we are still waiting for the pod to come up.
            if self._fail_count == 1:
                wandb.termlog(
                    "Failed to get pod status for job: {}. Will wait up to 10 minutes for job to start.".format(
                        self.name
                    )
                )
            self._fail_count += 1
            if self._fail_count > MAX_KUBERNETES_RETRIES:
                raise LaunchError(
                    f"Failed to start job {self.name}, because of error {str(e)}"
                )
        # todo: we only handle the 1 pod case. see https://kubernetes.io/docs/concepts/workloads/controllers/job/#parallel-jobs for multipod handling
        return_status = None
        if status.succeeded == 1:
            return_status = Status("finished")
        elif status.failed is not None and status.failed >= 1:
            return_status = Status("failed")
        elif status.active == 1:
            # Still running: return early so the secret is NOT cleaned up.
            return Status("running")
        elif status.conditions is not None and status.conditions[0].type == "Suspended":
            return_status = Status("stopped")
        else:
            return_status = Status("unknown")
        # Terminal state: best-effort cleanup of the imagePullSecret.
        if (
            return_status.state in ["stopped", "failed", "finished"]
            and self.secret is not None
        ):
            try:
                self.core_api.delete_namespaced_secret(
                    self.secret.metadata.name, self.namespace
                )
            except Exception as e:
                wandb.termerror(
                    f"Error deleting secret {self.secret.metadata.name}: {str(e)}"
                )
        return return_status

    def suspend(self) -> None:
        """Suspend the Job and wait up to TIMEOUT seconds for confirmation.

        Raises:
            LaunchError: if the Job never reports a Suspended condition.
        """
        self.job.spec.suspend = True
        self.batch_api.patch_namespaced_job(
            name=self.name, namespace=self.namespace, body=self.job
        )
        timeout = TIMEOUT
        job_response = self.batch_api.read_namespaced_job_status(
            name=self.name, namespace=self.namespace
        )
        while job_response.status.conditions is None and timeout > 0:
            time.sleep(1)
            timeout -= 1
            job_response = self.batch_api.read_namespaced_job_status(
                name=self.name, namespace=self.namespace
            )
        # Fix: judge success by the condition itself, not by the timeout
        # counter — previously a condition that appeared on the final poll
        # (timeout == 0) was incorrectly reported as a failure.
        if (
            job_response.status.conditions is None
            or job_response.status.conditions[0].type != "Suspended"
        ):
            raise LaunchError(
                "Failed to suspend job {}. Check Kubernetes dashboard for more info.".format(
                    self.name
                )
            )

    def cancel(self) -> None:
        """Suspend the Job, then delete it from the cluster."""
        self.suspend()
        self.batch_api.delete_namespaced_job(name=self.name, namespace=self.namespace)
class KubernetesRunner(AbstractRunner):
    """Launches W&B runs as Kubernetes batch Jobs.

    Builds a Job manifest from an optional user-supplied spec plus
    resource-arg overrides, submits it through the Kubernetes API, and
    returns a KubernetesSubmittedRun handle for it.
    """

    def populate_job_spec(
        self, job_spec: Dict[str, Any], resource_args: Dict[str, Any]
    ) -> None:
        """Copy job-level overrides from resource args into the job spec."""
        if resource_args.get("backoff_limit"):
            job_spec["backoffLimit"] = resource_args.get("backoff_limit")
        if resource_args.get("completions"):
            job_spec["completions"] = resource_args.get("completions")
        if resource_args.get("parallelism"):
            job_spec["parallelism"] = resource_args.get("parallelism")
        if resource_args.get("suspend"):
            job_spec["suspend"] = resource_args.get("suspend")

    def populate_pod_spec(
        self, pod_spec: Dict[str, Any], resource_args: Dict[str, Any]
    ) -> None:
        """Copy pod-level overrides from resource args into the pod spec."""
        # Launch jobs are one-shot: never restart pods unless the user asks.
        pod_spec["restartPolicy"] = resource_args.get("restart_policy", "Never")
        if resource_args.get("preemption_policy"):
            pod_spec["preemptionPolicy"] = resource_args.get("preemption_policy")
        if resource_args.get("node_name"):
            pod_spec["nodeName"] = resource_args.get("node_name")
        if resource_args.get("node_selectors"):
            pod_spec["nodeSelectors"] = resource_args.get("node_selectors")

    def populate_container_resources(
        self, containers: List[Dict[str, Any]], resource_args: Dict[str, Any]
    ) -> None:
        """Apply name/resource overrides and a hardened security context.

        Raises:
            LaunchError: if a single container-name override is given but
                multiple containers are configured.
        """
        if resource_args.get("container_name"):
            if len(containers) > 1:
                raise LaunchError(
                    "Container name override not supported for multiple containers. Specify in yaml file supplied via job_spec."
                )
            containers[0]["name"] = resource_args["container_name"]
        else:
            # Default names: keep existing, else launch0, launch1, ...
            for i, cont in enumerate(containers):
                cont["name"] = cont.get("name", "launch" + str(i))
        multi_container_override = len(containers) > 1
        for cont in containers:
            container_resources = cont.get("resources", {})
            if resource_args.get("resource_requests"):
                container_resources["requests"] = resource_args.get("resource_requests")
            if resource_args.get("resource_limits"):
                container_resources["limits"] = resource_args.get("resource_limits")
            if container_resources:
                multi_container_override &= (
                    cont.get("resources") != container_resources
                )  # if multiple containers and we changed something
                cont["resources"] = container_resources
            # Always drop privileges for launched containers.
            cont["security_context"] = {
                "allowPrivilegeEscalation": False,
                "capabilities": {"drop": ["ALL"]},
                "seccompProfile": {"type": "RuntimeDefault"},
            }
        if multi_container_override:
            wandb.termwarn(
                "Container overrides (e.g. resource limits) were provided with multiple containers specified: overrides will be applied to all containers."
            )

    def wait_job_launch(
        self, job_name: str, namespace: str, core_api: "CoreV1Api"
    ) -> List[str]:
        """Poll until the Job's pods appear and return their names.

        Raises:
            LaunchError: if no pods show up within TIMEOUT seconds.
        """
        pods = core_api.list_namespaced_pod(
            label_selector=f"job-name={job_name}", namespace=namespace
        )
        timeout = TIMEOUT
        while len(pods.items) == 0 and timeout > 0:
            time.sleep(1)
            timeout -= 1
            pods = core_api.list_namespaced_pod(
                label_selector=f"job-name={job_name}", namespace=namespace
            )
        # Fix: raise only if pods are actually still missing. Previously this
        # checked `timeout == 0`, which raised spuriously when pods appeared
        # on the final poll of the loop.
        if len(pods.items) == 0:
            raise LaunchError(
                "No pods found for job {}. Check dashboard to see if job was launched successfully.".format(
                    job_name
                )
            )
        pod_names = [pi.metadata.name for pi in pods.items]
        wandb.termlog(
            "Job {job} created on pod(s) {pod_names}. See logs with e.g. `kubectl logs {first_pod}`.".format(
                job=job_name, pod_names=", ".join(pod_names), first_pod=pod_names[0]
            )
        )
        return pod_names

    def run(
        self,
        launch_project: LaunchProject,
        builder: AbstractBuilder,
        registry_config: Dict[str, Any],
    ) -> Optional[AbstractRun]:  # noqa: C901
        """Build (if needed) and submit the project as a Kubernetes Job.

        Returns the submitted-run handle, or None if the run-queue item
        could not be acknowledged.
        """
        kubernetes = get_module(  # noqa: F811
            "kubernetes", "KubernetesRunner requires kubernetes to be installed"
        )
        resource_args = launch_project.resource_args.get("kubernetes", {})
        if not resource_args:
            wandb.termlog(
                "Note: no resource args specified. Add a Kubernetes yaml spec or other options in a json file with --resource-args <json>."
            )
        context, api_client = get_kube_context_and_api_client(kubernetes, resource_args)
        batch_api = kubernetes.client.BatchV1Api(api_client)
        core_api = kubernetes.client.CoreV1Api(api_client)
        # allow users to specify template or entire spec
        if resource_args.get("job_spec"):
            job_dict = load_json_yaml_dict(resource_args["job_spec"])
        else:
            # begin constructing job spec
            job_dict = {"apiVersion": "batch/v1", "kind": "Job"}
        # extract job spec component parts for convenience
        job_metadata = job_dict.get("metadata", {})
        job_spec = job_dict.get("spec", {})
        pod_template = job_spec.get("template", {})
        pod_metadata = pod_template.get("metadata", {})
        pod_spec = pod_template.get("spec", {})
        containers = pod_spec.get("containers", [{}])
        job_status = job_dict.get("status", {})
        # begin pulling resource arg overrides. all of these are optional
        # allow top-level namespace override, otherwise take namespace
        # specified at the job level, or default in current context
        default = (
            context["context"].get("namespace", "default") if context else "default"
        )
        namespace = resource_args.get(
            "namespace",
            job_metadata.get("namespace", default),
        )
        # name precedence: resource args override > name in spec file > generated name
        job_metadata["name"] = resource_args.get("job_name", job_metadata.get("name"))
        if not job_metadata.get("name"):
            job_metadata["generateName"] = "launch-"
        if resource_args.get("job_labels"):
            job_metadata["labels"] = resource_args.get("job_labels")
        self.populate_job_spec(job_spec, resource_args)
        self.populate_pod_spec(pod_spec, resource_args)
        self.populate_container_resources(containers, resource_args)
        # cmd
        entry_point = launch_project.get_single_entry_point()
        # env vars
        env_vars = get_env_vars_dict(launch_project, self._api)
        docker_args: Dict[str, Any] = self.backend_config[PROJECT_DOCKER_ARGS]
        secret = None
        if docker_args and list(docker_args) != ["docker_image"]:
            wandb.termwarn(
                "Docker args are not supported for Kubernetes. Not using docker args"
            )
        # only need to do this if user is providing image, on build, our image sets an entrypoint
        entry_cmd = get_entry_point_command(entry_point, launch_project.override_args)
        if launch_project.docker_image and entry_cmd:
            # if user hardcodes cmd into their image, we don't need to run on top of that
            for cont in containers:
                cont["command"] = entry_cmd
        if launch_project.docker_image:
            if len(containers) > 1:
                raise LaunchError(
                    "Multiple container configurations should be specified in a yaml file supplied via job_spec."
                )
            # dont specify run id if user provided image, could have multiple runs
            # (pop with default so a missing key is not an error)
            env_vars.pop("WANDB_RUN_ID", None)
            containers[0]["image"] = launch_project.docker_image
            image_uri = launch_project.docker_image
            # TODO: handle secret pulling image from registry
        elif any(["image" in cont for cont in containers]):
            # user specified image configurations via kubernetes yaml, could have multiple images
            # dont specify run id if user provided image, could have multiple runs
            env_vars.pop("WANDB_RUN_ID", None)
            # TODO: handle secret pulling image from registries?
        else:
            if len(containers) > 1:
                raise LaunchError(
                    "Launch only builds one container at a time. Multiple container configurations should be pre-built and specified in a yaml file supplied via job_spec."
                )
            given_reg = resource_args.get("registry", "")
            repository: Optional[str] = (
                given_reg if given_reg != "" else registry_config.get("url")
            )
            if repository is None:
                # allow local registry usage for eg local clusters but throw a warning
                wandb.termwarn(
                    "Warning: No Docker repository specified. Image will be hosted on local registry, which may not be accessible to your training cluster."
                )
            assert entry_point is not None
            image_uri = builder.build_image(
                launch_project, repository, entry_point, docker_args
            )
            # in the non instance case we need to make an imagePullSecret
            # so the new job can pull the image
            secret = maybe_create_imagepull_secret(
                core_api, registry_config, launch_project.run_id, namespace
            )
            containers[0]["image"] = image_uri
        # reassemble spec
        given_env_vars = resource_args.get("env", {})
        # user-provided env vars win over the generated ones
        merged_env_vars = {**env_vars, **given_env_vars}
        for cont in containers:
            cont["env"] = [{"name": k, "value": v} for k, v in merged_env_vars.items()]
        pod_spec["containers"] = containers
        pod_template["spec"] = pod_spec
        pod_template["metadata"] = pod_metadata
        if secret is not None:
            # name must match the one chosen in maybe_create_imagepull_secret
            pod_spec["imagePullSecrets"] = [
                {"name": f"regcred-{launch_project.run_id}"}
            ]
        job_spec["template"] = pod_template
        job_dict["spec"] = job_spec
        job_dict["metadata"] = job_metadata
        job_dict["status"] = job_status
        if not self.ack_run_queue_item(launch_project):
            return None
        job_response = kubernetes.utils.create_from_yaml(
            api_client, yaml_objects=[job_dict], namespace=namespace
        )[0][
            0
        ]  # create_from_yaml returns a nested list of k8s objects
        job_name = job_response.metadata.labels["job-name"]
        pod_names = self.wait_job_launch(job_name, namespace, core_api)
        submitted_job = KubernetesSubmittedRun(
            batch_api, core_api, job_name, pod_names, namespace, secret
        )
        if self.backend_config[PROJECT_SYNCHRONOUS]:
            submitted_job.wait()
        return submitted_job
def maybe_create_imagepull_secret(
    core_api: "CoreV1Api",
    registry_config: Dict[str, Any],
    run_id: str,
    namespace: str,
) -> Optional["V1Secret"]:
    """Create a dockerconfigjson imagePullSecret for AWS ECR, if configured.

    Only acts when the registry config declares ``ecr-provider: aws`` with
    both a ``url`` and ``credentials``; other providers raise. The secret is
    named ``regcred-<run_id>`` (the pod spec in ``run()`` references the
    same name).

    Returns:
        The created V1Secret, or None when no ECR provider is configured.

    Raises:
        LaunchError: on token retrieval/secret creation failure, or for an
            unsupported provider.
    """
    secret = None
    ecr_provider = registry_config.get("ecr-provider", "").lower()
    if (
        ecr_provider
        and ecr_provider == "aws"
        and registry_config.get("url") is not None
        and registry_config.get("credentials") is not None
    ):
        boto3 = get_module(
            "boto3", "AWS ECR requires boto3, install with pip install wandb[launch]"
        )
        ecr_client = boto3.client("ecr")
        try:
            # ECR auth tokens are base64("user:password").
            encoded_token = ecr_client.get_authorization_token()["authorizationData"][
                0
            ]["authorizationToken"]
            decoded_token = base64.b64decode(encoded_token.encode()).decode()
            uname, token = decoded_token.split(":")
        except Exception as e:
            raise LaunchError(f"Could not get authorization token for ECR, error: {e}")
        # Standard ~/.docker/config.json structure keyed by registry URL.
        creds_info = {
            "auths": {
                registry_config.get("url"): {
                    "username": uname,
                    "password": token,
                    # need an email but the use is deprecated
                    "email": "deprecated@wandblaunch.com",
                    "auth": encoded_token,
                }
            }
        }
        secret_data = {
            ".dockerconfigjson": base64.b64encode(
                json.dumps(creds_info).encode()
            ).decode()
        }
        secret = client.V1Secret(
            data=secret_data,
            metadata=client.V1ObjectMeta(name=f"regcred-{run_id}", namespace=namespace),
            kind="Secret",
            type="kubernetes.io/dockerconfigjson",
        )
        try:
            core_api.create_namespaced_secret(namespace, secret)
        except Exception as e:
            raise LaunchError(f"Exception when creating Kubernetes secret: {str(e)}\n")
        # TODO: support other ecr providers
    elif ecr_provider and ecr_provider != "aws":
        raise LaunchError(f"Registry provider not supported: {ecr_provider}")
    return secret
| mit | c0c78750dbf7ada63a54994e9d628d65 | 39.771111 | 171 | 0.583202 | 4.177368 | false | false | false | false |
wandb/client | wandb/sdk/internal/sample.py | 1 | 2444 | """
sample.
"""
import math
class UniformSampleAccumulator:
    """Keep a bounded, roughly uniform subsample of a stream of values.

    Values are bucketed by the integer log2 of the running count into a
    ring of fixed-size buckets. When the newest bucket would overflow, the
    oldest bucket is recycled and the sampling stride doubles, so memory
    stays bounded no matter how many values are added.
    """

    def __init__(self, min_samples=None):
        requested = min_samples or 64
        # Round the requested sample count up to a power of two.
        self._samples = 2 ** int(math.ceil(math.log(requested, 2)))
        # Oversample by a factor of two relative to the target.
        self._samples2 = self._samples * 2
        # Capacity of each individual bucket.
        self._max = self._samples2 // 2
        # Keep every (1 << _shift)-th value; start by keeping everything.
        self._shift = 0
        self._mask = (1 << self._shift) - 1
        self._buckets = int(math.log(self._samples2, 2))
        self._buckets_bits = int(math.log(self._buckets, 2))
        self._buckets_mask = (1 << self._buckets_bits + 1) - 1
        # Ring pointer: which physical bucket is logically "oldest".
        self._buckets_index = 0
        self._index = [0] * self._buckets
        self._count = 0
        # Pre-allocated storage: one fixed-size list per bucket.
        self._bucket = [[0] * self._max for _ in range(self._buckets)]
        # Integer-log2 lookup table for 1..2**_buckets (index 0 unused).
        self._log2 = [0] + [
            int(math.log(i, 2)) for i in range(1, 2 ** self._buckets + 1)
        ]

    def _show(self):
        # Debugging helper: dump each bucket's retained values.
        print("=" * 20)
        for logical in range(self._buckets):
            phys = (logical + self._buckets_index) % self._buckets
            vals = [self._bucket[phys][i] for i in range(self._index[phys])]
            print(f"{phys}: {vals}")

    def add(self, val):
        """Record *val*, keeping it only if it lands on the current stride."""
        self._count += 1
        cnt = self._count
        if cnt & self._mask:
            # Not on the current sampling stride: drop it.
            return
        bucket = self._log2[cnt >> self._shift]
        if bucket >= self._buckets:
            # Newest bucket would overflow: recycle the oldest slot and
            # double the sampling stride.
            self._index[self._buckets_index] = 0
            self._buckets_index = (self._buckets_index + 1) % self._buckets
            self._shift += 1
            self._mask = (self._mask << 1) | 1
            bucket += self._buckets - 1
        phys = (bucket + self._buckets_index) % self._buckets
        self._bucket[phys][self._index[phys]] = val
        self._index[phys] += 1

    def get(self):
        """Return the retained values as a tuple.

        Thins each bucket toward the target sample count; if thinning
        would drop below the target, every retained value is returned
        instead.
        """
        everything = []
        thinned = []
        for logical in range(self._buckets):
            per_bucket_target = 2 ** logical
            phys = (logical + self._buckets_index) % self._buckets
            step = self._index[phys] // per_bucket_target
            for i in range(self._index[phys]):
                if not step or i % step == 0:
                    thinned.append(self._bucket[phys][i])
                everything.append(self._bucket[phys][i])
        if len(thinned) < self._samples:
            return tuple(everything)
        return tuple(thinned)
| mit | 7fd4f004a836f6a0e21321a8f552a0e3 | 32.944444 | 83 | 0.498363 | 3.578331 | false | false | false | false |
gae-init/gae-init-docs | main/config.py | 1 | 6466 | # coding: utf-8
import os
# SERVER_SOFTWARE starts with "Google App Eng..." only on GAE itself.
PRODUCTION = os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Eng')
DEBUG = DEVELOPMENT = not PRODUCTION

try:
    # This part is surrounded in try/except because the config.py file is
    # also used in the run.py script which is used to compile/minify the client
    # side files (*.less, *.coffee, *.js) and is not aware of the GAE
    from google.appengine.api import app_identity
    APPLICATION_ID = app_identity.get_application_id()
except (ImportError, AttributeError):
    APPLICATION_ID = 'Testing'
else:
    from datetime import datetime
    # CURRENT_VERSION_ID looks like "<name>.<number>"; the name is the part
    # before the first dot.
    CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID') or APPLICATION_ID
    CURRENT_VERSION_NAME = CURRENT_VERSION_ID.split('.')[0]
    if DEVELOPMENT:
        import calendar
        # On the dev server just use "now" as the deploy timestamp.
        CURRENT_VERSION_TIMESTAMP = calendar.timegm(datetime.utcnow().timetuple())
    else:
        # NOTE(review): the numeric part shifted right by 28 bits is treated
        # as a unix timestamp — presumably GAE encodes the deploy time there;
        # confirm against App Engine version-id docs.
        CURRENT_VERSION_TIMESTAMP = long(CURRENT_VERSION_ID.split('.')[1]) >> 28
    CURRENT_VERSION_DATE = datetime.utcfromtimestamp(CURRENT_VERSION_TIMESTAMP)
    USER_AGENT = '%s/%s' % (APPLICATION_ID, CURRENT_VERSION_ID)

import model

try:
    # Secrets/keys come from the master Config record; model.Config is a
    # project type, so details live in model.py.
    CONFIG_DB = model.Config.get_master_db()
    SECRET_KEY = CONFIG_DB.flask_secret_key.encode('ascii')
    RECAPTCHA_PUBLIC_KEY = CONFIG_DB.recaptcha_public_key
    RECAPTCHA_PRIVATE_KEY = CONFIG_DB.recaptcha_private_key
    TRUSTED_HOSTS = CONFIG_DB.trusted_hosts
except AssertionError:
    # get_master_db() can assert outside a real datastore context (e.g. in
    # build scripts); fall back to an in-memory default config.
    CONFIG_DB = model.Config()

DEFAULT_DB_LIMIT = 64
RECAPTCHA_LIMIT = 8
SIGNIN_RETRY_LIMIT = 4
TAG_SEPARATOR = ' '
################################################################################
# SITE
################################################################################
# (slug, display name, status) rows: tools the project depends on.
REQUIREMENT = [
    ('python', 'Python 2.7', 'Required'),
    ('gcloud', 'Google Cloud SDK', 'Required'),
    ('gae', 'Google App Engine', 'Required'),
    ('nodejs', 'Node.js', 'Required'),
    ('gulp', 'Gulp', 'Required'),
    ('pip', 'pip', 'Required'),
    ('virtualenv', 'virtualenv', 'Required'),
    ('git', 'Git', 'Required'),
]

# (slug, title) pairs: ordered steps of the quickstart guide.
QUICKSTART = [
    ('before', 'Before you begin'),
    ('download', 'Download the gae-init app'),
    ('test', 'Test the app'),
    ('make', 'Make a change'),
    ('deploy', 'Deploy your app'),
    ('congratulations', 'Congratulations!'),
    ('clean', 'Clean up'),
]

# (slug, title) pairs: ordered how-to articles.
HOWTO = [
    ('start', 'Getting Started'),
    ('add_styles', 'Add Custom Styles'),
    ('add_scripts', 'Add Custom Scripts'),
    ('add_frontend', 'Add Frontend Library'),
    ('add_python', 'Add Python Library'),
    ('add_config', 'Add New Config'),
    ('letsencrypt', u'Let’s Encrypt Setup'),
    ('deploy', 'Deploy'),
]

# (slug, title, source file) rows: sections of the reference docs.
REFERENCE = [
    ('util', 'Utilities', 'util.py'),
    ('task', 'Tasks', 'task.py'),
    ('auth', 'User related', 'auth.py'),
    ('decorator', 'Decorators', 'auth.py'),
    ('config', 'Config', 'config.py'),
    ('utils', 'Utilities (HTML)', 'utils.html'),
    ('forms', 'Forms (HTML)', 'forms.html'),
]

# Per-section (name, signature) listings for the reference pages; keys
# match the slugs in REFERENCE.
REFERENCE_DEF = {
    'util': [
        ('param', '(name, cast=None)'),
        ('get_next_url', "(next_url='')"),
        ('get_dbs', '(query, order=None, limit=None, cursor=None, prev_cursor=False, keys_only=None, **filters)'),
        ('get_keys', '(*args, **kwargs)'),
        ('jsonpify', '(*args, **kwargs)'),
        ('is_iterable', '(value)'),
        ('check_form_fields', '(*fields)'),
        ('generate_next_url', '(next_cursor, base_url=None)'),
        ('uuid', '()'),
        ('slugify', '(text)'),
        ('is_valid_username', '(username)'),
        ('create_name_from_email', '(email)'),
        ('password_hash', '(user_db, password)'),
        ('update_query_argument', '(name, value=None, ignore=[])'),
        ('parse_tags', '(tags, separator=None)'),
        ('strip_filter', '()'),
        ('email_filter', '()'),
        ('sort_filter', '()'),
    ],
    'task': [
        ('send_mail_notification', '(subject, body, **kwargs)'),
        ('new_user_notification', '(user_db)'),
        ('verify_email_notification', '(user_db)'),
        ('reset_password_notification', '(user_db)'),
        ('email_conflict_notification', '(email)'),
    ],
    'auth': [
        ('current_user_id', '()'),
        ('current_user_key', '()'),
        ('current_user_db', '()'),
        ('is_logged_in', '()'),
    ],
    'decorator': [
        ('login_required', ''),
        ('admin_required', ''),
        ('permission_required', '(permission=None, methods=None)'),
        ('cron_required', ''),
    ],
    'config': [
        ('config_db', ''),
        ('secret_key', ''),
        ('current_version_id', ''),
        ('current_version_name', ''),
        ('current_version_timestamp', ''),
        ('current_version_date', ''),
        ('application_id', ''),
        ('development', ''),
        ('production', ''),
        ('debug', ''),
        ('default_db_limit', ''),
        ('signin_retry_limit', ''),
        ('tag_separator', ''),
    ],
    'utils': [
        ('order_by_link', "(property, title, ignore=['cursor'])"),
        ('filter_by_link', "(property, value, icon=None, ignore=['cursor'])"),
        ('next_link', "(next_url, caption='Next Page')"),
        ('prefetch_link', '(url)'),
        ('auth_icon', '(auth_id)'),
        ('auth_icons', '(auth_ids)'),
        ('html_element', '(name, content)'),
    ],
    'forms': [
        ('field_errors', '(field)'),
        ('field_description', '(field)'),
        ('input_field', "(field, type='text')"),
        ('text_field', '(field)'),
        ('password_field', '(field)'),
        ('number_field', '(field)'),
        ('date_field', '(field)'),
        ('email_field', '(field)'),
        ('select_field', '(field)'),
        ('hidden_field', '(field)'),
        ('textarea_field', '(field, rows=4)'),
        ('checkbox_field', '(field)'),
        ('multiple_checkbox_field', '(field)'),
        ('oauth_fields', '(name, fields, checkmark, help)'),
    ],
}

# (slug, title) pairs: ordered steps of the tutorial. Some steps are
# currently disabled (commented out) but kept for reference.
TUTORIAL = [
    ('introduction', 'Introduction'),
    ('code', 'Get the code'),
    ('run', 'Run the application'),
    ('model', 'Add the new model'),
    ('create', 'Add contacts'),
    ('list', 'List contacts'),
    ('view', 'View contacts'),
    ('update', 'Update contacts'),
    # ('delete', 'Deleting contacts'),
    # ('theme', 'Change theme'),
    ('deploy', 'Deploy'),
]

# (slug, title, note) rows: coding-convention sections.
CONVENTION = [
    ('global', 'Global', ''),
    ('python', 'Python', ''),
    ('html', 'HTML / Jinja2', ''),
    ('less', 'Less / CSS', ''),
    ('coffee', 'CoffeeScript', ''),
    ('markdown', 'Markdown', 'for gae-init documentation'),
]
| mit | 663be227cab8a5be7212c3587ec3cbec | 31.979592 | 114 | 0.530322 | 3.643743 | false | true | false | false |
datosgobar/pydatajson | pydatajson/ckan_reader.py | 1 | 15331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Extensión de pydatajson para leer catálogos de metadatos a través de la API
de CKAN v3.
"""
from __future__ import unicode_literals, print_function,\
with_statement, absolute_import
import os.path
import logging
import json
import time
from six.moves.urllib_parse import urljoin
from six import iteritems
from requests.exceptions import RequestException
from ckanapi import RemoteCKAN
from ckanapi.errors import CKANAPIError
from .helpers import clean_str, title_to_name
from .custom_exceptions import NonParseableCatalog
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger('pydatajson')
ABSOLUTE_PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))

# Map human-readable frequency descriptions to their catalog ids, loaded
# from the packaged accrualPeriodicity schema.
with open(os.path.join(ABSOLUTE_PROJECT_DIR, "schemas",
                       "accrualPeriodicity.json")) as frequencies:
    RAW_FREQUENCIES = json.load(frequencies)
FREQUENCIES = {row["description"]: row["id"] for row in RAW_FREQUENCIES}

# Map super-theme labels to their taxonomy ids, loaded from the packaged
# superThemeTaxonomy schema.
with open(os.path.join(ABSOLUTE_PROJECT_DIR, "schemas",
                       "superThemeTaxonomy.json")) as super_themes:
    RAW_SUPER_THEMES = json.load(super_themes)
SUPER_THEMES = {row["label"]: row["id"] for row in RAW_SUPER_THEMES}
def read_ckan_catalog(portal_url):
    """Convert the metadata a CKAN portal exposes through its Action API v3
    into the data.json standard.

    Args:
        portal_url (str): URL of a CKAN data portal supporting API v3.

    Returns:
        dict: Internal representation of a catalog, usable by the rest of
            this library.

    Raises:
        NonParseableCatalog: if any API call to the portal fails.
    """
    portal = RemoteCKAN(portal_url)
    try:
        status = portal.call_action(
            'status_show', requests_kwargs={"verify": False})
        packages_list = portal.call_action(
            'package_list', requests_kwargs={"verify": False})
        groups_list = portal.call_action(
            'group_list', requests_kwargs={"verify": False})
        # Read every dataset in the portal, one API call each.
        packages = []
        num_packages = len(packages_list)
        for index, pkg in enumerate(packages_list):
            # progress logging (needed when there are many datasets)
            msg = "Leyendo dataset {} de {}".format(index + 1, num_packages)
            logger.info(msg)
            # append the full metadata for this dataset
            packages.append(portal.call_action(
                'package_show', {'id': pkg},
                requests_kwargs={"verify": False}
            ))
            # throttle requests to avoid being banned by the portal
            time.sleep(0.2)
        # Read every theme (CKAN "group") in the portal.
        groups = [portal.call_action(
            'group_show', {'id': grp},
            requests_kwargs={"verify": False})
            for grp in groups_list]
        catalog = map_status_to_catalog(status)
        catalog["dataset"] = map_packages_to_datasets(
            packages, portal_url)
        catalog["themeTaxonomy"] = map_groups_to_themes(groups)
    except (CKANAPIError, RequestException) as e:
        logger.exception(
            'Error al procesar el portal %s', portal_url, exc_info=True)
        raise NonParseableCatalog(portal_url, e)
    return catalog
def map_status_to_catalog(status):
    """Convert the result of action.status_show() into catalog-level
    metadata.

    Args:
        status (dict): Response of the CKAN 'status_show' endpoint.

    Returns:
        dict: Catalog-level metadata ("title", "description", optional
            "publisher" and the fixed "superThemeTaxonomy").
    """
    catalog = dict()
    catalog_mapping = {
        "site_title": "title",
        "site_description": "description"
    }
    for status_key, catalog_key in catalog_mapping.items():
        try:
            catalog[catalog_key] = status[status_key]
        # Fix: catch only the expected missing-key error. The previous
        # `except BaseException` also swallowed KeyboardInterrupt/SystemExit.
        except KeyError:
            logger.exception("""
            La clave '%s' no está en el endpoint de status. No se puede
            completar catalog['%s'].""", status_key, catalog_key)
    publisher_mapping = {
        "site_title": "name",
        "error_emails_to": "mbox"
    }
    # Only add a publisher object if at least one of its fields is present.
    if any([k in status for k in publisher_mapping.keys()]):
        catalog["publisher"] = dict()
        for status_key, publisher_key in publisher_mapping.items():
            try:
                catalog['publisher'][publisher_key] = status[status_key]
            except KeyError:
                logger.exception("""
                La clave '%s' no está en el endpoint de status. No se puede
                completar catalog['publisher'['%s'].""",
                                 status_key, publisher_key)
    else:
        logger.info("""
        No hay ninguna información sobre catalog['publisher'] en el endpoint
        de 'status'.""")
    # Fixed taxonomy shared by every catalog read through this extension.
    catalog['superThemeTaxonomy'] = (
        'http://datos.gob.ar/superThemeTaxonomy.json')
    return catalog
def map_packages_to_datasets(packages, portal_url):
    """Map a list of CKAN 'packages' onto data.json 'datasets'."""
    datasets = []
    for package in packages:
        datasets.append(map_package_to_dataset(package, portal_url))
    return datasets
def map_package_to_dataset(package, portal_url):
    """Map a dict with metadata of a CKAN 'package' onto a dict with the
    metadata of a 'dataset' according to the data.json standard.

    Args:
        package (dict): Response of the CKAN 'package_show' endpoint.
        portal_url (str): Base URL of the portal (used for distributions).

    Returns:
        dict: A data.json 'dataset' entry.
    """
    dataset = dict()
    resources = package["resources"]
    groups = package["groups"]
    tags = package["tags"]
    # Direct one-to-one renames from CKAN package keys to dataset keys.
    dataset_mapping = {
        'title': 'title',
        'notes': 'description',
        'metadata_created': 'issued',
        'metadata_modified': 'modified',
        'license_title': 'license',
        'id': 'identifier',
        'url': 'landingPage'
    }
    for package_key, dataset_key in iteritems(dataset_mapping):
        try:
            dataset[dataset_key] = package[package_key]
        except BaseException:
            logger.exception("""
            La clave '%s' no está en el endpoint 'package_show' para el
            package '%s'. No se puede completar dataset['%s'].""",
                             package_key, package['name'], dataset_key)
    # Publisher info comes from the package's author fields.
    publisher_mapping = {
        'author': 'name',
        'author_email': 'mbox'
    }
    if any([k in package for k in publisher_mapping.keys()]):
        dataset["publisher"] = dict()
        for package_key, publisher_key in iteritems(publisher_mapping):
            try:
                dataset['publisher'][publisher_key] = package[package_key]
            except BaseException:
                logger.exception("""
                La clave '%s' no está en el endpoint 'package_show' para el
                package '%s'. No se puede completar
                dataset['publisher']['%s'].""",
                                 package_key, package['name'], publisher_key)
    # Contact point info comes from the package's maintainer fields.
    contact_point_mapping = {
        'maintainer': 'fn',
        'maintainer_email': 'hasEmail'
    }
    if any([k in package for k in contact_point_mapping.keys()]):
        dataset["contactPoint"] = dict()
        for package_key, contact_key in iteritems(contact_point_mapping):
            try:
                dataset['contactPoint'][contact_key] = package[package_key]
            except BaseException:
                logger.exception("""
                La clave '%s' no está en el endpoint 'package_show' para el
                package '%s'. No se puede completar
                dataset['contactPoint']['%s'].""",
                                 package_key, package['name'], contact_key)
    # If the package carries "extras", look for the custom keys holding
    # update frequency, global theme and temporal coverage to fill in
    # "accrualPeriodicity", "superTheme" and "temporal" respectively.
    if "extras" in package:
        add_accrualPeriodicity(dataset, package)
        add_superTheme(dataset, package)
        add_temporal(dataset, package)
    dataset["distribution"] = map_resources_to_distributions(resources,
                                                            portal_url)
    dataset["theme"] = [grp['name'] for grp in groups]
    dataset['keyword'] = [tag['name'] for tag in tags]
    return dataset
def add_temporal(dataset, package):
    """Fill in dataset['temporal'] from the package's "Cobertura temporal"
    extra field, if exactly one such value exists. Mutates *dataset*."""
    # "Cobertura temporal" => "temporal"
    temporal = [
        extra["value"] for extra in package["extras"] if
        title_to_name(extra["key"]) == title_to_name("Cobertura temporal")
    ]
    if len(temporal) > 1:
        # Ambiguous: more than one temporal coverage declared — skip.
        logger.info("""
        Se encontro mas de un valor de cobertura temporal en 'extras' para el
        'package' '%s'. No se puede completar dataset['temporal'].\n %s""",
                    package['name'], temporal)
    elif len(temporal) == 1:
        # NOTE(review): a plain assignment cannot raise KeyError, so this
        # except branch looks like dead code copied from the superTheme
        # variant of this function.
        try:
            dataset["temporal"] = temporal[0]
        except KeyError:
            logger.exception("""
            Se encontró '%s' como cobertura temporal, pero no es mapeable a un
            'temporal' conocido. La clave no se pudo completar.""",
                             temporal[0])
    # Look for keys that are *almost* "Cobertura temporal" and warn so the
    # portal maintainer can fix them.
    almost_temporal = [
        extra for extra in package["extras"] if
        clean_str(extra["key"]) == "cobertura temporal" and
        extra["key"] != "Cobertura temporal"]
    if almost_temporal:
        logger.warn("""
        Se encontraron claves con nombres similares pero no idénticos a
        "Cobertura temporal" en 'extras' para el 'package' '%s'.
        Por favor, considere corregirlas:
        \n%s""", package['name'], almost_temporal)
def add_superTheme(dataset, package):
    """Fill in dataset['superTheme'] from the package's "Temática global"
    extra field, mapping its label through SUPER_THEMES. Mutates *dataset*."""
    # "Temática global" => "superTheme"
    super_theme = [
        extra["value"] for extra in package["extras"] if
        title_to_name(extra["key"]) == title_to_name("Temática global")
    ]
    if len(super_theme) == 0:
        logger.info("""
        No se encontraron valores de temática global en 'extras' para el
        'package' '%s'. No se puede completar dataset['superTheme'].""",
                    package['name'])
    elif len(super_theme) > 1:
        # Ambiguous: more than one global theme declared — skip.
        logger.info("""
        Se encontro mas de un valor de temática global en 'extras' para el
        'package' '%s'. No se puede completar dataset['superTheme'].\n %s""",
                    package['name'], super_theme)
    else:
        try:
            # The value is a label; map it to its taxonomy id.
            dataset["superTheme"] = [SUPER_THEMES[super_theme[0]]]
        except KeyError:
            logger.exception("""
            Se encontró '%s' como temática global, pero no es mapeable a un
            'superTheme' conocido. La clave no se pudo completar.""",
                             super_theme[0])
    # Look for keys that are *almost* "Temática global" and warn so the
    # portal maintainer can fix them.
    almost_super_theme = [
        extra for extra in package["extras"] if
        clean_str(extra["key"]) == "tematica global" and
        extra["key"] != "Temática global"]
    if almost_super_theme:
        logger.warn("""
        Se encontraron claves con nombres similares pero no idénticos a
        "Temática global" en 'extras' para el 'package' '%s'. Por favor,
        considere corregirlas: \n%s""",
                    package['name'], almost_super_theme)
def add_accrualPeriodicity(dataset, package):
    """Map the "Frecuencia de actualización" extra of a CKAN `package` to
    dataset["accrualPeriodicity"].

    Logs (in Spanish, as the rest of this module does) when the value is
    missing, ambiguous or not mappable, and warns about almost-matching
    extra keys that are probably typos.
    """
    # "Frecuencia de actualización" => "accrualPeriodicity"
    accrual = [
        extra["value"] for extra in package["extras"] if
        title_to_name(extra["key"]) == title_to_name(
            "Frecuencia de actualización")
    ]
    if len(accrual) == 0:
        logger.info("""
No se encontraron valores de frecuencia de actualización en 'extras'
para el 'package' '%s'. No se puede completar
dataset['accrualPeriodicity'].""", package['name'])
    elif len(accrual) > 1:
        logger.info("""
Se encontro mas de un valor de frecuencia de actualización en 'extras'
para el 'package' '%s'. No se puede completar
dataset['accrualPeriodicity'].\n %s""", package['name'], accrual)
    else:
        try:
            dataset["accrualPeriodicity"] = FREQUENCIES[accrual[0]]
        except KeyError:
            logger.exception("""
Se encontró '%s' como frecuencia de actualización, pero no es
mapeable a una 'accrualPeriodicity' conocida. La clave no se
pudo completar.""", accrual[0])
    # Look for keys that are *almost* "Frecuencia de actualización" to warn
    # about probable typos in the catalog.
    almost_accrual = [
        extra for extra in package["extras"] if
        clean_str(extra["key"]) == "frecuencia de actualizacion" and
        extra["key"] != "Frecuencia de actualización"]
    if almost_accrual:
        # Fix: Logger.warn() is a deprecated alias, use warning() instead.
        logger.warning("""
Se encontraron claves con nombres similares pero no idénticos a
"Frecuencia de actualización" en 'extras' para el 'package' '%s'.
Por favor, considere corregirlas:\n%s""",
                       package['name'], almost_accrual)
def map_resources_to_distributions(resources, portal_url):
    """Map every CKAN 'resource' in `resources` to a data.json
    'distribution' dict (see map_resource_to_distribution)."""
    distributions = []
    for resource in resources:
        distributions.append(
            map_resource_to_distribution(resource, portal_url))
    return distributions
def map_resource_to_distribution(resource, portal_url):
    """Map a CKAN 'resource' metadata dict to a 'distribution' dict
    following the data.json standard.

    Args:
        resource (dict): CKAN resource metadata.
        portal_url (str): base URL of the portal, used to build 'accessURL'.

    Returns:
        dict: the mapped 'distribution' metadata.
    """
    distribution = dict()
    distribution_mapping = {
        'url': 'downloadURL',
        'name': 'title',
        'created': 'issued',
        'description': 'description',
        'format': 'format',
        'last_modified': 'modified',
        'mimetype': 'mediaType',
        'size': 'byteSize',
        'id': 'identifier'  # Not part of the PAD standard, but relevant
    }
    for resource_key, distribution_key in iteritems(distribution_mapping):
        try:
            distribution[distribution_key] = resource[resource_key]
        # Fix: narrowed from BaseException — only a missing key is the
        # expected failure here; anything else should propagate.
        except KeyError:
            logger.exception("""
La clave '%s' no está en la metadata del 'resource' '%s'. No
se puede completar distribution['%s'].""",
                             resource_key, resource['name'], distribution_key)
    if 'attributesDescription' in resource:
        try:
            distribution['field'] = json.loads(
                resource['attributesDescription'])
        # Fix: narrowed from BaseException — malformed JSON raises
        # ValueError (json.JSONDecodeError subclasses it); unrelated errors
        # should not be swallowed.
        except ValueError:
            logger.exception(
                "Error parseando los fields del resource '%s'",
                resource['name'])
    url_path = ['dataset', resource['package_id'], 'resource', resource['id']]
    distribution["accessURL"] = urljoin(portal_url, "/".join(url_path))
    return distribution
def map_groups_to_themes(groups):
    """Map every CKAN 'group' in `groups` to a data.json 'theme' dict
    (see map_group_to_theme)."""
    themes = []
    for group in groups:
        themes.append(map_group_to_theme(group))
    return themes
def map_group_to_theme(group):
    """Map a CKAN 'group' metadata dict to a 'theme' dict following the
    data.json standard.

    Args:
        group (dict): CKAN group metadata.

    Returns:
        dict: the mapped 'theme' metadata (keys: 'id', 'label',
        'description' — only those present in `group`).
    """
    theme = dict()
    theme_mapping = {
        'name': 'id',
        'title': 'label',
        'description': 'description'
    }
    for group_key, theme_key in iteritems(theme_mapping):
        try:
            theme[theme_key] = group[group_key]
        # Fix: narrowed from BaseException — only a missing key is expected.
        except KeyError:
            # Bug fix: the original logged theme['name'], but 'theme' never
            # has a 'name' key (its keys are 'id'/'label'/'description'),
            # so the handler itself raised KeyError. Log the group's own
            # name instead, tolerating a missing 'name' as well.
            logger.exception("""
La clave '%s' no está en la metadata del 'group' '%s'. No
se puede completar theme['%s'].""",
                             group_key, group.get('name'), theme_key)
    return theme
| mit | 0958a13db2a2804214a783918a997000 | 35.828916 | 79 | 0.59932 | 3.736006 | false | false | false | false |
datosgobar/pydatajson | pydatajson/validators/url_validator.py | 1 | 1314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import requests
from requests import RequestException, Timeout
from pydatajson.constants import EXCEPTION_STATUS_CODES, \
INVALID_STATUS_CODES_REGEX
from pydatajson.validators.simple_validator import SimpleValidator
class UrlValidator(SimpleValidator):
    """Base validator that checks catalog URLs via HTTP HEAD requests."""

    def __init__(self, catalog, verify_ssl, url_check_timeout, threads_count):
        super(UrlValidator, self).__init__(catalog)
        self.verify_ssl = verify_ssl            # verify TLS certificates?
        self.url_check_timeout = url_check_timeout  # per-request timeout (s)
        self.threads_count = threads_count      # worker threads for subclasses

    def validate(self):
        # Subclasses decide which URLs of the catalog are checked and how.
        raise NotImplementedError

    def is_working_url(self, url):
        """Return a ``(is_working, status_code)`` tuple for `url`.

        A URL is considered working when its status code is in
        EXCEPTION_STATUS_CODES or does not match any pattern in
        INVALID_STATUS_CODES_REGEX. A timeout yields ``(False, 408)`` and
        any other failure yields ``(False, None)``.
        """
        try:
            response = requests.head(url,
                                     timeout=self.url_check_timeout,
                                     verify=self.verify_ssl)
            matches = []
            if response.status_code not in EXCEPTION_STATUS_CODES:
                matches = \
                    [re.match(pattern, str(response.status_code)) is not None
                     for pattern in INVALID_STATUS_CODES_REGEX]
            return True not in matches, response.status_code
        except Timeout:
            return False, 408
        except Exception:
            # Fix: the original caught `(RequestException, Exception)`,
            # which is redundant — RequestException subclasses Exception,
            # so catching Exception alone is behaviorally identical.
            return False, None
| mit | 2f0bbb0bec0c1bdaadc27fc11aeb39c7 | 33.578947 | 78 | 0.612633 | 4.484642 | false | false | false | false |
mozman/ezdxf | src/ezdxf/entities/spline.py | 1 | 23390 | # Copyright (c) 2019-2022 Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import (
TYPE_CHECKING,
List,
Iterable,
Sequence,
cast,
Iterator,
Optional,
)
import array
import copy
from itertools import chain
from ezdxf.audit import AuditError
from ezdxf.lldxf import validator
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
XType,
RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf.lldxf.const import (
SUBCLASS_MARKER,
DXF2000,
DXFValueError,
DXFStructureError,
)
from ezdxf.lldxf.packedtags import VertexArray, Tags
from ezdxf.math import (
Vec3,
UVec,
Matrix44,
ConstructionEllipse,
Z_AXIS,
NULLVEC,
OCS,
uniform_knot_vector,
open_uniform_knot_vector,
BSpline,
required_knot_values,
required_fit_points,
required_control_points,
fit_points_to_cad_cv,
)
from .dxfentity import base_class, SubclassProcessor, DXFEntity
from .dxfgfx import DXFGraphic, acdb_entity
from .factory import register_entity
if TYPE_CHECKING:
from ezdxf.entities import DXFNamespace, Ellipse
from ezdxf.lldxf.tagwriter import AbstractTagWriter
from ezdxf.audit import Auditor
__all__ = ["Spline"]
# From the Autodesk ObjectARX reference:
# Objects of the AcDbSpline class use an embedded gelib object to maintain the
# actual spline information.
#
# Book recommendations:
#
# - "Curves and Surfaces for CAGD" by Gerald Farin
# - "Mathematical Elements for Computer Graphics"
# by David Rogers and Alan Adams
# - "An Introduction To Splines For Use In Computer Graphics & Geometric Modeling"
# by Richard H. Bartels, John C. Beatty, and Brian A Barsky
#
# http://help.autodesk.com/view/OARX/2018/ENU/?guid=OREF-AcDbSpline__setFitData_AcGePoint3dArray__AcGeVector3d__AcGeVector3d__AcGe__KnotParameterization_int_double
# Construction of a AcDbSpline entity from fit points:
# degree has no effect. A spline with degree=3 is always constructed when
# interpolating a series of fit points.
# DXF attribute definitions of the "AcDbSpline" subclass, keyed by the
# ezdxf attribute name and mapped to its DXF group code.
acdb_spline = DefSubclass(
    "AcDbSpline",
    {
        # Spline flags:
        # 1 = Closed spline
        # 2 = Periodic spline
        # 4 = Rational spline
        # 8 = Planar
        # 16 = Linear (planar bit is also set)
        "flags": DXFAttr(70, default=0),
        # degree: The degree can't be higher than 11 according to the Autodesk
        # ObjectARX reference.
        "degree": DXFAttr(71, default=3, validator=validator.is_positive),
        # The three counters are computed on export via callback getters
        # defined on the Spline class (not stored values):
        "n_knots": DXFAttr(72, xtype=XType.callback, getter="knot_count"),
        "n_control_points": DXFAttr(
            73, xtype=XType.callback, getter="control_point_count"
        ),
        "n_fit_points": DXFAttr(
            74, xtype=XType.callback, getter="fit_point_count"
        ),
        "knot_tolerance": DXFAttr(42, default=1e-10, optional=True),
        "control_point_tolerance": DXFAttr(43, default=1e-10, optional=True),
        "fit_tolerance": DXFAttr(44, default=1e-10, optional=True),
        # Start- and end tangents should be normalized, but CAD applications do not
        # crash if they are not normalized.
        "start_tangent": DXFAttr(
            12,
            xtype=XType.point3d,
            optional=True,
            validator=validator.is_not_null_vector,
        ),
        "end_tangent": DXFAttr(
            13,
            xtype=XType.point3d,
            optional=True,
            validator=validator.is_not_null_vector,
        ),
        # Extrusion is the normal vector (omitted if the spline is non-planar)
        "extrusion": DXFAttr(
            210,
            xtype=XType.point3d,
            default=Z_AXIS,
            optional=True,
            validator=validator.is_not_null_vector,
            fixer=RETURN_DEFAULT,
        ),
        # 10: Control points (in WCS); one entry per control point
        # 11: Fit points (in WCS); one entry per fit point
        # 40: Knot value (one entry per knot)
        # 41: Weight (if not 1); with multiple group pairs, they are present if all
        #     are not 1
    },
)
# Fast group-code -> attribute-name lookup used by the loader.
acdb_spline_group_codes = group_code_mapping(acdb_spline)
class SplineData:
    """Lightweight bundle referencing the geometry arrays of a Spline.

    Stores direct references (not copies) to the fit points, control
    points, knots and weights of the given `spline`.
    """

    def __init__(self, spline: Spline):
        for attrib in ("fit_points", "control_points", "knots", "weights"):
            setattr(self, attrib, getattr(spline, attrib))
# Group codes of spline data that is managed outside the DXF namespace:
# points (10/11), knots (40), weights (41) and their counters (72/73/74).
# NOTE(review): not referenced within this chunk — presumably used by the
# loader/repair code elsewhere in the package.
REMOVE_CODES = {10, 11, 40, 41, 72, 73, 74}
# Type alias: a sequence of vertices, each vertex a sequence of floats.
Vertices = List[Sequence[float]]
@register_entity
class Spline(DXFGraphic):
    """DXF SPLINE entity"""

    DXFTYPE = "SPLINE"
    DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_spline)
    MIN_DXF_VERSION_FOR_EXPORT = DXF2000
    # Bit values of the DXF attribute "flags" (group code 70):
    CLOSED = 1  # closed b-spline
    PERIODIC = 2  # uniform b-spline
    RATIONAL = 4  # rational b-spline
    PLANAR = 8  # all spline points in a plane, don't read or set this bit, just ignore like AutoCAD
    LINEAR = 16  # always set with PLANAR, don't read or set this bit, just ignore like AutoCAD

    def __init__(self):
        super().__init__()
        # Geometry data is stored outside the DXF namespace in packed
        # arrays, the assignments go through the property setters below:
        self.fit_points = VertexArray()
        self.control_points = VertexArray()
        self.knots = []
        self.weights = []

    def _copy_data(self, entity: DXFEntity) -> None:
        """Copy data: control_points, fit_points, weights, knot_values."""
        assert isinstance(entity, Spline)
        entity._control_points = copy.deepcopy(self._control_points)
        entity._fit_points = copy.deepcopy(self._fit_points)
        entity._knots = copy.deepcopy(self._knots)
        entity._weights = copy.deepcopy(self._weights)

    def load_dxf_attribs(
        self, processor: Optional[SubclassProcessor] = None
    ) -> DXFNamespace:
        dxf = super().load_dxf_attribs(processor)
        if processor:
            # subclass index 2 is "AcDbSpline"
            tags = processor.subclass_by_index(2)
            if tags:
                # strip the point/knot/weight tags before generic loading:
                tags = Tags(self.load_spline_data(tags))
                processor.fast_load_dxfattribs(
                    dxf, acdb_spline_group_codes, subclass=tags, recover=True
                )
            else:
                raise DXFStructureError(
                    f"missing 'AcDbSpline' subclass in SPLINE(#{dxf.handle})"
                )
        return dxf

    def load_spline_data(self, tags) -> Iterator:
        """Load and set spline data (fit points, control points, weights,
        knots) and remove invalid start- and end tangents.
        Yields the remaining unprocessed tags.
        """
        control_points = []
        fit_points = []
        knots = []
        weights = []
        for tag in tags:
            code, value = tag
            if code == 10:
                control_points.append(value)
            elif code == 11:
                fit_points.append(value)
            elif code == 40:
                knots.append(value)
            elif code == 41:
                weights.append(value)
            elif code in (12, 13) and NULLVEC.isclose(value):
                # Tangent values equal to (0, 0, 0) are invalid and ignored at
                # the loading stage!
                pass
            else:
                yield tag
        self.control_points = control_points
        self.fit_points = fit_points
        self.knots = knots
        self.weights = weights

    def export_entity(self, tagwriter: AbstractTagWriter) -> None:
        """Export entity specific data as DXF tags."""
        super().export_entity(tagwriter)
        tagwriter.write_tag2(SUBCLASS_MARKER, acdb_spline.name)
        self.dxf.export_dxf_attribs(tagwriter, ["extrusion", "flags", "degree"])
        # counters are written explicitly (callback attributes):
        tagwriter.write_tag2(72, self.knot_count())
        tagwriter.write_tag2(73, self.control_point_count())
        tagwriter.write_tag2(74, self.fit_point_count())
        self.dxf.export_dxf_attribs(
            tagwriter,
            [
                "knot_tolerance",
                "control_point_tolerance",
                "fit_tolerance",
                "start_tangent",
                "end_tangent",
            ],
        )
        self.export_spline_data(tagwriter)

    def export_spline_data(self, tagwriter: AbstractTagWriter):
        # export order: knots (40), weights (41), control points (10),
        # fit points (11)
        for value in self._knots:
            tagwriter.write_tag2(40, value)
        if len(self._weights):
            for value in self._weights:
                tagwriter.write_tag2(41, value)
        self._control_points.export_dxf(tagwriter, code=10)  # type: ignore
        self._fit_points.export_dxf(tagwriter, code=11)  # type: ignore

    @property
    def closed(self) -> bool:
        """``True`` if spline is closed. A closed spline has a connection from
        the last control point to the first control point. (read/write)
        """
        return self.get_flag_state(self.CLOSED, name="flags")

    @closed.setter
    def closed(self, status: bool) -> None:
        self.set_flag_state(self.CLOSED, state=status, name="flags")

    @property
    def knots(self) -> list[float]:
        """Knot values as :code:`array.array('d')`."""
        return self._knots

    @knots.setter
    def knots(self, values: Iterable[float]) -> None:
        self._knots: list[float] = cast(List[float], array.array("d", values))

    # DXF callback attribute Spline.dxf.n_knots
    def knot_count(self) -> int:
        """Count of knot values."""
        return len(self._knots)

    @property
    def weights(self) -> list[float]:
        """Control point weights as :code:`array.array('d')`."""
        return self._weights

    @weights.setter
    def weights(self, values: Iterable[float]) -> None:
        self._weights: list[float] = cast(List[float], array.array("d", values))

    @property
    def control_points(self) -> Vertices:
        """:class:`~ezdxf.lldxf.packedtags.VertexArray` of control points in
        :ref:`WCS`.
        """
        return self._control_points

    @control_points.setter
    def control_points(self, points: Iterable[UVec]) -> None:
        self._control_points: Vertices = cast(
            Vertices, VertexArray(chain.from_iterable(Vec3.generate(points)))
        )

    # DXF callback attribute Spline.dxf.n_control_points
    def control_point_count(self) -> int:
        """Count of control points."""
        return len(self.control_points)

    @property
    def fit_points(self) -> Vertices:
        """:class:`~ezdxf.lldxf.packedtags.VertexArray` of fit points in
        :ref:`WCS`.
        """
        return self._fit_points

    @fit_points.setter
    def fit_points(self, points: Iterable[UVec]) -> None:
        self._fit_points: Vertices = cast(
            Vertices,
            VertexArray(chain.from_iterable(Vec3.generate(points))),
        )

    # DXF callback attribute Spline.dxf.n_fit_points
    def fit_point_count(self) -> int:
        """Count of fit points."""
        return len(self.fit_points)

    def construction_tool(self) -> BSpline:
        """Returns the construction tool :class:`ezdxf.math.BSpline`."""
        # control points take precedence over fit points:
        if self.control_point_count():
            weights = self.weights if len(self.weights) else None
            knots = self.knots if len(self.knots) else None
            return BSpline(
                control_points=self.control_points,
                order=self.dxf.degree + 1,
                knots=knots,
                weights=weights,
            )
        elif self.fit_point_count():
            tangents = None
            if self.dxf.hasattr("start_tangent") and self.dxf.hasattr(
                "end_tangent"
            ):
                tangents = [self.dxf.start_tangent, self.dxf.end_tangent]
            # SPLINE from fit points has always a degree of 3!
            return fit_points_to_cad_cv(
                self.fit_points,
                tangents=tangents,
            )
        else:
            raise ValueError(
                "Construction tool requires control- or fit points."
            )

    def apply_construction_tool(self, s) -> Spline:
        """Apply SPLINE data from a :class:`~ezdxf.math.BSpline` construction
        tool or from a :class:`geomdl.BSpline.Curve` object.
        """
        try:
            self.control_points = s.control_points
        except AttributeError:  # maybe a geomdl.BSpline.Curve class
            s = BSpline.from_nurbs_python_curve(s)
            self.control_points = s.control_points
        self.dxf.degree = s.degree
        self.fit_points = []  # remove fit points
        self.knots = s.knots()
        self.weights = s.weights()
        self.set_flag_state(Spline.RATIONAL, state=bool(len(self.weights)))
        return self  # floating interface

    def flattening(self, distance: float, segments: int = 4) -> Iterator[Vec3]:
        """Adaptive recursive flattening. The argument `segments` is the
        minimum count of approximation segments between two knots, if the
        distance from the center of the approximation segment to the curve is
        bigger than `distance` the segment will be subdivided.

        Args:
            distance: maximum distance from the projected curve point onto the
                segment chord.
            segments: minimum segment count between two knots

        """
        return self.construction_tool().flattening(distance, segments)

    @classmethod
    def from_arc(cls, entity: DXFGraphic) -> Spline:
        """Create a new SPLINE entity from a CIRCLE, ARC or ELLIPSE entity.

        The new SPLINE entity has no owner, no handle, is not stored in
        the entity database nor assigned to any layout!

        """
        dxftype = entity.dxftype()
        if dxftype == "ELLIPSE":
            ellipse = cast("Ellipse", entity).construction_tool()
        elif dxftype == "CIRCLE":
            # a CIRCLE is a full ellipse with conjugated diameter ratio 1:
            ellipse = ConstructionEllipse.from_arc(
                center=entity.dxf.get("center", NULLVEC),
                radius=abs(entity.dxf.get("radius", 1.0)),
                extrusion=entity.dxf.get("extrusion", Z_AXIS),
            )
        elif dxftype == "ARC":
            ellipse = ConstructionEllipse.from_arc(
                center=entity.dxf.get("center", NULLVEC),
                radius=abs(entity.dxf.get("radius", 1.0)),
                extrusion=entity.dxf.get("extrusion", Z_AXIS),
                start_angle=entity.dxf.get("start_angle", 0),
                end_angle=entity.dxf.get("end_angle", 360),
            )
        else:
            raise TypeError("CIRCLE, ARC or ELLIPSE entity required.")
        # copy the common graphic attributes (layer, color, ...):
        spline = Spline.new(
            dxfattribs=entity.graphic_properties(), doc=entity.doc
        )
        s = BSpline.from_ellipse(ellipse)
        spline.dxf.degree = s.degree
        spline.dxf.flags = Spline.RATIONAL
        spline.control_points = s.control_points  # type: ignore
        spline.knots = s.knots()  # type: ignore
        spline.weights = s.weights()  # type: ignore
        return spline

    def set_open_uniform(
        self, control_points: Sequence[UVec], degree: int = 3
    ) -> None:
        """Open B-spline with a uniform knot vector, start and end at your first
        and last control points.

        """
        self.dxf.flags = 0
        self.dxf.degree = degree
        self.control_points = control_points  # type: ignore
        self.knots = open_uniform_knot_vector(len(control_points), degree + 1)

    def set_uniform(
        self, control_points: Sequence[UVec], degree: int = 3
    ) -> None:
        """B-spline with a uniform knot vector, does NOT start and end at your
        first and last control points.

        """
        self.dxf.flags = 0
        self.dxf.degree = degree
        self.control_points = control_points  # type: ignore
        self.knots = uniform_knot_vector(len(control_points), degree + 1)

    def set_closed(self, control_points: Sequence[UVec], degree=3) -> None:
        """Closed B-spline with a uniform knot vector, start and end at your
        first control point.

        """
        self.dxf.flags = self.PERIODIC | self.CLOSED
        self.dxf.degree = degree
        self.control_points = control_points  # type: ignore
        # wrap the first `degree` control points around to close the curve:
        self.control_points.extend(control_points[:degree])
        # AutoDesk Developer Docs:
        # If the spline is periodic, the length of knot vector will be greater
        # than length of the control array by 1, but this does not work with
        # BricsCAD.
        self.knots = uniform_knot_vector(len(self.control_points), degree + 1)

    def set_open_rational(
        self,
        control_points: Sequence[UVec],
        weights: Sequence[float],
        degree: int = 3,
    ) -> None:
        """Open rational B-spline with a uniform knot vector, start and end at
        your first and last control points, and has additional control
        possibilities by weighting each control point.

        """
        self.set_open_uniform(control_points, degree=degree)
        self.dxf.flags = self.dxf.flags | self.RATIONAL
        if len(weights) != len(self.control_points):
            raise DXFValueError(
                "Control point count must be equal to weights count."
            )
        self.weights = weights  # type: ignore

    def set_uniform_rational(
        self,
        control_points: Sequence[UVec],
        weights: Sequence[float],
        degree: int = 3,
    ) -> None:
        """Rational B-spline with a uniform knot vector, does NOT start and end
        at your first and last control points, and has additional control
        possibilities by weighting each control point.

        """
        self.set_uniform(control_points, degree=degree)
        self.dxf.flags = self.dxf.flags | self.RATIONAL
        if len(weights) != len(self.control_points):
            raise DXFValueError(
                "Control point count must be equal to weights count."
            )
        self.weights = weights  # type: ignore

    def set_closed_rational(
        self,
        control_points: Sequence[UVec],
        weights: Sequence[float],
        degree: int = 3,
    ) -> None:
        """Closed rational B-spline with a uniform knot vector, start and end at
        your first control point, and has additional control possibilities by
        weighting each control point.

        """
        self.set_closed(control_points, degree=degree)
        self.dxf.flags = self.dxf.flags | self.RATIONAL
        # set_closed() wrapped `degree` control points around, the weights
        # have to be wrapped around accordingly:
        weights = list(weights)
        weights.extend(weights[:degree])
        if len(weights) != len(self.control_points):
            raise DXFValueError(
                "Control point count must be equal to weights count."
            )
        self.weights = weights

    def transform(self, m: Matrix44) -> Spline:
        """Transform the SPLINE entity by transformation matrix `m` inplace."""
        self._control_points.transform(m)  # type: ignore
        self._fit_points.transform(m)  # type: ignore
        # Transform optional attributes if they exist
        dxf = self.dxf
        for name in ("start_tangent", "end_tangent", "extrusion"):
            if dxf.hasattr(name):
                dxf.set(name, m.transform_direction(dxf.get(name)))
        self.post_transform(m)
        return self

    def audit(self, auditor: Auditor) -> None:
        """Audit the SPLINE entity."""
        super().audit(auditor)
        degree = self.dxf.degree
        name = str(self)
        if degree < 1:
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_DEFINITION,
                message=f"Removed {name} with invalid degree: {degree} < 1.",
            )
            auditor.trash(self)
            return
        n_control_points = len(self.control_points)
        n_fit_points = len(self.fit_points)
        if n_control_points == 0 and n_fit_points == 0:
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_DEFINITION,
                message=f"Removed {name} without any points (no geometry).",
            )
            auditor.trash(self)
            return
        if n_control_points > 0:
            self._audit_control_points(auditor)
        # Ignore fit points if defined by control points
        elif n_fit_points > 0:
            self._audit_fit_points(auditor)

    def _audit_control_points(self, auditor: Auditor):
        name = str(self)
        order = self.dxf.degree + 1
        n_control_points = len(self.control_points)

        # Splines with too few control points can't be processed:
        n_control_points_required = required_control_points(order)
        if n_control_points < n_control_points_required:
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_CONTROL_POINT_COUNT,
                message=f"Removed {name} with invalid control point count: "
                f"{n_control_points} < {n_control_points_required}",
            )
            auditor.trash(self)
            return

        n_weights = len(self.weights)
        n_knots = len(self.knots)
        n_knots_required = required_knot_values(n_control_points, order)
        if n_knots < n_knots_required:
            # Can not fix entity: because the knot values are basic
            # values which define the geometry of SPLINE.
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_KNOT_VALUE_COUNT,
                message=f"Removed {name} with invalid knot value count: "
                f"{n_knots} < {n_knots_required}",
            )
            auditor.trash(self)
            return

        if n_weights and n_weights != n_control_points:
            # Can not fix entity: because the weights are basic
            # values which define the geometry of SPLINE.
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_WEIGHT_COUNT,
                message=f"Removed {name} with invalid weight count: "
                f"{n_weights} != {n_control_points}",
            )
            auditor.trash(self)
            return

    def _audit_fit_points(self, auditor: Auditor):
        name = str(self)
        order = self.dxf.degree + 1
        # Assuming end tangents will be estimated if not present,
        # like by ezdxf:
        n_fit_points_required = required_fit_points(order, tangents=True)

        # Splines with too few fit points can't be processed:
        n_fit_points = len(self.fit_points)
        if n_fit_points < n_fit_points_required:
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_FIT_POINT_COUNT,
                message=f"Removed {name} with invalid fit point count: "
                f"{n_fit_points} < {n_fit_points_required}",
            )
            auditor.trash(self)
            return

        # Knot values have no meaning for splines defined by fit points:
        if len(self.knots):
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_KNOT_VALUE_COUNT,
                message=f"Removed unused knot values for {name} "
                f"defined by fit points.",
            )
            self.knots = []

        # Weights have no meaning for splines defined by fit points:
        if len(self.weights):
            auditor.fixed_error(
                code=AuditError.INVALID_SPLINE_WEIGHT_COUNT,
                message=f"Removed unused weights for {name} "
                f"defined by fit points.",
            )
            self.weights = []

    def ocs(self) -> OCS:
        # WCS entity which supports the "extrusion" attribute in a
        # different way!
        return OCS()
| mit | 07927fe39cc8ace1766e704c167cd076 | 35.319876 | 163 | 0.595212 | 3.787241 | false | false | false | false |
mozman/ezdxf | docs/source/tutorials/src/ucs/polyline3d.py | 1 | 1287 | # Copyright (c) 2020 Manfred Moitzi
# License: MIT License
from pathlib import Path

# All example output goes to the user's desktop outbox folder.
OUT_DIR = Path('~/Desktop/Outbox').expanduser()

import math
import ezdxf
from ezdxf.math import UCS

doc = ezdxf.new('R2010')
msp = doc.modelspace()
# using an UCS simplifies 3D operations, but UCS definition can happen later
# calculating corner points in local (UCS) coordinates without Vec3 class
# (a regular pentagon on the unit circle):
angle = math.radians(360 / 5)
corners_ucs = [(math.cos(angle * n), math.sin(angle * n), 0) for n in range(5)]
# let's do some transformations by UCS
transformation_ucs = UCS().rotate_local_z(math.radians(15))  # 1. rotation around z-axis
transformation_ucs.shift((0, .333, .333))  # 2. translation (inplace)
corners_ucs = list(transformation_ucs.points_to_wcs(corners_ucs))
# target location/orientation of the final geometry:
location_ucs = UCS(origin=(0, 2, 2)).rotate_local_x(math.radians(-45))
msp.add_polyline3d(
    points=corners_ucs,
    close=True,
    dxfattribs={
        'color': 1,
    }
).transform(location_ucs.matrix)
# Add lines from the center of the POLYLINE to the corners
center_ucs = transformation_ucs.to_wcs((0, 0, 0))
for corner in corners_ucs:
    msp.add_line(
        center_ucs, corner, dxfattribs={'color': 1}
    ).transform(location_ucs.matrix)
# draw the UCS axis for visual reference:
location_ucs.render_axis(msp)
doc.saveas(OUT_DIR / 'ucs_polyline3d.dxf')
| mit | a580eb5a806b930dc45a840b399fc97f | 31.175 | 88 | 0.709402 | 3.071599 | false | false | false | false |
mozman/ezdxf | examples/addons/table_painter_addon.py | 1 | 7367 | # Copyright (c) 2010-2022, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import TYPE_CHECKING
import pathlib
import ezdxf
from ezdxf.enums import TextEntityAlignment, MTextEntityAlignment
from ezdxf.addons import TablePainter
if TYPE_CHECKING:
from ezdxf.document import Drawing
from ezdxf.layouts import BlockLayout
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# This add-ons shows how to draw tables by the TablePainter add-on. It's important to
# understand that this table rendering is build up only by DXF primitives which
# are supported by DXF R12.
#
# Features:
# - Text Cell: multiline text build up by TEXT entities (not MTEXT!)
# - Block Cell: block references with attributes
# - cells can span over multiples columns and/or rows
# - individual borderlines styles with render priority
# - background filling by SOLID entities
#
# Limitations:
# - uses the MText add-on to create multiline text out of TEXT entities
# - no automatically text wrapping at border cells
# - no clipping at cell borders
#
# The creation of ACAD_TABLE entities is not supported by ezdxf and probably
# will never be because of the complexity and a lack of usable documentation !
# ------------------------------------------------------------------------------
def get_mat_symbol(doc: Drawing) -> BlockLayout:
    """Create the block definition "matsymbol": an octagonal material
    symbol with a centered ATTDEF "num" for the material number.
    """
    symbol = doc.blocks.new("matsymbol")
    # octagon corner coordinates, counter-clockwise:
    p1 = 0.5
    p2 = 0.25
    points = [
        (p1, p2),
        (p2, p1),
        (-p2, p1),
        (-p1, p2),
        (-p1, -p2),
        (-p2, -p1),
        (p2, -p1),
        (p1, -p2),
    ]
    # should run with DXF R12, do not use add_lwpolyline()
    symbol.add_polyline2d(
        points,
        close=True,
        dxfattribs={
            "color": 2,
        },
    )
    # attribute definition, filled in per block reference via attribs={}:
    symbol.add_attdef(
        tag="num",
        text="0",
        dxfattribs={
            "height": 0.7,
            "color": 1,
        },
    ).set_align_enum(TextEntityAlignment.MIDDLE)
    return symbol
def table_tutorial():
    """Render a small 4x4 demo table with styled header cells and a red
    frame around the header row, and save it as "table_tutorial.dxf".
    """
    doc = ezdxf.new("R2000")  # required for lineweight support
    doc.header["$LWDISPLAY"] = 1  # show lineweights
    doc.styles.add("HEAD", font="OpenSans-ExtraBold.ttf")
    doc.styles.add("CELL", font="OpenSans-Regular.ttf")

    table = TablePainter(
        insert=(0, 0), nrows=4, ncols=4, cell_width=6.0, cell_height=2.0
    )
    table.new_cell_style(
        "head",
        text_style="HEAD",
        text_color=ezdxf.colors.BLUE,
        char_height=0.7,
        bg_color=ezdxf.colors.LIGHT_GRAY,
        align=MTextEntityAlignment.MIDDLE_CENTER,
    )
    # reset default cell style
    default_style = table.get_cell_style("default")
    default_style.text_style = "CELL"
    default_style.char_height = 0.5
    default_style.align = MTextEntityAlignment.BOTTOM_LEFT
    # set header cells
    for col in range(4):
        table.text_cell(0, col, f"Head[{col}]", style="head")
    # set content cell
    for row in range(1, 4):
        for col in range(4):
            # cell style is "default"
            table.text_cell(row, col, f"Cell[{row}, {col}]")
    # draw a red frame around the header
    red_frame = table.new_cell_style("red-frame")
    red_borderline = table.new_border_style(color=ezdxf.colors.RED, lineweight=35)
    # set the red borderline style for all cell borders
    red_frame.set_border_style(red_borderline)
    # create the frame object
    table.frame(0, 0, 4, style="red-frame")
    # render the table, shifting the left-bottom of the table to the origin:
    table.render(doc.modelspace(), insert=(0, table.table_height))

    # zoom the modelspace viewport onto the rendered table:
    th = table.table_height
    tw = table.table_width
    doc.set_modelspace_vport(height=th * 1.5, center=(tw/2, th/2))
    filepath = CWD / "table_tutorial.dxf"
    doc.saveas(filepath)
def main():
    """Render a 20x10 demo table three times (modelspace twice, plus once
    through an anonymous block reference), demonstrating cell styles,
    spans, frames, block cells and stacked text; saves "table_example2.dxf".
    """
    doc = ezdxf.new("R12")
    msp = doc.modelspace()
    table = TablePainter(insert=(0, 0), nrows=20, ncols=10)
    # create a new styles
    ctext = table.new_cell_style(
        name="ctext",
        text_color=7,
        char_height=0.5,
        align=MTextEntityAlignment.MIDDLE_CENTER,
    )
    # modify border settings
    border = table.new_border_style(color=6, linetype="DOT", priority=51)
    ctext.set_border_style(border, right=False)

    table.new_cell_style(
        name="vtext",
        text_color=3,
        char_height=0.3,
        align=MTextEntityAlignment.MIDDLE_CENTER,
        rotation=90,  # vertical written
        bg_color=8,
    )
    # set column width, first column has index 0
    table.set_col_width(1, 7)
    # set row height, first row has index 0
    table.set_row_height(1, 7)
    # create a text cell with the default style
    cell1 = table.text_cell(0, 0, "Zeile1\nZeile2", style="ctext")
    # cell spans over 2 rows and 2 cols
    cell1.span = (2, 2)
    table.text_cell(4, 0, "VERTICAL\nTEXT", style="vtext", span=(4, 1))
    # create frames
    table.frame(0, 0, 10, 2, "framestyle")
    # the style can be defined later because it is referenced by the name
    x_border = table.new_border_style(color=4)
    y_border = table.new_border_style(color=17)
    table.new_cell_style(
        name="framestyle",
        left=x_border,
        right=x_border,
        top=y_border,
        bottom=y_border,
    )
    mat_symbol = get_mat_symbol(doc)
    table.new_cell_style(
        name="matsym",
        align=MTextEntityAlignment.MIDDLE_CENTER,
        scale_x=0.6,
        scale_y=0.6,
    )
    # 1st TablePainter rendering
    # Render the table to a layout: the modelspace, a paperspace layout or a
    # block definition.
    table.render(msp, insert=(40, 20))

    # It's not necessary to copy a table for multiple renderings but changes to the
    # table do not affect previous renderings.
    table.new_cell_style(
        name="57deg",
        text_color=2,
        char_height=0.5,
        rotation=57,
        align=MTextEntityAlignment.MIDDLE_CENTER,
        bg_color=123,
    )
    table.text_cell(
        6, 3, "line one\nline two\nand line three", span=(3, 3), style="57deg"
    )
    # 2nd TablePainter rendering
    # create an anonymous block
    block = doc.blocks.new_anonymous_block()
    # Render the table into the block layout at insert location (0, 0):
    table.render(block, insert=(0, 0))
    # add a block reference to the modelspace at location (80, 20)
    msp.add_blockref(block.name, insert=(80, 20))

    # Stacked text: letters are stacked top-to-bottom, but not rotated
    table.new_cell_style(
        name="stacked",
        text_color=6,
        char_height=0.25,
        align=MTextEntityAlignment.MIDDLE_CENTER,
        stacked=True,
    )
    # NOTE(review): this overwrites cell (6, 3) assigned above — appears
    # intentional for the demo, the 3rd rendering shows the new content.
    table.text_cell(6, 3, "STACKED FIELD", span=(7, 1), style="stacked")
    # block cells: insert the material symbol with attribute "num" set:
    for pos in [3, 4, 5, 6]:
        table.block_cell(
            pos, 1, mat_symbol, attribs={"num": pos}, style="matsym"
        )
    # 3rd TablePainter rendering
    # Render table to a layout: the modelspace, a paperspace layout or a block
    # definition.
    table.render(msp, insert=(0, 0))
    doc.set_modelspace_vport(height=70, center=(50, 0))

    filepath = CWD / "table_example2.dxf"
    doc.saveas(filepath)
    print(f"drawing '{filepath}' created.")
if __name__ == "__main__":
    # run both demos when executed as a script:
    table_tutorial()
    main()
| mit | d4969d23292a2af2ae034f11caeb2b1d | 29.442149 | 85 | 0.61694 | 3.394931 | false | false | false | false |
mozman/ezdxf | tests/test_00_dxf_low_level_structs/test_053_embedded_object_tags.py | 1 | 2167 | # Copyright (c) 2018-2019 Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.lldxf.extendedtags import ExtendedTags
from ezdxf.lldxf.types import is_embedded_object_marker
from ezdxf.entities.mtext import MText
MTEXT = r"""0
MTEXT
5
278
330
1F
100
AcDbEntity
8
0
100
AcDbMText
10
2762.147
20
2327.073
30
0.0
40
2.5
41
18.851
46
0.0
71
1
72
5
1
{\fArial|b0|i0|c162|p34;CHANGE;\P\P\PTEXT}
73
1
44
1.0
101
Embedded Object
70
1
10
1.0
20
0.0
30
0.0
11
2762.147
21
2327.073
31
0.0
40
18.851
41
0.0
42
15.428
43
15.042
71
2
72
1
44
18.851
45
12.5
73
0
74
0
46
0.0
"""
@pytest.fixture
def mtext_tags():
return ExtendedTags.from_text(MTEXT)
def test_parse_embedded_object(mtext_tags):
tags = mtext_tags
assert tags.embedded_objects is not None
assert len(tags.embedded_objects) == 1
def test_embedded_object_structure(mtext_tags):
emb_obj = mtext_tags.embedded_objects[0]
assert is_embedded_object_marker(emb_obj[0])
assert len(emb_obj) == 15
assert emb_obj[-1] == (46, 0.0)
def test_mtext_structure(mtext_tags):
assert len(mtext_tags.subclasses[2]) == 10
mtext = MText.from_text(MTEXT)
assert mtext.dxf.handle == "278"
assert mtext.dxf.line_spacing_factor == 1.0
def test_mtext_set_text():
mtext = MText.from_text(MTEXT)
mtext.text = "Hello?"
assert mtext.text == "Hello?"
assert mtext.dxf.line_spacing_factor == 1.0
@pytest.fixture
def two_embedded_objects():
return ExtendedTags.from_text(
"""0
TEST
5
FFFF
101
Embedded Object
1
Text
101
Embedded Object
2
Text2
"""
)
def test_two_embedded_objects(two_embedded_objects):
tags = two_embedded_objects
assert len(tags.embedded_objects) == 2
emb_obj = tags.embedded_objects[0]
assert is_embedded_object_marker(emb_obj[0])
assert emb_obj[1] == (1, "Text")
emb_obj = tags.embedded_objects[1]
assert is_embedded_object_marker(emb_obj[0])
assert emb_obj[1] == (2, "Text2")
def test_iter_tags(two_embedded_objects):
tags = two_embedded_objects
flat_tags = list(tags)
assert len(flat_tags) == 6
assert flat_tags[0] == (0, "TEST")
assert flat_tags[-1] == (2, "Text2")
| mit | a47c90eb66354b9e76e89dea1039c2df | 13.071429 | 55 | 0.679742 | 2.601441 | false | true | false | false |
mozman/ezdxf | examples/addons/text_string_to_path.py | 1 | 1337 | # Copyright (c) 2021-2022, Manfred Moitzi
# License: MIT License
import pathlib
import ezdxf
from ezdxf import path, zoom
from ezdxf.tools import fonts
from ezdxf.addons import text2path
from ezdxf.enums import TextEntityAlignment
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# This example shows how to convert a text-string to outline paths.
#
# docs: https://ezdxf.mozman.at/docs/addons/text2path.html
# ------------------------------------------------------------------------------
def main():
doc = ezdxf.new()
doc.layers.new("OUTLINE")
doc.layers.new("FILLING")
msp = doc.modelspace()
attr = {"layer": "OUTLINE", "color": 1}
ff = fonts.FontFace(family="Noto Sans SC")
s = "Noto Sans SC 0123456789 %@ 中国文字"
align = TextEntityAlignment.LEFT
path.render_splines_and_polylines(
msp, text2path.make_paths_from_str(s, ff, align=align), dxfattribs=attr
)
attr["layer"] = "FILLING"
attr["color"] = 2
for hatch in text2path.make_hatches_from_str(
s, ff, align=align, dxfattribs=attr
):
msp.add_entity(hatch)
zoom.extents(msp)
doc.saveas(CWD / "text2path.dxf")
if __name__ == "__main__":
main()
| mit | 67d7b1d951d7405a902cf6b166cafe93 | 27.276596 | 80 | 0.58164 | 3.289604 | false | false | false | false |
mozman/ezdxf | src/ezdxf/tools/fonts.py | 1 | 19076 | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
"""
This module manages a backend agnostic font database.
Weight Values: https://developer.mozilla.org/de/docs/Web/CSS/font-weight
Supported by matplotlib, pyqt, SVG
=========== =====
Thin 100
Hairline 100
ExtraLight 200
UltraLight 200
Light 300
Normal 400
Medium 500
DemiBold 600
SemiBold 600
Bold 700
ExtraBold 800
UltraBold 800
Black 900
Heavy 900
ExtraBlack 950
UltraBlack 950
=========== =====
Stretch Values: https://developer.mozilla.org/en-US/docs/Web/CSS/font-stretch
Supported by matplotlib, SVG
=============== ======
ultra-condensed 50%
extra-condensed 62.5%
condensed 75%
semi-condensed 87.5%
normal 100%
semi-expanded 112.5%
expanded 125%
extra-expanded 150%
ultra-expanded 200%
=============== ======
"""
from __future__ import annotations
from typing import Dict, Optional, NamedTuple, TYPE_CHECKING, cast
import abc
import logging
from pathlib import Path
import json
from ezdxf import options
from ezdxf.lldxf import const
if TYPE_CHECKING:
from ezdxf.document import Drawing
from ezdxf.entities import DXFEntity, Textstyle
FONT_FACE_CACHE_FILE = "font_face_cache.json"
FONT_MEASUREMENT_CACHE_FILE = "font_measurement_cache.json"
logger = logging.getLogger("ezdxf")
class FontFace(NamedTuple):
# This would be the matplotlib FontProperties class, if matplotlib would
# be a core dependency!
ttf: str = ""
family: str = "sans-serif"
style: str = "normal"
stretch: str = "normal"
weight: str = "normal"
@property
def is_italic(self) -> bool:
return self.style.find("italic") > -1
@property
def is_oblique(self) -> bool:
return self.style.find("oblique") > -1
@property
def is_bold(self) -> bool:
weight = self.weight
if isinstance(weight, str):
weight = weight_name_to_value(weight) # type: ignore
return weight > 400 # type: ignore
# Key is TTF font file name without path in lowercase like "arial.ttf":
font_face_cache: dict[str, FontFace] = dict()
font_measurement_cache: dict[str, FontMeasurements] = dict()
WEIGHT_TO_VALUE = {
"thin": 100,
"hairline": 100,
"extralight": 200,
"UltraLight": 200,
"light": 300,
"normal": 400,
"medium": 500,
"demibold": 600,
"semibold": 600,
"bold": 700,
"extrabold": 800,
"ultrabold": 800,
"black": 900,
"heavy": 900,
"extrablack": 950,
"ultrablack": 950,
}
SHX_FONTS = {
# See examples in: CADKitSamples/Shapefont.dxf
# Shape file structure is not documented, therefore replace this fonts by
# true type fonts.
# `None` is for: use the default font.
#
# All these replacement TTF fonts have a copyright remark:
# "(c) Copyright 1996 by Autodesk Inc., All rights reserved"
# and therefore can not be included in ezdxf or the associated repository!
# You got them if you install any Autodesk product, like the free available
# DWG/DXF viewer "TrueView" : https://www.autodesk.com/viewers
"AMGDT": "amgdt___.ttf", # Tolerance symbols
"AMGDT.SHX": "amgdt___.ttf",
"COMPLEX": "complex_.ttf",
"COMPLEX.SHX": "complex_.ttf",
"ISOCP": "isocp.ttf",
"ISOCP.SHX": "isocp.ttf",
"ITALIC": "italicc_.ttf",
"ITALIC.SHX": "italicc_.ttf",
"GOTHICG": "gothicg_.ttf",
"GOTHICG.SHX": "gothicg_.ttf",
"GREEKC": "greekc.ttf",
"GREEKC.SHX": "greekc.ttf",
"ROMANS": "romans__.ttf",
"ROMANS.SHX": "romans__.ttf",
"SCRIPTS": "scripts_.ttf",
"SCRIPTS.SHX": "scripts_.ttf",
"SCRIPTC": "scriptc_.ttf",
"SCRIPTC.SHX": "scriptc_.ttf",
"SIMPLEX": "simplex_.ttf",
"SIMPLEX.SHX": "simplex_.ttf",
"SYMATH": "symath__.ttf",
"SYMATH.SHX": "symath__.ttf",
"SYMAP": "symap___.ttf",
"SYMAP.SHX": "symap___.ttf",
"SYMETEO": "symeteo_.ttf",
"SYMETEO.SHX": "symeteo_.ttf",
"TXT": "txt_____.ttf", # Default AutoCAD font
"TXT.SHX": "txt_____.ttf",
}
TTF_TO_SHX = {v: k for k, v in SHX_FONTS.items() if k.endswith("SHX")}
DESCENDER_FACTOR = 0.333 # from TXT SHX font - just guessing
X_HEIGHT_FACTOR = 0.666 # from TXT SHX font - just guessing
def map_shx_to_ttf(font_name: str) -> str:
"""Map SHX font names to TTF file names. e.g. "TXT" -> "txt_____.ttf" """
# Map SHX fonts to True Type Fonts:
font_upper = font_name.upper()
if font_upper in SHX_FONTS:
font_name = SHX_FONTS[font_upper]
return font_name
def map_ttf_to_shx(ttf: str) -> Optional[str]:
"""Map TTF file names to SHX font names. e.g. "txt_____.ttf" -> "TXT" """
return TTF_TO_SHX.get(ttf.lower())
def weight_name_to_value(name: str) -> int:
"""Map weight names to values. e.g. 'normal' -> 400"""
return WEIGHT_TO_VALUE.get(name.lower(), 400)
def cache_key(name: str) -> str:
"""Returns the normalized TTF file name in lower case without preceding
folders. e.g. "C:\\Windows\\Fonts\\Arial.TTF" -> "arial.ttf"
"""
return Path(name).name.lower()
def build_system_font_cache(*, path=None, rebuild=True) -> None:
"""Build system font cache and save it to directory `path` if given.
Set `rebuild` to ``False`` to just add new fonts.
Requires the Matplotlib package!
A rebuild has to be done only after a new ezdxf installation, or new fonts
were added to your system (which you want to use), or an update of ezdxf if
you don't use your own external font cache directory.
See also: :attr:`ezdxf.options.font_cache_directory`
"""
try:
from ._matplotlib_font_support import (
load_system_fonts,
build_font_measurement_cache,
remove_fonts_without_measurement,
reset_font_manager,
)
except ImportError:
logger.debug("This function requires the optional Matplotlib package.")
return
global font_face_cache, font_measurement_cache
if rebuild:
reset_font_manager()
cache = load_system_fonts()
if rebuild:
font_face_cache = cache
else:
font_face_cache.update(cache)
if rebuild:
font_measurement_cache = dict()
# else update existing measurement cache:
font_measurement_cache = build_font_measurement_cache(
font_face_cache, font_measurement_cache
)
# Fonts without a measurement can not be processed and should be replaced
# by a default font:
remove_fonts_without_measurement(font_face_cache, font_measurement_cache)
# save caches on default location defined by option.font_cache_directory:
save(path)
def find_font_face(ttf_path: Optional[str]) -> Optional[FontFace]:
"""Get cached font face definition by TTF file name e.g. "Arial.ttf",
returns ``None`` if not found.
"""
if ttf_path:
return font_face_cache.get(cache_key(ttf_path))
else:
return None
def get_font_face(ttf_path: str, map_shx=True) -> FontFace:
"""Get cached font face definition by TTF file name e.g. "Arial.ttf".
This function translates a DXF font definition by
the raw TTF font file name into a :class:`FontFace` object. Fonts which are
not available on the current system gets a default font face.
Args:
ttf_path: raw font file name as stored in the
:class:`~ezdxf.entities.Textstyle` entity
map_shx: maps SHX font names to TTF replacement fonts,
e.g. "TXT" -> "txt_____.ttf"
"""
if not isinstance(ttf_path, str):
raise TypeError("ttf_path has invalid type")
if map_shx:
ttf_path = map_shx_to_ttf(ttf_path)
font = find_font_face(ttf_path)
if font is None:
# Create a pseudo entry:
name = cache_key(ttf_path)
return FontFace(
name,
Path(ttf_path).stem,
"normal",
"normal",
"normal",
)
else:
return font
def get_font_measurements(ttf_path: str, map_shx=True) -> FontMeasurements:
"""Get cached font measurements by TTF file name e.g. "Arial.ttf".
Args:
ttf_path: raw font file name as stored in the
:class:`~ezdxf.entities.Textstyle` entity
map_shx: maps SHX font names to TTF replacement fonts,
e.g. "TXT" -> "txt_____.ttf"
"""
# TODO: is using freetype-py the better solution?
if map_shx:
ttf_path = map_shx_to_ttf(ttf_path)
m = font_measurement_cache.get(cache_key(ttf_path))
if m is None:
m = FontMeasurements(
baseline=0,
cap_height=1,
x_height=X_HEIGHT_FACTOR,
descender_height=DESCENDER_FACTOR,
)
return m
def find_font_face_by_family(
family: str, italic=False, bold=False
) -> Optional[FontFace]:
# TODO: find best match
# additional attributes "italic" and "bold" are ignored yet
key = family.lower()
for f in font_face_cache.values():
if key == f.family.lower():
return f
return None
def find_ttf_path(font_face: FontFace, default=const.DEFAULT_TTF) -> str:
"""Returns the true type font path."""
if options.use_matplotlib:
from ._matplotlib_font_support import find_filename
path = find_filename(
family=font_face.family,
style=font_face.style,
stretch=font_face.stretch,
weight=font_face.weight,
)
return path.name
else:
font_face = find_font_face_by_family( # type: ignore
font_face.family,
italic=font_face.is_italic,
bold=font_face.is_bold,
)
return default if font_face is None else font_face.ttf
def get_cache_file_path(path, name: str = FONT_FACE_CACHE_FILE) -> Path:
"""Build path to cache files."""
if path is None and options.font_cache_directory:
directory = options.font_cache_directory.strip('"')
path = Path(directory).expanduser()
path.mkdir(exist_ok=True)
path = Path(path) if path else Path(__file__).parent
return path.expanduser() / name
def load(path=None, reload=False):
"""Load all caches from given `path` or from default location, defined by
:attr:`ezdxf.options.font_cache_directory` or the default cache from
the ``ezdxf.tools`` folder.
This function is called automatically at startup if not disabled by
environment variable ``EZDXF_AUTO_LOAD_FONTS``.
"""
global font_face_cache, font_measurement_cache
if len(font_face_cache) and reload is False:
return # skip if called multiple times:
p = get_cache_file_path(path, FONT_FACE_CACHE_FILE)
if p.exists():
font_face_cache = _load_font_faces(p)
p = get_cache_file_path(path, FONT_MEASUREMENT_CACHE_FILE)
if p.exists():
font_measurement_cache = _load_measurement_cache(p)
def _load_font_faces(path) -> Dict:
"""Load font face cache."""
with open(path, "rt") as fp:
data = json.load(fp)
cache = dict()
if data:
for entry in data:
key = entry[0]
cache[key] = FontFace(*entry)
return cache
def _load_measurement_cache(path) -> Dict:
"""Load font measurement cache."""
with open(path, "rt") as fp:
data = json.load(fp)
cache = dict()
if data:
for entry in data:
key = entry[0]
cache[key] = FontMeasurements(*entry[1])
return cache
def save(path=None):
"""Save all caches to given `path` or to default location, defined by
options.font_cache_directory or into the ezdxf.tools folder.
"""
if path:
Path(path).expanduser().mkdir(parents=True, exist_ok=True)
p = get_cache_file_path(path, FONT_FACE_CACHE_FILE)
with open(p, "wt") as fp:
json.dump(list(font_face_cache.values()), fp, indent=2)
p = get_cache_file_path(path, FONT_MEASUREMENT_CACHE_FILE)
with open(p, "wt") as fp:
json.dump(list(font_measurement_cache.items()), fp, indent=2)
# A Visual Guide to the Anatomy of Typography: https://visme.co/blog/type-anatomy/
# Anatomy of a Character: https://www.fonts.com/content/learning/fontology/level-1/type-anatomy/anatomy
class FontMeasurements(NamedTuple):
baseline: float
cap_height: float
x_height: float
descender_height: float
def scale(self, factor: float = 1.0) -> FontMeasurements:
return FontMeasurements(
self.baseline * factor,
self.cap_height * factor,
self.x_height * factor,
self.descender_height * factor,
)
def shift(self, distance: float = 0.0) -> FontMeasurements:
return FontMeasurements(
self.baseline + distance,
self.cap_height,
self.x_height,
self.descender_height,
)
def scale_from_baseline(
self, desired_cap_height: float
) -> FontMeasurements:
factor = desired_cap_height / self.cap_height
return FontMeasurements(
self.baseline,
desired_cap_height,
self.x_height * factor,
self.descender_height * factor,
)
@property
def cap_top(self) -> float:
return self.baseline + self.cap_height
@property
def x_top(self) -> float:
return self.baseline + self.x_height
@property
def bottom(self) -> float:
return self.baseline - self.descender_height
@property
def total_height(self) -> float:
return self.cap_height + self.descender_height
class AbstractFont:
"""The `ezdxf` font abstraction."""
def __init__(self, measurements: FontMeasurements):
self.measurements = measurements
@abc.abstractmethod
def text_width(self, text: str) -> float:
pass
@abc.abstractmethod
def space_width(self) -> float:
pass
class MatplotlibFont(AbstractFont):
"""This class provides proper font measurement support by using the optional
Matplotlib font support.
Use the :func:`make_font` factory function to create a font abstraction.
"""
def __init__(
self, ttf_path: str, cap_height: float = 1.0, width_factor: float = 1.0
):
from . import _matplotlib_font_support
self._support_lib = _matplotlib_font_support
# unscaled font measurement:
font_measurements = get_font_measurements(ttf_path)
super().__init__(font_measurements.scale_from_baseline(cap_height))
font_face = get_font_face(ttf_path)
scale = cap_height / font_measurements.cap_height
self._font_properties = self._support_lib.get_font_properties(font_face)
self._width_factor = width_factor * scale
self._space_width = self.text_width(" X") - self.text_width("X")
def text_width(self, text: str) -> float:
"""Returns the text width in drawing units for the given `text` string.
Text rendering and width calculation is done by the Matplotlib
:class:`TextPath` class.
"""
if not text.strip():
return 0
try:
path = self._support_lib.get_text_path(text, self._font_properties)
return max(path.vertices[:, 0].tolist()) * self._width_factor
except Exception as e:
logger.error(f"Matplotlib RuntimeError: {str(e)}")
return 0
def space_width(self) -> float:
"""Returns the width of a "space" char."""
return self._space_width
class MonospaceFont(AbstractFont):
"""Defines a monospaced font without knowing the real font properties.
Each letter has the same cap- and descender height and the same width.
This font abstraction is used if no Matplotlib font support is available.
Use the :func:`make_font` factory function to create a font abstraction.
"""
def __init__(
self,
cap_height: float,
width_factor: float = 1.0,
baseline: float = 0,
descender_factor: float = DESCENDER_FACTOR,
x_height_factor: float = X_HEIGHT_FACTOR,
):
super().__init__(
FontMeasurements(
baseline=baseline,
cap_height=cap_height,
x_height=cap_height * x_height_factor,
descender_height=cap_height * descender_factor,
)
)
self._width_factor: float = abs(width_factor)
self._space_width = self.measurements.cap_height * self._width_factor
def text_width(self, text: str) -> float:
"""Returns the text width in drawing units for the given `text` based
on a simple monospaced font calculation.
"""
return len(text) * self.measurements.cap_height * self._width_factor
def space_width(self) -> float:
"""Returns the width of a "space" char."""
return self._space_width
def make_font(
ttf_path: str, cap_height: float, width_factor: float = 1.0
) -> AbstractFont:
"""Factory function to create a font abstraction.
Creates a :class:`MatplotlibFont` if the Matplotlib font support is
available and enabled or else a :class:`MonospaceFont`.
Args:
ttf_path: raw font file name as stored in the
:class:`~ezdxf.entities.Textstyle` entity
cap_height: desired cap height in drawing units.
width_factor: horizontal text stretch factor
"""
if options.use_matplotlib:
return MatplotlibFont(ttf_path, cap_height, width_factor)
else:
return MonospaceFont(cap_height, width_factor)
def get_entity_font_face(
entity: DXFEntity, doc: Optional[Drawing] = None
) -> FontFace:
"""Returns the :class:`FontFace` defined by the associated text style.
Returns the default font face if the `entity` does not have or support
the DXF attribute "style". Supports the extended font information stored in
:class:`~ezdxf.entities.Textstyle` table entries.
Pass a DXF document as argument `doc` to resolve text styles for virtual
entities which are not assigned to a DXF document. The argument `doc`
always overrides the DXF document to which the `entity` is assigned to.
"""
if entity.doc and doc is None:
doc = entity.doc
if doc is None:
return FontFace()
style_name = ""
# This works also for entities which do not support "style",
# where style_name = entity.dxf.get("style") would fail.
if entity.dxf.is_supported("style"):
style_name = entity.dxf.style
font_face = FontFace()
if style_name:
style = cast("Textstyle", doc.styles.get(style_name))
family, italic, bold = style.get_extended_font_data()
if family:
text_style = "italic" if italic else "normal"
text_weight = "bold" if bold else "normal"
font_face = FontFace(
family=family, style=text_style, weight=text_weight
)
else:
ttf = style.dxf.font
if ttf:
font_face = get_font_face(ttf)
return font_face
| mit | a1f73cc431611b6b4cab6945e410f071 | 30.220949 | 103 | 0.623768 | 3.561613 | false | false | false | false |
mozman/ezdxf | src/ezdxf/render/point.py | 1 | 2876 | # Copyright (c) 2020-2022, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import cast
import math
from ezdxf.entities import factory, Point, DXFGraphic
from ezdxf.math import Vec3, UCS, NULLVEC
def virtual_entities(
point: Point, pdsize: float = 1, pdmode: int = 0
) -> list[DXFGraphic]:
"""Yields point graphic as DXF primitives LINE and CIRCLE entities.
The dimensionless point is rendered as zero-length line!
Check for this condition::
e.dxftype() == 'LINE' and e.dxf.start.isclose(e.dxf.end)
if the rendering engine can't handle zero-length lines.
Args:
point: DXF POINT entity
pdsize: point size in drawing units
pdmode: point styling mode, see :class:`~ezdxf.entities.Point` class
"""
def add_line_symmetrical(offset: Vec3):
dxfattribs["start"] = ucs.to_wcs(-offset)
dxfattribs["end"] = ucs.to_wcs(offset)
entities.append(cast(DXFGraphic, factory.new("LINE", dxfattribs)))
def add_line(s: Vec3, e: Vec3):
dxfattribs["start"] = ucs.to_wcs(s)
dxfattribs["end"] = ucs.to_wcs(e)
entities.append(cast(DXFGraphic, factory.new("LINE", dxfattribs)))
center = point.dxf.location
# This is not a real OCS! Defines just the point orientation,
# location is in WCS!
ocs = point.ocs()
ucs = UCS(origin=center, ux=ocs.ux, uz=ocs.uz)
# The point angle is clockwise oriented:
ucs = ucs.rotate_local_z(math.radians(-point.dxf.angle))
entities: list[DXFGraphic] = []
gfx = point.graphic_properties()
radius = pdsize * 0.5
has_circle = bool(pdmode & 32)
has_square = bool(pdmode & 64)
style = pdmode & 7
dxfattribs = dict(gfx)
if style == 0: # . dimensionless point as zero-length line
add_line_symmetrical(NULLVEC)
# style == 1: no point symbol
elif style == 2: # + cross
add_line_symmetrical(Vec3(pdsize, 0))
add_line_symmetrical(Vec3(0, pdsize))
elif style == 3: # x cross
add_line_symmetrical(Vec3(pdsize, pdsize))
add_line_symmetrical(Vec3(pdsize, -pdsize))
elif style == 4: # ' tick
add_line(NULLVEC, Vec3(0, radius))
if has_square:
x1 = -radius
x2 = radius
y1 = -radius
y2 = radius
add_line(Vec3(x1, y1), Vec3(x2, y1))
add_line(Vec3(x2, y1), Vec3(x2, y2))
add_line(Vec3(x2, y2), Vec3(x1, y2))
add_line(Vec3(x1, y2), Vec3(x1, y1))
if has_circle:
dxfattribs = dict(gfx)
if point.dxf.hasattr("extrusion"):
dxfattribs["extrusion"] = ocs.uz
dxfattribs["center"] = ocs.from_wcs(center)
else:
dxfattribs["center"] = center
dxfattribs["radius"] = radius
entities.append(cast(DXFGraphic, factory.new("CIRCLE", dxfattribs)))
return entities
| mit | c9cb3d820dbe20bcec4cc296019b988f | 31.681818 | 76 | 0.622392 | 3.177901 | false | false | false | false |
mozman/ezdxf | src/ezdxf/sections/headervars.py | 1 | 58323 | # Copyright (c) 2019-2021, Manfred Moitzi
# License: MIT License
from functools import partial
from ezdxf.lldxf.hdrvars import SingleValue, Point2D, Point3D, HeaderVarDef
from ezdxf.lldxf.const import (
DXF12,
DXF2000,
DXF2004,
DXF2007,
DXF2010,
DXF2013,
DXF2018,
)
CONST_GUID = "{00000000-0000-0000-0000-000000000000}"
HEADER_VAR_MAP = {
"$ACADVER": HeaderVarDef(
name="$ACADVER",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=0,
default="AC1032",
),
"$ACADMAINTVER": HeaderVarDef(
name="$ACADMAINTVER",
# group code changed to 90 in DXF R2018+, this fact is handled in:
# ezdxf.sections.header.HeaderSection.export_dxf()
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=100,
default=4,
),
"$DWGCODEPAGE": HeaderVarDef(
name="$DWGCODEPAGE",
code=3,
factory=partial(SingleValue, code=3),
mindxf=DXF12,
maxdxf=DXF2018,
priority=200,
default="ANSI_1252",
),
"$LASTSAVEDBY": HeaderVarDef(
name="$LASTSAVEDBY",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=300,
default="ezdxf",
),
"$REQUIREDVERSIONS": HeaderVarDef(
name="$REQUIREDVERSIONS",
code=160,
factory=partial(SingleValue, code=160),
mindxf=DXF2013,
maxdxf=DXF2018,
priority=400,
default=0,
),
"$INSBASE": HeaderVarDef(
name="$INSBASE",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=500,
default=(0.0, 0.0, 0.0),
),
"$EXTMIN": HeaderVarDef(
name="$EXTMIN",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=600,
default=(1e20, 1e20, 1e20),
),
"$EXTMAX": HeaderVarDef(
name="$EXTMAX",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=700,
default=(-1e20, -1e20, -1e20),
),
"$LIMMIN": HeaderVarDef(
name="$LIMMIN",
code=10,
factory=Point2D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=800,
default=(0.0, 0.0),
),
"$LIMMAX": HeaderVarDef(
name="$LIMMAX",
code=10,
factory=Point2D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=900,
default=(420.0, 297.0),
),
"$ORTHOMODE": HeaderVarDef(
name="$ORTHOMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1000,
default=0,
),
"$REGENMODE": HeaderVarDef(
name="$REGENMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1100,
default=1,
),
"$FILLMODE": HeaderVarDef(
name="$FILLMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1200,
default=1,
),
"$DRAGMODE": HeaderVarDef(
name="$DRAGMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF12,
priority=1250,
default=2,
),
"$QTEXTMODE": HeaderVarDef(
name="$QTEXTMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1300,
default=0,
),
"$MIRRTEXT": HeaderVarDef(
name="$MIRRTEXT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1400,
default=1,
),
"$OSMODE": HeaderVarDef(
name="$OSMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF12,
priority=1400,
default=20583,
),
"$LTSCALE": HeaderVarDef(
name="$LTSCALE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1500,
default=1.0,
),
"$ATTMODE": HeaderVarDef(
name="$ATTMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1600,
default=1,
),
"$TEXTSIZE": HeaderVarDef(
name="$TEXTSIZE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1700,
default=2.5,
),
"$TRACEWID": HeaderVarDef(
name="$TRACEWID",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1800,
default=1.0,
),
"$TEXTSTYLE": HeaderVarDef(
name="$TEXTSTYLE",
code=7,
factory=partial(SingleValue, code=7),
mindxf=DXF12,
maxdxf=DXF2018,
priority=1900,
default="Standard",
),
"$CLAYER": HeaderVarDef(
name="$CLAYER",
code=8,
factory=partial(SingleValue, code=8),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2000,
default="0",
),
"$CELTYPE": HeaderVarDef(
name="$CELTYPE",
code=6,
factory=partial(SingleValue, code=6),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2100,
default="ByLayer",
),
"$CECOLOR": HeaderVarDef(
name="$CECOLOR",
code=62,
factory=partial(SingleValue, code=62),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2200,
default=256,
),
"$CELTSCALE": HeaderVarDef(
name="$CELTSCALE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=2300,
default=1.0,
),
"$DISPSILH": HeaderVarDef(
name="$DISPSILH",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=2400,
default=0,
),
"$DIMSCALE": HeaderVarDef(
name="$DIMSCALE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2500,
default=1.0,
),
"$DIMASZ": HeaderVarDef(
name="$DIMASZ",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2600,
default=2.5,
),
"$DIMEXO": HeaderVarDef(
name="$DIMEXO",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2700,
default=0.625,
),
"$DIMDLI": HeaderVarDef(
name="$DIMDLI",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2800,
default=3.75,
),
"$DIMRND": HeaderVarDef(
name="$DIMRND",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=2900,
default=0.0,
),
"$DIMDLE": HeaderVarDef(
name="$DIMDLE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3000,
default=0.0,
),
"$DIMEXE": HeaderVarDef(
name="$DIMEXE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3100,
default=1.25,
),
"$DIMTP": HeaderVarDef(
name="$DIMTP",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3200,
default=0.0,
),
"$DIMTM": HeaderVarDef(
name="$DIMTM",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3300,
default=0.0,
),
"$DIMTXT": HeaderVarDef(
name="$DIMTXT",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3400,
default=2.5,
),
"$DIMCEN": HeaderVarDef(
name="$DIMCEN",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3500,
default=2.5,
),
"$DIMTSZ": HeaderVarDef(
name="$DIMTSZ",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3600,
default=0.0,
),
"$DIMTOL": HeaderVarDef(
name="$DIMTOL",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3700,
default=0,
),
"$DIMLIM": HeaderVarDef(
name="$DIMLIM",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3800,
default=0,
),
"$DIMTIH": HeaderVarDef(
name="$DIMTIH",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=3900,
default=0,
),
"$DIMTOH": HeaderVarDef(
name="$DIMTOH",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4000,
default=0,
),
"$DIMSE1": HeaderVarDef(
name="$DIMSE1",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4100,
default=0,
),
"$DIMSE2": HeaderVarDef(
name="$DIMSE2",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4200,
default=0,
),
"$DIMTAD": HeaderVarDef(
name="$DIMTAD",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4300,
default=1,
),
"$DIMZIN": HeaderVarDef(
name="$DIMZIN",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4400,
default=8,
),
"$DIMBLK": HeaderVarDef(
name="$DIMBLK",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4500,
default="",
),
"$DIMASO": HeaderVarDef(
name="$DIMASO",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4600,
default=1,
),
"$DIMSHO": HeaderVarDef(
name="$DIMSHO",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4700,
default=1,
),
"$DIMPOST": HeaderVarDef(
name="$DIMPOST",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4800,
default="",
),
"$DIMAPOST": HeaderVarDef(
name="$DIMAPOST",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=4900,
default="",
),
"$DIMALT": HeaderVarDef(
name="$DIMALT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5000,
default=0,
),
"$DIMALTD": HeaderVarDef(
name="$DIMALTD",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5100,
default=3,
),
"$DIMALTF": HeaderVarDef(
name="$DIMALTF",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5200,
default=0.03937007874,
),
"$DIMLFAC": HeaderVarDef(
name="$DIMLFAC",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5300,
default=1.0,
),
"$DIMTOFL": HeaderVarDef(
name="$DIMTOFL",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5400,
default=1,
),
"$DIMTVP": HeaderVarDef(
name="$DIMTVP",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5500,
default=0.0,
),
"$DIMTIX": HeaderVarDef(
name="$DIMTIX",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5600,
default=0,
),
"$DIMSOXD": HeaderVarDef(
name="$DIMSOXD",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5700,
default=0,
),
"$DIMSAH": HeaderVarDef(
name="$DIMSAH",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5800,
default=0,
),
"$DIMBLK1": HeaderVarDef(
name="$DIMBLK1",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=5900,
default="",
),
"$DIMBLK2": HeaderVarDef(
name="$DIMBLK2",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6000,
default="",
),
"$DIMSTYLE": HeaderVarDef(
name="$DIMSTYLE",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6100,
default="ISO-25",
),
"$DIMCLRD": HeaderVarDef(
name="$DIMCLRD",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6200,
default=0,
),
"$DIMCLRE": HeaderVarDef(
name="$DIMCLRE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6300,
default=0,
),
"$DIMCLRT": HeaderVarDef(
name="$DIMCLRT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6400,
default=0,
),
"$DIMTFAC": HeaderVarDef(
name="$DIMTFAC",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6500,
default=1.0,
),
"$DIMGAP": HeaderVarDef(
name="$DIMGAP",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=6600,
default=0.625,
),
"$DIMJUST": HeaderVarDef(
name="$DIMJUST",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=6700,
default=0,
),
"$DIMSD1": HeaderVarDef(
name="$DIMSD1",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=6800,
default=0,
),
"$DIMSD2": HeaderVarDef(
name="$DIMSD2",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=6900,
default=0,
),
"$DIMTOLJ": HeaderVarDef(
name="$DIMTOLJ",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7000,
default=0,
),
"$DIMTZIN": HeaderVarDef(
name="$DIMTZIN",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7100,
default=8,
),
"$DIMALTZ": HeaderVarDef(
name="$DIMALTZ",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7200,
default=0,
),
"$DIMALTTZ": HeaderVarDef(
name="$DIMALTTZ",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7300,
default=0,
),
"$DIMUPT": HeaderVarDef(
name="$DIMUPT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7400,
default=0,
),
"$DIMDEC": HeaderVarDef(
name="$DIMDEC",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7500,
default=2,
),
"$DIMTDEC": HeaderVarDef(
name="$DIMTDEC",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7600,
default=2,
),
"$DIMALTU": HeaderVarDef(
name="$DIMALTU",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7700,
default=2,
),
"$DIMALTTD": HeaderVarDef(
name="$DIMALTTD",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7800,
default=3,
),
"$DIMTXSTY": HeaderVarDef(
name="$DIMTXSTY",
code=7,
factory=partial(SingleValue, code=7),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=7900,
default="Standard",
),
"$DIMAUNIT": HeaderVarDef(
name="$DIMAUNIT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8000,
default=0,
),
"$DIMADEC": HeaderVarDef(
name="$DIMADEC",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8100,
default=0,
),
"$DIMALTRND": HeaderVarDef(
name="$DIMALTRND",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8200,
default=0.0,
),
"$DIMAZIN": HeaderVarDef(
name="$DIMAZIN",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8300,
default=0,
),
"$DIMDSEP": HeaderVarDef(
name="$DIMDSEP",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8400,
default=44,
),
"$DIMATFIT": HeaderVarDef(
name="$DIMATFIT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8500,
default=3,
),
"$DIMFRAC": HeaderVarDef(
name="$DIMFRAC",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8600,
default=0,
),
"$DIMLDRBLK": HeaderVarDef(
name="$DIMLDRBLK",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8700,
default="",
),
"$DIMLUNIT": HeaderVarDef(
name="$DIMLUNIT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8800,
default=2,
),
"$COORDS": HeaderVarDef(
name="$COORDS",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF12,
priority=8800,
default=1,
),
"$DIMLWD": HeaderVarDef(
name="$DIMLWD",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=8900,
default=-2,
),
"$DIMLWE": HeaderVarDef(
name="$DIMLWE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=9000,
default=-2,
),
"$DIMTMOVE": HeaderVarDef(
name="$DIMTMOVE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=9100,
default=0,
),
"$DIMFXL": HeaderVarDef(
name="$DIMFXL",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9200,
default=1.0,
),
"$ATTDIA": HeaderVarDef(
name="$ATTDIA",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF12,
priority=9200,
default=0,
),
"$DIMFXLON": HeaderVarDef(
name="$DIMFXLON",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9300,
default=0,
),
"$ATTREQ": HeaderVarDef(
name="$ATTREQ",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF12,
priority=9300,
default=1,
),
"$DIMJOGANG": HeaderVarDef(
name="$DIMJOGANG",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9400,
default=0.785398163397,
),
"$DIMTFILL": HeaderVarDef(
name="$DIMTFILL",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9500,
default=0,
),
"$DIMTFILLCLR": HeaderVarDef(
name="$DIMTFILLCLR",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9600,
default=0,
),
"$DIMARCSYM": HeaderVarDef(
name="$DIMARCSYM",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9700,
default=0,
),
"$DIMLTYPE": HeaderVarDef(
name="$DIMLTYPE",
code=6,
factory=partial(SingleValue, code=6),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9800,
default="",
),
"$DIMLTEX1": HeaderVarDef(
name="$DIMLTEX1",
code=6,
factory=partial(SingleValue, code=6),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=9900,
default="",
),
"$DIMLTEX2": HeaderVarDef(
name="$DIMLTEX2",
code=6,
factory=partial(SingleValue, code=6),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=10000,
default="",
),
"$DIMTXTDIRECTION": HeaderVarDef(
name="$DIMTXTDIRECTION",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2010,
maxdxf=DXF2018,
priority=10100,
default=0,
),
"$LUNITS": HeaderVarDef(
name="$LUNITS",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10200,
default=2,
),
"$LUPREC": HeaderVarDef(
name="$LUPREC",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10300,
default=4,
),
"$SKETCHINC": HeaderVarDef(
name="$SKETCHINC",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10400,
default=1.0,
),
"$FILLETRAD": HeaderVarDef(
name="$FILLETRAD",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10500,
default=10.0,
),
"$AUNITS": HeaderVarDef(
name="$AUNITS",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10600,
default=0,
),
"$AUPREC": HeaderVarDef(
name="$AUPREC",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10700,
default=2,
),
"$MENU": HeaderVarDef(
name="$MENU",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10800,
default=".",
),
"$ELEVATION": HeaderVarDef(
name="$ELEVATION",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=10900,
default=0.0,
),
"$PELEVATION": HeaderVarDef(
name="$PELEVATION",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11000,
default=0.0,
),
"$THICKNESS": HeaderVarDef(
name="$THICKNESS",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11100,
default=0.0,
),
"$LIMCHECK": HeaderVarDef(
name="$LIMCHECK",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11200,
default=0,
),
"$CHAMFERA": HeaderVarDef(
name="$CHAMFERA",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11300,
default=0.0,
),
"$CHAMFERB": HeaderVarDef(
name="$CHAMFERB",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11400,
default=0.0,
),
"$CHAMFERC": HeaderVarDef(
name="$CHAMFERC",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=11500,
default=0.0,
),
"$CHAMFERD": HeaderVarDef(
name="$CHAMFERD",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=11600,
default=0.0,
),
"$SKPOLY": HeaderVarDef(
name="$SKPOLY",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11700,
default=0,
),
"$TDCREATE": HeaderVarDef(
name="$TDCREATE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=11800,
default=2458532.195663565,
),
"$TDUCREATE": HeaderVarDef(
name="$TDUCREATE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=11900,
default=2458532.153996898,
),
"$TDUPDATE": HeaderVarDef(
name="$TDUPDATE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12000,
default=2458532.196097766,
),
"$TDUUPDATE": HeaderVarDef(
name="$TDUUPDATE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=12100,
default=2458532.1544311,
),
"$TDINDWG": HeaderVarDef(
name="$TDINDWG",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12200,
default=0.0,
),
"$TDUSRTIMER": HeaderVarDef(
name="$TDUSRTIMER",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12300,
default=0.0,
),
"$USRTIMER": HeaderVarDef(
name="$USRTIMER",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12400,
default=1,
),
"$ANGBASE": HeaderVarDef(
name="$ANGBASE",
code=50,
factory=partial(SingleValue, code=50),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12500,
default=0.0,
),
"$ANGDIR": HeaderVarDef(
name="$ANGDIR",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12600,
default=0,
),
"$PDMODE": HeaderVarDef(
name="$PDMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12700,
default=0,
),
"$PDSIZE": HeaderVarDef(
name="$PDSIZE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12800,
default=0.0,
),
"$PLINEWID": HeaderVarDef(
name="$PLINEWID",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=12900,
default=0.0,
),
"$SPLFRAME": HeaderVarDef(
name="$SPLFRAME",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13000,
default=0,
),
"$SPLINETYPE": HeaderVarDef(
name="$SPLINETYPE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13100,
default=6,
),
"$SPLINESEGS": HeaderVarDef(
name="$SPLINESEGS",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13200,
default=8,
),
"$HANDLING": HeaderVarDef(
name="$HANDLING",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF12,
priority=9400,
default=1,
),
"$HANDSEED": HeaderVarDef(
name="$HANDSEED",
code=5,
factory=partial(SingleValue, code=5),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13300,
default="100",
),
"$SURFTAB1": HeaderVarDef(
name="$SURFTAB1",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13400,
default=6,
),
"$SURFTAB2": HeaderVarDef(
name="$SURFTAB2",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13500,
default=6,
),
"$SURFTYPE": HeaderVarDef(
name="$SURFTYPE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13600,
default=6,
),
"$SURFU": HeaderVarDef(
name="$SURFU",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13700,
default=6,
),
"$SURFV": HeaderVarDef(
name="$SURFV",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=13800,
default=6,
),
"$UCSBASE": HeaderVarDef(
name="$UCSBASE",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=13900,
default="",
),
"$UCSNAME": HeaderVarDef(
name="$UCSNAME",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF12,
maxdxf=DXF2018,
priority=14000,
default="",
),
"$UCSORG": HeaderVarDef(
name="$UCSORG",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=14100,
default=(0.0, 0.0, 0.0),
),
"$UCSXDIR": HeaderVarDef(
name="$UCSXDIR",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=14200,
default=(1.0, 0.0, 0.0),
),
"$UCSYDIR": HeaderVarDef(
name="$UCSYDIR",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=14300,
default=(0.0, 1.0, 0.0),
),
"$UCSORTHOREF": HeaderVarDef(
name="$UCSORTHOREF",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=14400,
default="",
),
"$UCSORTHOVIEW": HeaderVarDef(
name="$UCSORTHOVIEW",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=14500,
default=0,
),
"$UCSORGTOP": HeaderVarDef(
name="$UCSORGTOP",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=14600,
default=(0.0, 0.0, 0.0),
),
"$UCSORGBOTTOM": HeaderVarDef(
name="$UCSORGBOTTOM",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=14700,
default=(0.0, 0.0, 0.0),
),
"$UCSORGLEFT": HeaderVarDef(
name="$UCSORGLEFT",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=14800,
default=(0.0, 0.0, 0.0),
),
"$UCSORGRIGHT": HeaderVarDef(
name="$UCSORGRIGHT",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=14900,
default=(0.0, 0.0, 0.0),
),
"$UCSORGFRONT": HeaderVarDef(
name="$UCSORGFRONT",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=15000,
default=(0.0, 0.0, 0.0),
),
"$UCSORGBACK": HeaderVarDef(
name="$UCSORGBACK",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=15100,
default=(0.0, 0.0, 0.0),
),
"$PUCSBASE": HeaderVarDef(
name="$PUCSBASE",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=15200,
default="",
),
"$PUCSNAME": HeaderVarDef(
name="$PUCSNAME",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF12,
maxdxf=DXF2018,
priority=15300,
default="",
),
"$PUCSORG": HeaderVarDef(
name="$PUCSORG",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=15400,
default=(0.0, 0.0, 0.0),
),
"$PUCSXDIR": HeaderVarDef(
name="$PUCSXDIR",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=15500,
default=(1.0, 0.0, 0.0),
),
"$PUCSYDIR": HeaderVarDef(
name="$PUCSYDIR",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=15600,
default=(0.0, 1.0, 0.0),
),
"$PUCSORTHOREF": HeaderVarDef(
name="$PUCSORTHOREF",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=15700,
default="",
),
"$PUCSORTHOVIEW": HeaderVarDef(
name="$PUCSORTHOVIEW",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=15800,
default=0,
),
"$PUCSORGTOP": HeaderVarDef(
name="$PUCSORGTOP",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=15900,
default=(0.0, 0.0, 0.0),
),
"$PUCSORGBOTTOM": HeaderVarDef(
name="$PUCSORGBOTTOM",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=16000,
default=(0.0, 0.0, 0.0),
),
"$PUCSORGLEFT": HeaderVarDef(
name="$PUCSORGLEFT",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=16100,
default=(0.0, 0.0, 0.0),
),
"$PUCSORGRIGHT": HeaderVarDef(
name="$PUCSORGRIGHT",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=16200,
default=(0.0, 0.0, 0.0),
),
"$PUCSORGFRONT": HeaderVarDef(
name="$PUCSORGFRONT",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=16300,
default=(0.0, 0.0, 0.0),
),
"$PUCSORGBACK": HeaderVarDef(
name="$PUCSORGBACK",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=16400,
default=(0.0, 0.0, 0.0),
),
"$USERI1": HeaderVarDef(
name="$USERI1",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=16500,
default=0,
),
"$USERI2": HeaderVarDef(
name="$USERI2",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=16600,
default=0,
),
"$USERI3": HeaderVarDef(
name="$USERI3",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=16700,
default=0,
),
"$USERI4": HeaderVarDef(
name="$USERI4",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=16800,
default=0,
),
"$USERI5": HeaderVarDef(
name="$USERI5",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=16900,
default=0,
),
"$USERR1": HeaderVarDef(
name="$USERR1",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17000,
default=0.0,
),
"$USERR2": HeaderVarDef(
name="$USERR2",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17100,
default=0.0,
),
"$USERR3": HeaderVarDef(
name="$USERR3",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17200,
default=0.0,
),
"$USERR4": HeaderVarDef(
name="$USERR4",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17300,
default=0.0,
),
"$USERR5": HeaderVarDef(
name="$USERR5",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17400,
default=0.0,
),
"$WORLDVIEW": HeaderVarDef(
name="$WORLDVIEW",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17500,
default=1,
),
"$SHADEDGE": HeaderVarDef(
name="$SHADEDGE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17600,
default=3,
),
"$SHADEDIF": HeaderVarDef(
name="$SHADEDIF",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17700,
default=70,
),
"$TILEMODE": HeaderVarDef(
name="$TILEMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17800,
default=1,
),
"$MAXACTVP": HeaderVarDef(
name="$MAXACTVP",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=17900,
default=64,
),
"$PINSBASE": HeaderVarDef(
name="$PINSBASE",
code=10,
factory=Point3D,
mindxf=DXF2000,
maxdxf=DXF2018,
priority=18000,
default=(0.0, 0.0, 0.0),
),
"$PLIMCHECK": HeaderVarDef(
name="$PLIMCHECK",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=18100,
default=0,
),
"$PEXTMIN": HeaderVarDef(
name="$PEXTMIN",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=18200,
default=(1e20, 1e20, 1e20),
),
"$PEXTMAX": HeaderVarDef(
name="$PEXTMAX",
code=10,
factory=Point3D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=18300,
default=(-1e20, -1e20, -1e20),
),
"$PLIMMIN": HeaderVarDef(
name="$PLIMMIN",
code=10,
factory=Point2D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=18400,
default=(0.0, 0.0),
),
"$PLIMMAX": HeaderVarDef(
name="$PLIMMAX",
code=10,
factory=Point2D,
mindxf=DXF12,
maxdxf=DXF2018,
priority=18500,
default=(420.0, 297.0),
),
"$UNITMODE": HeaderVarDef(
name="$UNITMODE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=18600,
default=0,
),
"$VISRETAIN": HeaderVarDef(
name="$VISRETAIN",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=18700,
default=1,
),
"$PLINEGEN": HeaderVarDef(
name="$PLINEGEN",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=18800,
default=0,
),
"$PSLTSCALE": HeaderVarDef(
name="$PSLTSCALE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF12,
maxdxf=DXF2018,
priority=18900,
default=1,
),
"$TREEDEPTH": HeaderVarDef(
name="$TREEDEPTH",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19000,
default=3020,
),
"$CMLSTYLE": HeaderVarDef(
name="$CMLSTYLE",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19100,
default="Standard",
),
"$CMLJUST": HeaderVarDef(
name="$CMLJUST",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19200,
default=0,
),
"$CMLSCALE": HeaderVarDef(
name="$CMLSCALE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19300,
default=20.0,
),
"$PROXYGRAPHICS": HeaderVarDef(
name="$PROXYGRAPHICS",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19400,
default=1,
),
"$MEASUREMENT": HeaderVarDef(
name="$MEASUREMENT",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19500,
default=1,
),
"$CELWEIGHT": HeaderVarDef(
name="$CELWEIGHT",
code=370,
factory=partial(SingleValue, code=370),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19600,
default=-1,
),
"$ENDCAPS": HeaderVarDef(
name="$ENDCAPS",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19700,
default=0,
),
"$JOINSTYLE": HeaderVarDef(
name="$JOINSTYLE",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19800,
default=0,
),
"$LWDISPLAY": HeaderVarDef(
name="$LWDISPLAY",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=19900,
default=0,
),
"$INSUNITS": HeaderVarDef(
name="$INSUNITS",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20000,
default=6,
),
"$HYPERLINKBASE": HeaderVarDef(
name="$HYPERLINKBASE",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20100,
default="",
),
"$STYLESHEET": HeaderVarDef(
name="$STYLESHEET",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20200,
default="",
),
"$XEDIT": HeaderVarDef(
name="$XEDIT",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20300,
default=1,
),
"$CEPSNTYPE": HeaderVarDef(
name="$CEPSNTYPE",
code=380,
factory=partial(SingleValue, code=380),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20400,
default=0,
),
"$PSTYLEMODE": HeaderVarDef(
name="$PSTYLEMODE",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20500,
default=1,
),
"$FINGERPRINTGUID": HeaderVarDef(
name="$FINGERPRINTGUID",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20600,
default=CONST_GUID,
),
"$VERSIONGUID": HeaderVarDef(
name="$VERSIONGUID",
code=2,
factory=partial(SingleValue, code=2),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20700,
default=CONST_GUID,
),
"$EXTNAMES": HeaderVarDef(
name="$EXTNAMES",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20800,
default=1,
),
"$PSVPSCALE": HeaderVarDef(
name="$PSVPSCALE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=20900,
default=0.0,
),
"$OLESTARTUP": HeaderVarDef(
name="$OLESTARTUP",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2000,
maxdxf=DXF2018,
priority=21000,
default=0,
),
# 0 = Disables SORTENTS
# 1 = Sorts for object selection
# 2 = Sorts for object snap
# 4 = Sorts for redraws; obsolete
# 8 = Sorts for MSLIDE command slide creation; obsolete
# 16 = Sorts for REGEN commands
# 32 = Sorts for plotting
# 64 = Sorts for PostScript output; obsolete
"$SORTENTS": HeaderVarDef(
name="$SORTENTS",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21100,
default=127,
),
"$INDEXCTL": HeaderVarDef(
name="$INDEXCTL",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21200,
default=0,
),
"$HIDETEXT": HeaderVarDef(
name="$HIDETEXT",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21300,
default=1,
),
"$XCLIPFRAME": HeaderVarDef(
name="$XCLIPFRAME",
code=280, # 2004 & 2007 = 290
factory=partial(SingleValue, code=280), # 2004 & 2007 = 290
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21400,
default=0,
),
"$HALOGAP": HeaderVarDef(
name="$HALOGAP",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21500,
default=0,
),
"$OBSCOLOR": HeaderVarDef(
name="$OBSCOLOR",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21600,
default=257,
),
"$OBSLTYPE": HeaderVarDef(
name="$OBSLTYPE",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21700,
default=0,
),
"$INTERSECTIONDISPLAY": HeaderVarDef(
name="$INTERSECTIONDISPLAY",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21800,
default=0,
),
"$INTERSECTIONCOLOR": HeaderVarDef(
name="$INTERSECTIONCOLOR",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=21900,
default=257,
),
"$DIMASSOC": HeaderVarDef(
name="$DIMASSOC",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=22000,
default=2,
),
"$PROJECTNAME": HeaderVarDef(
name="$PROJECTNAME",
code=1,
factory=partial(SingleValue, code=1),
mindxf=DXF2004,
maxdxf=DXF2018,
priority=22100,
default="",
),
"$CAMERADISPLAY": HeaderVarDef(
name="$CAMERADISPLAY",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22200,
default=0,
),
"$LENSLENGTH": HeaderVarDef(
name="$LENSLENGTH",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22300,
default=50.0,
),
"$CAMERAHEIGHT": HeaderVarDef(
name="$CAMERAHEIGHT",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22400,
default=0.0,
),
"$STEPSPERSEC": HeaderVarDef(
name="$STEPSPERSEC",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22500,
default=24.0,
),
"$STEPSIZE": HeaderVarDef(
name="$STEPSIZE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22600,
default=100.0,
),
"$3DDWFPREC": HeaderVarDef(
name="$3DDWFPREC",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22700,
default=2.0,
),
"$PSOLWIDTH": HeaderVarDef(
name="$PSOLWIDTH",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22800,
default=0.005,
),
"$PSOLHEIGHT": HeaderVarDef(
name="$PSOLHEIGHT",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=22900,
default=0.08,
),
"$LOFTANG1": HeaderVarDef(
name="$LOFTANG1",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23000,
default=1.570796326795,
),
"$LOFTANG2": HeaderVarDef(
name="$LOFTANG2",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23100,
default=1.570796326795,
),
"$LOFTMAG1": HeaderVarDef(
name="$LOFTMAG1",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23200,
default=0.0,
),
"$LOFTMAG2": HeaderVarDef(
name="$LOFTMAG2",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23300,
default=0.0,
),
"$LOFTPARAM": HeaderVarDef(
name="$LOFTPARAM",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23400,
default=7,
),
"$LOFTNORMALS": HeaderVarDef(
name="$LOFTNORMALS",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23500,
default=1,
),
"$LATITUDE": HeaderVarDef(
name="$LATITUDE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23600,
default=37.795,
),
"$LONGITUDE": HeaderVarDef(
name="$LONGITUDE",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23700,
default=-122.394,
),
"$NORTHDIRECTION": HeaderVarDef(
name="$NORTHDIRECTION",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23800,
default=0.0,
),
"$TIMEZONE": HeaderVarDef(
name="$TIMEZONE",
code=70,
factory=partial(SingleValue, code=70),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=23900,
default=-8000,
),
"$LIGHTGLYPHDISPLAY": HeaderVarDef(
name="$LIGHTGLYPHDISPLAY",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24000,
default=1,
),
"$TILEMODELIGHTSYNCH": HeaderVarDef(
name="$TILEMODELIGHTSYNCH",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24100,
default=1,
),
"$CMATERIAL": HeaderVarDef(
name="$CMATERIAL",
code=347,
factory=partial(SingleValue, code=347),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24200,
default="45",
), # default value '0' crashes BricsCAD
"$SOLIDHIST": HeaderVarDef(
name="$SOLIDHIST",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24300,
default=0,
),
"$SHOWHIST": HeaderVarDef(
name="$SHOWHIST",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24400,
default=1,
),
"$DWFFRAME": HeaderVarDef(
name="$DWFFRAME",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24500,
default=2,
),
"$DGNFRAME": HeaderVarDef(
name="$DGNFRAME",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24600,
default=2,
),
"$REALWORLDSCALE": HeaderVarDef(
name="$REALWORLDSCALE",
code=290,
factory=partial(SingleValue, code=290),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24700,
default=1,
),
"$INTERFERECOLOR": HeaderVarDef(
name="$INTERFERECOLOR",
code=62,
factory=partial(SingleValue, code=62),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24800,
default=256,
),
"$INTERFEREOBJVS": HeaderVarDef(
# Handle to a VisualStyle, if pointing to an invalid or non-existing VS,
# copy to clipboard in AutoCAD can fail.
name="$INTERFEREOBJVS",
code=345,
factory=partial(SingleValue, code=345),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24810,
default=None,
), # will not be written, if not set
"$INTERFEREVPVS": HeaderVarDef(
# Handle to a VisualStyle, if pointing to an invalid or non-existing VS,
# copy to clipboard in AutoCAD can fail.
name="$INTERFEREVPVS",
code=346,
factory=partial(SingleValue, code=346),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24820,
default=None,
), # will not be written, if not set
"$CSHADOW": HeaderVarDef(
name="$CSHADOW",
code=280,
factory=partial(SingleValue, code=280),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=24900,
default=0,
),
"$SHADOWPLANELOCATION": HeaderVarDef(
name="$SHADOWPLANELOCATION",
code=40,
factory=partial(SingleValue, code=40),
mindxf=DXF2007,
maxdxf=DXF2018,
priority=25000,
default=0.0,
),
}
def version_specific_group_code(name: str, dxfversion: str) -> int:
    """Return the group code of header variable *name* for *dxfversion*.

    HEADER_VAR_MAP stores the group codes of the latest supported DXF
    version; a few header variables changed their group code in older
    DXF versions and are adjusted here.

    Raises:
        KeyError: if *name* is not a known header variable.
    """
    # Look up first: unknown names raise KeyError regardless of version.
    code = HEADER_VAR_MAP[name].code
    # (version where the code changed, code before, code at/after)
    changed_codes = {
        "$ACADMAINTVER": (DXF2018, 70, 90),
        "$XCLIPFRAME": (DXF2010, 290, 280),
    }
    override = changed_codes.get(name)
    if override is not None:
        changed_in, old_code, new_code = override
        code = old_code if dxfversion < changed_in else new_code
    return code
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime
import struct
from . import const
# ACIS versions exported by BricsCAD:
# R2000/AC1015: 400, "ACIS 4.00 NT", text length has no prefix "@"
# R2004/AC1018: 20800 @ "ACIS 208.00 NT", text length has "@" prefix ??? weird
# R2007/AC1021: 700 @ "ACIS 32.0 NT", text length has "@" prefix
# R2010/AC1024: 700 @ "ACIS 32.0 NT", text length has "@" prefix
# A test showed that R2000 files that contains ACIS v700/32.0 or v20800/208.0
# data can be opened by Autodesk TrueView, BricsCAD and Allplan, so exporting
# only v700/32.0 for all DXF versions should be OK!
# test script: exploration/acis/transplant_acis_data.py
def encode_str(s: str) -> bytes:
    """Encode *s* as a SAB string record: tag byte, length byte, UTF-8 data.

    Characters that cannot be encoded as UTF-8 are silently dropped.
    """
    data = s.encode("utf8", errors="ignore")
    header = struct.pack("<BB", const.Tags.STR, len(data))
    return header + data
def encode_double(value: float) -> bytes:
    """Encode *value* as a SAB double record: tag byte + little-endian float64."""
    tag = struct.pack("<B", const.Tags.DOUBLE)
    return tag + struct.pack("<d", value)
@dataclass
class AcisHeader:
    """Represents an ACIS file header.

    Stores the metadata of a SAT (text) or SAB (binary) ACIS data block and
    can serialize itself into either format (:meth:`dumps` / :meth:`dumpb`).
    The exact byte/character layout of the emitted headers is format-critical
    — do not change spacing or field order.
    """
    # ACIS version number, e.g. 700 for "ACIS 32.0 NT", see const.ACIS_VERSION
    version: int = const.MIN_EXPORT_VERSION
    n_records: int = 0  # can be 0
    n_entities: int = 0
    flags: int = 0
    # id string of the generating application
    product_id: str = const.EZDXF_BUILDER_ID
    acis_version: str = const.ACIS_VERSION[const.MIN_EXPORT_VERSION]
    creation_date: datetime = field(default_factory=datetime.now)
    units_in_mm: float = 1.0
    # empty string -> no ASM header present, see has_asm_header
    asm_version: str = ""
    asm_end_marker: bool = False  # depends on DXF version: R2013, RT2018
    @property
    def has_asm_header(self) -> bool:
        # an ASM header exists only if an ASM version string was set
        return self.asm_version != ""
    def dumps(self) -> list[str]:
        """Returns the SAT file header as list of strings."""
        # 3 header lines: version/counts, product+version+date, units+tolerances;
        # trailing spaces are part of the format
        return [
            f"{self.version} {self.n_records} {self.n_entities} {self.flags} ",
            self._header_str(),
            f"{self.units_in_mm:g} 9.9999999999999995e-007 1e-010 ",
        ]
    def dumpb(self) -> bytes:
        """Returns the SAB file header as bytes."""
        buffer: list[bytes] = []
        # signature depends on version: newer files use the ASM signature
        if self.version > 21800:
            buffer.append(const.ASM_SIGNATURE)
        else:
            buffer.append(const.ACIS_SIGNATURE)
        # 4 little-endian int32: version, record count, entity count, flags
        data = struct.pack(
            "<iiii", self.version, self.n_records, self.n_entities, self.flags
        )
        buffer.append(data)
        buffer.append(encode_str(self.product_id))
        buffer.append(encode_str(self.acis_version))
        buffer.append(encode_str(self.creation_date.ctime()))
        buffer.append(encode_double(self.units_in_mm))
        buffer.append(encode_double(const.RES_TOL))
        buffer.append(encode_double(const.NOR_TOL))
        return b"".join(buffer)
    def _header_str(self) -> str:
        # 2nd SAT header line; versions > 400 prefix each text length with "@"
        p_len = len(self.product_id)
        a_len = len(self.acis_version)
        date = self.creation_date.ctime()
        if self.version > 400:
            return f"@{p_len} {self.product_id} @{a_len} {self.acis_version} @{len(date)} {date} "
        else:
            return f"{p_len} {self.product_id} {a_len} {self.acis_version} {len(date)} {date} "
    def set_version(self, version: int) -> None:
        """Sets the ACIS version as an integer value and updates the version
        string accordingly.

        Raises:
            ValueError: for version numbers not in const.ACIS_VERSION
        """
        try:
            self.acis_version = const.ACIS_VERSION[version]
            self.version = version
        except KeyError:
            raise ValueError(f"invalid ACIS version number {version}")
        # empty string if no ASM version exists for this ACIS version
        self.asm_version = const.ASM_VERSION.get(version, "")
    def asm_header(self):
        # local import to avoid a circular dependency with .entities
        from .entities import AsmHeader
        return AsmHeader(self.asm_version)
    def sat_end_marker(self) -> str:
        # end-of-data marker for SAT; trailing space is part of the format
        if self.asm_end_marker:
            return const.END_OF_ASM_DATA_SAT + " "
        else:
            return const.END_OF_ACIS_DATA_SAT + " "
    def sab_end_marker(self) -> bytes:
        # end-of-data marker for SAB
        if self.asm_end_marker:
            return const.END_OF_ASM_DATA_SAB
        else:
            return const.END_OF_ACIS_DATA_SAB
| mit | b405c8b9bff627adb2f7c0c9d21b662f | 35.163636 | 98 | 0.620664 | 3.306733 | false | false | false | false |
mozman/ezdxf | tests/test_05_tools/test_526_explode.py | 1 | 1623 | # Copyright (c) 2021-2022, Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf import explode
from ezdxf.layouts import Modelspace
@pytest.fixture(scope="module")
def msp() -> Modelspace:
    """Module-scope modelspace of a new document with block "BLK1"
    containing a single LINE entity.
    """
    document = ezdxf.new()
    blk = document.blocks.new("BLK1")
    blk.add_line((0, 0), (1, 0))
    return document.modelspace()
def test_virtual_entities_from_insert(msp: Modelspace):
    """A reference to "BLK1" explodes into exactly one virtual entity."""
    blockref = msp.add_blockref("BLK1", (0, 0))
    entities = list(explode.virtual_block_reference_entities(blockref))
    assert len(entities) == 1
def test_transparency_of_virtual_entities_from_insert(msp: Modelspace):
    """Virtual entities report opaque transparency without storing the
    transparency attribute explicitly.
    """
    blockref = msp.add_blockref("BLK1", (0, 0))
    entity = next(iter(explode.virtual_block_reference_entities(blockref)))
    assert entity.transparency == 0.0
    assert entity.dxf.hasattr("transparency") is False
def test_complex_target_coordinate_system(msp: Modelspace):
    """Explode a nested block reference with non-uniform scaling and an
    arbitrary extrusion vector.
    """
    doc = msp.doc
    outer_block = doc.blocks.new("BLK2")
    nested_ref = outer_block.add_blockref("BLK1", (0, 0))
    nested_ref.dxf.rotation = 30
    nested_ref.dxf.xscale = 7
    nested_ref.dxf.yscale = 3
    nested_ref.dxf.zscale = 1
    # this raises an internal InsertTransformationError!
    nested_ref.dxf.extrusion = (1, 1, 1)
    outer_ref = msp.add_blockref("BLK2", (0, 0))
    outer_ref.dxf.rotation = 30
    outer_ref.dxf.xscale = 2
    outer_ref.dxf.yscale = -5
    outer_ref.dxf.zscale = 1
    line = list(explode.virtual_block_reference_entities(outer_ref))[0]
    assert line.dxf.start.isclose((0, 0, 0))
    assert line.dxf.end.isclose(
        (-2.755149853494064, -18.089844737258662, 2.857738033247041)
    )
# Allow running this test module directly: ``python test_526_explode.py``.
if __name__ == "__main__":
    pytest.main([__file__])
| mit | 1299288feeea88bf8b6e794215db0171 | 27.473684 | 71 | 0.669131 | 2.8879 | false | true | false | false |
mozman/ezdxf | examples/redraw_order.py | 1 | 3698 | # Copyright (c) 2018-2022 Manfred Moitzi
# License: MIT License
import random
import pathlib
import ezdxf
from ezdxf.math import Vec3
from ezdxf.enums import SortEntities
# Output directory: ~/Desktop/Outbox if it exists, otherwise the current directory.
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
    CWD = pathlib.Path(".")
# ------------------------------------------------------------------------------
# This example shows how to change the redraw-order of DXF entities.
#
# docs:
# Baselayout: https://ezdxf.mozman.at/docs/layouts/layouts.html#baselayout
# reorder module: https://ezdxf.mozman.at/docs/reorder.html
# ------------------------------------------------------------------------------
def random_in_range(a, b):
    """Return a uniformly distributed random float in the interval [a, b]."""
    span = float(b - a)
    return random.random() * span + a
def random_pos(lower_left=(0, 0), upper_right=(100, 100)):
    """Return a random Vec3 inside the given axis-aligned rectangle."""
    (x0, y0), (x1, y1) = lower_left, upper_right
    return Vec3(random_in_range(x0, x1), random_in_range(y0, y1))
def add_solids(
    msp, count=20, min_size=1, max_size=5, color=None, layer="SOLIDS"
):
    """Add *count* randomly placed and sized SOLID entities to *msp*.

    If *color* is None, each SOLID gets a random ACI color (1-7) and is
    placed on a layer named "color_<aci>", otherwise the given *color* and
    *layer* are used for all entities.
    """

    def make_solid(corner, size, attribs):
        # vertex order for SOLID: 3rd and 4th corners are swapped compared
        # to a polyline quad (DXF SOLID quirk)
        vertices = [
            corner,
            corner + (size, 0),
            corner + (0, size),
            corner + (size, size),
        ]
        msp.add_solid(vertices, dxfattribs=attribs)

    attribs = {"color": color, "layer": layer}
    for _ in range(count):
        corner = random_pos((0, 0), (100, 100))
        size = random_in_range(min_size, max_size)
        if color is None:
            aci = random.randint(1, 7)
            attribs["color"] = aci
            attribs["layer"] = "color_" + str(aci)
        make_solid(corner, size, attribs)
def order_solids_by_color(msp):
    """Set the redraw order of all SOLID entities by their ACI color."""
    # AutoCAD regenerates entities in ascending handle order; assigning a
    # sort-handle to an object handle overrides that order. Any handle is a
    # valid sort-handle, even "0", but "0" is drawn last (on top of all
    # other entities) and not first as one might expect.
    # The ACI color is used directly as sort-handle, formatted as an
    # uppercase hex string without the "0x" prefix (e.g. "FF").
    msp.set_redraw_order(
        (solid.dxf.handle, f"{solid.dxf.color:X}")
        for solid in msp.query("SOLID")
    )
def reverse_order_solids_by_color(msp):
    """Set the redraw order of all SOLID entities by reversed ACI color."""
    msp.set_redraw_order(
        (solid.dxf.handle, f"{10 - solid.dxf.color:X}")
        for solid in msp.query("SOLID")
    )
def move_solids_on_top(msp, color, sort_handle="FFFF"):
    """Move all SOLID entities with ACI *color* on top of all others."""
    # get_redraw_order() yields (object_handle, sort_handle) pairs; this
    # also works if a redraw order was set before.
    order = dict(msp.get_redraw_order())
    updates = {
        solid.dxf.handle: sort_handle
        for solid in msp.query(f"SOLID[color=={color}]")
    }
    order.update(updates)
    msp.set_redraw_order(order)  # a dict is accepted as well
def remove_solids(msp, color=6):
    """Delete all SOLID entities with ACI *color* from *msp*."""
    for entity in msp.query(f"SOLID[color=={color}]"):
        msp.delete_entity(entity)
def main():
    """Create DXF files demonstrating different redraw orders of 1000
    randomly colored SOLID entities.
    """
    doc = ezdxf.new("R2004")  # does not work with AC1015/R2000, but it should
    # regenerate entities in sort-handle order
    doc.header["$SORTENTS"] = SortEntities.REGEN
    msp = doc.modelspace()
    add_solids(msp, count=1000, min_size=3, max_size=7)
    doc.saveas(CWD / "sort_solids_unordered.dxf")
    order_solids_by_color(msp)  # 1 -> 7
    doc.saveas(CWD / "sort_solids_ordered.dxf")
    reverse_order_solids_by_color(msp)  # 7 -> 1
    doc.saveas(CWD / "sort_solids_reversed_ordered.dxf")
    move_solids_on_top(msp, 6)  # 7, 5, 4, 3, 2, 1, 6
    doc.saveas(CWD / "sort_solids_6_on_top.dxf")  # 6 is magenta
    # AutoCAD ignores removed entities in the redraw-order-table (SORTENTSTABLE)
    remove_solids(msp, 6)
    doc.saveas(CWD / "sort_solids_removed_color_6.dxf")
# Script entry point.
if __name__ == "__main__":
    main()
| mit | 2b142b8c8433c0eae6013644e5a754e3 | 30.87931 | 80 | 0.598161 | 3.046129 | false | false | false | false |
mozman/ezdxf | src/ezdxf/tools/__init__.py | 1 | 3431 | # Copyright (c) 2015-2022, Manfred Moitzi
# License: MIT License
from __future__ import annotations
from typing import Any, Iterable
from uuid import uuid4
import functools
import html
from .juliandate import juliandate, calendardate
from .binarydata import hex_strings_to_bytes, bytes_to_hexstr
escape = functools.partial(html.escape, quote=True)
def float2transparency(value: float) -> int:
    """
    Returns the DXF transparency value as integer in the range from ``0`` to
    ``255``, where ``0`` is 100% transparent and ``255`` is opaque, with the
    transparency flag ``0x02000000`` set.

    Args:
        value: transparency value as float in the range from ``0`` to ``1``,
            where ``0`` is opaque and ``1`` is 100% transparency.

    """
    opacity = 1.0 - float(value)
    return int(opacity * 255) | 0x02000000
def transparency2float(value: int) -> float:
    """
    Returns the transparency value as float from ``0`` to ``1``, ``0`` for no
    transparency (opaque) and ``1`` for 100% transparency.

    Args:
        value: DXF integer transparency value, ``0`` for 100% transparency
            and ``255`` for opaque; only the lowest byte is evaluated.

    """
    # only the lowest byte carries the transparency: 255 -> 0.0; 0 -> 1.0
    channel = int(value) & 0xFF
    return 1.0 - channel / 255.0
def set_flag_state(flags: int, flag: int, state: bool = True) -> int:
    """Set/clear binary `flag` in data `flags`.

    Args:
        flags: data value
        flag: flag to set/clear
        state: ``True`` for setting, ``False`` for clearing

    """
    if state:
        return flags | flag
    return flags & ~flag
def guid() -> str:
    """Returns a general unique ID, based on :func:`uuid.uuid4`.

    Creates a GUID for the header variables $VERSIONGUID and
    $FINGERPRINTGUID, matching the AutoCAD pattern
    ``{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}``.
    """
    return ("{%s}" % uuid4()).upper()
def take2(iterable: Iterable) -> Iterable[tuple[Any, Any]]:
    """Iterate `iterable` as 2-tuples.

    :code:`[1, 2, 3, 4, ...] -> (1, 2), (3, 4), ...`

    A trailing element without a partner is dropped.
    """
    # zip() consumes the same iterator twice per step -> pairwise grouping
    items = iter(iterable)
    yield from zip(items, items)
def suppress_zeros(s: str, leading: bool = False, trailing: bool = True):
    """Suppress trailing and/or leading ``0`` of string `s`.

    Args:
        s: data string
        leading: suppress leading ``0``
        trailing: suppress trailing ``0``

    """
    # nothing to do?
    if not (leading or trailing):
        return s
    # any representation of zero collapses to "0"
    if float(s) == 0.0:
        return "0"
    # detach the sign and restore it at the end
    sign = ""
    if s[0] in "-+":
        sign, s = s[0], s[1:]
    if leading:
        s = s.lstrip("0")
    if trailing and "." in s:
        s = s.rstrip("0")
    # remove a dangling decimal separator
    if s[-1] in ".,":
        s = s[:-1]
    return sign + s
def normalize_text_angle(angle: float, fix_upside_down=True) -> float:
    """
    Normalizes text `angle` to the range from 0 to 360 degrees and fixes
    upside down text angles.

    Args:
        angle: text angle in degrees
        fix_upside_down: rotate upside down text angle about 180 degree

    """
    angle %= 360.0  # normalize angle (0 .. 360)
    if fix_upside_down and 90 < angle <= 270:
        # flip text orientation and normalize again
        angle = (angle - 180) % 360.0
    return angle
| mit | 7c890055716d2af9962d35b7dbe54f10 | 24.796992 | 113 | 0.585544 | 3.62685 | false | false | false | false |
mozman/ezdxf | tests/test_06_math/conftest.py | 1 | 3865 | # Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import pytest
class PolyData:
    """Named polygon test data: a *name* and a list of 2D vertices."""

    def __init__(self, name, vertices):
        self.name = name
        self.vertices = vertices

    def __repr__(self):
        # readable representation for pytest ids and failure output
        return f"PolyData(name={self.name!r}, {len(self.vertices)} vertices)"
# Polygon test cases: each entry is a named list of 2D vertices; the trailing
# comments (# 6, # 7, ...) are the entry indices in this list.
POLYGON_DATA0 = [
    PolyData(
        name="Star",
        vertices=[
            (350, 75),
            (379, 161),
            (469, 161),
            (397, 215),
            (423, 301),
            (350, 250),
            (277, 301),
            (303, 215),
            (231, 161),
            (321, 161),
        ],
    ),
    PolyData(
        name="Simple Diamond",
        vertices=[
            (0, 1),
            (-1, 0),
            (0, -1),
            (1, 0),
        ],
    ),
    PolyData(
        name="No Concave Vertex",
        vertices=[
            (-2.0, -17.0),
            (-2.0, -8.0),
            (-8.0, -2.0),
            (-17.0, -2.0),
            (-20.0, -8.0),
            (-18.0, -17.0),
            (-12.0, -24.0),
            (-7.0, -22.0),
        ],
    ),
    PolyData(
        name="Slanted Side",
        vertices=[
            (-10.0, -20.0),
            (-10.0, -30.0),
            (0.0, -20.0),
            (0.0, -10.0),
            (-20.0, -10.0),
            (-20.0, -20.0),
        ],
    ),
    PolyData(
        name="New Thing",
        vertices=[
            (-20.0, -20.0),
            (-10.0, -20.0),
            (-10.0, -30.0),
            (0.0, -20.0),
            (10.0, -20.0),
            (0.0, -10.0),
            (10.0, 0.0),
            (0.0, 0.0),
            (-10.0, -10.0),
            (-10.0, 0.0),
            (-20.0, -10.0),
            (-30.0, -10.0),
        ],
    ),
    PolyData(
        name="Edge Case 1",
        vertices=[
            (40.04332790675601, -30.70794551983977),
            (54.13, -30.70794551983977),
            (54.13, -28.03),
            (69.13, -28.03),
            (69.11, -52.53),
            (40.04332790675601, -52.53),
        ],
    ),
    PolyData(  # 6
        name="Edge Case 2",
        vertices=[
            (229.28340553, 78.91250014),
            (258.42948809, 17.98278109),
            (132.01956999, -22.96900817),
            (107.97774096, 23.39276058),
            (65.85573925, 28.63846858),
            (41.66373597, -92.78859248),
            (-5.59948763, -54.18987786),
            (-44.61508682, -69.7461117),
            (-28.41208894, -106.93810071),
            (-71.11899145, -125.56044277),
            (-100.84787818, -88.51853387),
            (-211.53564549, -160.76853269),
            (-244.22754588, -147.51172179),
            (-226.83717643, -42.0984372),
            (-230.65279618, -10.5455196),
            (-240.50239817, 70.87826746),
            (-12.48219264, 137.70176109),
            (4.65848369, 204.21077075),
            (176.5243417, 193.73497584),
            (171.13537712, 87.27009315),
            (229.28340553, 78.91250014),
        ],
    ),
    PolyData(  # 7
        name="Edge Case 3-A",
        vertices=[
            (229, 78),
            (66, 28.7),
            (-244.2, -147.5),
            (-226, -42),
            (229, 78),
        ],
    ),
    PolyData(  # 8
        name="Edge Case 3-B",
        vertices=[
            (229000, 78000),
            (66000, 28700),
            (-244200, -147500),
            (-226000, -42000),
            (229000, 78000),
        ],
    ),
    PolyData(  # 9
        name="Edge Case 4",
        vertices=[
            (-1179, -842),
            (-489, -1049),
            (101, -1226),
            (520, -558),
            (779, -175),
            (856, 257),
            (544, 806),
            (-72, 713),
            (-1004, 945),
            (-988, 62),
            (-1179, -842),
        ],
    ),
]
# Parametrized module-scope fixture: yields each PolyData entry once, using
# the polygon name as test id.
@pytest.fixture(
    scope="module",
    params=POLYGON_DATA0,
    ids=[d.name for d in POLYGON_DATA0],
)
def polygon_data0(request):
    return request.param
| mit | 91c3bea8be19410f47109dcfefb89a41 | 23.006211 | 52 | 0.371281 | 3.089528 | false | false | false | false |
mozman/ezdxf | examples/render/dimension_radius.py | 1 | 12009 | # Copyright (c) 2019-2022, Manfred Moitzi
# License: MIT License
import pathlib
import math
import ezdxf
from ezdxf.math import Vec3, UCS
import logging
# ------------------------------------------------------------------------------
# This example shows how to use radius dimension.
#
# tutorial: https://ezdxf.mozman.at/docs/tutorials/radius_dimension.html
# ------------------------------------------------------------------------------
logging.basicConfig(level="WARNING")
DXFVERSION = "R2013"
# Output directory: ~/Desktop/Outbox if it exists, otherwise the current directory.
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
    CWD = pathlib.Path(".")
# Default text attributes:
TEXT_ATTRIBS = {
    "height": 0.25,
    "style": ezdxf.options.default_dimension_text_style,
}
DIM_TEXT_STYLE = ezdxf.options.default_dimension_text_style
# Discarding the dimension rendering is possible for BricsCAD,
# but it is incompatible with AutoCAD -> error
BRICSCAD = False
def multiple_locations(delta=10, center=(0, 0)):
    """Return 8 points around *center* at distance *delta*, in order:
    E, NE, N, NW, W, SW, S, SE.
    """
    cx, cy = center
    offsets = [
        (delta, 0),
        (delta, delta),
        (0, delta),
        (-delta, delta),
        (-delta, 0),
        (-delta, -delta),
        (0, -delta),
        (delta, -delta),
    ]
    return [(cx + dx, cy + dy) for dx, dy in offsets]
def radius_default_outside(dxfversion=DXFVERSION, delta=10):
    """Create 8 circles with radius dimensions, measurement text placed
    outside by the default settings of DimStyle EZ_RADIUS.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    msp = doc.modelspace()
    for x, y in multiple_locations(delta=delta):
        # orient the dimension line away from the origin
        angle = Vec3(x, y).angle_deg
        msp.add_circle((x, y), radius=3)
        # Default DimStyle EZ_RADIUS:
        # - 1 drawing unit == 1m
        # - scale 1: 100; length_factor=100 -> measurement in cm
        # - closed filled arrow, size = 0.25
        #
        # DIMSTYLE settings:
        # dimtmove = 1:
        #      use leader, is the best setting for text outside to preserve
        #      appearance of DIMENSION entity, if editing afterwards in
        #      BricsCAD (AutoCAD)
        #
        # center:
        #       specifies the center of the circle
        # radius:
        #       specifies the radius of the circle
        # angle:
        #       specifies the orientation (angle) of the dimension line
        dim = msp.add_radius_dim(
            center=(x, y), radius=3, angle=angle, dimstyle="EZ_RADIUS"
        )
        # Necessary second step, to create the BLOCK entity with the DIMENSION
        # geometry. Ezdxf supports DXF R2000 attributes for DXF R12 rendering,
        # but they have to be applied by the DIMSTYLE override feature, this
        # additional attributes are not stored in the XDATA section of the
        # DIMENSION entity, they are just used to render the DIMENSION entity.
        # The return value `dim` is not a DIMENSION entity, instead a
        # DimStyleOverride object is returned, the DIMENSION entity is stored
        # as dim.dimension, see also ezdxf.override.DimStyleOverride class.
        dim.render(discard=BRICSCAD)
    doc.set_modelspace_vport(height=3 * delta)
    doc.saveas(CWD / f"dim_radius_{dxfversion}_default_outside.dxf")
def radius_default_inside(dxfversion=DXFVERSION, delta=10, dimtmove=0):
    """Create circles with radius dimensions, measurement text placed inside
    by DimStyle EZ_RADIUS_INSIDE, in 3 columns for different vertical text
    placements (dimtad: above/center/below).
    """
    def add_dim(x, y, dimtad):
        msp.add_circle((x, y), radius=3)
        dim = msp.add_radius_dim(
            center=(x, y),
            radius=3,
            angle=angle,
            dimstyle="EZ_RADIUS_INSIDE",
            override={
                "dimtad": dimtad,
            },
        )
        dim.render(discard=BRICSCAD)
    doc = ezdxf.new(dxfversion, setup=True)
    style = doc.dimstyles.get("EZ_RADIUS_INSIDE")
    style.dxf.dimtmove = dimtmove
    # Default DimStyle EZ_RADIUS_INSIDE:
    # - 1 drawing unit == 1m
    # - scale 1:100
    # - length_factor=100 -> measurement in cm
    # - closed filled arrow, size = 0.25
    # DIMSTYLE settings:
    # dimtmove = 0:
    #     keep dim line with text, is the best setting for text inside
    #     to preserve appearance of DIMENSION entity, if editing afterwards in
    #     BricsCAD (AutoCAD)
    # dimtix = 1:
    #     force text inside
    # dimatfit = 0:
    #     force text inside, required by BricsCAD (AutoCAD)
    # dimtad = 0:
    #     center text vertical, BricsCAD (AutoCAD) always creates vertical
    #     centered text, ezdxf let you choose the vertical placement (above,
    #     below, center), but editing the DIMENSION in BricsCAD will reset text
    #     to center placement.
    msp = doc.modelspace()
    for x, y in multiple_locations(delta=delta):
        angle = Vec3(x, y).angle_deg
        add_dim(x, y, dimtad=1)  # above
        add_dim(x + 3 * delta, y, dimtad=0)  # center
        add_dim(x + 6 * delta, y, dimtad=4)  # below
    doc.set_modelspace_vport(height=3 * delta)
    doc.saveas(
        CWD / f"dim_radius_{dxfversion}_default_inside_dimtmove_{dimtmove}.dxf"
    )
def radius_default_outside_horizontal(dxfversion=DXFVERSION, delta=10):
    """Radius dimensions with horizontal text forced outside (dimtoh=1),
    in 3 columns for dimtad above/center/below.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    msp = doc.modelspace()

    def place_dim(px, py, dimtad):
        msp.add_circle((px, py), radius=3)
        dim = msp.add_radius_dim(
            center=(px, py),
            radius=3,
            angle=angle,
            dimstyle="EZ_RADIUS",
            override={
                "dimtoh": 1,  # force text outside horizontal
                "dimtad": dimtad,  # vertical text placement
            },
        )
        dim.render(discard=BRICSCAD)

    for px, py in multiple_locations(delta=delta):
        angle = Vec3(px, py).angle_deg
        # dimtad: 1 = above, 0 = center, 4 = below
        for column, dimtad in enumerate((1, 0, 4)):
            place_dim(px + column * 3 * delta, py, dimtad)
    doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0))
    doc.saveas(CWD / f"dim_radius_{dxfversion}_default_outside_horizontal.dxf")
def radius_default_inside_horizontal(dxfversion=DXFVERSION, delta=10, dimtmove=0):
    """Radius dimensions with horizontal text forced inside (dimtih=1),
    using DimStyle EZ_RADIUS_INSIDE with the given dimtmove setting.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    # adjust text movement rule of the base dimstyle
    doc.dimstyles.get("EZ_RADIUS_INSIDE").dxf.dimtmove = dimtmove
    msp = doc.modelspace()
    for px, py in multiple_locations(delta=delta):
        orientation = Vec3(px, py).angle_deg
        msp.add_circle((px, py), radius=3)
        dim = msp.add_radius_dim(
            center=(px, py),
            radius=3,
            angle=orientation,
            dimstyle="EZ_RADIUS_INSIDE",
            override={
                "dimtih": 1,  # force text inside horizontal
            },
        )
        dim.render(discard=BRICSCAD)
    doc.set_modelspace_vport(height=3 * delta)
    doc.saveas(
        CWD
        / f"dim_radius_{dxfversion}_default_inside_horizontal_dimtmove_{dimtmove}.dxf"
    )
def radius_user_defined_outside(dxfversion=DXFVERSION, delta=15):
    """Radius dimensions with user-defined text location outside the circle
    (5 drawing units from the center), in 3 columns for dimtad
    above/center/below.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    msp = doc.modelspace()

    def place_dim(px, py, distance, dimtad):
        center = Vec3(px, py)
        msp.add_circle((px, py), radius=3)
        # user-defined text location on the dimension line direction
        text_location = center + Vec3.from_deg_angle(angle, distance)
        dim = msp.add_radius_dim(
            center=(px, py),
            radius=3,
            location=text_location,
            dimstyle="EZ_RADIUS",
            override={
                "dimtad": dimtad,
            },
        )
        dim.render(discard=BRICSCAD)

    for px, py in multiple_locations(delta=delta):
        angle = Vec3(px, py).angle_deg
        # dimtad: 1 = above, 0 = center, 4 = below
        for column, dimtad in enumerate((1, 0, 4)):
            place_dim(px + column * 3 * delta, py, 5, dimtad)
    doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0))
    doc.saveas(CWD / f"dim_radius_{dxfversion}_user_defined_outside.dxf")
def radius_user_defined_outside_horizontal(dxfversion=DXFVERSION, delta=15):
    """Radius dimensions with user-defined text location outside the circle
    and horizontal text (dimtoh=1), in 3 columns for dimtad
    above/center/below.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    msp = doc.modelspace()

    def place_dim(px, py, distance, dimtad):
        center = Vec3(px, py)
        msp.add_circle((px, py), radius=3)
        text_location = center + Vec3.from_deg_angle(angle, distance)
        dim = msp.add_radius_dim(
            center=(px, py),
            radius=3,
            location=text_location,
            dimstyle="EZ_RADIUS",
            override={
                "dimtad": dimtad,
                "dimtoh": 1,  # force text outside horizontal
            },
        )
        dim.render(discard=BRICSCAD)

    for px, py in multiple_locations(delta=delta):
        angle = Vec3(px, py).angle_deg
        # dimtad: 1 = above, 0 = center, 4 = below
        for column, dimtad in enumerate((1, 0, 4)):
            place_dim(px + column * 3 * delta, py, 5, dimtad)
    doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0))
    doc.saveas(
        CWD / f"dim_radius_{dxfversion}_user_defined_outside_horizontal.dxf"
    )
def radius_user_defined_inside(dxfversion=DXFVERSION, delta=10, dimtmove=0):
    """Radius dimensions with user-defined text location inside the circle
    (1 drawing unit from the center), using EZ_RADIUS with the given
    dimtmove setting, in 3 columns for dimtad above/center/below.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    # adjust text movement rule of the base dimstyle
    doc.dimstyles.get("EZ_RADIUS").dxf.dimtmove = dimtmove
    msp = doc.modelspace()

    def place_dim(px, py, distance, dimtad):
        center = Vec3(px, py)
        msp.add_circle((px, py), radius=3)
        text_location = center + Vec3.from_deg_angle(angle, distance)
        dim = msp.add_radius_dim(
            center=(px, py),
            radius=3,
            location=text_location,
            dimstyle="EZ_RADIUS",
            override={
                "dimtad": dimtad,
            },
        )
        dim.render(discard=BRICSCAD)

    for px, py in multiple_locations(delta=delta):
        angle = Vec3(px, py).angle_deg
        # dimtad: 1 = above, 0 = center, 4 = below
        for column, dimtad in enumerate((1, 0, 4)):
            place_dim(px + column * 3 * delta, py, 1, dimtad)
    doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0))
    doc.saveas(
        CWD
        / f"dim_radius_{dxfversion}_user_defined_inside_dimtmove_{dimtmove}.dxf"
    )
def radius_user_defined_inside_horizontal(dxfversion=DXFVERSION, delta=10):
    """Radius dimensions with user-defined text location inside the circle
    and horizontal text (dimtih=1), in 3 columns for dimtad
    above/center/below.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    msp = doc.modelspace()

    def place_dim(px, py, distance, dimtad):
        center = Vec3(px, py)
        msp.add_circle((px, py), radius=3)
        text_location = center + Vec3.from_deg_angle(angle, distance)
        dim = msp.add_radius_dim(
            center=(px, py),
            radius=3,
            location=text_location,
            dimstyle="EZ_RADIUS",
            override={
                "dimtad": dimtad,
                "dimtih": 1,  # force text inside horizontal
            },
        )
        dim.render(discard=BRICSCAD)

    for px, py in multiple_locations(delta=delta):
        angle = Vec3(px, py).angle_deg
        # dimtad: 1 = above, 0 = center, 4 = below
        for column, dimtad in enumerate((1, 0, 4)):
            place_dim(px + column * 3 * delta, py, 1, dimtad)
    doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0))
    doc.saveas(
        CWD / f"dim_radius_{dxfversion}_user_defined_inside_horizontal.dxf"
    )
def radius_3d(dxfversion=DXFVERSION, delta=10):
    """Radius dimensions rendered in user coordinate systems tilted 45 deg
    around the local x-axis.
    """
    doc = ezdxf.new(dxfversion, setup=True)
    msp = doc.modelspace()
    for px, py in multiple_locations(delta=delta):
        # local coordinate system at (px, py), tilted 45 deg around local x
        ucs = UCS(origin=(px, py, 0)).rotate_local_x(math.radians(45))
        orientation = Vec3(px, py).angle_deg
        msp.add_circle((0, 0), radius=3).transform(ucs.matrix)
        dim = msp.add_radius_dim(
            center=(0, 0), radius=3, angle=orientation, dimstyle="EZ_RADIUS"
        )
        # render the DIMENSION geometry in the UCS
        dim.render(discard=BRICSCAD, ucs=ucs)
    doc.set_modelspace_vport(height=3 * delta)
    doc.saveas(CWD / f"dim_radius_{dxfversion}_3d.dxf")
# Script entry point: create all example files in the output directory.
if __name__ == "__main__":
    radius_default_outside()
    radius_default_inside(dimtmove=0)  # dimline from center
    radius_default_inside(dimtmove=1)  # dimline from text
    radius_default_outside_horizontal()
    radius_default_inside_horizontal(dimtmove=0)  # dimline from center
    radius_default_inside_horizontal(dimtmove=1)  # dimline from text
    radius_user_defined_outside()
    radius_user_defined_outside_horizontal()
    radius_user_defined_inside(dimtmove=0)  # dimline from text, also for 1
    radius_user_defined_inside(dimtmove=2)  # dimline from center
    radius_user_defined_inside_horizontal()
    radius_3d()
| mit | cf992c69d924aad3529e873a3a671bdc | 33.508621 | 86 | 0.58656 | 3.192185 | false | false | false | false |
mozman/ezdxf | src/ezdxf/addons/drawing/qt_text_renderer.py | 1 | 4977 | # Copyright (c) 2020-2022, Matthew Broadway
# License: MIT License
from __future__ import annotations
from typing import Optional, Union
from collections import defaultdict
from functools import lru_cache
from .text_renderer import TextRenderer
from ezdxf.addons.xqt import QtCore as qc, QtGui as qg
from ezdxf.math import Matrix44
from ezdxf.tools.fonts import FontMeasurements, FontFace, weight_name_to_value
import ezdxf.path
class QtTextRenderer(TextRenderer[qg.QFont]):
    """Text renderer based on Qt: maps FontFace descriptions to QFont and
    caches rendered text paths and font measurements per font
    (cache key is QFont.key()).
    """
    # NOTE(review): the default QFont() instance is created once at class
    # definition time and shared by all instances constructed without an
    # explicit font argument — confirm this sharing is intended.
    def __init__(self, font=qg.QFont(), use_cache: bool = True):
        self._default_font = font
        self._use_cache = use_cache
        # Each font has its own text path cache
        # key is QFont.key()
        self._text_path_cache: dict[
            str, dict[str, qg.QPainterPath]
        ] = defaultdict(dict)
        # Each font has its own font measurements cache
        # key is QFont.key()
        self._font_measurement_cache: dict[str, FontMeasurements] = {}
    @property
    def default_font(self) -> qg.QFont:
        """Font used when no FontFace is given or cannot be resolved."""
        return self._default_font
    def clear_cache(self):
        """Discard all cached text paths (measurement cache is kept)."""
        self._text_path_cache.clear()
    def get_scale(self, desired_cap_height: float, font: qg.QFont) -> float:
        """Return the scaling factor to render *font* at *desired_cap_height*."""
        measurements = self.get_font_measurements(font)
        return desired_cap_height / measurements.cap_height
    def get_font_properties(self, font: Optional[FontFace]) -> qg.QFont:
        """Return a QFont for *font*, falling back to the default font."""
        if font is None:
            return self.default_font
        font_properties = _get_font(font)
        if font_properties is None:
            return self.default_font
        return font_properties
    def get_font_measurements(self, font: qg.QFont) -> FontMeasurements:
        """Return cached FontMeasurements derived from the glyphs "X", "x", "p"."""
        # None is the default font.
        key = font.key() if font is not None else None
        measurements = self._font_measurement_cache.get(key)
        if measurements is None:
            # measure representative glyphs: cap height from "X", x-height
            # from "x", descender from "p"
            upper_x = self.get_text_rect("X", font)
            lower_x = self.get_text_rect("x", font)
            lower_p = self.get_text_rect("p", font)
            baseline = lower_x.bottom()
            measurements = FontMeasurements(
                baseline=baseline,
                cap_height=baseline - upper_x.top(),
                x_height=baseline - lower_x.top(),
                descender_height=lower_p.bottom() - baseline,
            )
            self._font_measurement_cache[key] = measurements
        return measurements
    def get_text_path(self, text: str, font: qg.QFont) -> qg.QPainterPath:
        """Return the QPainterPath of *text*, cached per font if enabled."""
        # None is the default font
        key = font.key() if font is not None else None
        cache = self._text_path_cache[key]  # defaultdict(dict)
        path = cache.get(text, None)
        if path is None:
            if font is None:
                font = self._default_font
            path = qg.QPainterPath()
            path.addText(0, 0, font, text)
            if self._use_cache:
                cache[text] = path
        return path
    def get_text_rect(self, text: str, font: qg.QFont) -> qc.QRectF:
        """Return the bounding rectangle of the rendered *text*."""
        # no point caching the bounding rect calculation, it is very cheap
        return self.get_text_path(text, font).boundingRect()
    def get_text_line_width(
        self, text: str, cap_height: float, font: Optional[FontFace] = None
    ) -> float:
        """Return the width of a single text line scaled to *cap_height*."""
        qfont = self.get_font_properties(font)
        scale = self.get_scale(cap_height, qfont)
        return self.get_text_rect(text, qfont).right() * scale
    def get_ezdxf_path(self, text: str, font: qg.QFont) -> ezdxf.path.Path:
        """Convert the rendered *text* into an ezdxf path; returns an empty
        path if Qt fails to render the text.
        """
        try:
            text_path = self.get_text_path(text, font)
        except (RuntimeError, ValueError):
            return ezdxf.path.Path()
        # flip y-axis: Qt's y-axis points down, DXF's y-axis points up
        return ezdxf.path.multi_path_from_qpainter_path(text_path).transform(
            Matrix44.scale(1, -1, 0)
        )
@lru_cache(maxsize=256)  # fonts.FontFace is a named tuple -> hashable
def _get_font(font: FontFace) -> Optional[qg.QFont]:
    """Create a QFont for *font* or return None for a falsy *font*."""
    if not font:
        return None
    is_italic = "italic" in font.style.lower()
    qfont = qg.QFont(
        font.family, weight=_map_weight(font.weight), italic=is_italic
    )
    # INFO: setting the stretch value makes results worse!
    # qfont.setStretch(_map_stretch(font.stretch))
    return qfont
# https://doc.qt.io/qt-5/qfont.html#Weight-enum
# QFont::Thin 0 0
# QFont::ExtraLight 12 12
# QFont::Light 25 25
# QFont::Normal 50 50
# QFont::Medium 57 57
# QFont::DemiBold 63 63
# QFont::Bold 75 75
# QFont::ExtraBold 81 81
# QFont::Black 87 87
def _map_weight(weight: Union[str, int]) -> int:
if isinstance(weight, str):
weight = weight_name_to_value(weight)
value = int((weight / 10) + 10) # normal: 400 -> 50
return min(max(0, value), 99)
# https://doc.qt.io/qt-5/qfont.html#Stretch-enum
# Maps CSS-like stretch names to Qt stretch factors (100 = unstretched);
# currently unused, see the note in _get_font().
StretchMapping = {
    "ultracondensed": 50,
    "extracondensed": 62,
    "condensed": 75,
    "semicondensed": 87,
    "unstretched": 100,
    "semiexpanded": 112,
    "expanded": 125,
    "extraexpanded": 150,
    "ultraexpanded": 200,
}
| mit | b42de24b84b9f5da4816d551ce8482ce | 34.049296 | 78 | 0.620454 | 3.420619 | false | false | false | false |
mozman/ezdxf | profiling/read_big_R12_files.py | 1 | 2436 | # Copyright (c) 2019 Manfred Moitzi
# License: MIT License
import time
import ezdxf
CWD = ezdxf.options.test_files_path / "CADKitSamples"
_3D_MODEL = CWD / "fanuc-430-arm.dxf"
_2D_PLAN = CWD / "AEC Plan Elev Sample.dxf"
def load_3D_model():
import ezdxf
ezdxf.readfile(filename=_3D_MODEL)
def iter_3D_model():
import ezdxf
doc = ezdxf.readfile(filename=_3D_MODEL)
msp = doc.modelspace()
count = 0
for e in msp:
e.dxftype()
count += 1
print(f"Iterated {count} entities in modelspace (fanuc-430-arm.dxf).")
del doc
def single_pass_iter_3D_model():
from ezdxf.addons.iterdxf import single_pass_modelspace
count = 0
for e in single_pass_modelspace(open(_3D_MODEL, "rb")):
e.dxftype()
count += 1
print(f"Iterated {count} entities in modelspace (fanuc-430-arm.dxf).")
def from_disk_iter_3D_model():
from ezdxf.addons.iterdxf import opendxf
count = 0
doc = opendxf(_3D_MODEL)
for e in doc.modelspace():
e.dxftype()
count += 1
doc.close()
print(f"Iterated {count} entities in modelspace (fanuc-430-arm.dxf).")
def load_2D_plan():
import ezdxf
ezdxf.readfile(_2D_PLAN)
def iter_2D_plan():
import ezdxf
doc = ezdxf.readfile(_2D_PLAN)
msp = doc.modelspace()
count = 0
for e in msp:
e.dxftype()
count += 1
print(
f"Iterated {count} entities in modelspace (AEC Plan Elev Sample.dxf)."
)
del doc
def print_result(time, text):
print(f"Operation: {text} takes {time:.2f} s\n")
def run(func):
start = time.perf_counter()
func()
end = time.perf_counter()
return end - start
if __name__ == "__main__":
print_result(
run(load_3D_model), 'ezdxf.readfile() - load "faunc-430-arm.dxf"'
)
print_result(
run(iter_3D_model), 'ezdxf.readfile() - iteration "faunc-430-arm.dxf"'
)
print_result(
run(single_pass_iter_3D_model),
'iterdxf.single_pass_modelspace() - single pass iteration from disk "faunc-430-arm.dxf"',
)
print_result(
run(from_disk_iter_3D_model),
'iterdxf.opendxf() - seekable file iteration from disk "faunc-430-arm.dxf"',
)
print_result(
run(load_2D_plan), 'ezdxf.readfile() - load "AEC Plan Elev Sample.dxf"'
)
print_result(
run(iter_2D_plan), 'ezdxf.readfile() - iter "AEC Plan Elev Sample.dxf"'
)
| mit | 51108cdfedbae7107cece0a0dd82e8ba | 22.423077 | 97 | 0.612069 | 2.862515 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/packages/project_package.py | 1 | 2595 | from __future__ import annotations
from typing import TYPE_CHECKING
from typing import Any
from poetry.core.constraints.version import parse_constraint
from poetry.core.version.markers import parse_marker
if TYPE_CHECKING:
from poetry.core.packages.dependency import Dependency
from poetry.core.constraints.version import Version
from poetry.core.packages.package import Package
from poetry.core.packages.utils.utils import create_nested_marker
class ProjectPackage(Package):
    """The root package of a poetry project.

    Unlike a plain Package, the version and python constraint are mutable,
    and build configuration, explicit package/include/exclude rules and
    custom URLs are attached.
    """
    def __init__(
        self,
        name: str,
        version: str | Version,
        pretty_version: str | None = None,
    ) -> None:
        super().__init__(name, version, pretty_version)
        # contents of the [tool.poetry.build] table (or legacy build settings)
        self.build_config: dict[str, Any] = {}
        self.packages: list[dict[str, Any]] = []
        self.include: list[dict[str, Any]] = []
        self.exclude: list[dict[str, Any]] = []
        # extra project URLs merged into `urls`
        self.custom_urls: dict[str, str] = {}
        if self._python_versions == "*":
            # "*" means: any Python supported by poetry (~2.7 or >=3.4)
            self._python_constraint = parse_constraint("~2.7 || >=3.4")
    @property
    def build_script(self) -> str | None:
        """Path of the custom build script or None."""
        return self.build_config.get("script")
    def is_root(self) -> bool:
        # this is always the root package of the dependency graph
        return True
    def to_dependency(self) -> Dependency:
        """Return this package as a Dependency flagged as root."""
        dependency = super().to_dependency()
        dependency.is_root = True
        return dependency
    @property
    def python_versions(self) -> str:
        return self._python_versions
    @python_versions.setter
    def python_versions(self, value: str) -> None:
        # keep constraint and marker in sync with the raw version string
        self._python_versions = value
        if value == "*":
            value = "~2.7 || >=3.4"
        self._python_constraint = parse_constraint(value)
        self._python_marker = parse_marker(
            create_nested_marker("python_version", self._python_constraint)
        )
    @property
    def version(self) -> Version:
        # override version to make it settable
        return super().version
    @version.setter
    def version(self, value: str | Version) -> None:
        self._set_version(value)
    @property
    def urls(self) -> dict[str, str]:
        """Standard package URLs merged with the project's custom URLs."""
        urls = super().urls
        urls.update(self.custom_urls)
        return urls
    def __hash__(self) -> int:
        # The parent Package class's __hash__ incorporates the version because
        # a Package's version is immutable. But a ProjectPackage's version is
        # mutable. So call Package's parent hash function.
        return super(Package, self).__hash__()
    def build_should_generate_setup(self) -> bool:
        """Whether a setup.py file should be generated (default: True)."""
        return self.build_config.get("generate-setup-file", True)
| mit | ae894385ae03184a1f77fdf93a3a8b20 | 27.833333 | 78 | 0.623892 | 3.96789 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/_vendor/attr/setters.py | 4 | 1400 | # SPDX-License-Identifier: MIT
"""
Commonly used hooks for on_setattr.
"""
from . import _config
from .exceptions import FrozenAttributeError
def pipe(*setters):
    """
    Run all *setters* and return the return value of the last one.

    .. versionadded:: 20.1.0
    """

    def wrapped_pipe(instance, attrib, new_value):
        # Thread the value through every setter, left to right.
        result = new_value
        for setter in setters:
            result = setter(instance, attrib, result)
        return result

    return wrapped_pipe
def frozen(_, __, ___):
    """
    Prevent an attribute to be modified.

    .. versionadded:: 20.1.0
    """
    # Unconditionally refuses the write; the three ignored parameters are the
    # (instance, attribute, new_value) signature required of on_setattr hooks.
    raise FrozenAttributeError()
def validate(instance, attrib, new_value):
    """
    Run *attrib*'s validator on *new_value* if it has one.

    .. versionadded:: 20.1.0
    """
    # Global kill switch: validators can be disabled process-wide.
    if _config._run_validators is False:
        return new_value

    validator = attrib.validator
    if validator:
        validator(instance, attrib, new_value)
    return new_value
def convert(instance, attrib, new_value):
    """
    Run *attrib*'s converter -- if it has one -- on *new_value* and return the
    result.

    .. versionadded:: 20.1.0
    """
    converter = attrib.converter
    return converter(new_value) if converter else new_value
# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
# autodata stopped working, so the docstring is inlined in the API docs.
# NOTE: compared by identity (``hook is NO_OP``), hence a plain unique object().
NO_OP = object()
| mit | d01ea889c9856ba1410a8d861621342b | 18.178082 | 79 | 0.618571 | 3.743316 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/_vendor/attr/converters.py | 2 | 3610 | # SPDX-License-Identifier: MIT
"""
Commonly useful converters.
"""
import typing
from ._compat import _AnnotationExtractor
from ._make import NOTHING, Factory, pipe
__all__ = [
"default_if_none",
"optional",
"pipe",
"to_bool",
]
def optional(converter):
    """
    A converter that allows an attribute to be optional. An optional attribute
    is one which can be set to ``None``.

    Type annotations will be inferred from the wrapped converter's, if it
    has any.

    :param callable converter: the converter that is used for non-``None``
        values.

    .. versionadded:: 17.1.0
    """

    def optional_converter(val):
        # None passes through untouched; everything else is converted.
        return None if val is None else converter(val)

    # Mirror the wrapped converter's annotations (parameter and return type)
    # on the wrapper, widened to Optional.
    xtr = _AnnotationExtractor(converter)

    param_type = xtr.get_first_param_type()
    if param_type:
        optional_converter.__annotations__["val"] = typing.Optional[param_type]

    return_type = xtr.get_return_type()
    if return_type:
        optional_converter.__annotations__["return"] = typing.Optional[return_type]

    return optional_converter
def default_if_none(default=NOTHING, factory=None):
    """
    A converter that allows to replace ``None`` values by *default* or the
    result of *factory*.

    :param default: Value to be used if ``None`` is passed. Passing an instance
        of `attrs.Factory` is supported, however the ``takes_self`` option
        is *not*.
    :param callable factory: A callable that takes no parameters whose result
        is used if ``None`` is passed.

    :raises TypeError: If **neither** *default* or *factory* is passed.
    :raises TypeError: If **both** *default* and *factory* are passed.
    :raises ValueError: If an instance of `attrs.Factory` is passed with
        ``takes_self=True``.

    .. versionadded:: 18.2.0
    """
    # Exactly one of *default* / *factory* must be provided.
    if default is NOTHING and factory is None:
        raise TypeError("Must pass either `default` or `factory`.")

    if default is not NOTHING and factory is not None:
        raise TypeError(
            "Must pass either `default` or `factory` but not both."
        )

    if factory is not None:
        default = Factory(factory)

    if isinstance(default, Factory):
        if default.takes_self:
            raise ValueError(
                "`takes_self` is not supported by default_if_none."
            )

        def default_if_none_converter(val):
            # Factory default: build a fresh value for every None.
            if val is None:
                return default.factory()
            return val

    else:

        def default_if_none_converter(val):
            # Plain default: substitute the fixed value for None.
            return default if val is None else val

    return default_if_none_converter
def to_bool(val):
    """
    Convert "boolean" strings (e.g., from env. vars.) to real booleans.

    Values mapping to :code:`True`:

    - :code:`True`
    - :code:`"true"` / :code:`"t"`
    - :code:`"yes"` / :code:`"y"`
    - :code:`"on"`
    - :code:`"1"`
    - :code:`1`

    Values mapping to :code:`False`:

    - :code:`False`
    - :code:`"false"` / :code:`"f"`
    - :code:`"no"` / :code:`"n"`
    - :code:`"off"`
    - :code:`"0"`
    - :code:`0`

    :raises ValueError: for any other value.

    .. versionadded:: 21.3.0
    """
    if isinstance(val, str):
        val = val.lower()

    try:
        if val in {True, "true", "t", "yes", "y", "on", "1", 1}:
            return True
        if val in {False, "false", "f", "no", "n", "off", "0", 0}:
            return False
    except TypeError:
        # Raised when "val" is not hashable (e.g., lists)
        pass

    raise ValueError(f"Cannot convert value to bool: {val}")
| mit | 9bd576df0006ceae0f919c1a284a96cd | 24.069444 | 79 | 0.581163 | 3.733195 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/_vendor/lark/tree_matcher.py | 2 | 6004 | """Tree matcher based on Lark grammar"""
import re
from collections import defaultdict
from . import Tree, Token
from .common import ParserConf
from .parsers import earley
from .grammar import Rule, Terminal, NonTerminal
def is_discarded_terminal(t):
    """True for terminal symbols that are filtered out of the parse tree."""
    return t.filter_out if t.is_term else t.is_term
class _MakeTreeMatch:
    """Callable rule alias: builds an unreduced match Tree for a rule.

    Instances are used as parser callbacks; the produced tree is tagged via
    ``meta`` so later passes can recognize it as a match artifact.
    """

    def __init__(self, name, expansion):
        self.name = name
        self.expansion = expansion

    def __call__(self, args):
        tree = Tree(self.name, args)
        tree.meta.match_tree = True
        tree.meta.orig_expansion = self.expansion
        return tree
def _best_from_group(seq, group_key, cmp_key):
d = {}
for item in seq:
key = group_key(item)
if key in d:
v1 = cmp_key(item)
v2 = cmp_key(d[key])
if v2 > v1:
d[key] = item
else:
d[key] = item
return list(d.values())
def _best_rules_from_group(rules):
    """Deduplicate rules (keeping the longest expansion per rule), then order
    the survivors by ascending expansion length."""
    deduped = _best_from_group(rules, lambda r: r, lambda r: -len(r.expansion))
    return sorted(deduped, key=lambda r: len(r.expansion))
def _match(term, token):
    """Earley match predicate: does grammar symbol *term* match *token*?

    *token* is either a sub-Tree (matched by rule name, template args
    ignored) or a Token (matched by terminal type).
    """
    if isinstance(token, Tree):
        name, _args = parse_rulename(term.name)
        return token.data == name
    if isinstance(token, Token):
        return term == Terminal(token.type)
    assert False, (term, token)
def make_recons_rule(origin, expansion, old_expansion):
    # Build a reconstruction rule whose callback produces an unreduced match
    # tree remembering the original expansion.
    return Rule(origin, expansion, alias=_MakeTreeMatch(origin.name, old_expansion))
def make_recons_rule_to_term(origin, term):
    # Reconstruction rule mapping *origin* to the single terminal named after
    # *term* (used to accept already-inlined symbols verbatim).
    return make_recons_rule(origin, [Terminal(term.name)], [term])
def parse_rulename(s):
    "Parse rule names that may contain a template syntax (like rule{a, b, ...})"
    name, args_str = re.match(r'(\w+)(?:{(.+)})?', s).groups()
    if args_str:
        return name, [arg.strip() for arg in args_str.split(',')]
    # No template part: args_str is None.
    return name, args_str
class ChildrenLexer:
    """Minimal lexer facade that feeds a tree's children straight to a parser
    as if they were tokens."""

    def __init__(self, children):
        self.children = children

    def lex(self, parser_state):
        # parser_state is unused: the children already are the "token" stream.
        return self.children
class TreeMatcher:
    """Match the elements of a tree node, based on an ontology
    provided by a Lark grammar.

    Supports templates and inlined rules (`rule{a, b,..}` and `_rule`)

    Initialize with an instance of Lark.
    """

    def __init__(self, parser):
        # XXX TODO calling compile twice returns different results!
        assert not parser.options.maybe_placeholders
        # XXX TODO: we just ignore the potential existence of a postlexer
        self.tokens, rules, _extra = parser.grammar.compile(parser.options.start, set())

        # Rules only usable as the root of a match, keyed by rule name.
        self.rules_for_root = defaultdict(list)

        self.rules = list(self._build_recons_rules(rules))
        self.rules.reverse()

        # Choose the best rule from each group of {rule => [rule.alias]}, since we only really need one derivation.
        self.rules = _best_rules_from_group(self.rules)

        self.parser = parser
        # Lazily-built Earley parsers, one per requested root rule name.
        self._parser_cache = {}

    def _build_recons_rules(self, rules):
        "Convert tree-parsing/construction rules to tree-matching rules"
        expand1s = {r.origin for r in rules if r.options.expand1}

        aliases = defaultdict(list)
        for r in rules:
            if r.alias:
                aliases[r.origin].append(r.alias)

        rule_names = {r.origin for r in rules}
        # Symbols that may appear inlined inside the tree: underscore rules,
        # expand1 rules and rules that carry aliases.
        nonterminals = {sym for sym in rule_names
                        if sym.name.startswith('_') or sym in expand1s or sym in aliases}

        seen = set()
        for r in rules:
            # Drop filtered-out terminals; keep inlinable symbols as
            # nonterminals, everything else becomes a terminal to match.
            recons_exp = [sym if sym in nonterminals else Terminal(sym.name)
                          for sym in r.expansion if not is_discarded_terminal(sym)]

            # Skip self-recursive constructs
            if recons_exp == [r.origin] and r.alias is None:
                continue

            sym = NonTerminal(r.alias) if r.alias else r.origin
            rule = make_recons_rule(sym, recons_exp, r.expansion)

            if sym in expand1s and len(recons_exp) != 1:
                self.rules_for_root[sym.name].append(rule)

                if sym.name not in seen:
                    yield make_recons_rule_to_term(sym, sym)
                    seen.add(sym.name)
            else:
                if sym.name.startswith('_') or sym in expand1s:
                    yield rule
                else:
                    self.rules_for_root[sym.name].append(rule)

        for origin, rule_aliases in aliases.items():
            for alias in rule_aliases:
                yield make_recons_rule_to_term(origin, NonTerminal(alias))
            yield make_recons_rule_to_term(origin, origin)

    def match_tree(self, tree, rulename):
        """Match the elements of `tree` to the symbols of rule `rulename`.

        Parameters:
            tree (Tree): the tree node to match
            rulename (str): The expected full rule name (including template args)

        Returns:
            Tree: an unreduced tree that matches `rulename`

        Raises:
            UnexpectedToken: If no match was found.

        Note:
            It's the callers' responsibility match the tree recursively.
        """
        if rulename:
            # validate
            name, _args = parse_rulename(rulename)
            assert tree.data == name
        else:
            rulename = tree.data

        # TODO: ambiguity?
        try:
            parser = self._parser_cache[rulename]
        except KeyError:
            rules = self.rules + _best_rules_from_group(self.rules_for_root[rulename])

            # TODO pass callbacks through dict, instead of alias?
            callbacks = {rule: rule.alias for rule in rules}
            conf = ParserConf(rules, callbacks, [rulename])
            parser = earley.Parser(self.parser.lexer_conf, conf, _match, resolve_ambiguity=True)
            self._parser_cache[rulename] = parser

        # find a full derivation
        unreduced_tree = parser.parse(ChildrenLexer(tree.children), rulename)
        assert unreduced_tree.data == rulename
        return unreduced_tree
| mit | 02f943d1234a6092dc12971735a546dd | 31.27957 | 115 | 0.598101 | 3.886084 | false | false | false | false |
python-poetry/poetry-core | tests/spdx/test_helpers.py | 1 | 1413 | from __future__ import annotations
import pytest
from poetry.core.spdx.helpers import license_by_id
def test_license_by_id() -> None:
    """Look up licenses by their short SPDX identifiers."""
    mit = license_by_id("MIT")

    assert mit.id == "MIT"
    assert mit.name == "MIT License"
    assert mit.is_osi_approved
    assert not mit.is_deprecated

    lgpl = license_by_id("LGPL-3.0-or-later")

    assert lgpl.id == "LGPL-3.0-or-later"
    assert lgpl.name == "GNU Lesser General Public License v3.0 or later"
    assert lgpl.is_osi_approved
    assert not lgpl.is_deprecated
def test_license_by_id_is_case_insensitive() -> None:
    """Identifier lookup must normalize casing."""
    for spelling in ("mit", "miT"):
        found = license_by_id(spelling)
        assert found.id == "MIT"
def test_license_by_id_with_full_name() -> None:
    """A license can also be resolved from its full display name."""
    lgpl = license_by_id("GNU Lesser General Public License v3.0 or later")

    assert lgpl.id == "LGPL-3.0-or-later"
    assert lgpl.name == "GNU Lesser General Public License v3.0 or later"
    assert lgpl.is_osi_approved
    assert not lgpl.is_deprecated
def test_license_by_id_invalid() -> None:
    # An empty identifier must be rejected with a ValueError.
    with pytest.raises(ValueError):
        license_by_id("")
def test_license_by_id_custom() -> None:
    """Unknown identifiers yield ad-hoc, non-OSI, non-deprecated licenses."""
    custom = license_by_id("Custom")

    assert custom.id == "Custom"
    assert custom.name == "Custom"
    assert not custom.is_osi_approved
    assert not custom.is_deprecated
| mit | 1c23937360afdca64a162f2265ac646f | 25.166667 | 78 | 0.677282 | 3.372315 | false | true | false | false |
python-poetry/poetry-core | tests/masonry/builders/test_complete.py | 1 | 19528 | from __future__ import annotations
import ast
import os
import platform
import re
import shutil
import sys
import tarfile
import tempfile
import zipfile
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Iterator
import pytest
from poetry.core import __version__
from poetry.core.factory import Factory
from poetry.core.masonry.builder import Builder
if TYPE_CHECKING:
from pytest_mock import MockerFixture
fixtures_dir = Path(__file__).parent / "fixtures"
@pytest.fixture(autouse=True)
def setup() -> Iterator[None]:
    """Ensure every test starts and finishes without leftover dist/ output."""
    clear_samples_dist()

    yield

    clear_samples_dist()
def clear_samples_dist() -> None:
    """Delete every ``dist`` directory produced under the fixture projects."""
    for dist_dir in fixtures_dir.glob("**/dist"):
        if dist_dir.is_dir():
            shutil.rmtree(str(dist_dir))
@pytest.mark.skipif(
    sys.platform == "win32"
    and sys.version_info <= (3, 6)
    or platform.python_implementation().lower() == "pypy",
    reason="Disable test on Windows for Python <=3.6 and for PyPy",
)
def test_wheel_c_extension() -> None:
    """Build the ``extended`` fixture and check that the sdist ships the C
    source while the wheel ships a compiled extension and platlib metadata."""
    module_path = fixtures_dir / "extended"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = fixtures_dir / "extended" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/extended/extended.c" in tar.getnames()

    whl = list((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl"))[0]

    assert whl.exists()

    # A context manager guarantees the archive is closed even when an
    # assertion fails; the original ``zip = zipfile.ZipFile(...)`` both
    # shadowed the builtin ``zip`` and leaked the handle if the
    # has_compiled_extension assertion (outside the try/finally) failed.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        has_compiled_extension = any(
            name.startswith("extended/extended") and name.endswith((".so", ".pyd"))
            for name in whl_zip.namelist()
        )

        assert has_compiled_extension

        wheel_data = whl_zip.read("extended-0.1.dist-info/WHEEL").decode()

        assert (
            re.match(
                f"""(?m)^\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: false
Tag: cp[23]_?\\d+-cp[23]_?\\d+m?u?-.+
$""",
                wheel_data,
            )
            is not None
        )

        records = whl_zip.read("extended-0.1.dist-info/RECORD").decode()

        assert re.search(r"\s+extended/extended.*\.(so|pyd)", records) is not None
@pytest.mark.skipif(
    sys.platform == "win32"
    and sys.version_info <= (3, 6)
    or platform.python_implementation().lower() == "pypy",
    reason="Disable test on Windows for Python <=3.6 and for PyPy",
)
def test_wheel_c_extension_with_no_setup() -> None:
    """Same as test_wheel_c_extension, but for a fixture without a setup.py."""
    module_path = fixtures_dir / "extended_with_no_setup"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = fixtures_dir / "extended_with_no_setup" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/extended/extended.c" in tar.getnames()

    whl = list((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl"))[0]

    assert whl.exists()

    # Context manager closes the archive even on assertion failure; also
    # avoids shadowing the builtin ``zip`` as the original code did.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        has_compiled_extension = any(
            name.startswith("extended/extended") and name.endswith((".so", ".pyd"))
            for name in whl_zip.namelist()
        )

        assert has_compiled_extension

        wheel_data = whl_zip.read("extended-0.1.dist-info/WHEEL").decode()

        assert (
            re.match(
                f"""(?m)^\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: false
Tag: cp[23]_?\\d+-cp[23]_?\\d+m?u?-.+
$""",
                wheel_data,
            )
            is not None
        )

        records = whl_zip.read("extended-0.1.dist-info/RECORD").decode()

        assert re.search(r"\s+extended/extended.*\.(so|pyd)", records) is not None
@pytest.mark.skipif(
    sys.platform == "win32"
    and sys.version_info <= (3, 6)
    or platform.python_implementation().lower() == "pypy",
    reason="Disable test on Windows for Python <=3.6 and for PyPy",
)
def test_wheel_c_extension_src_layout() -> None:
    """Same as test_wheel_c_extension, but for a src-layout fixture."""
    module_path = fixtures_dir / "src_extended"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = fixtures_dir / "src_extended" / "dist" / "extended-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "extended-0.1/build.py" in tar.getnames()
        assert "extended-0.1/src/extended/extended.c" in tar.getnames()

    whl = list((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl"))[0]

    assert whl.exists()

    # Context manager closes the archive even on assertion failure; also
    # avoids shadowing the builtin ``zip`` as the original code did.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        has_compiled_extension = any(
            name.startswith("extended/extended") and name.endswith((".so", ".pyd"))
            for name in whl_zip.namelist()
        )

        assert has_compiled_extension

        wheel_data = whl_zip.read("extended-0.1.dist-info/WHEEL").decode()

        assert (
            re.match(
                f"""(?m)^\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: false
Tag: cp[23]_?\\d+-cp[23]_?\\d+m?u?-.+
$""",
                wheel_data,
            )
            is not None
        )

        records = whl_zip.read("extended-0.1.dist-info/RECORD").decode()

        assert re.search(r"\s+extended/extended.*\.(so|pyd)", records) is not None
def test_complete() -> None:
    """Build the ``complete`` fixture and verify the wheel's permissions,
    contents, scripts, entry points, WHEEL/METADATA and RECORD files."""
    module_path = fixtures_dir / "complete"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()
    if sys.platform != "win32":
        # Wheels should be produced world-readable.
        assert (os.stat(str(whl)).st_mode & 0o777) == 0o644

    # A context manager guarantees the archive is closed even when an
    # assertion fails; the original ``zip = zipfile.ZipFile(...)`` shadowed
    # the builtin ``zip`` and its try/finally did not cover the first asserts.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        assert "my_package/sub_pgk1/extra_file.xml" not in whl_zip.namelist()
        assert "my-package-1.2.3.data/scripts/script.sh" in whl_zip.namelist()
        assert (
            "Hello World"
            in whl_zip.read("my-package-1.2.3.data/scripts/script.sh").decode()
        )

        entry_points = whl_zip.read("my_package-1.2.3.dist-info/entry_points.txt")

        assert (
            entry_points.decode()
            == """\
[console_scripts]
extra-script=my_package.extra:main[time]
my-2nd-script=my_package:main2
my-script=my_package:main
"""
        )
        wheel_data = whl_zip.read("my_package-1.2.3.dist-info/WHEEL").decode()

        assert (
            wheel_data
            == f"""\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: true
Tag: py3-none-any
"""
        )
        wheel_data = whl_zip.read("my_package-1.2.3.dist-info/METADATA").decode()

        assert (
            wheel_data
            == """\
Metadata-Version: 2.1
Name: my-package
Version: 1.2.3
Summary: Some description.
Home-page: https://python-poetry.org/
License: MIT
Keywords: packaging,dependency,poetry
Author: Sébastien Eustace
Author-email: sebastien@eustace.io
Maintainer: People Everywhere
Maintainer-email: people@everywhere.com
Requires-Python: >=3.6,<4.0
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Provides-Extra: time
Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0)
Requires-Dist: cleo (>=0.6,<0.7)
Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time")
Project-URL: Documentation, https://python-poetry.org/docs
Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues
Project-URL: Repository, https://github.com/python-poetry/poetry
Description-Content-Type: text/x-rst
My Package
==========
"""
        )
        actual_records = whl_zip.read("my_package-1.2.3.dist-info/RECORD").decode()

        # For some reason, the ordering of the files and the SHA hashes
        # vary per operating systems and Python versions.
        # So instead of 1:1 assertion, let's do a bit clunkier one:

        expected_records = [
            "my_package/__init__.py",
            "my_package/data1/test.json",
            "my_package/sub_pkg1/__init__.py",
            "my_package/sub_pkg2/__init__.py",
            "my_package/sub_pkg2/data2/data.json",
            "my_package-1.2.3.dist-info/entry_points.txt",
            "my_package-1.2.3.dist-info/LICENSE",
            "my_package-1.2.3.dist-info/WHEEL",
            "my_package-1.2.3.dist-info/METADATA",
        ]

        for expected_record in expected_records:
            assert expected_record in actual_records
def test_complete_no_vcs() -> None:
    """Same as test_complete, but built from a copy outside version control to
    check include/exclude handling without any VCS information."""
    # Copy the complete fixtures dir to a temporary directory
    module_path = fixtures_dir / "complete"
    temporary_dir = Path(tempfile.mkdtemp()) / "complete"

    shutil.copytree(module_path.as_posix(), temporary_dir.as_posix())

    builder = Builder(Factory().create_poetry(temporary_dir))
    builder.build(fmt="all")

    whl = temporary_dir / "dist" / "my_package-1.2.3-py3-none-any.whl"

    assert whl.exists()

    # A context manager guarantees the archive is closed even when an
    # assertion fails; the original ``zip = zipfile.ZipFile(...)`` shadowed
    # the builtin ``zip`` and its try/finally did not cover the namelist check.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        # Check the zipped file to be sure that included and excluded files are
        # correctly taken account of without vcs
        expected_name_list = [
            "my_package/__init__.py",
            "my_package/data1/test.json",
            "my_package/sub_pkg1/__init__.py",
            "my_package/sub_pkg2/__init__.py",
            "my_package/sub_pkg2/data2/data.json",
            "my-package-1.2.3.data/scripts/script.sh",
            "my_package/sub_pkg3/foo.py",
            "my_package-1.2.3.dist-info/entry_points.txt",
            "my_package-1.2.3.dist-info/LICENSE",
            "my_package-1.2.3.dist-info/WHEEL",
            "my_package-1.2.3.dist-info/METADATA",
            "my_package-1.2.3.dist-info/RECORD",
        ]

        assert sorted(whl_zip.namelist()) == sorted(expected_name_list)

        entry_points = whl_zip.read("my_package-1.2.3.dist-info/entry_points.txt")

        assert (
            entry_points.decode()
            == """\
[console_scripts]
extra-script=my_package.extra:main[time]
my-2nd-script=my_package:main2
my-script=my_package:main
"""
        )
        wheel_data = whl_zip.read("my_package-1.2.3.dist-info/WHEEL").decode()

        assert (
            wheel_data
            == f"""\
Wheel-Version: 1.0
Generator: poetry-core {__version__}
Root-Is-Purelib: true
Tag: py3-none-any
"""
        )
        wheel_data = whl_zip.read("my_package-1.2.3.dist-info/METADATA").decode()

        assert (
            wheel_data
            == """\
Metadata-Version: 2.1
Name: my-package
Version: 1.2.3
Summary: Some description.
Home-page: https://python-poetry.org/
License: MIT
Keywords: packaging,dependency,poetry
Author: Sébastien Eustace
Author-email: sebastien@eustace.io
Maintainer: People Everywhere
Maintainer-email: people@everywhere.com
Requires-Python: >=3.6,<4.0
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Provides-Extra: time
Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0)
Requires-Dist: cleo (>=0.6,<0.7)
Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time")
Project-URL: Documentation, https://python-poetry.org/docs
Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues
Project-URL: Repository, https://github.com/python-poetry/poetry
Description-Content-Type: text/x-rst
My Package
==========
"""
        )
def test_module_src() -> None:
    """A single-module src layout: the sdist keeps src/, the wheel flattens it."""
    module_path = fixtures_dir / "source_file"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = module_path / "dist" / "module_src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "module_src-0.1/src/module_src.py" in tar.getnames()

    whl = module_path / "dist" / "module_src-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    # ``with`` replaces the original try/finally, which did not shield the
    # handle if ZipFile construction succeeded but an early assert failed,
    # and which shadowed the builtin ``zip``.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        assert "module_src.py" in whl_zip.namelist()
def test_package_src() -> None:
    """A package in src layout: the sdist keeps src/, the wheel flattens it."""
    module_path = fixtures_dir / "source_package"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = module_path / "dist" / "package_src-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "package_src-0.1/src/package_src/module.py" in tar.getnames()

    whl = module_path / "dist" / "package_src-0.1-py2.py3-none-any.whl"

    assert whl.exists()

    # ``with`` replaces the original try/finally and avoids shadowing the
    # builtin ``zip``; the archive is closed even on assertion failure.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        assert "package_src/__init__.py" in whl_zip.namelist()
        assert "package_src/module.py" in whl_zip.namelist()
def test_split_source() -> None:
    """Two packages rooted in different source directories end up side by side
    in the wheel while the sdist keeps both source roots."""
    module_path = fixtures_dir / "split_source"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = module_path / "dist" / "split_source-0.1.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        assert "split_source-0.1/lib_a/module_a/__init__.py" in tar.getnames()
        assert "split_source-0.1/lib_b/module_b/__init__.py" in tar.getnames()

    whl = module_path / "dist" / "split_source-0.1-py3-none-any.whl"

    assert whl.exists()

    # ``with`` replaces the original try/finally and avoids shadowing the
    # builtin ``zip``; the archive is closed even on assertion failure.
    with zipfile.ZipFile(str(whl)) as whl_zip:
        assert "module_a/__init__.py" in whl_zip.namelist()
        assert "module_b/__init__.py" in whl_zip.namelist()
def test_package_with_include(mocker: MockerFixture) -> None:
    """Explicit ``include`` entries must survive VCS-ignore filtering, with
    per-format (sdist vs wheel) selection, and the generated setup.py must
    declare the expected packages/modules."""
    module_path = fixtures_dir / "with-include"

    # Patch git module to return specific excluded files
    p = mocker.patch("poetry.core.vcs.git.Git.get_ignored_files")
    p.return_value = [
        str(
            Path(__file__).parent
            / "fixtures"
            / "with-include"
            / "extra_dir"
            / "vcs_excluded.txt"
        ),
        str(
            Path(__file__).parent
            / "fixtures"
            / "with-include"
            / "extra_dir"
            / "sub_pkg"
            / "vcs_excluded.txt"
        ),
    ]
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = fixtures_dir / "with-include" / "dist" / "with_include-1.2.3.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        # No duplicate entries may be written into the archive.
        assert len(names) == len(set(names))
        assert "with_include-1.2.3/LICENSE" in names
        assert "with_include-1.2.3/README.rst" in names
        assert "with_include-1.2.3/extra_dir/__init__.py" in names
        # Explicitly included, so present despite being git-ignored.
        assert "with_include-1.2.3/extra_dir/vcs_excluded.txt" in names
        assert "with_include-1.2.3/extra_dir/sub_pkg/__init__.py" in names
        assert "with_include-1.2.3/extra_dir/sub_pkg/vcs_excluded.txt" not in names
        assert "with_include-1.2.3/my_module.py" in names
        assert "with_include-1.2.3/notes.txt" in names
        assert "with_include-1.2.3/package_with_include/__init__.py" in names
        assert "with_include-1.2.3/tests/__init__.py" in names
        assert "with_include-1.2.3/pyproject.toml" in names
        assert "with_include-1.2.3/setup.py" in names
        assert "with_include-1.2.3/PKG-INFO" in names
        # wheel-only include must not leak into the sdist.
        assert "with_include-1.2.3/for_wheel_only/__init__.py" not in names
        assert "with_include-1.2.3/src/src_package/__init__.py" in names

        # Evaluate only the assignment statements of the generated setup.py to
        # inspect its package configuration without executing setup() itself.
        file = tar.extractfile("with_include-1.2.3/setup.py")
        assert file
        setup = file.read()
        setup_ast = ast.parse(setup)

        setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
        ns: dict[str, Any] = {}
        exec(compile(setup_ast, filename="setup.py", mode="exec"), ns)
        assert ns["package_dir"] == {"": "src"}
        assert ns["packages"] == [
            "extra_dir",
            "extra_dir.sub_pkg",
            "package_with_include",
            "src_package",
            "tests",
        ]
        assert ns["package_data"] == {"": ["*"]}
        assert ns["modules"] == ["my_module"]

    whl = module_path / "dist" / "with_include-1.2.3-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        names = z.namelist()
        assert len(names) == len(set(names))
        assert "with_include-1.2.3.dist-info/LICENSE" in names
        assert "extra_dir/__init__.py" in names
        assert "extra_dir/vcs_excluded.txt" in names
        assert "extra_dir/sub_pkg/__init__.py" in names
        assert "extra_dir/sub_pkg/vcs_excluded.txt" not in names
        # sdist-only vs wheel-only selections.
        assert "for_wheel_only/__init__.py" in names
        assert "my_module.py" in names
        assert "notes.txt" in names
        assert "package_with_include/__init__.py" in names
        assert "tests/__init__.py" not in names
        assert "src_package/__init__.py" in names
def test_respect_format_for_explicit_included_files() -> None:
    """Files excluded from the wheel but explicitly included in the sdist must
    appear only in the sdist."""
    module_path = fixtures_dir / "exclude-whl-include-sdist"
    builder = Builder(Factory().create_poetry(module_path))
    builder.build(fmt="all")

    sdist = module_path / "dist" / "exclude_whl_include_sdist-0.1.0.tar.gz"

    assert sdist.exists()

    with tarfile.open(str(sdist), "r") as tar:
        names = tar.getnames()
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/__init__.py"
            in names
        )
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/compiled/source.c"
            in names
        )
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/compiled/source.h"
            in names
        )
        assert (
            "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/cython_code.pyx"
            in names
        )
        assert "exclude_whl_include_sdist-0.1.0/pyproject.toml" in names
        assert "exclude_whl_include_sdist-0.1.0/setup.py" in names
        assert "exclude_whl_include_sdist-0.1.0/PKG-INFO" in names

    whl = module_path / "dist" / "exclude_whl_include_sdist-0.1.0-py3-none-any.whl"

    assert whl.exists()

    with zipfile.ZipFile(str(whl)) as z:
        names = z.namelist()
        assert "exclude_whl_include_sdist/__init__.py" in names
        assert "exclude_whl_include_sdist/compiled/source.c" not in names
        assert "exclude_whl_include_sdist/compiled/source.h" not in names
        assert "exclude_whl_include_sdist/cython_code.pyx" not in names
    # NOTE: a stray trailing ``pass`` statement was removed as dead code.
| mit | ac38be9be58a3a1148b9b121421351da | 30.042925 | 145 | 0.616921 | 3.233317 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/_vendor/jsonschema/cli.py | 3 | 8518 | """
The ``jsonschema`` command line.
"""
from json import JSONDecodeError
from textwrap import dedent
import argparse
import json
import sys
import traceback
import warnings
try:
from importlib import metadata
except ImportError:
import importlib_metadata as metadata # type: ignore
try:
from pkgutil import resolve_name
except ImportError:
from pkgutil_resolve_name import resolve_name # type: ignore
import attr
from jsonschema.exceptions import SchemaError
from jsonschema.validators import RefResolver, validator_for
warnings.warn(
(
"The jsonschema CLI is deprecated and will be removed in a future "
"version. Please use check-jsonschema instead, which can be installed "
"from https://pypi.org/project/check-jsonschema/"
),
DeprecationWarning,
stacklevel=2,
)
class _CannotLoadFile(Exception):
    # Internal control-flow signal: raised after a load/parse failure has
    # already been reported through the outputter, so callers just abort
    # processing of the current file.
    pass
@attr.s
class _Outputter:
    """Routes CLI results to stdout/stderr through the selected formatter."""

    _formatter = attr.ib()
    _stdout = attr.ib()
    _stderr = attr.ib()

    @classmethod
    def from_arguments(cls, arguments, stdout, stderr):
        # Pick the formatter matching the parsed --output choice ("plain" or
        # "pretty"; argparse restricts the value to these two).
        if arguments["output"] == "plain":
            formatter = _PlainFormatter(arguments["error_format"])
        elif arguments["output"] == "pretty":
            formatter = _PrettyFormatter()
        return cls(formatter=formatter, stdout=stdout, stderr=stderr)

    def load(self, path):
        """Open and JSON-parse *path*; report failures and raise _CannotLoadFile."""
        try:
            file = open(path)
        except FileNotFoundError:
            self.filenotfound_error(path=path, exc_info=sys.exc_info())
            raise _CannotLoadFile()

        with file:
            try:
                return json.load(file)
            except JSONDecodeError:
                self.parsing_error(path=path, exc_info=sys.exc_info())
                raise _CannotLoadFile()

    def filenotfound_error(self, **kwargs):
        self._stderr.write(self._formatter.filenotfound_error(**kwargs))

    def parsing_error(self, **kwargs):
        self._stderr.write(self._formatter.parsing_error(**kwargs))

    def validation_error(self, **kwargs):
        self._stderr.write(self._formatter.validation_error(**kwargs))

    def validation_success(self, **kwargs):
        # Successes go to stdout; all error paths above go to stderr.
        self._stdout.write(self._formatter.validation_success(**kwargs))
@attr.s
class _PrettyFormatter:
    """Multi-line, human-readable output format for the CLI (--output=pretty)."""

    # Banner template shared by all error kinds.
    _ERROR_MSG = dedent(
        """\
===[{type}]===({path})===
{body}
-----------------------------
""",
    )
    _SUCCESS_MSG = "===[SUCCESS]===({path})===\n"

    def filenotfound_error(self, path, exc_info):
        return self._ERROR_MSG.format(
            path=path,
            type="FileNotFoundError",
            body="{!r} does not exist.".format(path),
        )

    def parsing_error(self, path, exc_info):
        # Render the full traceback of the JSON parse failure as the body.
        exc_type, exc_value, exc_traceback = exc_info
        exc_lines = "".join(
            traceback.format_exception(exc_type, exc_value, exc_traceback),
        )
        return self._ERROR_MSG.format(
            path=path,
            type=exc_type.__name__,
            body=exc_lines,
        )

    def validation_error(self, instance_path, error):
        return self._ERROR_MSG.format(
            path=instance_path,
            type=error.__class__.__name__,
            body=error,
        )

    def validation_success(self, instance_path):
        return self._SUCCESS_MSG.format(path=instance_path)
@attr.s
class _PlainFormatter:
    """Minimal one-line-per-event output format for CLI results."""
    _error_format = attr.ib()  # str.format template given one ``error`` object
    def filenotfound_error(self, path, exc_info):
        """One line for a missing file; ``exc_info`` is ignored."""
        return "{!r} does not exist.\n".format(path)
    def parsing_error(self, path, exc_info):
        """One line for a JSON parse failure (exc_info[1] is the exception)."""
        shown = "<stdin>" if path == "<stdin>" else repr(path)
        return "Failed to parse {}: {}\n".format(shown, exc_info[1])
    def validation_error(self, instance_path, error):
        """Render one validation error through the user-supplied template."""
        return self._error_format.format(file_name=instance_path, error=error)
    def validation_success(self, instance_path):
        """Plain mode prints nothing on success."""
        return ""
def _resolve_name_with_default(name):
    """Resolve a dotted object name, defaulting bare names to the
    ``jsonschema`` package (so e.g. ``-V Draft7Validator`` works)."""
    qualified = name if "." in name else "jsonschema." + name
    return resolve_name(qualified)
parser = argparse.ArgumentParser(
description="JSON Schema Validation CLI",
)
parser.add_argument(
"-i", "--instance",
action="append",
dest="instances",
help="""
a path to a JSON instance (i.e. filename.json) to validate (may
be specified multiple times). If no instances are provided via this
option, one will be expected on standard input.
""",
)
parser.add_argument(
"-F", "--error-format",
help="""
the format to use for each validation error message, specified
in a form suitable for str.format. This string will be passed
one formatted object named 'error' for each ValidationError.
Only provide this option when using --output=plain, which is the
default. If this argument is unprovided and --output=plain is
used, a simple default representation will be used.
""",
)
parser.add_argument(
"-o", "--output",
choices=["plain", "pretty"],
default="plain",
help="""
an output format to use. 'plain' (default) will produce minimal
text with one line for each error, while 'pretty' will produce
more detailed human-readable output on multiple lines.
""",
)
parser.add_argument(
"-V", "--validator",
type=_resolve_name_with_default,
help="""
the fully qualified object name of a validator to use, or, for
validators that are registered with jsonschema, simply the name
of the class.
""",
)
parser.add_argument(
"--base-uri",
help="""
a base URI to assign to the provided schema, even if it does not
declare one (via e.g. $id). This option can be used if you wish to
resolve relative references to a particular URI (or local path)
""",
)
parser.add_argument(
"--version",
action="version",
version=metadata.version("jsonschema"),
)
parser.add_argument(
"schema",
help="the path to a JSON Schema to validate with (i.e. schema.json)",
)
def parse_args(args):
    """Parse CLI ``args`` into a dict; with no args, show help and exit."""
    arguments = vars(parser.parse_args(args=args or ["--help"]))
    if arguments["output"] != "plain" and arguments["error_format"]:
        # parser.error() prints the message and raises SystemExit itself;
        # the ``raise`` is never actually reached but marks the intent.
        raise parser.error(
            "--error-format can only be used with --output plain",
        )
    if arguments["output"] == "plain" and arguments["error_format"] is None:
        # Default plain-mode template: one line per validation error.
        arguments["error_format"] = "{error.instance}: {error.message}\n"
    return arguments
def _validate_instance(instance_path, instance, validator, outputter):
    """Report every validation error for one instance.

    Emits each error (in iteration order) through ``outputter`` and a
    success message only when there were no errors. Returns ``True``
    when the instance was invalid, which callers fold into the exit code.
    """
    error_count = 0
    for error in validator.iter_errors(instance):
        error_count += 1
        outputter.validation_error(instance_path=instance_path, error=error)
    if error_count == 0:
        outputter.validation_success(instance_path=instance_path)
    return error_count > 0
def main(args=None):
    """CLI entry point; exits the process with the validation exit code.

    Fix: the previous signature used ``args=sys.argv[1:]`` as a default,
    which is evaluated once at import time and therefore freezes whatever
    ``sys.argv`` contained when the module was first imported. Reading
    ``sys.argv`` at call time preserves behavior for all callers while
    removing the early-binding pitfall.
    """
    if args is None:
        args = sys.argv[1:]
    sys.exit(run(arguments=parse_args(args=args)))
def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin):
    """Validate instances against a schema; return a process exit code.

    ``arguments`` is the dict produced by ``parse_args``. Returns 0 when
    everything validated, 1 otherwise. The streams are injectable for
    testing.
    """
    outputter = _Outputter.from_arguments(
        arguments=arguments,
        stdout=stdout,
        stderr=stderr,
    )
    try:
        schema = outputter.load(arguments["schema"])
    except _CannotLoadFile:
        return 1
    if arguments["validator"] is None:
        # No -V given: pick the validator class declared by the schema's
        # $schema (or the latest default).
        arguments["validator"] = validator_for(schema)
    try:
        arguments["validator"].check_schema(schema)
    except SchemaError as error:
        outputter.validation_error(
            instance_path=arguments["schema"],
            error=error,
        )
        return 1
    if arguments["instances"]:
        load, instances = outputter.load, arguments["instances"]
    else:
        # No -i given: read exactly one instance from standard input.
        def load(_):
            try:
                return json.load(stdin)
            except JSONDecodeError:
                outputter.parsing_error(
                    path="<stdin>", exc_info=sys.exc_info(),
                )
                raise _CannotLoadFile()
        instances = ["<stdin>"]
    # NOTE(review): RefResolver is deprecated in newer jsonschema releases;
    # acceptable here since this CLI module is itself deprecated.
    resolver = RefResolver(
        base_uri=arguments["base_uri"],
        referrer=schema,
    ) if arguments["base_uri"] is not None else None
    validator = arguments["validator"](schema, resolver=resolver)
    exit_code = 0
    for each in instances:
        try:
            instance = load(each)
        except _CannotLoadFile:
            exit_code = 1
        else:
            # |= keeps the code at 1 once any instance fails.
            exit_code |= _validate_instance(
                instance_path=each,
                instance=instance,
                validator=validator,
                outputter=outputter,
            )
    return exit_code
| mit | 4e2b89156151927eff8f236a7d2dc832 | 27.488294 | 79 | 0.609885 | 4.151072 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/_vendor/pyrsistent/_pdeque.py | 3 | 12203 | from collections.abc import Sequence, Hashable
from itertools import islice, chain
from numbers import Integral
from pyrsistent._plist import plist
class PDeque(object):
    """
    Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented
    using two persistent lists.
    A maximum length can be specified to create a bounded queue.
    Fully supports the Sequence and Hashable protocols including indexing and slicing but
    if you need fast random access go for the PVector instead.
    Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
    create an instance.
    Some examples:
    >>> x = pdeque([1, 2, 3])
    >>> x.left
    1
    >>> x.right
    3
    >>> x[0] == x.left
    True
    >>> x[-1] == x.right
    True
    >>> x.pop()
    pdeque([1, 2])
    >>> x.pop() == x[:-1]
    True
    >>> x.popleft()
    pdeque([2, 3])
    >>> x.append(4)
    pdeque([1, 2, 3, 4])
    >>> x.appendleft(4)
    pdeque([4, 1, 2, 3])
    >>> y = pdeque([1, 2, 3], maxlen=3)
    >>> y.append(4)
    pdeque([2, 3, 4], maxlen=3)
    >>> y.appendleft(4)
    pdeque([4, 1, 2], maxlen=3)
    """
    # Invariant: the deque's elements are _left_list followed by
    # reversed(_right_list); _length caches the total element count.
    __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')
    def __new__(cls, left_list, right_list, length, maxlen=None):
        # Internal constructor: takes pre-built plists; user code should go
        # through pdeque()/dq() instead.
        instance = super(PDeque, cls).__new__(cls)
        instance._left_list = left_list
        instance._right_list = right_list
        instance._length = length
        if maxlen is not None:
            if not isinstance(maxlen, Integral):
                raise TypeError('An integer is required as maxlen')
            if maxlen < 0:
                raise ValueError("maxlen must be non-negative")
        instance._maxlen = maxlen
        return instance
    @property
    def right(self):
        """
        Rightmost element in dqueue.
        """
        return PDeque._tip_from_lists(self._right_list, self._left_list)
    @property
    def left(self):
        """
        Leftmost element in dqueue.
        """
        return PDeque._tip_from_lists(self._left_list, self._right_list)
    @staticmethod
    def _tip_from_lists(primary_list, secondary_list):
        # The end element is the head of its own list, or — when that list
        # is empty — the last element of the opposite (reversed) list.
        if primary_list:
            return primary_list.first
        if secondary_list:
            return secondary_list[-1]
        raise IndexError('No elements in empty deque')
    def __iter__(self):
        # Left half in order, then the right half un-reversed.
        return chain(self._left_list, self._right_list.reverse())
    def __repr__(self):
        return "pdeque({0}{1})".format(list(self),
                                       ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
    __str__ = __repr__
    @property
    def maxlen(self):
        """
        Maximum length of the queue.
        """
        return self._maxlen
    def pop(self, count=1):
        """
        Return new deque with rightmost element removed. Popping the empty queue
        will return the empty queue. A optional count can be given to indicate the
        number of elements to pop. Popping with a negative index is the same as
        popleft. Executes in amortized O(k) where k is the number of elements to pop.
        >>> pdeque([1, 2]).pop()
        pdeque([1])
        >>> pdeque([1, 2]).pop(2)
        pdeque([])
        >>> pdeque([1, 2]).pop(-1)
        pdeque([2])
        """
        if count < 0:
            return self.popleft(-count)
        new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
        return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
    def popleft(self, count=1):
        """
        Return new deque with leftmost element removed. Otherwise functionally
        equivalent to pop().
        >>> pdeque([1, 2]).popleft()
        pdeque([2])
        """
        if count < 0:
            return self.pop(-count)
        new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
        return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
    @staticmethod
    def _pop_lists(primary_list, secondary_list, count):
        # Remove ``count`` elements from the primary end. When the primary
        # list drains, the secondary list is reversed and becomes the new
        # primary — this reversal is what makes pop amortized O(k).
        new_primary_list = primary_list
        new_secondary_list = secondary_list
        while count > 0 and (new_primary_list or new_secondary_list):
            count -= 1
            if new_primary_list.rest:
                new_primary_list = new_primary_list.rest
            elif new_primary_list:
                # Primary is down to one element: drop it by swapping in the
                # reversed secondary.
                new_primary_list = new_secondary_list.reverse()
                new_secondary_list = plist()
            else:
                # Primary already empty: pop from the reversed secondary.
                new_primary_list = new_secondary_list.reverse().rest
                new_secondary_list = plist()
        return new_primary_list, new_secondary_list
    def _is_empty(self):
        return not self._left_list and not self._right_list
    def __lt__(self, other):
        if not isinstance(other, PDeque):
            return NotImplemented
        return tuple(self) < tuple(other)
    def __eq__(self, other):
        if not isinstance(other, PDeque):
            return NotImplemented
        if tuple(self) == tuple(other):
            # Sanity check of the length value since it is redundant (there for performance)
            assert len(self) == len(other)
            return True
        return False
    def __hash__(self):
        return hash(tuple(self))
    def __len__(self):
        return self._length
    def append(self, elem):
        """
        Return new deque with elem as the rightmost element.
        >>> pdeque([1, 2]).append(3)
        pdeque([1, 2, 3])
        """
        new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
        return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
    def appendleft(self, elem):
        """
        Return new deque with elem as the leftmost element.
        >>> pdeque([1, 2]).appendleft(3)
        pdeque([3, 1, 2])
        """
        new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
        return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
    def _append(self, primary_list, secondary_list, elem):
        # Bounded deque at capacity: evict one element from the opposite
        # end before consing the new one onto the secondary list.
        if self._maxlen is not None and self._length == self._maxlen:
            if self._maxlen == 0:
                return primary_list, secondary_list, 0
            new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
            return new_primary_list, new_secondary_list.cons(elem), self._length
        return primary_list, secondary_list.cons(elem), self._length + 1
    @staticmethod
    def _extend_list(the_list, iterable):
        # Cons every element onto ``the_list``; returns the new list and
        # how many elements were added.
        count = 0
        for elem in iterable:
            the_list = the_list.cons(elem)
            count += 1
        return the_list, count
    def _extend(self, primary_list, secondary_list, iterable):
        new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
        new_secondary_list = secondary_list
        current_len = self._length + extend_count
        if self._maxlen is not None and current_len > self._maxlen:
            # Trim overflow from the opposite end to honor maxlen.
            pop_len = current_len - self._maxlen
            new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
            extend_count -= pop_len
        return new_primary_list, new_secondary_list, extend_count
    def extend(self, iterable):
        """
        Return new deque with all elements of iterable appended to the right.
        >>> pdeque([1, 2]).extend([3, 4])
        pdeque([1, 2, 3, 4])
        """
        new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
        return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
    def extendleft(self, iterable):
        """
        Return new deque with all elements of iterable appended to the left.
        NB! The elements will be inserted in reverse order compared to the order in the iterable.
        >>> pdeque([1, 2]).extendleft([3, 4])
        pdeque([4, 3, 1, 2])
        """
        new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
        return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
    def count(self, elem):
        """
        Return the number of elements equal to elem present in the queue
        >>> pdeque([1, 2, 1]).count(1)
        2
        """
        return self._left_list.count(elem) + self._right_list.count(elem)
    def remove(self, elem):
        """
        Return new deque with first element from left equal to elem removed. If no such element is found
        a ValueError is raised.
        >>> pdeque([2, 1, 2]).remove(2)
        pdeque([1, 2])
        """
        try:
            return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
        except ValueError:
            # Value not found in left list, try the right list
            try:
                # This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
                return PDeque(self._left_list,
                              self._right_list.reverse().remove(elem).reverse(), self._length - 1)
            except ValueError as e:
                raise ValueError('{0} not found in PDeque'.format(elem)) from e
    def reverse(self):
        """
        Return reversed deque.
        >>> pdeque([1, 2, 3]).reverse()
        pdeque([3, 2, 1])
        Also supports the standard python reverse function.
        >>> reversed(pdeque([1, 2, 3]))
        pdeque([3, 2, 1])
        """
        # Swapping the two halves reverses the deque in O(1).
        return PDeque(self._right_list, self._left_list, self._length)
    __reversed__ = reverse
    def rotate(self, steps):
        """
        Return deque with elements rotated steps steps.
        >>> x = pdeque([1, 2, 3])
        >>> x.rotate(1)
        pdeque([3, 1, 2])
        >>> x.rotate(-2)
        pdeque([3, 1, 2])
        """
        popped_deque = self.pop(steps)
        if steps >= 0:
            return popped_deque.extendleft(islice(self.reverse(), steps))
        return popped_deque.extend(islice(self, -steps))
    def __reduce__(self):
        # Pickling support
        return pdeque, (list(self), self._maxlen)
    def __getitem__(self, index):
        if isinstance(index, slice):
            if index.step is not None and index.step != 1:
                # Too difficult, no structural sharing possible
                return pdeque(tuple(self)[index], maxlen=self._maxlen)
            # NOTE(review): ``% self._length`` raises ZeroDivisionError when
            # slicing an empty deque with explicit bounds — confirm upstream.
            result = self
            if index.start is not None:
                result = result.popleft(index.start % self._length)
            if index.stop is not None:
                result = result.pop(self._length - (index.stop % self._length))
            return result
        if not isinstance(index, Integral):
            raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
        if index >= 0:
            return self.popleft(index).left
        shifted = len(self) + index
        if shifted < 0:
            raise IndexError(
                "pdeque index {0} out of range {1}".format(index, len(self)),
            )
        return self.popleft(shifted).left
    # Reuse the generic Sequence implementation of index().
    index = Sequence.index
# Register PDeque as a virtual subclass so isinstance checks against the
# Sequence/Hashable ABCs succeed without actual inheritance.
Sequence.register(PDeque)
Hashable.register(PDeque)
def pdeque(iterable=(), maxlen=None):
    """
    Return deque containing the elements of iterable. If maxlen is specified then
    len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
    >>> pdeque([1, 2, 3])
    pdeque([1, 2, 3])
    >>> pdeque([1, 2, 3, 4], maxlen=2)
    pdeque([3, 4], maxlen=2)
    """
    t = tuple(iterable)
    if maxlen is not None:
        # Bug fix: ``t[-maxlen:]`` is wrong for maxlen == 0 because
        # ``t[-0:]`` is the whole tuple, yielding a deque whose length
        # exceeds its maxlen. Handle the zero-capacity case explicitly.
        # (Negative maxlen is still rejected by PDeque.__new__.)
        t = t[-maxlen:] if maxlen > 0 else ()
    length = len(t)
    # Split roughly in half: left half in order, right half stored reversed.
    pivot = length // 2
    left = plist(t[:pivot])
    right = plist(t[pivot:], reverse=True)
    return PDeque(left, right, length, maxlen)
def dq(*elements):
    """
    Return deque containing all arguments.
    >>> dq(1, 2, 3)
    pdeque([1, 2, 3])
    """
    # Thin varargs convenience wrapper around the pdeque() factory.
    return pdeque(iterable=elements)
| mit | 5651dff4cf3082ea4db9576fa8847876 | 31.454787 | 115 | 0.574695 | 3.841045 | false | false | false | false |
python-poetry/poetry-core | tests/integration/test_pep517_backend.py | 1 | 1499 | from __future__ import annotations
import shutil
from pathlib import Path
import pytest
from tests.testutils import subprocess_run
pytestmark = pytest.mark.integration  # whole module runs only with the integration suite
# pyproject.toml build-system section appended to the fixture project;
# ``requires`` is pointed at the local poetry-core checkout under test.
BUILD_SYSTEM_TEMPLATE = """
[build-system]
requires = ["poetry-core @ file://{project_path}"]
build-backend = "poetry.core.masonry.api"
"""
def test_pip_install(
    temporary_directory: Path, project_source_root: Path, python: str
) -> None:
    """pip-install a fixture project that uses the checked-out poetry-core
    as its PEP 517 build backend, then verify the install succeeded."""
    backend_dir = temporary_directory / "pep_517_backend"
    # Work on a copy of the fixture: the build-system section appended
    # below is dynamic, and the checked-in sources must stay untouched.
    fixture = Path(__file__).parent.parent / "fixtures/pep_517_backend"
    shutil.copytree(fixture, backend_dir)
    # Point ``requires`` at the local poetry-core checkout under test.
    build_system = BUILD_SYSTEM_TEMPLATE.format(
        project_path=project_source_root.as_posix()
    )
    with (backend_dir / "pyproject.toml").open("a") as f:
        f.write(build_system)
    subprocess_run(
        python,
        "-m",
        "pip",
        "install",
        backend_dir.as_posix(),
    )
    pip_show = subprocess_run(python, "-m", "pip", "show", "foo")
    assert "Name: foo" in pip_show.stdout
| mit | 066f8a212643cdb68bf614b3d8331ec1 | 26.759259 | 104 | 0.657105 | 3.569048 | false | true | false | false |
python-poetry/poetry-core | src/poetry/core/_vendor/pyparsing/util.py | 17 | 6805 | # util.py
import warnings
import types
import collections
import itertools
from functools import lru_cache
from typing import List, Union, Iterable
_bslash = chr(92)
class __config_flags:
    """Internal class for defining compatibility and debugging flags"""
    # Subclasses list their flag attribute names here; names in
    # _fixed_names are reported as non-overridable.
    _all_names: List[str] = []
    _fixed_names: List[str] = []
    _type_desc = "configuration"
    @classmethod
    def _set(cls, dname, value):
        """Set flag ``dname`` to ``value``, warning if it is fixed."""
        if dname in cls._fixed_names:
            warnings.warn(
                "{}.{} {} is {} and cannot be overridden".format(
                    cls.__name__,
                    dname,
                    cls._type_desc,
                    str(getattr(cls, dname)).upper(),
                )
            )
            return
        if dname in cls._all_names:
            setattr(cls, dname, value)
        else:
            raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
    # Public toggles delegating to _set with True/False.
    enable = classmethod(lambda cls, name: cls._set(name, True))
    disable = classmethod(lambda cls, name: cls._set(name, False))
@lru_cache(maxsize=128)
def col(loc: int, strg: str) -> int:
    """Return the 1-based column of position ``loc`` within ``strg``,
    treating newlines as line separators.

    A location that sits exactly on a character following a newline is
    column 1. See :class:`ParserElement.parseString` for notes on ``<TAB>``
    expansion and keeping locations consistent with the parsed string.
    """
    if 0 < loc < len(strg) and strg[loc - 1] == "\n":
        return 1
    return loc - strg.rfind("\n", 0, loc)
@lru_cache(maxsize=128)
def lineno(loc: int, strg: str) -> int:
    """Return the 1-based line number of position ``loc`` within ``strg``,
    counting newlines as line separators.

    Note: the default parsing behavior expands tabs before parsing; see
    :class:`ParserElement.parseString` for keeping locations consistent.
    """
    return 1 + strg.count("\n", 0, loc)
@lru_cache(maxsize=128)
def line(loc: int, strg: str) -> str:
    """Return the full line of ``strg`` that contains position ``loc``,
    with newlines treated as line separators."""
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end < 0:
        return strg[start:]
    return strg[start:end]
class _UnboundedCache:
    # Dict-backed cache with no eviction. get/set/clear are closures bound
    # as instance methods so the hot path avoids attribute lookups on the
    # backing dict; ``not_in_cache`` is a unique sentinel for misses.
    def __init__(self):
        cache = {}
        cache_get = cache.get
        self.not_in_cache = not_in_cache = object()
        def get(_, key):
            return cache_get(key, not_in_cache)
        def set_(_, key, value):
            cache[key] = value
        def clear(_):
            cache.clear()
        self.size = None  # None signals "unbounded"
        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set_, self)
        self.clear = types.MethodType(clear, self)
class _FifoCache:
    # Bounded cache with first-in-first-out eviction, built on OrderedDict.
    # Same closure-as-method pattern as _UnboundedCache.
    def __init__(self, size):
        self.not_in_cache = not_in_cache = object()  # unique miss sentinel
        cache = collections.OrderedDict()
        cache_get = cache.get
        def get(_, key):
            return cache_get(key, not_in_cache)
        def set_(_, key, value):
            cache[key] = value
            # Evict oldest entries until back under the size bound.
            while len(cache) > size:
                cache.popitem(last=False)
        def clear(_):
            cache.clear()
        self.size = size
        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set_, self)
        self.clear = types.MethodType(clear, self)
class LRUMemo:
    """
    A memoizing mapping that retains `capacity` deleted items

    Entries live in ``_active`` until deleted; deletion moves them into
    ``_memory``, an LRU store bounded to ``capacity`` items. Reading a
    retained entry refreshes its recency.
    """

    def __init__(self, capacity):
        self._capacity = capacity
        self._active = {}
        self._memory = collections.OrderedDict()

    def __getitem__(self, key):
        if key in self._active:
            return self._active[key]
        # Fall back to the retained store; touching refreshes recency.
        self._memory.move_to_end(key)
        return self._memory[key]

    def __setitem__(self, key, value):
        # A fresh value supersedes any retained copy.
        self._memory.pop(key, None)
        self._active[key] = value

    def __delitem__(self, key):
        if key not in self._active:
            return
        value = self._active.pop(key)
        # Make room, evicting least-recently-used retained entries.
        while len(self._memory) >= self._capacity:
            self._memory.popitem(last=False)
        self._memory[key] = value

    def clear(self):
        self._active.clear()
        self._memory.clear()
class UnboundedMemo(dict):
    """Memoizing dict whose entries survive ``del`` — deletion is a no-op
    so memoized values are never dropped."""

    def __delitem__(self, key):
        # Intentionally ignore deletion requests.
        return None
def _escape_regex_range_chars(s: str) -> str:
    """Backslash-escape the characters that are special inside a regex
    character class (``\\ ^ - [ ]``), plus literal newline and tab."""
    # Backslash must be escaped first so later escapes are not doubled.
    for special in "\\^-[]":
        s = s.replace(special, "\\" + special)
    return str(s.replace("\n", "\\n").replace("\t", "\\t"))
def _collapse_string_to_ranges(
    s: Union[str, Iterable[str]], re_escape: bool = True
) -> str:
    # Build a compact character-class body (e.g. "0-9a-f") by grouping
    # consecutive code points in ``s`` into ranges.
    def is_consecutive(c):
        # groupby key: stays constant while successive characters are
        # adjacent code points; jumps to a fresh counter value on a gap.
        c_int = ord(c)
        is_consecutive.prev, prev = c_int, is_consecutive.prev
        if c_int - prev > 1:
            is_consecutive.value = next(is_consecutive.counter)
        return is_consecutive.value
    # Function attributes hold the grouping state across calls.
    is_consecutive.prev = 0
    is_consecutive.counter = itertools.count()
    is_consecutive.value = -1
    def escape_re_range_char(c):
        return "\\" + c if c in r"\^-][" else c
    def no_escape_re_range_char(c):
        return c
    if not re_escape:
        escape_re_range_char = no_escape_re_range_char
    ret = []
    # Dedupe and sort so consecutive code points are adjacent.
    s = "".join(sorted(set(s)))
    if len(s) > 3:
        for _, chars in itertools.groupby(s, key=is_consecutive):
            first = last = next(chars)
            # deque(maxlen=1) drains the group to capture its last element.
            last = collections.deque(
                itertools.chain(iter([last]), chars), maxlen=1
            ).pop()
            if first == last:
                ret.append(escape_re_range_char(first))
            else:
                # Two adjacent chars are written "ab", longer runs "a-z".
                sep = "" if ord(last) == ord(first) + 1 else "-"
                ret.append(
                    "{}{}{}".format(
                        escape_re_range_char(first), sep, escape_re_range_char(last)
                    )
                )
    else:
        # 3 chars or fewer: ranges would not shorten anything.
        ret = [escape_re_range_char(c) for c in s]
    return "".join(ret)
def _flatten(ll: list) -> list:
    """Flatten arbitrarily nested lists into one flat list, preserving
    element order. Non-list elements (including tuples) pass through as-is.
    """
    flat = []
    # Explicit iterator stack instead of recursion; equivalent traversal order.
    stack = [iter(ll)]
    while stack:
        for item in stack[-1]:
            if isinstance(item, list):
                stack.append(iter(item))
                break
            flat.append(item)
        else:
            stack.pop()
    return flat
| mit | 1e247836b6d756c099c0a4447a4c80e7 | 27.957447 | 98 | 0.568259 | 3.816601 | false | false | false | false |
python-poetry/poetry-core | src/poetry/core/masonry/utils/module.py | 1 | 3697 | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
if TYPE_CHECKING:
from poetry.core.masonry.utils.include import Include
class ModuleOrPackageNotFound(ValueError):
    """Raised when no source file or package directory can be located
    for the project's declared module name."""
    pass
class Module:
    """The project's root module or package, as located on disk.

    When no explicit ``packages`` are given, the source is auto-detected
    in ``directory``: a package directory, a single-file module, or either
    of those under a ``src/`` layout. The resulting include patterns are
    collected in :attr:`includes`.
    """

    def __init__(
        self,
        name: str,
        directory: str = ".",
        packages: list[dict[str, Any]] | None = None,
        includes: list[dict[str, Any]] | None = None,
    ) -> None:
        from poetry.core.masonry.utils.include import Include
        from poetry.core.masonry.utils.package_include import PackageInclude
        from poetry.core.utils.helpers import module_name

        self._name = module_name(name)
        self._in_src = False
        self._is_package = False
        self._path = Path(directory)
        self._includes: list[Include] = []
        packages = packages or []
        includes = includes or []

        if not packages:
            # It must exist either as a .py file or a directory, but not both
            pkg_dir = Path(directory, self._name)
            py_file = Path(directory, self._name + ".py")
            if pkg_dir.is_dir() and py_file.is_file():
                raise ValueError(f"Both {pkg_dir} and {py_file} exist")
            elif pkg_dir.is_dir():
                packages = [{"include": str(pkg_dir.relative_to(self._path))}]
            elif py_file.is_file():
                packages = [{"include": str(py_file.relative_to(self._path))}]
            else:
                # Searching for a src module
                src = Path(directory, "src")
                src_pkg_dir = src / self._name
                src_py_file = src / (self._name + ".py")
                if src_pkg_dir.is_dir() and src_py_file.is_file():
                    # Bug fix: report the src-layout paths that actually
                    # clash; the previous message interpolated pkg_dir /
                    # py_file, which do not exist on this branch.
                    raise ValueError(f"Both {src_pkg_dir} and {src_py_file} exist")
                elif src_pkg_dir.is_dir():
                    packages = [
                        {
                            "include": str(src_pkg_dir.relative_to(src)),
                            "from": str(src.relative_to(self._path)),
                        }
                    ]
                elif src_py_file.is_file():
                    packages = [
                        {
                            "include": str(src_py_file.relative_to(src)),
                            "from": str(src.relative_to(self._path)),
                        }
                    ]
                else:
                    raise ModuleOrPackageNotFound(
                        f"No file/folder found for package {name}"
                    )

        for package in packages:
            # "format" may be a single string or a list of build formats.
            formats = package.get("format")
            if formats and not isinstance(formats, list):
                formats = [formats]
            self._includes.append(
                PackageInclude(
                    self._path,
                    package["include"],
                    formats=formats,
                    source=package.get("from"),
                )
            )

        for include in includes:
            self._includes.append(
                Include(self._path, include["path"], formats=include["format"])
            )

    @property
    def name(self) -> str:
        """The normalized module name."""
        return self._name

    @property
    def path(self) -> Path:
        """The project root directory."""
        return self._path

    @property
    def file(self) -> Path:
        """Path to the module's entry file.

        NOTE(review): ``_is_package`` is never set to True within this
        class, so this currently always returns ``_path`` — confirm
        whether callers or subclasses mutate it.
        """
        if self._is_package:
            return self._path / "__init__.py"
        else:
            return self._path

    @property
    def includes(self) -> list[Include]:
        """All include patterns (detected packages plus extra includes)."""
        return self._includes

    def is_package(self) -> bool:
        return self._is_package

    def is_in_src(self) -> bool:
        return self._in_src
lk-geimfari/mimesis | mimesis/providers/base.py | 2 | 6580 | """Base data provider."""
import contextlib
import json
import operator
import typing as t
from functools import reduce
from pathlib import Path
from mimesis.exceptions import NonEnumerableError
from mimesis.locales import Locale, validate_locale
from mimesis.random import Random, get_random_item
from mimesis.types import JSON, Seed
__all__ = ["BaseDataProvider", "BaseProvider"]
class BaseProvider:
    """This is a base class for all providers."""
    class Meta:
        # Symbolic provider name declared by subclasses — presumably used
        # for attaching providers by name elsewhere; confirm against the
        # Generic/Field machinery.
        name: str
    def __init__(self, *, seed: Seed = None, **kwargs: t.Any) -> None:
        """Initialize attributes.
        Keep in mind, that locale-independent data providers will work
        only with keyword-only arguments since version 5.0.0.
        :param seed: Seed for random.
            When set to `None` the current system time is used.
        """
        self.seed = seed
        self.random = Random()
        self.reseed(seed)
    def reseed(self, seed: Seed = None) -> None:
        """Reseed the internal random generator.
        In case we use the default seed, we need to create a per instance
        random generator, in this case two providers with the same seed
        will always return the same values.
        :param seed: Seed for random.
            When set to `None` the current system time is used.
        """
        self.seed = seed
        self.random.seed(seed)
    def validate_enum(self, item: t.Any, enum: t.Any) -> t.Any:
        """Validate enum parameter of method in subclasses of BaseProvider.
        :param item: Item of enum object.
        :param enum: Enum object.
        :return: Value of item.
        :raises NonEnumerableError: if ``item`` not in ``enum``.
        """
        if item is None:
            # No explicit choice: pick a random member deterministically
            # w.r.t. this provider's seeded RNG.
            result = get_random_item(enum, self.random)
        elif item and isinstance(item, enum):
            result = item
        else:
            raise NonEnumerableError(enum)
        return result.value
    def __str__(self) -> str:
        """Human-readable representation of locale."""
        return self.__class__.__name__
class BaseDataProvider(BaseProvider):
    """This is a base class for all data providers."""
    def __init__(self, locale: Locale = Locale.DEFAULT, seed: Seed = None) -> None:
        """Initialize attributes for data providers.
        :param locale: Current locale.
        :param seed: Seed to all the random functions.
        """
        super().__init__(seed=seed)
        self._data: JSON = {}  # locale data loaded from JSON by _load_datafile()
        self._datafile: str = ""  # per-provider JSON filename, set by subclasses
        self._setup_locale(locale)
        # Bundled data directory shipped alongside the package sources.
        self._data_dir = Path(__file__).parent.parent.joinpath("data")
    def _setup_locale(self, locale: Locale = Locale.DEFAULT) -> None:
        """Set up locale after pre-check.
        :param str locale: Locale
        :raises UnsupportedLocale: When locale not supported.
        :return: Nothing.
        """
        locale_obj = validate_locale(locale)
        self.locale = locale_obj.value
    def extract(self, keys: t.List[str], default: t.Optional[t.Any] = None) -> t.Any:
        """Extracts nested values from JSON file by list of keys.
        :param keys: List of keys (order extremely matters).
        :param default: Default value.
        :return: Data.
        """
        if not keys:
            raise ValueError("The list of keys to extract cannot be empty.")
        try:
            # Walk the nested dicts: data[keys[0]][keys[1]]...
            return reduce(operator.getitem, keys, self._data)
        except (TypeError, KeyError):
            return default
    def _update_dict(self, initial: JSON, other: JSON) -> JSON:
        """Recursively update a dictionary.
        :param initial: Dict to update.
        :param other: Dict to update from.
        :return: Updated dict.
        """
        for key, value in other.items():
            if value and isinstance(value, dict) or isinstance(value, dict):
                pass
            if isinstance(value, dict):
                r = self._update_dict(initial.get(key, {}), value)
                initial[key] = r
            else:
                initial[key] = other[key]
        return initial
    def _load_datafile(self, datafile: str = "") -> None:
        """Loads the content from the JSON.
        :param datafile: The name of file.
        :return: The content of the file.
        :raises UnsupportedLocale: Raises if locale is unsupported.
        """
        locale = self.locale
        data_dir = self._data_dir
        if not datafile:
            datafile = self._datafile
        def get_data(locale_name: str) -> t.Any:
            """Pull JSON data from file.
            :param locale_name: Locale name.
            :return: Content of JSON file as dict.
            """
            file_path = Path(data_dir).joinpath(locale_name, datafile)
            with open(file_path, encoding="utf8") as f:
                return json.load(f)
        # For sub-locales like "en-gb", load the master locale ("en")
        # first and overlay the sub-locale's entries on top of it.
        locale_separator = "-"
        master_locale = locale.split(locale_separator).pop(0)
        data = get_data(master_locale)
        if locale_separator in locale:
            data = self._update_dict(data, get_data(locale))
        self._data = data
    def get_current_locale(self) -> str:
        """Get current locale.
        If locale is not defined then this method will always return ``en``,
        because ``en`` is default locale for all providers, excluding builtins.
        :return: Current locale.
        """
        # noinspection PyTypeChecker
        return self.locale
    def _override_locale(self, locale: Locale = Locale.DEFAULT) -> None:
        """Overrides current locale with passed and pull data for new locale.
        :param locale: Locale
        :return: Nothing.
        """
        self._setup_locale(locale)
        self._load_datafile()
    @contextlib.contextmanager
    def override_locale(
        self,
        locale: Locale,
    ) -> t.Generator["BaseDataProvider", None, None]:
        """Context manager which allows overriding current locale.
        Temporarily overrides current locale for
        locale-dependent providers.
        :param locale: Locale.
        :return: Provider with overridden locale.
        """
        try:
            origin_locale = Locale(self.locale)
            self._override_locale(locale)
            try:
                yield self
            finally:
                # Always restore the original locale, even if the body raised.
                self._override_locale(origin_locale)
        except AttributeError:
            # No ``locale`` attribute means the provider is locale-independent.
            raise ValueError(f"«{self.__class__.__name__}» has not locale dependent")
    def __str__(self) -> str:
        """Human-readable representation of locale."""
        locale = Locale(getattr(self, "locale", Locale.DEFAULT))
        return f"{self.__class__.__name__} <{locale}>"
| mit | b3c7c77f9a8c85ceb4184e92abc83d39 | 30.932039 | 85 | 0.591821 | 4.333333 | false | false | false | false |
bachya/regenmaschine | regenmaschine/client.py | 1 | 8931 | """Define a client to interact with a RainMachine unit."""
from __future__ import annotations
import asyncio
import json
import ssl
from datetime import datetime
from typing import Any, cast
import async_timeout
from aiohttp import ClientSession, ClientTimeout
from aiohttp.client_exceptions import ClientOSError, ServerDisconnectedError
from .const import LOGGER
from .controller import Controller, LocalController, RemoteController
from .errors import RequestError, TokenExpiredError, raise_for_error
DEFAULT_LOCAL_PORT = 8080  # assumed default port of the controller's local API — not referenced in this chunk; confirm
DEFAULT_TIMEOUT = 30  # request timeout in seconds (used for ClientTimeout(total=...))
class Client:
"""Define the client."""
    def __init__(
        self,
        *,
        request_timeout: int = DEFAULT_TIMEOUT,
        session: ClientSession | None = None,
    ) -> None:
        """Initialize.
        Args:
            request_timeout: The number of seconds before a request times out.
            session: An optional aiohttp ClientSession.
        """
        self._request_timeout = request_timeout
        self._session = session
        self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # The local API on Gen 1 controllers uses outdated RSA ciphers (and there isn't
        # any indication that they'll be updated). Python 3.10+ enforces minimum TLS
        # standards that the Gen 1 can't support, so to keep compatibility, we loosen
        # things up:
        #   1. We set a minimum TLS version of SSLv3
        #   2. We utilize the "DEFAULT" cipher suite (which includes old RSA ciphers).
        #   3. We don't validate the hostname.
        #   4. We allow self-signed certificates.
        self._ssl_context.minimum_version = ssl.TLSVersion.SSLv3
        self._ssl_context.set_ciphers("DEFAULT")
        self._ssl_context.check_hostname = False
        self._ssl_context.verify_mode = ssl.CERT_NONE
        # Known controllers keyed by string identifier — presumably MAC or
        # name; confirm where entries are added.
        self.controllers: dict[str, Controller] = {}
async def _request(
self,
method: str,
url: str,
*,
access_token: str | None = None,
access_token_expiration: datetime | None = None,
use_ssl: bool = True,
**kwargs: dict[str, Any],
) -> dict[str, Any]:
"""Make an API request.
Args:
method: An HTTP method.
url: An API URL.
access_token: An optional API access token.
access_token_expiration: An optional API token expiration datetime.
use_ssl: Whether to use SSL/TLS on the request.
**kwargs: Additional kwargs to send with the request.
Returns:
An API response payload.
Raises:
AssertionError: To handle mypy strangeness.
RequestError: Raised upon an underlying HTTP error.
TokenExpiredError: Raised upon an expired access token
"""
if access_token_expiration and datetime.now() >= access_token_expiration:
raise TokenExpiredError("Long-lived access token has expired")
kwargs.setdefault("headers", {})
kwargs["headers"]["Content-Type"] = "application/json"
kwargs.setdefault("params", {})
if access_token:
kwargs["params"]["access_token"] = access_token
if use_running_session := self._session and not self._session.closed:
session = self._session
else:
session = ClientSession(timeout=ClientTimeout(total=DEFAULT_TIMEOUT))
try:
# Only try 2x for ServerDisconnectedError to comply with the RFC:
# https://datatracker.ietf.org/doc/html/rfc2616#section-8.1.4
for attempt in range(2):
try:
return await self._request_with_session(
session, method, url, use_ssl, **kwargs
)
except ServerDisconnectedError as err:
# The HTTP/1.1 spec allows the device to close the connection
# at any time. aiohttp raises ServerDisconnectedError to let us
# decide what to do. In this case we want to retry as it likely
# means the connection was stale and the server closed it on us:
if attempt == 0:
continue
raise RequestError(
f"Error requesting data from {url}: {err}"
) from err
finally:
if not use_running_session:
await session.close()
raise AssertionError # https://github.com/python/mypy/issues/8964
async def _request_with_session(
self,
session: ClientSession,
method: str,
url: str,
use_ssl: bool,
**kwargs: dict[str, Any],
) -> dict[str, Any]:
"""Make a request with a session.
Args:
session: An aiohttp ClientSession.
method: An HTTP method.
url: An API URL.
use_ssl: Whether to use SSL/TLS on the request.
**kwargs: Additional kwargs to send with the request.
Returns:
An API response payload.
Raises:
RequestError: Raised upon an underlying HTTP error.
"""
try:
async with async_timeout.timeout(self._request_timeout), session.request(
method, url, ssl=self._ssl_context if use_ssl else None, **kwargs
) as resp:
data = await resp.json(content_type=None)
except json.decoder.JSONDecodeError as err:
raise RequestError("Unable to parse response as JSON") from err
except ClientOSError as err:
raise RequestError(
f"Connection error while requesting data from {url}"
) from err
except asyncio.TimeoutError as err:
raise RequestError(f"Timed out while requesting data from {url}") from err
else:
LOGGER.debug("Data received for %s: %s", url, data)
raise_for_error(resp, data)
return cast(dict[str, Any], data)
async def load_local( # pylint: disable=too-many-arguments
self,
host: str,
password: str,
port: int = DEFAULT_LOCAL_PORT,
use_ssl: bool = True,
skip_existing: bool = True,
) -> None:
"""Create a local client.
Args:
host: The IP address or hostname of the controller.
password: The controller password.
port: The port that serves the controller's API.
use_ssl: Whether to use SSL/TLS on the request.
skip_existing: Don't load the controller if it's already loaded.
"""
controller = LocalController(self._request, host, port, use_ssl)
await controller.login(password)
wifi_data = await controller.provisioning.wifi()
if skip_existing and wifi_data["macAddress"] in self.controllers:
return
version_data = await controller.api.versions()
controller.api_version = version_data["apiVer"]
controller.hardware_version = str(version_data["hwVer"])
controller.mac = wifi_data["macAddress"]
controller.software_version = version_data["swVer"]
name = await controller.provisioning.device_name
controller.name = str(name)
self.controllers[controller.mac] = controller
async def load_remote(
self, email: str, password: str, skip_existing: bool = True
) -> None:
"""Create a remote client.
Args:
email: A RainMachine account email address.
password: The account password.
skip_existing: Don't load the controller if it's already loaded.
"""
auth_resp = await self._request(
"post",
"https://my.rainmachine.com/login/auth",
json={"user": {"email": email, "pwd": password, "remember": 1}},
)
access_token: str = auth_resp["access_token"]
sprinklers_resp = await self._request(
"post",
"https://my.rainmachine.com/devices/get-sprinklers",
access_token=access_token,
json={"user": {"email": email, "pwd": password, "remember": 1}},
)
for sprinkler in sprinklers_resp["sprinklers"]:
if skip_existing and sprinkler["mac"] in self.controllers:
continue
controller: RemoteController = RemoteController(self._request)
await controller.login(access_token, sprinkler["sprinklerId"], password)
version_data = await controller.api.versions()
controller.api_version = version_data["apiVer"]
controller.hardware_version = str(version_data["hwVer"])
controller.mac = sprinkler["mac"]
controller.name = str(sprinkler["name"])
controller.software_version = version_data["swVer"]
self.controllers[sprinkler["mac"]] = controller
| mit | 4d429fdca15686b6e70f7b8aeecbd3ef | 36.058091 | 87 | 0.595006 | 4.503782 | false | false | false | false |
lk-geimfari/mimesis | mimesis/providers/cryptographic.py | 2 | 3325 | """Cryptographic data provider."""
import hashlib
import secrets
import typing as t
from uuid import UUID, uuid4
from mimesis.data.int.cryptographic import WORDLIST
from mimesis.enums import Algorithm
from mimesis.providers.base import BaseProvider
__all__ = ["Cryptographic"]
class Cryptographic(BaseProvider):
    """Provider of cryptographic-looking data: UUIDs, hashes and tokens."""

    class Meta:
        """Provider metadata."""

        name: t.Final[str] = "cryptographic"

    @staticmethod
    def uuid_object() -> UUID:
        """Generate UUID4 object.

        :return: UUID4 object.
        """
        return uuid4()

    def uuid(self) -> str:
        """Generate UUID4 string.

        :return: UUID4 as string.
        """
        return str(self.uuid_object())

    def hash(self, algorithm: t.Optional[Algorithm] = None) -> str:  # noqa: A003
        """Generate random hash.

        To change hashing algorithm, pass parameter ``algorithm``
        with needed value of the enum object :class:`~mimesis.enums.Algorithm`

        .. warning:: Seed is not applicable to this method,
            because of its cryptographic-safe nature.

        :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.
        :return: Hash.
        :raises NonEnumerableError: When algorithm is unsupported.
        """
        algorithm_name = self.validate_enum(algorithm, Algorithm)
        hasher = getattr(hashlib, algorithm_name)
        digest = hasher(self.uuid().encode())
        return str(digest.hexdigest())

    @staticmethod
    def token_bytes(entropy: int = 32) -> bytes:
        """Generate byte string containing ``entropy`` bytes.

        .. warning:: Seed is not applicable to this method,
            because of its cryptographic-safe nature.

        :param entropy: Number of bytes (default: 32).
        :return: Random bytes.
        """
        return secrets.token_bytes(entropy)

    @staticmethod
    def token_hex(entropy: int = 32) -> str:
        """Return a random text string, in hexadecimal.

        Each of the *entropy* random bytes is rendered as two hex digits.

        .. warning:: Seed is not applicable to this method,
            because of its cryptographic-safe nature.

        :param entropy: Number of bytes (default: 32).
        :return: Token.
        """
        return secrets.token_hex(entropy)

    @staticmethod
    def token_urlsafe(entropy: int = 32) -> str:
        """Return a random URL-safe text string, in Base64 encoding.

        The string is derived from *entropy* random bytes.

        .. warning:: Seed is not applicable to this method,
            because of its cryptographic-safe nature.

        :param entropy: Number of bytes (default: 32).
        :return: URL-safe token.
        """
        return secrets.token_urlsafe(entropy)

    def mnemonic_phrase(self) -> str:
        """Generate BIP-39-compatible mnemonic phrase.

        :return: Mnemonic phrase.
        """
        # Standard phrases are either 12 or 24 words long.
        word_count = self.random.choice([12, 24])
        words = self.random.choices(WORDLIST, k=word_count)
        return " ".join(words)
| mit | 942623a945e4483d6c26bbfd771b3eb5 | 29.504587 | 81 | 0.622256 | 4.306995 | false | false | false | false |
lk-geimfari/mimesis | tests/test_providers/test_path.py | 3 | 2145 | import re
import pytest
from mimesis import Path
from mimesis.data import FOLDERS, PROGRAMMING_LANGS, PROJECT_NAMES
class TestPath:
    """Checks for the Path provider, driven by the ``path`` fixture."""

    def test_root(self, path):
        assert path.root() in ("C:\\", "/")

    def test_home(self, path):
        assert path.home() in ("C:\\Users", "/home")

    def test_user(self, path):
        if path.platform == "win32":
            pattern = r"C:\\Users\\[A-Z].*"
        else:
            pattern = r"/home/[a-z]."
        assert re.search(pattern, path.user())

    def directory_separator(self, path):
        # Helper (not a test): path separator for the provider's platform.
        if path.platform == "win32":
            return "\\"
        if path.platform == "linux":
            return "/"
        return ""

    def test_users_folder(self, path):
        parts = path.users_folder().split(self.directory_separator(path))
        assert len(parts) == 4
        assert parts[3] in FOLDERS

    def test_dev_dir(self, path):
        parts = path.dev_dir().split(self.directory_separator(path))
        assert len(parts) == 5
        assert parts[4] in PROGRAMMING_LANGS

    def test_project_dir(self, path):
        parts = path.project_dir().split(self.directory_separator(path))
        assert len(parts) == 6
        assert parts[5] in PROJECT_NAMES
class TestSeededPath:
    """Two Path providers built from the same seed must produce equal output."""

    @pytest.fixture
    def path1(self, seed):
        return Path(seed=seed)

    @pytest.fixture
    def path2(self, seed):
        return Path(seed=seed)

    def test_root(self, path1, path2):
        assert path1.root() == path2.root()

    def test_home(self, path1, path2):
        assert path1.home() == path2.home()

    def test_user(self, path1, path2):
        assert path1.user() == path2.user()

    def test_users_folder(self, path1, path2):
        assert path1.users_folder() == path2.users_folder()

    def test_dev_dir(self, path1, path2):
        assert path1.dev_dir() == path2.dev_dir()

    def test_project_dir(self, path1, path2):
        assert path1.project_dir() == path2.project_dir()
| mit | 324d176d9d390e10e4b3409c34167138 | 26.857143 | 88 | 0.586014 | 3.362069 | false | true | false | false |
bachya/regenmaschine | regenmaschine/controller.py | 1 | 4684 | """Define a RainMachine controller class."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from datetime import datetime, timedelta
from typing import Any
from regenmaschine.endpoints.api import API
from regenmaschine.endpoints.diagnostics import Diagnostics
from regenmaschine.endpoints.machine import Machine
from regenmaschine.endpoints.parser import Parser
from regenmaschine.endpoints.program import Program
from regenmaschine.endpoints.provision import Provision
from regenmaschine.endpoints.restriction import Restriction
from regenmaschine.endpoints.stats import Stats
from regenmaschine.endpoints.watering import Watering
from regenmaschine.endpoints.zone import Zone
URL_BASE_LOCAL = "https://{0}:{1}/api/4"
URL_BASE_REMOTE = "https://api.rainmachine.com/{0}/api/4"
class Controller:  # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """Base RainMachine controller: shared state plus the request wrapper."""

    def __init__(self, request: Callable[..., Awaitable[dict[str, Any]]]) -> None:
        """Initialize.

        Args:
            request: The request method from the Client object.
        """
        # Auth/transport state, populated by the concrete login flows:
        self._access_token: str | None = None
        self._access_token_expiration: datetime | None = None
        self._client_request = request
        self._host: str = ""
        self._use_ssl = True

        # Device metadata, populated after a successful login:
        self.api_version: str = ""
        self.hardware_version: str = ""
        self.mac: str = ""
        self.name: str = ""
        self.software_version: str = ""

        # API endpoints:
        self.api = API(self)
        self.diagnostics = Diagnostics(self)
        self.machine = Machine(self)
        self.parsers = Parser(self)
        self.programs = Program(self)
        self.provisioning = Provision(self)
        self.restrictions = Restriction(self)
        self.stats = Stats(self)
        self.watering = Watering(self)
        self.zones = Zone(self)

    async def request(
        self, method: str, endpoint: str, **kwargs: dict[str, Any]
    ) -> dict[str, Any]:
        """Wrap the generic request method to add access token, etc.

        Args:
            method: An HTTP method.
            endpoint: An API URL endpoint.
            **kwargs: Additional kwargs to send with the request.

        Returns:
            An API response payload.
        """
        full_url = f"{self._host}/{endpoint}"
        return await self._client_request(
            method,
            full_url,
            access_token=self._access_token,
            access_token_expiration=self._access_token_expiration,
            use_ssl=self._use_ssl,
            **kwargs,
        )
class LocalController(Controller):
    """Define a controller accessed over the LAN."""

    def __init__(
        self,
        request: Callable[..., Awaitable[dict[str, Any]]],
        host: str,
        port: int,
        use_ssl: bool = True,
    ) -> None:
        """Initialize.

        Args:
            request: The request method from the Client object.
            host: The IP address or hostname of the controller.
            port: The port that serves the controller's API.
            use_ssl: Whether to use SSL/TLS on the request.
        """
        super().__init__(request)
        self._host = URL_BASE_LOCAL.format(host, port)
        self._use_ssl = use_ssl

    async def login(self, password: str) -> None:
        """Authenticate against the device (locally).

        Args:
            password: The controller password.
        """
        resp = await self._client_request(
            "post", f"{self._host}/auth/login", json={"pwd": password, "remember": 1}
        )
        self._access_token = resp["access_token"]
        # Expire our copy of the token 10 seconds early so we never present a
        # token the device is about to reject.
        lifetime_seconds = int(resp["expires_in"]) - 10
        self._access_token_expiration = datetime.now() + timedelta(
            seconds=lifetime_seconds
        )
class RemoteController(Controller):
    """Define a controller accessed over RainMachine's cloud."""

    async def login(
        self, stage_1_access_token: str, sprinkler_id: str, password: str
    ) -> None:
        """Authenticate against the device (remotely).

        Args:
            stage_1_access_token: The first-stage access token from the remote cloud.
            sprinkler_id: A unique ID for the controller.
            password: The account password.
        """
        payload = {"sprinklerId": sprinkler_id, "pwd": password}
        resp: dict = await self._client_request(
            "post",
            "https://my.rainmachine.com/devices/login-sprinkler",
            access_token=stage_1_access_token,
            json=payload,
        )
        self._access_token = resp["access_token"]
        self._host = URL_BASE_REMOTE.format(sprinkler_id)
| mit | aa449aafff3c5df1951bdd5e4343480c | 32.942029 | 88 | 0.615073 | 4.08726 | false | false | false | false |
tankywoo/simiki | simiki/log.py | 1 | 2428 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from logging import getLogger, Formatter, StreamHandler
from simiki import utils
from simiki.compat import is_linux, is_osx
class ANSIFormatter(Formatter):
    """Use ANSI escape sequences to colored log"""

    def format(self, record):
        """Format ``record`` as ``[LEVEL]: message`` with a colored level name.

        Records whose level name is not one of the standard names are
        returned as the bare formatted message, without a prefix.
        """
        try:
            msg = super(ANSIFormatter, self).format(record)
        except TypeError:
            # Only the Python 2.6 failure mode is expected here: Formatter is
            # an old-style class there, so super() raises TypeError. The old
            # bare ``except:`` swallowed every exception, hiding real bugs.
            # Trick: http://stackoverflow.com/a/18392639/1276501
            msg = Formatter.format(self, record)
        lvl2color = {
            "DEBUG": "blue",
            "INFO": "green",
            "WARNING": "yellow",
            "ERROR": "red",
            "CRITICAL": "bgred"
        }
        rln = record.levelname
        if rln in lvl2color:
            return "[{0}]: {1}".format(
                utils.color_msg(lvl2color[rln], rln),
                msg
            )
        else:
            return msg
class NonANSIFormatter(Formatter):
    """Non ANSI color format"""

    def format(self, record):
        """Format ``record`` as ``[LEVEL]: message`` without ANSI colors."""
        try:
            msg = super(NonANSIFormatter, self).format(record)
        except TypeError:
            # Only the Python 2.6 failure mode is expected here: Formatter is
            # an old-style class there, so super() raises TypeError. The old
            # bare ``except:`` swallowed every exception, hiding real bugs.
            # Trick: http://stackoverflow.com/a/18392639/1276501
            msg = Formatter.format(self, record)
        rln = record.levelname
        return "[{0}]: {1}".format(rln, msg)
def _is_platform_allowed_ansi():
    """ANSI colors are only used on Linux and macOS."""
    return bool(is_linux or is_osx)
def logging_init(level=None, logger=None, handler=None, use_color=True):
    """Attach a console handler (colored where supported) to ``logger``.

    :param level: optional logging level to set on the logger
    :param logger: logger to configure; defaults to the root logger
    :param handler: handler to attach; defaults to a new ``StreamHandler``
    :param use_color: colorize output when the platform supports ANSI codes
    """
    # Create defaults per call. The previous signature used
    # ``logger=getLogger(), handler=StreamHandler()``, which evaluated both
    # once at import time, so every call shared a single handler instance.
    if logger is None:
        logger = getLogger()
    if handler is None:
        handler = StreamHandler()
    if use_color and _is_platform_allowed_ansi():
        fmt = ANSIFormatter()
    else:
        fmt = NonANSIFormatter()
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    if level:
        logger.setLevel(level)
if __name__ == "__main__":
    # Manual smoke test: configure the root logger, then emit one message at
    # every level so the (possibly colored) output can be eyeballed.
    logging_init(level=logging.DEBUG)
    root_logger = logging.getLogger()
    root_logger.debug("debug")
    root_logger.info("info")
    root_logger.warning("warning")
    root_logger.error("error")
    root_logger.critical("critical")
| mit | 3d0a4ac7b87bfbcadf190c46690b928a | 26.280899 | 76 | 0.581549 | 3.903537 | false | false | false | false |
alphagov/notifications-python-client | notifications_python_client/base.py | 1 | 4017 | import json
import logging
import time
import urllib.parse
import requests
from notifications_python_client import __version__
from notifications_python_client.authentication import create_jwt_token
from notifications_python_client.errors import HTTPError, InvalidResponse
logger = logging.getLogger(__name__)
class BaseAPIClient(object):
    """Base client for the GOV.UK Notify API.

    Signs each request with a JWT derived from the API key and decodes JSON
    responses, translating HTTP failures into client exceptions.
    """
    def __init__(
        self,
        api_key,
        base_url='https://api.notifications.service.gov.uk',
        timeout=30
    ):
        """
        Initialise the client
        Error if either of base_url or secret missing
        :param base_url - base URL of GOV.UK Notify API:
        :param secret - application secret - used to sign the request:
        :param timeout - request timeout on the client
        :return:
        """
        # The tail of the combined key encodes the service ID (36 chars) and
        # the API key (36 chars); any human-readable prefix is ignored.
        service_id = api_key[-73:-37]
        api_key = api_key[-36:]
        # NOTE(review): these asserts are stripped under `python -O`; callers
        # may rely on AssertionError, so they are left as-is.
        assert base_url, "Missing base url"
        assert service_id, "Missing service ID"
        assert api_key, "Missing API key"
        self.base_url = base_url
        self.service_id = service_id
        self.api_key = api_key
        self.timeout = timeout
    def put(self, url, data):
        """Send a PUT request with a JSON body; return the decoded response."""
        return self.request("PUT", url, data=data)
    def get(self, url, params=None):
        """Send a GET request with optional query params; return the decoded response."""
        return self.request("GET", url, params=params)
    def post(self, url, data):
        """Send a POST request with a JSON body; return the decoded response."""
        return self.request("POST", url, data=data)
    def delete(self, url, data=None):
        """Send a DELETE request (optionally with a body); return the decoded response."""
        return self.request("DELETE", url, data=data)
    def generate_headers(self, api_token):
        """Build the standard request headers, including the bearer token."""
        return {
            "Content-type": "application/json",
            "Authorization": "Bearer {}".format(api_token),
            "User-agent": "NOTIFY-API-PYTHON-CLIENT/{}".format(__version__)
        }
    def request(self, method, url, data=None, params=None):
        """Perform a signed API request and return the decoded JSON payload."""
        logger.debug("API request {} {}".format(method, url))
        url, kwargs = self._create_request_objects(url, data, params)
        response = self._perform_request(method, url, kwargs)
        return self._process_json_response(response)
    def _create_request_objects(self, url, data, params):
        """Resolve the full URL and the kwargs dict passed to ``requests``."""
        # A fresh JWT is minted per request; tokens are short-lived.
        api_token = create_jwt_token(
            self.api_key,
            self.service_id
        )
        kwargs = {
            "headers": self.generate_headers(api_token),
            "timeout": self.timeout
        }
        if data is not None:
            kwargs.update(data=self._serialize_data(data))
        if params is not None:
            kwargs.update(params=params)
        url = urllib.parse.urljoin(str(self.base_url), str(url))
        return url, kwargs
    def _serialize_data(self, data):
        """JSON-encode the request body."""
        return json.dumps(data, default=self._extended_json_encoder)
    def _extended_json_encoder(self, obj):
        """json.dumps fallback: render sets as lists; reject anything else."""
        if isinstance(obj, set):
            return list(obj)
        raise TypeError
    def _perform_request(self, method, url, kwargs):
        """Execute the HTTP call, translating failures into HTTPError.

        Always logs the elapsed wall-clock time, even on failure.
        """
        start_time = time.monotonic()
        try:
            response = requests.request(
                method,
                url,
                **kwargs
            )
            response.raise_for_status()
            return response
        except requests.RequestException as e:
            api_error = HTTPError.create(e)
            logger.error(
                "API {} request on {} failed with {} '{}'".format(
                    method,
                    url,
                    api_error.status_code,
                    api_error.message
                )
            )
            raise api_error
        finally:
            elapsed_time = time.monotonic() - start_time
            logger.debug("API {} request on {} finished in {}".format(method, url, elapsed_time))
    def _process_json_response(self, response):
        """Decode the response body as JSON.

        A 204 (No Content) yields None; an undecodable body raises
        InvalidResponse.
        """
        try:
            if response.status_code == 204:
                return
            return response.json()
        except ValueError:
            raise InvalidResponse(
                response,
                message="No JSON response object could be decoded"
            )
| mit | c6e8717a41cccadb168d0d614e154ba4 | 29.431818 | 97 | 0.5646 | 4.310086 | false | false | false | false |
facebook/prophet | python/prophet/make_holidays.py | 1 | 2146 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import pandas as pd
import prophet.hdays as hdays_part2
import holidays as hdays_part1
def get_holiday_names(country):
    """Return all possible holiday names of given country

    Parameters
    ----------
    country: country name

    Returns
    -------
    A set of all possible holiday names of given country
    """
    year_range = np.arange(1995, 2045)
    try:
        # Prophet's bundled holiday definitions take precedence; silence any
        # warnings they emit while building the calendar.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            names = getattr(hdays_part2, country)(years=year_range).values()
    except AttributeError:
        # Fall back to the third-party `holidays` package.
        try:
            names = getattr(hdays_part1, country)(years=year_range).values()
        except AttributeError as e:
            raise AttributeError(f"Holidays in {country} are not currently supported!") from e
    return set(names)
def make_holidays_df(year_list, country, province=None, state=None):
    """Make dataframe of holidays for given years and countries

    Parameters
    ----------
    year_list: a list of years
    country: country name
    province: optional sub-region (passed to the `holidays` package)
    state: optional state (passed to the `holidays` package)

    Returns
    -------
    Dataframe with 'ds' and 'holiday', which can directly feed
    to 'holidays' params in Prophet
    """
    try:
        country_holidays = getattr(hdays_part2, country)(years=year_list, expand=False)
    except AttributeError:
        try:
            country_holidays = getattr(hdays_part1, country)(
                prov=province, state=state, years=year_list, expand=False)
        except AttributeError as e:
            raise AttributeError(f"Holidays in {country} are not currently supported!") from e
    rows = [(date, country_holidays.get_list(date)) for date in country_holidays]
    frame = pd.DataFrame(rows, columns=['ds', 'holiday'])
    # Dates carrying several holidays become one row per holiday name.
    frame = frame.explode('holiday').reset_index(drop=True)
    frame['ds'] = pd.to_datetime(frame['ds'])
    return frame
| mit | 13ea2291f772d9590547aac8aa2a4138 | 30.558824 | 113 | 0.666822 | 3.887681 | false | false | false | false |
facebook/prophet | python/prophet/serialize.py | 1 | 6858 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from copy import deepcopy
from io import StringIO
import json
from pathlib import Path
import numpy as np
import pandas as pd
from prophet.forecaster import Prophet
about = {}
here = Path(__file__).parent.resolve()
with open(here / "__version__.py", "r") as f:
exec(f.read(), about)
SIMPLE_ATTRIBUTES = [
'growth', 'n_changepoints', 'specified_changepoints', 'changepoint_range',
'yearly_seasonality', 'weekly_seasonality', 'daily_seasonality',
'seasonality_mode', 'seasonality_prior_scale', 'changepoint_prior_scale',
'holidays_prior_scale', 'mcmc_samples', 'interval_width', 'uncertainty_samples',
'y_scale', 'logistic_floor', 'country_holidays', 'component_modes'
]
PD_SERIES = ['changepoints', 'history_dates', 'train_holiday_names']
PD_TIMESTAMP = ['start']
PD_TIMEDELTA = ['t_scale']
PD_DATAFRAME = ['holidays', 'history', 'train_component_cols']
NP_ARRAY = ['changepoints_t']
ORDEREDDICT = ['seasonalities', 'extra_regressors']
def model_to_dict(model):
    """Convert a Prophet model to a dictionary suitable for JSON serialization.
    Model must be fitted. Skips Stan objects that are not needed for predict.
    Can be reversed with model_from_dict.
    Parameters
    ----------
    model: Prophet model object.
    Returns
    -------
    dict that can be used to serialize a Prophet model as JSON or loaded back
    into a Prophet model.
    """
    if model.history is None:
        raise ValueError(
            "This can only be used to serialize models that have already been fit."
        )
    model_dict = {
        attribute: getattr(model, attribute) for attribute in SIMPLE_ATTRIBUTES
    }
    # Handle attributes of non-core types
    # pandas Series -> JSON in 'split' orient with ISO dates (round-trippable).
    for attribute in PD_SERIES:
        if getattr(model, attribute) is None:
            model_dict[attribute] = None
        else:
            model_dict[attribute] = getattr(model, attribute).to_json(
                orient='split', date_format='iso'
            )
    # Timestamps/timedeltas stored as plain floats (epoch seconds / seconds).
    for attribute in PD_TIMESTAMP:
        model_dict[attribute] = getattr(model, attribute).timestamp()
    for attribute in PD_TIMEDELTA:
        model_dict[attribute] = getattr(model, attribute).total_seconds()
    # DataFrames use 'table' orient so the schema (dtypes) survives the trip.
    for attribute in PD_DATAFRAME:
        if getattr(model, attribute) is None:
            model_dict[attribute] = None
        else:
            model_dict[attribute] = getattr(model, attribute).to_json(orient='table', index=False)
    for attribute in NP_ARRAY:
        model_dict[attribute] = getattr(model, attribute).tolist()
    # OrderedDicts: store the key order alongside the mapping so it can be
    # reconstructed (plain JSON objects do not guarantee ordering).
    for attribute in ORDEREDDICT:
        model_dict[attribute] = [
            list(getattr(model, attribute).keys()),
            getattr(model, attribute),
        ]
    # Other attributes with special handling
    # fit_kwargs -> Transform any numpy types before serializing.
    # They do not need to be transformed back on deserializing.
    fit_kwargs = deepcopy(model.fit_kwargs)
    if 'init' in fit_kwargs:
        for k, v in fit_kwargs['init'].items():
            if isinstance(v, np.ndarray):
                fit_kwargs['init'][k] = v.tolist()
            elif isinstance(v, np.floating):
                fit_kwargs['init'][k] = float(v)
    model_dict['fit_kwargs'] = fit_kwargs
    # Params (Dict[str, np.ndarray])
    model_dict['params'] = {k: v.tolist() for k, v in model.params.items()}
    # Attributes that are skipped: stan_fit, stan_backend
    model_dict['__prophet_version'] = about["__version__"]
    return model_dict
def model_to_json(model):
    """Serialize a Prophet model to json string.
    Model must be fitted. Skips Stan objects that are not needed for predict.
    Can be deserialized with model_from_json.
    Parameters
    ----------
    model: Prophet model object.
    Returns
    -------
    json string that can be deserialized into a Prophet model.
    """
    return json.dumps(model_to_dict(model))
def model_from_dict(model_dict):
    """Recreate a Prophet model from a dictionary.
    Recreates models that were converted with model_to_dict.
    Parameters
    ----------
    model_dict: Dictionary containing model, created with model_to_dict.
    Returns
    -------
    Prophet model.
    """
    model = Prophet()  # We will overwrite all attributes set in init anyway
    # Simple types
    for attribute in SIMPLE_ATTRIBUTES:
        setattr(model, attribute, model_dict[attribute])
    for attribute in PD_SERIES:
        if model_dict[attribute] is None:
            setattr(model, attribute, None)
        else:
            s = pd.read_json(StringIO(model_dict[attribute]), typ='series', orient='split')
            if s.name == 'ds':
                # Date series were written with ISO dates; an empty series
                # needs an explicit conversion to datetime dtype, and any
                # timezone is stripped to restore naive timestamps.
                if len(s) == 0:
                    s = pd.to_datetime(s)
                s = s.dt.tz_localize(None)
            setattr(model, attribute, s)
    for attribute in PD_TIMESTAMP:
        # Stored as epoch seconds (UTC); strip the tz to get a naive Timestamp.
        setattr(model, attribute, pd.Timestamp.utcfromtimestamp(model_dict[attribute]).tz_localize(None))
    for attribute in PD_TIMEDELTA:
        setattr(model, attribute, pd.Timedelta(seconds=model_dict[attribute]))
    for attribute in PD_DATAFRAME:
        if model_dict[attribute] is None:
            setattr(model, attribute, None)
        else:
            df = pd.read_json(StringIO(model_dict[attribute]), typ='frame', orient='table', convert_dates=['ds'])
            if attribute == 'train_component_cols':
                # Special handling because of named index column
                df.columns.name = 'component'
                df.index.name = 'col'
            setattr(model, attribute, df)
    for attribute in NP_ARRAY:
        setattr(model, attribute, np.array(model_dict[attribute]))
    # Rebuild OrderedDicts from the stored (key-order, mapping) pair.
    for attribute in ORDEREDDICT:
        key_list, unordered_dict = model_dict[attribute]
        od = OrderedDict()
        for key in key_list:
            od[key] = unordered_dict[key]
        setattr(model, attribute, od)
    # Other attributes with special handling
    # fit_kwargs
    model.fit_kwargs = model_dict['fit_kwargs']
    # Params (Dict[str, np.ndarray])
    model.params = {k: np.array(v) for k, v in model_dict['params'].items()}
    # Skipped attributes
    model.stan_backend = None
    model.stan_fit = None
    return model
def model_from_json(model_json):
    """Deserialize a Prophet model from json string.
    Deserializes models that were serialized with model_to_json.
    Parameters
    ----------
    model_json: Serialized model string
    Returns
    -------
    Prophet model.
    """
    return model_from_dict(json.loads(model_json))
| mit | ecd7584146c964e5ee1967555c79d1d5 | 32.291262 | 113 | 0.638233 | 3.887755 | false | false | false | false |
vim-awesome/vim-awesome | db/migrations/dedupe_plugin_repo_owner.py | 2 | 2415 | """Remove case-sensitive duplicates in the plugins table"""
import rethinkdb as r
import db.plugins
import db.util
r_conn = db.util.r_conn
LOG_FILE = 'deleted_slugs.log'
def dupe_log_line(canonical, dupes):
    """One log line mapping a canonical slug to its removed duplicates."""
    joined_dupes = ', '.join(dupes)
    return '{0}: {1}\n'.format(canonical, joined_dupes)
def merge_plugins(plugins):
    """Fold a list of duplicate plugin rows into one canonical row.

    The row with the shortest slug wins; the loser's category (when the
    winner is uncategorized), tags, and dotfile reference counts are merged
    into it. Raises TypeError on an empty list (via reduce).
    """
    def reducer(new, old):
        # Use the plugin with the shortest slug as the new plugin
        if len(old['slug']) < len(new['slug']):
            new, old = old, new
        new = db.plugins.update_plugin(old, new)
        # Preserve categories
        if new['category'] == 'uncategorized':
            new['category'] = old['category']
        # Merge tags
        new['tags'] = list(set(new['tags'] + old['tags']))
        # Collect the total number of dotfiles referencing this plugin
        new['github_bundles'] += old['github_bundles']
        return new
    # NOTE: bare `reduce` is the Python 2 builtin (this script targets py2).
    return reduce(reducer, plugins)
if __name__ == '__main__':
    # Python 2 script (uses dict.iteritems below). For each group of plugins
    # sharing a case-insensitive GitHub owner/repo, merge the duplicates into
    # one canonical row, delete the rest, and log what was removed.
    print('Removing duplicate rows in plugins. Logging to: %s' % LOG_FILE)
    updated = 0
    deleted = 0
    query = r.table('plugins')
    # Group by the normalized GitHub path
    query = query.group([
        r.row['github_owner'].downcase(),
        r.row['github_repo_name'].downcase()])
    grouped_plugins = query.run(r_conn())
    # Maps each canonical slug -> list of deleted duplicate slugs, for the log.
    slug_map = {}
    for owner_repo, plugins in grouped_plugins.iteritems():
        print('\nPlugin with GitHub path %s occurs %s times' % (
            owner_repo,
            len(plugins)))
        canonical = merge_plugins(plugins)
        print("Using %s as canonical" % canonical['slug'])
        # db.plugins.insert normalizes the owner/repo to lower case
        db.plugins.insert(canonical, conflict='replace')
        updated += 1
        dupes = [dupe for dupe in plugins if dupe['slug'] != canonical['slug']]
        if dupes:
            dupe_slugs = [dupe['slug'] for dupe in dupes]
            # Store deleted slugs for logging
            slug_map[canonical['slug']] = dupe_slugs
            print('Deleting duplicates rows: %s' % ', '.join(dupe_slugs))
            r.table('plugins').get_all(r.args(dupe_slugs)).delete().run(r_conn())
            deleted += len(dupes)
    with open(LOG_FILE, 'w') as log:
        print('Writing deleted slug names to %s' % LOG_FILE)
        log.writelines(dupe_log_line(c, d) for c, d in slug_map.iteritems())
    print("Updated %d rows and deleted %d" % (updated, deleted))
| mit | 099677508b2f78d342e5ad7d9fcfff37 | 27.411765 | 81 | 0.592547 | 3.631579 | false | false | false | false |
vim-awesome/vim-awesome | tools/scrape/github.py | 2 | 27751 | import base64
import collections
import datetime
import logging
import re
import sys
import time
from urllib import urlencode
import urlparse
import configparser
import dateutil.parser
import requests
import rethinkdb as r
from termcolor import colored
from db.github_repos import PluginGithubRepos, DotfilesGithubRepos
import db.plugins
import db.util
import util
r_conn = db.util.r_conn
# NOTE: `secrets` here is the project-local config module (secrets.py), not
# the Python 3 stdlib `secrets` module; it is optional and may be absent.
try:
    import secrets
    _GITHUB_API_TOKEN = getattr(secrets, 'GITHUB_PERSONAL_ACCESS_TOKEN', None)
except ImportError:
    _GITHUB_API_TOKEN = None
_NO_GITHUB_API_TOKEN_MESSAGE = """
*******************************************************************************
* Warning: GitHub API token not found in secrets.py. Scraping will be severely
* rate-limited. See secrets.py.example to obtain a GitHub personal access token
*******************************************************************************
"""
if not _GITHUB_API_TOKEN:
    logging.warn(colored(_NO_GITHUB_API_TOKEN_MESSAGE, 'red'))
# Container for a repo's dotfile references, split by plugin manager.
ReposByManager = collections.namedtuple('ReposByManager', ['vundle', 'neobundle', 'vimplug'])
###############################################################################
# General utilities for interacting with the GitHub API.
class ApiRateLimitExceededError(Exception):
    """Raised when the GitHub API reports our rate limit is exhausted.

    The response headers are kept so callers can inspect fields such as
    X-RateLimit-Reset and decide how long to back off.
    """
    def __init__(self, headers):
        self.headers = headers
    def __str__(self):
        return '%r' % (self.headers,)
def get_api_page(url_or_path, query_params=None, page=1, per_page=100):
    """Get a page from GitHub's v3 API.
    Arguments:
        url_or_path: The API method to call or the full URL.
        query_params: A dict of additional query parameters
        page: Page number
        per_page: How many results to return per page. Max is 100.
    Returns:
        A tuple: (Response object, JSON-decoded dict of the response)
    Raises: ApiRateLimitExceededError
    """
    split_url = urlparse.urlsplit(url_or_path)
    query = {
        'page': page,
        'per_page': per_page,
    }
    if _GITHUB_API_TOKEN:
        query['access_token'] = _GITHUB_API_TOKEN
    # Later updates win: params embedded in the given URL override the
    # defaults above, and explicit query_params override both.
    query.update(dict(urlparse.parse_qsl(split_url.query)))
    query.update(query_params or {})
    # Rebuild the URL against api.github.com regardless of what was passed in.
    url = urlparse.SplitResult(scheme='https', netloc='api.github.com',
                               path=split_url.path, query=urlencode(query),
                               fragment=split_url.fragment).geturl()
    res = requests.get(url)
    # GitHub signals rate limiting with 403 plus a zeroed remaining counter.
    if res.status_code == 403 and res.headers['X-RateLimit-Remaining'] == '0':
        raise ApiRateLimitExceededError(res.headers)
    return res, res.json()
def get_requests_left():
    """Return the number of GitHub API requests remaining in this window."""
    data = get_api_page('rate_limit')[1]
    return data['rate']['remaining']
def maybe_wait_until_api_limit_resets(response_headers):
    """If we're about to exceed our API limit, sleeps until our API limit is
    reset.

    Arguments:
        response_headers: Headers dict from a GitHub API response. Must
            contain 'X-RateLimit-Remaining' and, when that is '0',
            'X-RateLimit-Reset' (an epoch timestamp).
    """
    if response_headers['X-RateLimit-Remaining'] == '0':
        reset_timestamp = response_headers['X-RateLimit-Reset']
        reset_date = datetime.datetime.fromtimestamp(int(reset_timestamp))
        now = datetime.datetime.now()
        # Bug fix: use total_seconds() instead of .seconds -- the latter is
        # only the seconds component of the delta (it ignores days) and yields
        # a bogus near-86400 value for negative deltas. Clamp at 0 in case the
        # reset time is already in the past.
        seconds_to_wait = max(0, int((reset_date - now).total_seconds()) + 1)
        # print() with a single argument behaves identically on Python 2 and 3.
        print("Sleeping %s seconds for API limit to reset." % seconds_to_wait)
        time.sleep(seconds_to_wait)
###############################################################################
# Routines for scraping Vim plugin repos from GitHub.

def get_plugin_data(owner, repo_name, repo_data, readme_data=None):
    """Populate info relevant to a plugin from a GitHub repo.

    This should not be used to fetch info from the vim-scripts user's repos.

    Arguments:
        owner: The repo's owner's login, eg. "gmarik"
        repo_name: The repo name, eg. "vundle"
        repo_data: GitHub API /repos response for this repo
        readme_data: (optional) GitHub API /readme response for this repo;
            fetched here if not given.
    Returns:
        A dict of properties that can be inserted as a row in the plugins table
    """
    assert owner != 'vim-scripts'
    if not readme_data:
        _, readme_data = get_api_page('repos/%s/%s/readme' % (
            owner, repo_name))
    # README content arrives base64-encoded; decode and coerce to unicode,
    # dropping any bytes that are not valid UTF-8.
    readme_base64_decoded = base64.b64decode(readme_data.get('content', ''))
    readme = unicode(readme_base64_decoded, 'utf-8', errors='ignore')
    readme_filename = readme_data.get('name', '')
    # TODO(david): We used to extract the vim.org ID from the homepage if it
    #     were a vim.org URL, but that became too unreliable as many different
    #     repos would all claim to have the same vim.org homepage, when
    #     sometimes those repos were of different plugins. But it's still
    #     useful information in heuristic matching, just can't be used as
    #     a key.
    homepage = repo_data['homepage']
    repo_created_date = dateutil.parser.parse(repo_data['created_at'])
    # Fetch commits so we can get the update/create dates.
    _, commits_data = get_api_page('repos/%s/%s/commits' % (owner, repo_name),
                                   per_page=100)
    if commits_data and isinstance(commits_data, list) and len(commits_data):
        # Unfortunately repo_data['updated_at'] and repo_data['pushed_at'] are
        # wildy misrepresentative of the last time someone made a commit to the
        # repo.
        updated_date_text = commits_data[0]['commit']['author']['date']
        updated_date = dateutil.parser.parse(updated_date_text)
        # To get the creation date, we use the heuristic of min(repo creation
        # date, 100th latest commit date). We do this because repo creation
        # date can be later than the date of the first commit, which is
        # particularly pervasive for vim-scripts repos. Fortunately, most
        # vim-scripts repos don't have more than 100 commits, and also we get
        # creation_date for vim-scripts repos when scraping vim.org.
        early_commit_date_text = commits_data[-1]['commit']['author']['date']
        early_commit_date = dateutil.parser.parse(early_commit_date_text)
        created_date = min(repo_created_date, early_commit_date)
    else:
        updated_date = dateutil.parser.parse(repo_data['updated_at'])
        created_date = repo_created_date
    # Fetch owner info to get author name.
    owner_login = repo_data['owner']['login']
    _, owner_data = get_api_page('users/%s' % owner_login)
    author = owner_data.get('name') or owner_data.get('login')
    return {
        'created_at': util.to_timestamp(created_date),
        'updated_at': util.to_timestamp(updated_date),
        'vimorg_id': None,
        'github_repo_id': str(repo_data['id']),
        'github_owner': owner,
        'github_repo_name': repo_name,
        'github_author': author,
        'github_stars': repo_data['watchers'],
        'github_homepage': homepage,
        'github_short_desc': repo_data['description'],
        'github_readme': readme,
        'github_readme_filename': readme_filename,
    }
def _add_submission_data(plugin, submission):
    """Merge category and tags from a user submission into a plugin dict."""
    plugin_category = plugin.get('category', 'uncategorized')
    submitted_category = submission.get('category', 'uncategorized')
    # Only take the submitted category if the plugin has none yet.
    if plugin_category == 'uncategorized' and \
            submitted_category != 'uncategorized':
        plugin['category'] = submission['category']
    # Only take submitted tags if the plugin has none yet.
    if not plugin.get('tags') and submission.get('tags'):
        db.plugins.update_tags(plugin, submission['tags'])
# TODO(david): Simplify/break-up this function.
def scrape_plugin_repos(num):
    """Scrapes the num plugin repos that have been least recently scraped.

    For each repo: fetches repo/readme/commit data from the GitHub API,
    follows renames (301 redirects), records the scrape in the
    plugin_github_repos table, and upserts the aggregated plugin row.
    """
    MIN_FORK_USERS = 3
    query = r.table('plugin_github_repos').filter({'is_blacklisted': False})
    # We don't want to scrape forks that not many people use.
    query = query.filter(r.not_((r.row['is_fork'] == True) & (  # NOQA
        r.row['plugin_manager_users'] < MIN_FORK_USERS)),
        default=True)
    # Only scrape repos that don't redirect to other ones (probably renamed).
    query = query.filter(r.row['redirects_to'] == '')
    # We scrape vim-scripts separately using the batch /users/:user/repos call
    query = query.filter(r.row['owner'] != 'vim-scripts')
    query = query.order_by('last_scraped_at').limit(num)
    repos = query.run(r_conn())
    # TODO(david): Print stats at the end: # successfully scraped, # not found,
    #     # redirects, etc.
    for repo in repos:
        repo_name = repo['repo_name']
        repo_owner = repo['owner']
        # Print w/o newline.
        print "    scraping %s/%s ..." % (repo_owner, repo_name),
        sys.stdout.flush()
        # Attempt to fetch data about the plugin.
        res, repo_data = get_api_page('repos/%s/%s' % (repo_owner, repo_name))
        # If the API call 404s, then see if the repo has been renamed by
        # checking for a redirect in a non-API call.
        if res.status_code == 404:
            res = requests.head('https://github.com/%s/%s' % (
                repo_owner, repo_name))
            if res.status_code == 301:
                location = res.headers.get('location')
                valid_repo_url = re.compile("^https://github.com/[^/]+/[^/]+")
                if not valid_repo_url.match(location):
                    print 'redirects to invalid GitHub repo URL: %s' % location
                    continue
                _, redirect_owner, redirect_repo_name = location.rsplit('/', 2)
                repo['redirects_to'] = '%s/%s' % (redirect_owner,
                                                  redirect_repo_name)
                # Make sure we insert the new location of the repo, which will
                # be scraped in a future run.
                PluginGithubRepos.upsert_with_owner_repo({
                    'owner': redirect_owner,
                    'repo_name': redirect_repo_name,
                    # TODO(david): Should append to a list
                    'redirects_from': ('%s/%s' % (repo_owner, repo_name)),
                })
                # And now change the GitHub repo location of the plugin that
                # the old repo location pointed to
                query = r.table('plugins').get_all(
                    [repo_owner, repo_name], index='github_owner_repo')
                db_plugin = db.util.get_first(query)
                if db_plugin:
                    db_plugin['github_owner'] = redirect_owner
                    db_plugin['github_repo_name'] = redirect_repo_name
                    db.plugins.insert(db_plugin, conflict='replace')
                print 'redirects to %s/%s.' % (redirect_owner,
                                               redirect_repo_name)
            else:
                # TODO(david): Insert some metadata in the github repo that
                #     this is not found
                print 'not found.'
            plugin_data = None
        else:
            plugin_data = get_plugin_data(repo_owner, repo_name, repo_data)
            repo['repo_data'] = repo_data
            repo['repo_id'] = str(repo_data.get('id', repo['repo_id']))
        PluginGithubRepos.log_scrape(repo)
        # NOTE(review): in the 404 path above, repo_data is GitHub's JSON
        # error body, so repo_data.get('fork') below is falsy -- relied upon
        # implicitly here.
        # If this is a fork, note it and ensure we know about original repo.
        if repo_data.get('fork'):
            repo['is_fork'] = True
            PluginGithubRepos.upsert_with_owner_repo({
                'owner': repo_data['parent']['owner']['login'],
                'repo_name': repo_data['parent']['name'],
            })
        PluginGithubRepos.upsert_with_owner_repo(repo)
        # For most cases we don't care about forked repos, unless the forked
        # repo is used by others.
        if repo_data.get('fork') and (
                repo.get('plugin_manager_users', 0) < MIN_FORK_USERS):
            print 'skipping fork of %s' % repo_data['parent']['full_name']
            continue
        if plugin_data:
            # Insert the number of plugin manager users across all names/owners
            # of this repo.
            # TODO(david): Try to also use repo_id for this (but not all repos
            #     have it), or look at multiple levels of redirects.
            plugin_manager_users = repo.get('plugin_manager_users', 0)
            other_repos = r.table('plugin_github_repos').get_all(
                '%s/%s' % (repo_owner, repo_name),
                index='redirects_to').run(r_conn())
            for other_repo in other_repos:
                if other_repo['id'] == repo['id']:
                    continue
                plugin_manager_users += other_repo.get(
                    'plugin_manager_users', 0)
            plugin_data['github_bundles'] = plugin_manager_users
            if repo.get('from_submission'):
                _add_submission_data(plugin_data, repo['from_submission'])
            db.plugins.add_scraped_data(plugin_data, repo,
                                        submission=repo.get('from_submission'))
        print 'done.'
def scrape_vim_scripts_repos(num):
    """Scrape at least num repos from the vim-scripts GitHub user.

    vim-scripts is a bulk mirror of vim.org, so only a minimal set of fields
    is recorded per repo; matching with vim.org plugins is done via vimorg_id.
    """
    _, user_data = get_api_page('users/vim-scripts')
    # Calculate how many pages of repositories there are.
    num_repos = user_data['public_repos']
    # Integer `/` floors on Python 2, which combined with the +99 gives
    # ceil(num_repos / 100.0).
    num_pages = (num_repos + 99) / 100  # ceil(num_repos / 100.0)
    num_inserted = 0
    num_scraped = 0
    for page in range(1, num_pages + 1):
        if num_scraped >= num:
            break
        _, repos_data = get_api_page('users/vim-scripts/repos', page=page)
        for repo_data in repos_data:
            # Scrape plugin-relevant data. We don't need much info from
            # vim-scripts because it's a mirror of vim.org.
            # vimorg_id is required for associating with the corresponding
            # vim.org-scraped plugin.
            vimorg_id = util.get_vimorg_id_from_url(repo_data['homepage'])
            assert vimorg_id
            repo_name = repo_data['name']
            repo = PluginGithubRepos.get_with_owner_repo('vim-scripts',
                                                         repo_name)
            num_bundles = repo['plugin_manager_users'] if repo else 0
            db.plugins.add_scraped_data({
                'vimorg_id': vimorg_id,
                'github_vim_scripts_repo_name': repo_name,
                'github_vim_scripts_stars': repo_data['watchers'],
                'github_vim_scripts_bundles': num_bundles,
            })
            # Also add to our index of known GitHub plugins.
            inserted = PluginGithubRepos.upsert_with_owner_repo({
                'owner': 'vim-scripts',
                'repo_name': repo_name,
                'repo_data': repo_data,
            })
            num_inserted += int(inserted)
            num_scraped += 1
        print '    scraped %s repos' % num_scraped
    print "\nScraped %s vim-scripts GitHub repos; inserted %s new ones." % (
        num_scraped, num_inserted)
###############################################################################
# Code to scrape GitHub dotfiles repos to extract plugins used.
# TODO(david): Write up a blurb on how all of this works.

# The following are names of repos and locations where we search for
# Vundle/Pathogen plugin references. They were found by manually going through
# search results of
# github.com/search?q=scrooloose%2Fsyntastic&ref=searchresults&type=Code
# TODO(david): It would be good to add "vim", "settings", and "config", but
#     there are too many false positives that need to be filtered out.
_DOTFILE_REPO_NAMES = ['vimrc', 'vimfile', 'vim-file', 'vimconf',
                       'vim-conf', 'dotvim', 'vim-setting', 'myvim', 'dotfile',
                       'config-files', 'plug']

# Substrings of filenames that may contain plugin-manager declarations.
_VIMRC_FILENAMES = ['vimrc', 'bundle', 'vundle.vim', 'vundles.vim',
                    'vim.config', 'plugins.vim', 'plug.vim']

# Substrings of directory names likely to hold vim config files.
_VIM_DIRECTORIES = ['vim', 'config', 'home']

# Regexes for extracting plugin references from dotfile repos. See
# github_test.py for examples of what they match and don't.

# Matches eg. "Bundle 'gmarik/vundle'" or "Bundle 'taglist'"
# [^\S\n] means whitespace except newline: stackoverflow.com/a/3469155/392426
_PLUGIN_REGEX_TEMPLATE = r'^[^\S\n]*%s[^\S\n]*[\'"]([^\'"\n\r]+)[\'"]'
_VUNDLE_PLUGIN_REGEX = re.compile(_PLUGIN_REGEX_TEMPLATE %
                                  '(?:Bundle|Plugin)', re.MULTILINE)
_NEOBUNDLE_PLUGIN_REGEX = re.compile(_PLUGIN_REGEX_TEMPLATE %
                                     '(?:NeoBundle|NeoBundleFetch|NeoBundleLazy)', re.MULTILINE)
_VIMPLUG_PLUGIN_REGEX = re.compile(_PLUGIN_REGEX_TEMPLATE %
                                   '(?:Plug)', re.MULTILINE)

# Extracts owner and repo name from a bundle spec -- a git repo URI, implicity
# github.com if host not given.
# eg. ('gmarik', 'vundle') or (None, 'taglist')
_BUNDLE_OWNER_REPO_REGEX = re.compile(
    r'(?:([^:\'"/]+)/)?([^\'"\n\r/]+?)(?:\.git|/)?$')

# Matches a .gitmodules section heading that's likely of a Pathogen bundle.
_SUBMODULE_IS_BUNDLE_REGEX = re.compile(r'submodule.+(bundles?)|(vim.plugins).+',
                                        re.IGNORECASE)
def _extract_bundles_with_regex(file_contents, bundle_plugin_regex):
    """Find plugin repos referenced in a file using the given line regex.

    Arguments:
        file_contents: Text of the file to search through.
        bundle_plugin_regex: Compiled regex matching lines referencing plugin
            repos; its capture group yields the repo spec.
    Returns:
        A list of tuples (owner, repo_name) referencing GitHub repos.
    """
    plugin_repos = []
    for bundle in bundle_plugin_regex.findall(file_contents):
        match = _BUNDLE_OWNER_REPO_REGEX.search(bundle)
        if not (match and len(match.groups()) == 2):
            logging.error(colored(
                'Failed to extract owner/repo from "%s"' % bundle, 'red'))
            continue
        owner, repo = match.groups()
        # Specs without an explicit owner implicitly refer to vim-scripts.
        if owner is None:
            owner = 'vim-scripts'
        plugin_repos.append((owner, repo))
    return plugin_repos
def _extract_bundle_repos_from_file(file_contents):
    """Extract Vundle, NeoBundle and vim-plug plugins from a vimrc-like file.

    Arguments:
        file_contents: Text of the file to search through.
    Returns:
        A ReposByManager named tuple; each field is a list of tuples of the
        form (owner, repo_name) referencing a GitHub repo.
    """
    return ReposByManager(
        _extract_bundles_with_regex(file_contents, _VUNDLE_PLUGIN_REGEX),
        _extract_bundles_with_regex(file_contents, _NEOBUNDLE_PLUGIN_REGEX),
        _extract_bundles_with_regex(file_contents, _VIMPLUG_PLUGIN_REGEX),
    )
def _extract_bundle_repos_from_dir(dir_data, depth=0):
    """Extracts vim plugin bundles from a GitHub dotfiles directory.

    Will recursively search through directories likely to contain vim config
    files (lots of people seem to like putting their vim config in a "vim"
    subdirectory).

    Arguments:
        dir_data: API response from GitHub of a directory or repo's contents.
        depth: Current recursion depth (0 = top-level repo).
    Returns:
        A ReposByManager named tuple; each field is a list of tuples of the
        form (owner, repo_name) referencing a GitHub repo.
    """
    # First, look for top-level files that are likely to contain references to
    # vim plugins.
    files = [f for f in dir_data if f['type'] == 'file']
    for file_data in files:
        filename = file_data['name'].lower()
        if 'gvimrc' in filename:
            continue
        if not any((f in filename) for f in _VIMRC_FILENAMES):
            continue
        # Ok, there could potentially be references to vim plugins here.
        _, file_contents = get_api_page(file_data['url'])
        contents_decoded = base64.b64decode(file_contents.get('content', ''))
        # NOTE(review): "repos_by_manger" is a typo for "repos_by_manager"
        # (consistent throughout this function).
        repos_by_manger = _extract_bundle_repos_from_file(contents_decoded)
        # Return at the first file that yields any plugin references.
        if any(repos_by_manger):
            return repos_by_manger
    # Cap the recursion to avoid crawling arbitrarily deep trees.
    if depth >= 3:
        return ReposByManager([], [], [])
    # No plugins were found, so look in subdirectories that could potentially
    # have vim config files.
    dirs = [f for f in dir_data if f['type'] == 'dir']
    # NOTE(review): the loop variable shadows the ``dir_data`` parameter;
    # harmless since the parameter is not used afterwards, but worth renaming.
    for dir_data in dirs:
        filename = dir_data['name'].lower()
        if not any((f in filename) for f in _VIM_DIRECTORIES):
            continue
        # Ok, there could potentially be vim config files in here.
        _, subdir_data = get_api_page(dir_data['url'])
        repos_by_manger = _extract_bundle_repos_from_dir(subdir_data, depth + 1)
        if any(repos_by_manger):
            return repos_by_manger
    return ReposByManager([], [], [])
def _extract_pathogen_repos(repo_contents):
    """Extracts Pathogen plugin repos from a GitHub dotfiles repository.

    This currently just extracts plugins if they are checked in as submodules,
    because it's easy to extract repo URLs from the .gitmodules file but
    difficult to determine the repo URL of a plugin that's just cloned in.

    Arguments:
        repo_contents: API response from GitHub of a directory or repo's
            contents.
    Returns:
        A list of tuples (owner, repo_name) referencing GitHub repos.
    """
    # NOTE(review): indexing gitmodules[0] below assumes Python 2's filter()
    # (which returns a list); Python 3's lazy filter would break this.
    gitmodules = filter(lambda f: f['type'] == 'file' and
                        f['name'].lower() == '.gitmodules', repo_contents)
    if not gitmodules:
        return []
    _, file_contents = get_api_page(gitmodules[0]['url'])
    # .gitmodules content arrives base64-encoded; decode and coerce to
    # unicode, dropping any bytes that are not valid UTF-8.
    contents_decoded = base64.b64decode(file_contents.get('content', ''))
    contents_unicode = unicode(contents_decoded, 'utf-8', errors='ignore')
    parser = configparser.ConfigParser(interpolation=None)
    try:
        parser.read_string(unicode(contents_unicode))
    except configparser.Error:
        logging.exception(colored(
            'Could not parse the .gitmodules file of %s.' %
            file_contents['url'], 'red'))
        return []
    plugin_repos = []
    for section, config in parser.items():
        # Only consider submodules whose section name suggests a Pathogen
        # bundle location (eg. "bundle/..." or "vim-plugins/...").
        if not _SUBMODULE_IS_BUNDLE_REGEX.search(section):
            continue
        if not config.get('url'):
            continue
        # The parser sometimes over-parses the value
        url = config['url'].split('\n')[0]
        match = _BUNDLE_OWNER_REPO_REGEX.search(url)
        # Unlike vimrc specs, submodule URLs must have an explicit owner
        # (match.group(1)); there is no implicit vim-scripts fallback here.
        if match and len(match.groups()) == 2 and match.group(1):
            owner, repo = match.groups()
            plugin_repos.append((owner, repo))
        else:
            logging.error(colored(
                'Failed to extract owner/repo from "%s"' % url, 'red'))
    return plugin_repos
def _get_plugin_repos_from_dotfiles(repo_data, search_keyword):
    """Search for references to vim plugin repos from a dotfiles repository,
    and insert them into DB.

    Arguments:
        repo_data: API response from GitHub of a repository.
        search_keyword: The keyword used that found this repo.
    Returns:
        A dict of per-manager counts of the plugin references found, or None
        when the repo's contents could not be listed.
    """
    owner_repo = repo_data['full_name']
    # Print w/o newline.
    print "    scraping %s ..." % owner_repo,
    sys.stdout.flush()
    res, contents_data = get_api_page('repos/%s/contents' % owner_repo)
    if res.status_code == 404 or not isinstance(contents_data, list):
        print "contents not found"
        return
    repos_by_manager = _extract_bundle_repos_from_dir(contents_data)
    vundle_repos = repos_by_manager.vundle
    neobundle_repos = repos_by_manager.neobundle
    vimplug_repos = repos_by_manager.vimplug
    pathogen_repos = _extract_pathogen_repos(contents_data)
    owner, repo_name = owner_repo.split('/')
    db_repo = DotfilesGithubRepos.get_with_owner_repo(owner, repo_name)
    pushed_date = dateutil.parser.parse(repo_data['pushed_at'])
    # Serialize an (owner, repo_name) tuple as "owner/repo_name" for storage.
    def stringify_repo(owner_repo_tuple):
        return '/'.join(owner_repo_tuple)
    # Merge the fresh scrape results over any existing DB row for this repo.
    repo = dict(db_repo or {}, **{
        'owner': owner,
        'pushed_at': util.to_timestamp(pushed_date),
        'repo_name': repo_name,
        'search_keyword': search_keyword,
        'vundle_repos': map(stringify_repo, vundle_repos),
        'neobundle_repos': map(stringify_repo, neobundle_repos),
        'vimplug_repos': map(stringify_repo, vimplug_repos),
        'pathogen_repos': map(stringify_repo, pathogen_repos),
    })
    DotfilesGithubRepos.log_scrape(repo)
    DotfilesGithubRepos.upsert_with_owner_repo(repo)
    print 'found %s Vundles, %s NeoBundles, %s VimPlugs, %s Pathogens' % (
        len(vundle_repos), len(neobundle_repos),
        len(vimplug_repos), len(pathogen_repos))
    return {
        'vundle_repos_count': len(vundle_repos),
        'neobundle_repos_count': len(neobundle_repos),
        'vimplug_repos_count': len(vimplug_repos),
        'pathogen_repos_count': len(pathogen_repos),
    }
def scrape_dotfiles_repos(num):
    """Scrape at most num dotfiles repos from GitHub for references to Vim
    plugin repos.

    We perform a search on GitHub repositories that are likely to contain
    Vundle and Pathogen bundles instead of a code search matching
    Vundle/Pathogen commands (which has higher precision and recall), because
    GitHub's API requires code search to be limited to
    a user/repo/organization. :(

    Returns:
        A tuple (number of repos scraped, Counter of per-manager stats).
    """
    # Earliest allowable updated date to start scraping from (so we won't be
    # scraping repos that were last pushed before this date).
    EARLIEST_PUSHED_DATE = datetime.datetime(2013, 1, 1)
    repos_scraped = 0
    scraped_counter = collections.Counter()
    for repo_name in _DOTFILE_REPO_NAMES:
        # Resume from the most recently pushed repo we already have for this
        # search keyword (never earlier than EARLIEST_PUSHED_DATE).
        latest_repo = DotfilesGithubRepos.get_latest_with_keyword(repo_name)
        if latest_repo and latest_repo.get('pushed_at'):
            last_pushed_date = max(datetime.datetime.utcfromtimestamp(
                latest_repo['pushed_at']), EARLIEST_PUSHED_DATE)
        else:
            last_pushed_date = EARLIEST_PUSHED_DATE
        # We're going to scrape all repos updated after the latest updated repo
        # in our DB, starting with the least recently updated. This maintains
        # the invariant that we have scraped all repos pushed before the latest
        # push date (and after EARLIEST_PUSHED_DATE).
        while True:
            start_date_iso = last_pushed_date.isoformat()
            search_params = {
                'q': '%s in:name pushed:>%s' % (repo_name, start_date_iso),
                'sort': 'updated',
                'order': 'asc',
            }
            per_page = 100
            response, search_data = get_api_page('search/repositories',
                    query_params=search_params, page=1, per_page=per_page)
            items = search_data.get('items', [])
            for item in items:
                try:
                    stats = _get_plugin_repos_from_dotfiles(item, repo_name)
                except ApiRateLimitExceededError:
                    logging.exception('API rate limit exceeded.')
                    return repos_scraped, scraped_counter
                except Exception:
                    logging.exception('Error scraping dotfiles repo %s' %
                            item['full_name'])
                    stats = {}
                # Counter.update(None) is a no-op, so a "contents not found"
                # result (stats is None) is safely ignored here.
                scraped_counter.update(stats)
                # If we've scraped the number repos desired, we can quit.
                repos_scraped += 1
                if repos_scraped >= num:
                    return repos_scraped, scraped_counter
                # If we're about to exceed the rate limit (20 requests / min),
                # sleep until the limit resets.
                maybe_wait_until_api_limit_resets(response.headers)
            # If we've scraped all repos with this name, move on to the next
            # repo name.
            if len(items) < per_page:
                break
            else:
                # Advance the pagination window past the newest item seen.
                last_pushed_date = dateutil.parser.parse(
                        items[-1]['pushed_at'])
    return repos_scraped, scraped_counter
| mit | 52068f0faa5afad9bb1a93288e68d27a | 36.40027 | 93 | 0.606681 | 3.860203 | false | false | false | false |
kytos/kytos | kytos/core/buffers.py | 2 | 5301 | """Kytos Buffer Classes, based on Python Queue."""
import logging
# from queue import Queue
from janus import Queue
from kytos.core.events import KytosEvent
__all__ = ('KytosBuffers', )
LOG = logging.getLogger(__name__)
class KytosEventBuffer:
    """KytosEventBuffer represents a queue to store a set of KytosEvents.

    The underlying janus Queue exposes the same queue through a synchronous
    interface (``sync_q``, for use from threads) and an asynchronous one
    (``async_q``, for use from coroutines); the put/get methods below come in
    both flavors.
    """

    def __init__(self, name, event_base_class=None, loop=None):
        """Contructor of KytosEventBuffer receive the parameters below.

        Args:
            name (string): name of KytosEventBuffer.
            event_base_class (class): Class of KytosEvent.
            loop: asyncio event loop the janus Queue is bound to.
        """
        self.name = name
        self._event_base_class = event_base_class
        self._loop = loop
        self._queue = Queue(loop=self._loop)
        # Becomes True once a "kytos/core.shutdown" event passes through;
        # after that put()/aput() silently drop new events.
        self._reject_new_events = False

    def put(self, event):
        """Insert an event in KytosEventBuffer if reject_new_events is False.

        Reject new events is True when a kytos/core.shutdown message was
        received.

        Args:
            event (:class:`~kytos.core.events.KytosEvent`):
                KytosEvent sent to queue.
        """
        if not self._reject_new_events:
            self._queue.sync_q.put(event)
            LOG.debug('[buffer: %s] Added: %s', self.name, event.name)

        # The shutdown event itself is enqueued (above) before the buffer
        # starts rejecting everything that follows it.
        if event.name == "kytos/core.shutdown":
            LOG.info('[buffer: %s] Stop mode enabled. Rejecting new events.',
                     self.name)
            self._reject_new_events = True

    async def aput(self, event):
        """Insert a event in KytosEventBuffer if reject new events is False.

        Async counterpart of :meth:`put`, using the janus async interface.

        Reject new events is True when a kytos/core.shutdown message was
        received.

        Args:
            event (:class:`~kytos.core.events.KytosEvent`):
                KytosEvent sent to queue.
        """
        if not self._reject_new_events:
            await self._queue.async_q.put(event)
            LOG.debug('[buffer: %s] Added: %s', self.name, event.name)

        if event.name == "kytos/core.shutdown":
            LOG.info('[buffer: %s] Stop mode enabled. Rejecting new events.',
                     self.name)
            self._reject_new_events = True

    def get(self):
        """Remove and return a event from top of queue.

        Returns:
            :class:`~kytos.core.events.KytosEvent`:
                Event removed from top of queue.
        """
        event = self._queue.sync_q.get()
        LOG.debug('[buffer: %s] Removed: %s', self.name, event.name)
        return event

    async def aget(self):
        """Remove and return a event from top of queue (async counterpart).

        Returns:
            :class:`~kytos.core.events.KytosEvent`:
                Event removed from top of queue.
        """
        event = await self._queue.async_q.get()
        LOG.debug('[buffer: %s] Removed: %s', self.name, event.name)
        return event

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        If a :func:`~kytos.core.buffers.KytosEventBuffer.join` is currently
        blocking, it will resume if all itens in KytosEventBuffer have been
        processed (meaning that a task_done() call was received for every item
        that had been put() into the KytosEventBuffer).
        """
        self._queue.sync_q.task_done()

    def join(self):
        """Block until all events are gotten and processed.

        A item is processed when the method task_done is called.
        """
        self._queue.sync_q.join()

    def qsize(self):
        """Return the size of KytosEventBuffer."""
        return self._queue.sync_q.qsize()

    def empty(self):
        """Return True if KytosEventBuffer is empty."""
        return self._queue.sync_q.empty()

    def full(self):
        """Return True if KytosEventBuffer is full of KytosEvent."""
        return self._queue.sync_q.full()
class KytosBuffers:
    """Collection of the KytosEventBuffers used by the controller."""

    def __init__(self, loop=None):
        """Build four KytosEventBuffers.

        :attr:`raw`: :class:`~kytos.core.buffers.KytosEventBuffer` with events
        received from network.

        :attr:`msg_in`: :class:`~kytos.core.buffers.KytosEventBuffer` with
        events to be received.

        :attr:`msg_out`: :class:`~kytos.core.buffers.KytosEventBuffer` with
        events to be sent.

        :attr:`app`: :class:`~kytos.core.buffers.KytosEventBuffer` with events
        sent to NApps.
        """
        self._loop = loop
        for buffer_name in ('raw', 'msg_in', 'msg_out', 'app'):
            event_buffer = KytosEventBuffer('%s_event' % buffer_name,
                                            loop=self._loop)
            setattr(self, buffer_name, event_buffer)

    def send_stop_signal(self):
        """Send a ``kytos/core.shutdown`` event to each buffer."""
        LOG.info('Stop signal received by Kytos buffers.')
        LOG.info('Sending KytosShutdownEvent to all apps.')
        event = KytosEvent(name='kytos/core.shutdown')
        for event_buffer in (self.raw, self.msg_in, self.msg_out, self.app):
            event_buffer.put(event)
| mit | a885fb2b4ff61c673671694b0e398613 | 31.521472 | 78 | 0.597434 | 3.696653 | false | false | false | false |
kinverarity1/lasio | lasio/las_items.py | 1 | 17824 | import json
import logging
# The standard library OrderedDict was introduced in Python 2.7 so
# we have a third-party option to support Python 2.6
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import numpy as np
logger = logging.getLogger(__name__)
class HeaderItem(OrderedDict):
    """Dictionary/namedtuple-style object for a LAS header line.

    Arguments:
        mnemonic (str): the mnemonic
        unit (str): the unit (no whitespace!)
        value (str): value
        descr (str): description

    These arguments are available for use as either items or attributes of the
    object.

    """

    def __init__(self, mnemonic="", unit="", value="", descr="", data=None):
        super(HeaderItem, self).__init__()

        # The original mnemonic needs to be stored for rewriting a new file.
        # it might be nothing - '' - or a duplicate e.g. two 'RHO' curves,
        # or unique - 'X11124' - or perhaps invalid??
        # It will be used only when exporting.
        self.original_mnemonic = mnemonic

        # We also need to store a more useful mnemonic, which will be used
        # (technically not, but read on) for people to access the curve while
        # the LASFile object exists. For example, a curve which is unnamed
        # and has an original_mnemonic of '' will be accessed as 'UNKNOWN'.
        # It is used in contexts where duplicate mnemonics are acceptable.
        # see property HeaderItem.useful_mnemonic

        # But note that we need to (later) check (repeatedly) for duplicate
        # mnemonics. Any duplicates will have ':1', ':2', ':3', etc., appended
        # to them. The result of this will be stored as the
        # HeaderItem.mnemonic attribute through the below method.
        # It is used in contexts where duplicate mnemonics cannot exist.
        self.set_session_mnemonic_only(self.useful_mnemonic)

        self.unit = unit
        self.value = value
        self.descr = descr
        self.data = data

    @property
    def useful_mnemonic(self):
        """Return the mnemonic for session use ('UNKNOWN' when blank)."""
        if self.original_mnemonic.strip() == "":
            return "UNKNOWN"
        else:
            return self.original_mnemonic

    @useful_mnemonic.setter
    def useful_mnemonic(self, value):
        raise ValueError("Cannot set read-only attribute; try .mnemonic instead")

    def set_session_mnemonic_only(self, value):
        """Set the mnemonic for session use.

        See source comments for :class:`lasio.HeaderItem.__init__`
        for a more in-depth explanation.

        """
        # Bypass our own __setattr__ so original_mnemonic is not touched.
        super(HeaderItem, self).__setattr__("mnemonic", value)

    def __getitem__(self, key):
        """Provide item dictionary-like access."""
        if key == "mnemonic":
            return self.mnemonic
        elif key == "original_mnemonic":
            return self.original_mnemonic
        elif key == "useful_mnemonic":
            return self.useful_mnemonic
        elif key == "unit":
            return self.unit
        elif key == "value":
            return self.value
        elif key == "descr":
            return self.descr
        else:
            # Bug fix: the message used to hard-code "CurveItem" even when
            # raised from a HeaderItem; report the actual class instead.
            raise KeyError(
                "%s only has restricted items (not %s)"
                % (self.__class__.__name__, key)
            )

    def __setattr__(self, key, value):
        if key == "mnemonic":
            # The user wants to rename the item! This means we must send their
            # new mnemonic to the original_mnemonic attribute. Remember that the
            # mnemonic attribute is for session use only.
            self.original_mnemonic = value
            self.set_session_mnemonic_only(self.useful_mnemonic)
        else:
            super(HeaderItem, self).__setattr__(key, value)

    def __repr__(self):
        result = '%s(mnemonic="%s", unit="%s", value="%s", ' 'descr="%s")' % (
            self.__class__.__name__,
            self.mnemonic,
            self.unit,
            self.value,
            self.descr,
        )
        # Truncate long reprs to 80 chars by repeatedly dropping the
        # third-from-last character, keeping the trailing '")'.
        while len(result) > 80:
            result = result[:-3] + result[-2:]
        return result

    def _repr_pretty_(self, p, cycle):
        """IPython pretty-printing hook."""
        return p.text(self.__repr__())

    def __reduce__(self):
        # Support copying/pickling by reconstructing from constructor args.
        return self.__class__, (
            self.mnemonic,
            self.unit,
            self.value,
            self.descr,
            self.data,
        )

    @property
    def json(self):
        """Read-only JSON representation of this item."""
        return json.dumps(
            {
                "_type": self.__class__.__name__,
                "mnemonic": self.original_mnemonic,
                "unit": self.unit,
                "value": self.value,
                "descr": self.descr,
            }
        )

    @json.setter
    def json(self, value):
        raise Exception("Cannot set objects from JSON")
class CurveItem(HeaderItem):
    """Dictionary/namedtuple-style object for a LAS curve.

    See :class:`lasio.HeaderItem`` for the (keyword) arguments.

    Keyword Arguments:
        data (array-like, 1-D): the curve's data.

    """

    def __init__(self, mnemonic="", unit="", value="", descr="", data=None):
        super(CurveItem, self).__init__(mnemonic, unit, value, descr)
        self.data = np.asarray([] if data is None else data)

    @property
    def API_code(self):
        """Equivalent to the ``value`` attribute."""
        return self.value

    def __repr__(self):
        template = (
            '%s(mnemonic="%s", unit="%s", value="%s", '
            'descr="%s", original_mnemonic="%s", data.shape=%s)'
        )
        return template % (
            self.__class__.__name__,
            self.mnemonic,
            self.unit,
            self.value,
            self.descr,
            self.original_mnemonic,
            self.data.shape,
        )

    @property
    def json(self):
        """Read-only JSON representation of this curve, data included."""
        payload = {
            "_type": self.__class__.__name__,
            "mnemonic": self.original_mnemonic,
            "unit": self.unit,
            "value": self.value,
            "descr": self.descr,
            "data": list(self.data),
        }
        return json.dumps(payload)

    @json.setter
    def json(self, value):
        raise Exception("Cannot set objects from JSON")
class SectionItems(list):
    """Variant of a ``list`` which is used to represent a LAS section."""

    def __init__(self, *args, **kwargs):
        """Initialise like a plain list; mnemonic matching is exact by default."""
        super(SectionItems, self).__init__(*args, **kwargs)
        # Bypass our own __setattr__ (which maps attribute assignment onto
        # item replacement) so the flag is stored as an ordinary attribute.
        super(SectionItems, self).__setattr__("mnemonic_transforms", False)

    def __str__(self):
        """Render the section as a fixed-width plain-text table."""
        rstr_lines = []
        data = [
            ["Mnemonic", "Unit", "Value", "Description"],
            ["--------", "----", "-----", "-----------"],
        ]
        data += [
            [str(x) for x in [item.mnemonic, item.unit, item.value, item.descr]]
            for item in self
        ]
        col_widths = []
        for i in range(len(data[0])):
            col_widths.append(max([len(row[i]) for row in data]))
        for row in data:
            line_items = []
            for i, item in enumerate(row):
                # Two extra spaces act as the column separator.
                line_items.append(item.ljust(col_widths[i] + 2))
            rstr_lines.append("".join(line_items))
        return "\n".join(rstr_lines)

    def mnemonic_compare(self, one, two):
        """Return True if the two mnemonics are considered equal.

        When ``self.mnemonic_transforms`` is True the comparison is
        case-insensitive (non-string operands then never match);
        otherwise it is an exact ``==`` comparison.
        """
        if self.mnemonic_transforms:
            try:
                if one.upper() == two.upper():
                    return True
            except AttributeError:
                # Non-string operand: fall through and report no match.
                pass
        else:
            if one == two:
                return True
        return False

    def __contains__(self, testitem):
        """Check whether a header item or mnemonic is in the section.

        Arguments:
            testitem (HeaderItem, CurveItem, str): either an item or a mnemonic

        Returns:
            bool
        """
        for item in self:
            if self.mnemonic_compare(testitem, item.mnemonic):
                return True
            elif hasattr(testitem, "mnemonic"):
                if self.mnemonic_compare(testitem.mnemonic, item.mnemonic):
                    return True
            elif testitem is item:
                return True
        else:
            # for/else: reached only when no item matched.
            return False

    def keys(self):
        """Return mnemonics of all the HeaderItems in the section."""
        return [item.mnemonic for item in self]

    def values(self):
        """Return HeaderItems in the section."""
        return self

    def items(self):
        """Return pairs of (mnemonic, HeaderItem) from the section."""
        return [(item.mnemonic, item) for item in self]

    def iterkeys(self):
        # Python 2 style iterator API, kept for backwards compatibility.
        return iter(self.keys())

    def itervalues(self):
        # Python 2 style iterator API, kept for backwards compatibility.
        return iter(self)

    def iteritems(self):
        # Python 2 style iterator API, kept for backwards compatibility.
        return iter(self.items())

    def __getslice__(self, i0, i1):
        """For Python 2.7 compatibility."""
        return self.__getitem__(slice(i0, i1))

    def __getitem__(self, key):
        """Item-style access by either mnemonic or index.

        Arguments:
            key (str, int, slice): either a mnemonic or the index to the list.

        Returns:
            item from the list (either HeaderItem or CurveItem)
        """
        if isinstance(key, slice):
            return SectionItems(super(SectionItems, self).__getitem__(key))
        # Mnemonic lookup takes precedence over positional indexing.
        for item in self:
            if self.mnemonic_compare(item.mnemonic, key):
                return item
        if isinstance(key, int):
            return super(SectionItems, self).__getitem__(key)
        else:
            raise KeyError("%s not in %s" % (key, self.keys()))

    def get(self, mnemonic, default="", add=False):
        """Get an item, with a default value for the situation when it is missing.

        Arguments:
            mnemonic (str): mnemonic of item to retrieve
            default (str, HeaderItem, or CurveItem): default to provide
                if *mnemonic* is missing from the section. If a string is
                provided, it will be used as the ``value`` attribute of a new
                HeaderItem or the ``descr`` attribute of a new CurveItem.
            add (bool): if True, the returned HeaderItem/CurveItem will also
                be appended to the SectionItems. By default this is not done.

        Returns:
            :class:`lasio.HeaderItem`/:class:`lasio.CurveItem`: item from
            the section, if it is in there, or
            a new item, if it is not. If a CurveItem is returned, the
            ``data`` attribute will contain ``numpy.nan`` values.
        """
        if mnemonic in self:
            return self[mnemonic]
        else:
            if not (
                isinstance(default, HeaderItem)
                or isinstance(default, CurveItem)
            ):
                default = str(default)
                # Determine appropriate type of item to create (HeaderItem
                # or CurveItem).
                if len(self):
                    first_item = self[0]
                    item_type = type(first_item)
                else:
                    item_type = HeaderItem
                if item_type is CurveItem:
                    # first_item is guaranteed to exist in this branch:
                    # item_type can only be CurveItem when the section is
                    # non-empty.
                    new_data = np.asarray(first_item.data)
                    new_data = new_data * np.nan
                    item = CurveItem(
                        mnemonic=mnemonic,
                        descr=default,
                        data=new_data
                    )
                else:
                    item = HeaderItem(
                        mnemonic=mnemonic,
                        value=default
                    )
            else:
                # default is already an item: copy it under the new mnemonic.
                assert type(default) in (HeaderItem, CurveItem)
                item = type(default)(
                    mnemonic=mnemonic,
                    unit=default.unit,
                    value=default.value,
                    descr=default.descr
                )
                if type(item) is CurveItem:
                    item.data = np.array(default.data)
            if add:
                self.append(item)
            return item

    def __delitem__(self, key):
        """Delete item by either mnemonic or index.

        Arguments:
            key (str, int): either a mnemonic or the index to the list.
        """
        for ix, item in enumerate(self):
            if self.mnemonic_compare(item.mnemonic, key):
                super(SectionItems, self).__delitem__(ix)
                return
        if isinstance(key, int):
            super(SectionItems, self).__delitem__(key)
            return
        else:
            raise KeyError("%s not in %s" % (key, self.keys()))

    def __setitem__(self, key, newitem):
        """Either replace the item or its value.

        Arguments:
            key (int, str): either the mnemonic or the index.
            newitem (HeaderItem or str/float/int): the thing to be set.

        If ``newitem`` is a :class:`lasio.HeaderItem` then the
        existing item will be replaced. Otherwise the existing item's ``value``
        attribute will be replaced.

        i.e. this allows us to do

            >>> from lasio import SectionItems, HeaderItem
            >>> section = SectionItems(
            ...     [HeaderItem(mnemonic="OPERATOR", value="John")]
            ... )
            >>> section.OPERATOR
            HeaderItem(mnemonic=OPERATOR, unit=, value=John, descr=)
            >>> section.OPERATOR = 'Kent'
            >>> section.OPERATOR
            HeaderItem(mnemonic=OPERATOR, unit=, value=Kent, descr=)

        See :meth:`lasio.SectionItems.set_item` and
        :meth:`lasio.SectionItems.set_item_value`.
        """
        if isinstance(newitem, HeaderItem):
            self.set_item(key, newitem)
        else:
            self.set_item_value(key, newitem)

    def __getattr__(self, key):
        """Provide attribute access via __contains__ e.g.

            >>> from lasio import SectionItems, HeaderItem
            >>> section = SectionItems(
            ...     [HeaderItem(mnemonic="VERS", value=1.2)]
            ... )
            >>> section['VERS']
            HeaderItem(mnemonic=VERS, unit=, value=1.2, descr=)
            >>> 'VERS' in section
            True
            >>> section.VERS
            HeaderItem(mnemonic=VERS, unit=, value=1.2, descr=)
        """
        known_attrs = [
            "mnemonic_transforms",
        ]
        if not key in known_attrs:
            if key in self:
                return self[key]
        # No matching mnemonic: defer to the superclass, which ultimately
        # raises AttributeError (list defines no __getattr__ of its own).
        super(SectionItems, self).__getattr__(key)

    def __setattr__(self, key, value):
        """Allow access to :meth:`lasio.SectionItems.__setitem__`
        via attribute access.
        """
        if key in self:
            self[key] = value
        elif isinstance(value, HeaderItem) or isinstance(value, CurveItem):
            # New items may be attached by attribute, but the attribute
            # name must agree with the item's mnemonic.
            assert value.mnemonic == key
            self.append(value)
        else:
            super(SectionItems, self).__setattr__(key, value)

    def set_item(self, key, newitem):
        """Replace an item by comparison of session mnemonics.

        Arguments:
            key (str): the item mnemonic (or HeaderItem with mnemonic)
                you want to replace.
            newitem (HeaderItem): the new item

        If **key** is not present, it appends **newitem**.
        """
        for i, item in enumerate(self):
            if self.mnemonic_compare(key, item.mnemonic):
                # This is very important. We replace items where
                # 'mnemonic' is equal - i.e. we do not check
                # against useful_mnemonic or original_mnemonic.
                return super(SectionItems, self).__setitem__(i, newitem)
        else:
            # for/else: no existing item matched, so append instead.
            self.append(newitem)

    def set_item_value(self, key, value):
        """Set the ``value`` attribute of an item.

        Arguments:
            key (str): the mnemonic of the item (or HeaderItem with the
                mnemonic) you want to edit
            value (str, int, float): the new value.
        """
        self[key].value = value

    def append(self, newitem):
        """Append a new HeaderItem to the object."""
        super(SectionItems, self).append(newitem)
        # Re-number duplicate mnemonics (e.g. "DT:1", "DT:2") after insertion.
        self.assign_duplicate_suffixes(newitem.useful_mnemonic)

    def insert(self, i, newitem):
        """Insert a new HeaderItem to the object."""
        super(SectionItems, self).insert(i, newitem)
        self.assign_duplicate_suffixes(newitem.useful_mnemonic)

    def assign_duplicate_suffixes(self, test_mnemonic=None):
        """Check and re-assign suffixes for duplicate mnemonics.

        Arguments:
            test_mnemonic (str, optional): check for duplicates of
                this mnemonic. If it is None, check all mnemonics.
        """
        if test_mnemonic is None:
            for test_mnemonic in {i.useful_mnemonic for i in self}:
                self.assign_duplicate_suffixes(test_mnemonic)
        else:
            # NOTE(review): 'existing' is computed but never used below --
            # candidate for removal.
            existing = [item.useful_mnemonic for item in self]
            locations = []
            for i, item in enumerate(self):
                if self.mnemonic_compare(item.useful_mnemonic, test_mnemonic):
                    locations.append(i)
            if len(locations) > 1:
                # NOTE(review): 'current_count' is unused; the ":%d" suffix
                # is derived from the enumerate index instead.
                current_count = 1
                for i, loc in enumerate(locations):
                    item = self[loc]
                    item.set_session_mnemonic_only(
                        item.useful_mnemonic + ":%d" % (i + 1)
                    )

    def dictview(self):
        """View of mnemonics and values as a dict.

        Returns:
            dict - keys are the mnemonics and the values are the ``value``
            attributes.
        """
        return dict(zip(self.keys(), [i.value for i in self.values()]))

    @property
    def json(self):
        """Serialise the section as a JSON array of per-item JSON strings."""
        return json.dumps([item.json for item in self.values()])

    @json.setter
    def json(self, value):
        """The JSON view is read-only; any assignment raises."""
        raise Exception("Cannot set objects from JSON")
| mit | f4b5d60aefb2b54146f072a377e5f747 | 31.466302 | 82 | 0.537421 | 4.34308 | false | false | false | false |
kinverarity1/lasio | tests/test_read_header_line.py | 3 | 3499 | # coding=utf-8
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from pprint import pprint
from lasio.reader import read_header_line
def test_time_str_and_colon_in_desc():
    """Colons inside a time/date value must not be mistaken for the delimiter."""
    line = "TIML.hh:mm 23:15 23-JAN-2001: Time Logger: At Bottom"
    result = read_header_line(line, section_name="Parameter")
    assert result["value"] == "23:15 23-JAN-2001"
    assert result["descr"] == "Time Logger: At Bottom"
def test_time_str_and_colon_in_desc_2():
    """ISO datetime values with embedded colons parse into value/unit/descr."""
    # https://github.com/kinverarity1/lasio/issues/419
    line = "STRT.DateTime 2012-09-16T07:44:12-05:00 : START DEPTH"
    result = read_header_line(line, section_name="Well")
    assert result["value"] == "2012-09-16T07:44:12-05:00"
    assert result["descr"] == "START DEPTH"
    assert result["unit"] == "DateTime"
def test_cyrillic_depth_unit():
    """Non-ASCII (Cyrillic) unit strings must survive header parsing intact."""
    line = u" DEPT.метер : 1 DEPTH"
    result = read_header_line(line, section_name="Curves")
    assert result["unit"] == u"метер"
def test_unit_stat_with_dot():
    """A unit that itself starts with a dot (e.g. '.1IN') is kept verbatim."""
    line = u" TDEP ..1IN : 0.1-in"
    result = read_header_line(line, section_name="Curves")
    assert result["unit"] == u".1IN"
def test_value_field_with_num_colon():
    """A numeric value followed by ':' splits into value and description."""
    line = "RUN . 01: RUN NUMBER"
    result = read_header_line(line, section_name="Parameter")
    assert result["value"] == "01"
    assert result["descr"] == "RUN NUMBER"
def test_non_delimiter_colon_in_desc():
    """Only the first ':' delimits; later colons stay in the description."""
    line = "QI . : Survey quality: GOOD or BAD versus criteria"
    result = read_header_line(line, section_name="Parameter")
    assert result["value"] == ""
    assert result["descr"] == "Survey quality: GOOD or BAD versus criteria"
def test_dot_in_name():
    """Mnemonics containing dots are split from the unit correctly."""
    # issue_264
    line = "I. Res..OHM-M "
    result = read_header_line(line, section_name="Curves")
    assert result["name"] == "I. Res."
def test_pattern_arg():
    """A caller-supplied regex pattern overrides the section default."""
    line = "DEPT.M : 1 DEPTH"
    # Named groups mirror the fields read_header_line returns.
    name_re = "\\.?(?P<name>[^.]*)"
    unit_re = "\\.(?P<unit>[^\\s]*)"
    value_re = "(?P<value>.*)"
    colon_delim = ":"
    descr_re = "(?P<descr>.*)"
    pattern_re = "".join((name_re, unit_re, value_re, colon_delim, descr_re))
    result = read_header_line(line, section_name="Curves", pattern=pattern_re)
    assert result["name"] == "DEPT"
    assert result["unit"] == "M"
    assert result["value"] == ""
def test_unit_with_space():
    """Units containing an internal space (e.g. '1000 lbf') are preserved."""
    line = "HKLA .1000 lbf :(RT)"
    result = read_header_line(line, section_name="Parameter")
    assert result["name"] == "HKLA"
    assert result["unit"] == "1000 lbf"
    assert result["value"] == ""
    assert result["descr"] == "(RT)"
def test_line_without_period():
    """Lines with no '.' separator still yield a name and value."""
    line = " DRILLED :12/11/2010"
    result = read_header_line(line)
    assert result["name"] == "DRILLED"
    assert result["value"] == "12/11/2010"
def test_line_without_period_with_space():
    """A period-less line whose name contains a space keeps the full name."""
    line = " PERM DAT :1"
    result = read_header_line(line)
    assert result["name"] == "PERM DAT"
    assert result["value"] == "1"
def test_line_without_period_with_colon():
    """Period-less line: colons after the first remain part of the value."""
    line = " TIME :14:00:32"
    result = read_header_line(line)
    assert result["name"] == "TIME"
    assert result["value"] == "14:00:32"
def test_line_without_period_with_decimal_value():
    """Period-less line with a decimal value: the '.' is not a unit separator."""
    line = " HOLE DIA :85.7"
    result = read_header_line(line)
    assert result["name"] == "HOLE DIA"
    assert result["value"] == "85.7"
| mit | 5ca72a8b93a73502e7104580208496fd | 30.718182 | 80 | 0.584122 | 3.106857 | false | true | false | false |
kytos/kytos | kytos/core/napps/napp_dir_listener.py | 2 | 2648 | """Module to monitor installed napps."""
import logging
import re
from pathlib import Path
from watchdog.events import RegexMatchingEventHandler
from watchdog.observers import Observer
LOG = logging.getLogger(__name__)
class NAppDirListener(RegexMatchingEventHandler):
    """Class to handle directory changes."""

    # Only react to paths shaped like <...>/kytos/napps/<username>/<napp>/...
    regexes = [re.compile(r".*\/kytos\/napps\/[a-zA-Z][^/]+\/[a-zA-Z].*")]
    # '.installed' is a working directory, not an enabled NApp - ignore it.
    ignore_regexes = [re.compile(r".*\.installed")]
    _controller = None

    def __init__(self, controller):
        """Require controller to get NApps dir, load and unload NApps.

        In order to watch the NApps dir for modifications, it must be created
        if it doesn't exist (in this case, kytos-utils has not been run before
        kytosd). We use the same dir permissions as in kytos-utils.

        Args:
            controller(kytos.core.controller): A controller instance.
        """
        super().__init__()
        self._controller = controller
        self.napps_path = self._controller.options.napps
        # Mirror kytos-utils: world-writable under /var, standard otherwise.
        mode = 0o777 if self.napps_path.startswith('/var') else 0o755
        Path(self.napps_path).mkdir(mode=mode, parents=True, exist_ok=True)
        self.observer = Observer()

    def start(self):
        """Start handling directory changes."""
        # recursive=True so events inside NApp subdirectories are observed.
        self.observer.schedule(self, self.napps_path, True)
        self.observer.start()
        LOG.info('NAppDirListener Started...')

    def stop(self):
        """Stop handling directory changes."""
        self.observer.stop()
        LOG.info('NAppDirListener Stopped...')

    def _get_napp(self, absolute_path):
        """Get a username and napp_name from absolute path.

        Args:
            absolute_path(str): String with absolute path.

        Returns:
            tuple: Tuple with username and napp_name.
        """
        relative_path = absolute_path.replace(self.napps_path, '')
        # Relative path is '/<username>/<napp_name>/...'; keep the two parts.
        return tuple(relative_path.split('/')[1:3])

    def on_created(self, event):
        """Load a napp from created directory.

        Args:
            event(watchdog.events.DirCreatedEvent): Event received from an
                observer.
        """
        napp = self._get_napp(event.src_path)
        LOG.debug('The NApp "%s/%s" was enabled.', *napp)
        self._controller.load_napp(*napp)

    def on_deleted(self, event):
        """Unload a napp from deleted directory.

        Args:
            event(watchdog.events.DirDeletedEvent): Event received from an
                observer.
        """
        napp = self._get_napp(event.src_path)
        LOG.debug('The NApp "%s/%s" was disabled.', *napp)
        self._controller.unload_napp(*napp)
| mit | 5f1e71bb46968dfaf294e41ae4af3d0a | 32.1 | 78 | 0.617447 | 3.877013 | false | false | false | false |
kytos/kytos | docs/blueprints/make_blueprints_table.py | 2 | 2703 | """Generate Blueprints table."""
import glob
import re
def create_table(directory):
    """Create a reST grid table summarising blueprint documents.

    Scans ``directory`` for files named ``EP*.rst``, extracts the
    ``:Title:`` and ``:Status:`` fields from each, and renders a grid
    table with one row per blueprint linking to its document.

    Fixes over the previous version:
    - the blueprint name is derived from ``directory`` instead of a
      hard-coded ``./blueprints/`` prefix, so any directory works;
    - a file missing ``:Title:`` or ``:Status:`` gets an empty cell
      instead of desynchronising the row lists (IndexError);
    - trailing newlines are stripped before width calculation, so the
      columns are no longer one character too wide.

    Args:
        directory (str): path of the directory containing ``EP*.rst``.

    Returns:
        str: the complete reST grid table (header plus one row per file).
    """
    rows = []  # one (doc-link, title, status) cell triple per blueprint
    for fp_file in sorted(glob.glob(f'{directory}/EP*.rst')):
        # Document name relative to the directory, without the extension.
        bp_rst = fp_file[len(directory) + 1:]
        if bp_rst.endswith('.rst'):
            bp_rst = bp_rst[:-len('.rst')]
        title = ''
        status = ''
        with open(fp_file, encoding='utf-8') as origin_file:
            for line in origin_file:
                if re.findall(r':Title:', line):
                    # Keep the leading space after the field marker, as before.
                    title = ''.join(line.split(':Title:')).rstrip('\n')
                if re.findall(r':Status:', line):
                    status = ''.join(line.split(':Status:')).rstrip('\n')
                    # Status is the last field we need from this file.
                    break
        rows.append((f" :doc:`{bp_rst}<{bp_rst}/>` ", title, status))

    headers = (' Blueprint', ' Title', ' Status')
    # Columns are never narrower than their header label.
    widths = [len(header) for header in headers]
    for row in rows:
        widths = [max(w, len(cell)) for w, cell in zip(widths, row)]

    def _border(char):
        """One horizontal rule line built from `char` (e.g. '-' or '=')."""
        return '+' + '+'.join(char * w for w in widths) + '+\n'

    table = _border('-')
    table += '|' + '|'.join(h.ljust(w) for h, w in zip(headers, widths)) + '|\n'
    table += _border('=')
    for row in rows:
        table += '|' + '|'.join(c.ljust(w) for c, w in zip(row, widths)) + '|\n'
        table += _border('-')
    return table
def write_new_index_rst(directory):
    """Write the generated blueprints table to ``<directory>/bp_table.rst``.

    Args:
        directory (str): path of the directory containing the blueprints.
    """
    blueprints_table = create_table(directory)
    # Explicit encoding so output does not depend on the locale (PEP 597).
    with open(f'{directory}/bp_table.rst', 'w', encoding='utf-8') as fp_file:
        fp_file.write(blueprints_table)
if __name__ == '__main__':
    # Regenerate the blueprints table when run as a script.
    write_new_index_rst('./blueprints')
| mit | e4b1c748a55f76898e456fabc7a7302a | 32.7875 | 75 | 0.501665 | 3.089143 | false | false | false | false |
vitalik/django-ninja | tests/test_docs/test_auth.py | 1 | 3738 | from unittest.mock import Mock, patch
import pytest
from ninja import NinjaAPI
from ninja.testing import TestClient
def test_intro():
    """The intro example: anonymous requests are 401, authenticated pass."""
    from docs.src.tutorial.authentication.code001 import api

    client = TestClient(api)
    assert client.get("/pets").status_code == 401
    # A mock user flagged as authenticated satisfies django_auth.
    user = Mock()
    user.is_authenticated = True
    response = client.get("/pets", user=user)
    assert response.status_code == 200
@pytest.mark.django_db
def test_examples():
    """Exercise every documented auth scheme against one combined API."""
    from someapp.models import Client

    api = NinjaAPI(csrf=True)
    # API-key lookups in the docs use this database-backed key.
    Client.objects.create(key="12345")

    # Importing the doc modules registers their operations on `api`.
    with patch("builtins.api", api, create=True):
        import docs.src.tutorial.authentication.apikey01  # noqa: F401
        import docs.src.tutorial.authentication.apikey02  # noqa: F401
        import docs.src.tutorial.authentication.apikey03  # noqa: F401
        import docs.src.tutorial.authentication.basic01  # noqa: F401
        import docs.src.tutorial.authentication.bearer01  # noqa: F401
        import docs.src.tutorial.authentication.code001  # noqa: F401
        import docs.src.tutorial.authentication.code002  # noqa: F401
        import docs.src.tutorial.authentication.multiple01  # noqa: F401
        import docs.src.tutorial.authentication.schema01  # noqa: F401

        client = TestClient(api)

        response = client.get("/ipwhiltelist", META={"REMOTE_ADDR": "127.0.0.1"})
        assert response.status_code == 401
        response = client.get("/ipwhiltelist", META={"REMOTE_ADDR": "8.8.8.8"})
        assert response.status_code == 200

        # Api key --------------------------------
        response = client.get("/apikey")
        assert response.status_code == 401
        response = client.get("/apikey?api_key=12345")
        assert response.status_code == 200

        response = client.get("/headerkey")
        assert response.status_code == 401
        response = client.get("/headerkey", headers={"X-API-Key": "supersecret"})
        assert response.status_code == 200

        response = client.get("/cookiekey")
        assert response.status_code == 401
        response = client.get("/cookiekey", COOKIES={"key": "supersecret"})
        assert response.status_code == 200

        # Basic http --------------------------------
        response = client.get("/basic")
        assert response.status_code == 401
        # 'YWRtaW46c2VjcmV0' is base64 for 'admin:secret'.
        response = client.get(
            "/basic", headers={"Authorization": "Basic YWRtaW46c2VjcmV0"}
        )
        assert response.status_code == 200
        assert response.json() == {"httpuser": "admin"}

        # Bearer http --------------------------------
        response = client.get("/bearer")
        assert response.status_code == 401
        response = client.get(
            "/bearer", headers={"Authorization": "Bearer supersecret"}
        )
        assert response.status_code == 200

        # Multiple ------------------------------------
        assert client.get("/multiple").status_code == 401
        assert client.get("/multiple?key=supersecret").status_code == 200
        assert (
            client.get("/multiple", headers={"key": "supersecret"}).status_code == 200
        )
def test_global():
    """Global auth applies to operations registered after API creation too."""
    from docs.src.tutorial.authentication.global01 import api

    @api.get("/somemethod")
    def mustbeauthed(request):
        return {"auth": request.auth}

    client = TestClient(api)
    assert client.get("/somemethod").status_code == 401
    # The /token endpoint is exempt from the global auth and issues a token.
    resp = client.post(
        "/token", POST={"username": "admin", "password": "giraffethinnknslong"}
    )
    assert resp.status_code == 200
    assert resp.json() == {"token": "supersecret"}

    resp = client.get("/somemethod", headers={"Authorization": "Bearer supersecret"})
| mit | d0be4b7eb957b236e2755453305b3073 | 32.981818 | 86 | 0.613965 | 4.107692 | false | true | false | false |
vitalik/django-ninja | tests/test_openapi_params.py | 1 | 1954 | from ninja import NinjaAPI
api = NinjaAPI()
# operation_id is set explicitly; the docstring (left untouched - it is
# runtime data) becomes the OpenAPI description.
@api.get("/operation1", operation_id="my_id")
def operation_1(request):
    """
    This will be in description
    """
    return {"docstrings": True}
# Description comes from the decorator argument; operation is flagged deprecated.
@api.get("/operation2", description="description from argument", deprecated=True)
def operation2(request):
    return {"description": True, "deprecated": True}
# Decorator arguments override both the generated summary and the docstring
# description (docstring left untouched - it is runtime data).
@api.get("/operation3", summary="Summary from argument", description="description arg")
def operation3(request):
    "This one also has docstring description"
    return {"summary": True, "description": "multiple"}
# Tags are attached verbatim to the OpenAPI operation object.
@api.get("/operation4", tags=["tag1", "tag2"])
def operation4(request):
    return {"tags": True}
# include_in_schema=False keeps the route functional but hides it from OpenAPI.
@api.get("/not-included", include_in_schema=False)
def not_included(request):
    return True
# Build the schema once at import time; all tests below inspect it.
schema = api.get_openapi_schema()
def test_schema():
    """operationId/summary/description/deprecated/tags all land in the schema."""
    op1 = schema["paths"]["/api/operation1"]["get"]
    op2 = schema["paths"]["/api/operation2"]["get"]
    op3 = schema["paths"]["/api/operation3"]["get"]
    op4 = schema["paths"]["/api/operation4"]["get"]

    # Explicit operation_id wins; otherwise it is derived from module+function.
    assert op1["operationId"] == "my_id"
    assert op2["operationId"] == "test_openapi_params_operation2"
    assert op3["operationId"] == "test_openapi_params_operation3"
    assert op4["operationId"] == "test_openapi_params_operation4"

    assert op1["summary"] == "Operation 1"
    assert op2["summary"] == "Operation2"
    assert op3["summary"] == "Summary from argument"
    assert op4["summary"] == "Operation4"

    assert op1["description"] == "This will be in description"
    assert op2["description"] == "description from argument"
    assert op2["deprecated"] is True
    assert op3["description"] == "description arg"
    assert op4["tags"] == ["tag1", "tag2"]
def test_not_included():
    """Operations registered with include_in_schema=False are absent."""
    assert list(schema["paths"].keys()) == [
        "/api/operation1",
        "/api/operation2",
        "/api/operation3",
        "/api/operation4",
    ]
    # checking that "/not-included" is not there
| mit | a4f3d1d39ab0ded174c3cfce04f542be | 26.521127 | 87 | 0.646878 | 3.578755 | false | true | false | false |
pytransitions/transitions | transitions/extensions/states.py | 1 | 9846 | """
transitions.extensions.states
-----------------------------
This module contains mix ins which can be used to extend state functionality.
"""
from collections import Counter
from threading import Timer
import logging
import inspect
from ..core import MachineError, listify, State
_LOGGER = logging.getLogger(__name__)
_LOGGER.addHandler(logging.NullHandler())
class Tags(State):
    """ Allows states to be tagged.

    Attributes:
        tags (list): A list of tag strings. `State.is_<tag>` may be used
            to check if <tag> is in the list.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            **kwargs: If kwargs contains `tags`, assign them to the attribute.
        """
        self.tags = kwargs.pop('tags', [])
        super(Tags, self).__init__(*args, **kwargs)

    def __getattr__(self, item):
        # Dynamic 'is_<tag>' membership check; anything else falls back to
        # normal attribute lookup (raising AttributeError if absent).
        if item.startswith('is_'):
            return item[3:] in self.tags
        return super(Tags, self).__getattribute__(item)
class Error(Tags):
    """ This mix in builds upon tag and should be used INSTEAD of Tags if final states that have
    not been tagged with 'accepted' should throw an `MachineError`.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            **kwargs: If kwargs contains the keyword `accepted` add the 'accepted' tag to a tag list
                which will be forwarded to the Tags constructor.
        """
        tags = kwargs.get('tags', [])
        accepted = kwargs.pop('accepted', False)
        if accepted:
            tags.append('accepted')
            kwargs['tags'] = tags
        super(Error, self).__init__(*args, **kwargs)

    def enter(self, event_data):
        """ Extends transitions.core.State.enter. Throws a `MachineError` if there is
        no leaving transition from this state and 'accepted' is not in self.tags.
        """
        # A state with no outgoing triggers is final; entering it is only
        # legal when it has been explicitly accepted.
        if not event_data.machine.get_triggers(self.name) and not self.is_accepted:
            raise MachineError("Error state '{0}' reached!".format(self.name))
        super(Error, self).enter(event_data)
class Timeout(State):
    """ Adds timeout functionality to a state. Timeouts are handled model-specific.

    Attributes:
        timeout (float): Seconds after which a timeout function should be called.
        on_timeout (list): Functions to call when a timeout is triggered.
    """

    dynamic_methods = ['on_timeout']

    def __init__(self, *args, **kwargs):
        """
        Args:
            **kwargs: If kwargs contain 'timeout', assign the float value to self.timeout. If timeout
                is set, 'on_timeout' needs to be passed with kwargs as well or an AttributeError will
                be thrown. If timeout is not passed or equal 0.
        """
        self.timeout = kwargs.pop('timeout', 0)
        self._on_timeout = None
        if self.timeout > 0:
            try:
                self.on_timeout = kwargs.pop('on_timeout')
            except KeyError:
                raise AttributeError("Timeout state requires 'on_timeout' when timeout is set.")  # from KeyError
        else:
            self._on_timeout = kwargs.pop('on_timeout', [])
        # One running Timer per model, keyed by id(model).
        self.runner = {}
        super(Timeout, self).__init__(*args, **kwargs)

    def enter(self, event_data):
        """ Extends `transitions.core.State.enter` by starting a timeout timer for the current model
        when the state is entered and self.timeout is larger than 0.
        """
        if self.timeout > 0:
            timer = Timer(self.timeout, self._process_timeout, args=(event_data,))
            # Daemon thread so a pending timer never blocks interpreter exit.
            timer.daemon = True
            timer.start()
            self.runner[id(event_data.model)] = timer
        return super(Timeout, self).enter(event_data)

    def exit(self, event_data):
        """ Extends `transitions.core.State.exit` by canceling a timer for the current model. """
        timer = self.runner.get(id(event_data.model), None)
        if timer is not None and timer.is_alive():
            timer.cancel()
        return super(Timeout, self).exit(event_data)

    def _process_timeout(self, event_data):
        # Runs on the Timer thread once the timeout elapses without an exit.
        _LOGGER.debug("%sTimeout state %s. Processing callbacks...", event_data.machine.name, self.name)
        for callback in self.on_timeout:
            event_data.machine.callback(callback, event_data)
        _LOGGER.info("%sTimeout state %s processed.", event_data.machine.name, self.name)

    @property
    def on_timeout(self):
        """ List of strings and callables to be called when the state timeouts. """
        return self._on_timeout

    @on_timeout.setter
    def on_timeout(self, value):
        """ Listifies passed values and assigns them to on_timeout."""
        self._on_timeout = listify(value)
class Volatile(State):
    """ Adds scopes/temporal variables to the otherwise persistent state objects.

    Attributes:
        volatile_cls (cls): Class of the temporal object to be initiated.
        volatile_hook (str): Model attribute name which will contain the volatile instance.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            **kwargs: If kwargs contains `volatile`, always create an instance of the passed class
                whenever the state is entered. The instance is assigned to a model attribute which
                can be passed with the kwargs keyword `hook`. If hook is not passed, the instance will
                be assigned to the 'attribute' scope. If `volatile` is not passed, an empty object will
                be assigned to the model's hook.
        """
        self.volatile_cls = kwargs.pop('volatile', VolatileObject)
        self.volatile_hook = kwargs.pop('hook', 'scope')
        super(Volatile, self).__init__(*args, **kwargs)
        self.initialized = True

    def enter(self, event_data):
        """ Extends `transitions.core.State.enter` by creating a volatile object and assign it to
        the current model's hook. """
        setattr(event_data.model, self.volatile_hook, self.volatile_cls())
        super(Volatile, self).enter(event_data)

    def exit(self, event_data):
        """ Extends `transitions.core.State.exit` by deleting the temporal object from the model. """
        super(Volatile, self).exit(event_data)
        try:
            delattr(event_data.model, self.volatile_hook)
        except AttributeError:
            # Hook may already be gone (e.g. model replaced it); ignore.
            pass
class Retry(State):
    """ The Retry mix-in sets a limit on the number of times a state may be
    re-entered from itself.

    The first time a state is entered it does not count as a retry. Thus with
    `retries=3` the state can be entered four times before it fails.

    When the retry limit is exceeded, the state is not entered and instead the
    `on_failure` callback is invoked on the model. For example,

        Retry(retries=3, on_failure='to_failed')

    transitions the model directly to the 'failed' state, if the machine has
    automatic transitions enabled (the default).

    Attributes:
        retries (int): Number of retries to allow before failing.
        on_failure (str): Function to invoke on the model when the retry limit
            is exceeded.
    """

    def __init__(self, *args, **kwargs):
        """
        Args:
            **kwargs: If kwargs contains `retries`, then limit the number of times
                the state may be re-entered from itself. The argument `on_failure`,
                which is the function to invoke on the model when the retry limit
                is exceeded, must also be provided.
        """
        self.retries = kwargs.pop('retries', 0)
        self.on_failure = kwargs.pop('on_failure', None)
        # Per-model retry counters, keyed by id(model).
        # NOTE(review): id() values can be reused after a model is garbage
        # collected -- confirm models outlive the machine in practice.
        self.retry_counts = Counter()
        if self.retries > 0 and self.on_failure is None:
            raise AttributeError("Retry state requires 'on_failure' when "
                                 "'retries' is set.")
        super(Retry, self).__init__(*args, **kwargs)

    def enter(self, event_data):
        k = id(event_data.model)

        # If we are entering from a different state, then this is our first try;
        # reset the retry counter.
        if event_data.transition.source != self.name:
            _LOGGER.debug('%sRetry limit for state %s reset (came from %s)',
                          event_data.machine.name, self.name,
                          event_data.transition.source)
            self.retry_counts[k] = 0

        # If we have tried too many times, invoke our failure callback instead
        if self.retry_counts[k] > self.retries > 0:
            _LOGGER.info('%sRetry count for state %s exceeded limit (%i)',
                         event_data.machine.name, self.name, self.retries)
            event_data.machine.callback(self.on_failure, event_data)
            return

        # Otherwise, increment the retry count and continue per normal
        _LOGGER.debug('%sRetry count for state %s is now %i',
                      event_data.machine.name, self.name, self.retry_counts[k])
        self.retry_counts.update((k,))
        super(Retry, self).enter(event_data)
def add_state_features(*args):
    """ State feature decorator. Should be used in conjunction with a custom Machine class. """
    def _class_decorator(cls):
        # Bundle all requested mix-ins into a single base, then combine it
        # with the Machine's current state class.
        feature_base = type('CustomState', args, {})

        class CustomState(feature_base, cls.state_cls):
            """ The decorated State. It is based on the State class used by the decorated Machine. """

        # Merge 'dynamic_methods' declared anywhere in the MRO, dropping
        # duplicates contributed by multiple bases.
        collected = []
        for klass in inspect.getmro(CustomState):
            collected.extend(getattr(klass, 'dynamic_methods', []))
        CustomState.dynamic_methods = list(set(collected))

        cls.state_cls = CustomState
        return cls
    return _class_decorator
class VolatileObject(object):
    """ Empty Python object which can be used to assign attributes to."""
    # Default 'volatile' class for the Volatile state mix-in.
| mit | d27f8e585e8fc0a7f1dbeba7567e4b94 | 39.854772 | 120 | 0.610908 | 4.305203 | false | false | false | false |
heynemann/pyvows | pyvows/runner/abc.py | 1 | 2790 | # -*- coding: utf-8 -*-
'''Abstract base class for all PyVows Runner implementations.'''
# pyvows testing engine
# https://github.com/heynemann/pyvows
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 Bernardo Heynemann heynemann@gmail.com
import sys
import time
from pyvows.runner.utils import get_file_info_for
from pyvows.utils import elapsed
from pyvows.runner import SkipTest
#-------------------------------------------------------------------------------------------------
class VowsRunnerABC(object):
    """Shared behaviour for PyVows runners; subclasses implement run()."""

    def __init__(self, suites, context_class, on_vow_success, on_vow_error, execution_plan, capture_output=False):
        self.suites = suites  # a suite is a file with pyvows tests
        self.context_class = context_class
        self.on_vow_success = on_vow_success
        self.on_vow_error = on_vow_error
        self.execution_plan = execution_plan
        self.capture_output = capture_output

    def run(self):
        """Run all suites; concrete runners override this."""
        pass

    def run_context(self):
        """Run a single context; concrete runners override this."""
        pass

    def get_vow_result(self, vow, topic, ctx_obj, vow_name, enumerated):
        """Build the empty result record for one vow before it is executed."""
        filename, lineno = get_file_info_for(vow)
        vow_result = {
            'context_instance': ctx_obj,
            'name': vow_name,
            'enumerated': enumerated,
            'result': None,
            'topic': topic,
            'error': None,
            'skip': None,
            'succeeded': False,
            'file': filename,
            'lineno': lineno,
            'elapsed': 0,
            'stdout': '',
            'stderr': ''
        }
        return vow_result

    def run_vow(self, tests_collection, topic, ctx_obj, vow, vow_name, enumerated):
        """Execute one vow, record success/skip/error, and append the result."""
        start_time = time.time()
        vow_result = self.get_vow_result(vow, topic, ctx_obj, vow_name, enumerated)
        try:
            result = vow(ctx_obj, topic)
            vow_result['result'] = result
            vow_result['succeeded'] = True
            if self.on_vow_success:
                self.on_vow_success(vow_result)
        except SkipTest as se:
            vow_result['skip'] = se
        except:
            # Deliberately broad: any other exception is a vow failure, and
            # sys.exc_info() captures it for the report.
            err_type, err_value, err_traceback = sys.exc_info()
            vow_result['error'] = {
                'type': err_type,
                'value': err_value,
                'traceback': err_traceback
            }
            if self.on_vow_error:
                self.on_vow_error(vow_result)
        vow_result['elapsed'] = elapsed(start_time)
        tests_collection.append(vow_result)
        return vow_result
class VowsTopicError(Exception):
    """Wraps an error raised inside a context's setup or topic function.

    ``source`` names the failing phase and ``exc_info`` carries the
    ``sys.exc_info()`` triple captured at the failure site.
    """

    def __init__(self, source, exc_info):
        self.source, self.exc_info = source, exc_info
| mit | 3ca05ef734ca19de33edab17c8d4f3de | 29.326087 | 114 | 0.554839 | 3.695364 | false | true | false | false |
heynemann/pyvows | tests/captured_output_vows.py | 2 | 4129 | from __future__ import print_function
import sys
from pyvows import Vows, expect
from pyvows.runner.gevent import VowsParallelRunner
from pyvows.runner.executionplan import ExecutionPlanner
from pyvows.runner import VowsRunner
@Vows.batch
class CapturedOutputVows(Vows.Context):
    """Verify that runner output capture records stdout/stderr per context/vow."""

    class ResultsFromContextThatExplicitlyCapturesOutput(Vows.Context):
        def topic(self):
            # Run a one-suite batch whose contexts write to the runner's
            # output streams directly (capture_output=False).
            dummySuite = {'dummySuite': set([OutputSomeStuff])}
            execution_plan = ExecutionPlanner(dummySuite, set(), set()).plan()
            runner = VowsRunner(dummySuite, Vows.Context, None, None, execution_plan, False)
            return runner.run()

        def results_are_successful(self, topic):
            expect(topic.successful).to_equal(True)

        class TopContextStdout(Vows.Context):
            def topic(self, results):
                return results.contexts[0]['stdout']

            def has_setup_topic_teardown(self, topic):
                # setup/topic/teardown all wrote one line each, in order.
                expect(topic).to_equal('setup\ntopic\nteardown\n')

        class TopContextStderr(Vows.Context):
            def topic(self, results):
                return results.contexts[0]['stderr']

            def has_setup_topic_teardown_err(self, topic):
                expect(topic).to_equal('setup-err\ntopic-err\nteardown-err\n')

        class SubcontextStdout(Vows.Context):
            def topic(self, results):
                return results.contexts[0]['contexts'][0]['stdout']

            def has_subcontext_topic(self, topic):
                expect(topic).to_equal('subcontext-topic\n')

        class SubcontextStderr(Vows.Context):
            def topic(self, results):
                return results.contexts[0]['contexts'][0]['stderr']

            def has_subcontext_topic_err(self, topic):
                expect(topic).to_equal('subcontext-topic-err\n')

        class TopContextVowStdout(Vows.Context):
            def topic(self, results):
                return results.contexts[0]['tests'][0]['stdout']

            def has_vow(self, topic):
                expect(topic).to_equal('vow\n')

        class TopContextVowStderr(Vows.Context):
            def topic(self, results):
                return results.contexts[0]['tests'][0]['stderr']

            def has_vow_err(self, topic):
                expect(topic).to_equal('vow-err\n')

    class ResultsFromContextThatPrintsWhenSysStreamsArePatched(ResultsFromContextThatExplicitlyCapturesOutput):
        def topic(self):
            # Same expectations, but via plain print() with capture_output=True,
            # which patches sys.stdout/sys.stderr for the run.
            dummySuite = {'dummySuite': set([PrintSomeStuff])}
            execution_plan = ExecutionPlanner(dummySuite, set(), set()).plan()
            runner = VowsRunner(dummySuite, Vows.Context, None, None, execution_plan, True)
            return runner.run()
class OutputSomeStuff(Vows.Context):
    # Fixture suite (not decorated with @Vows.batch, so it only runs when
    # CapturedOutputVows feeds it to a VowsRunner): every phase writes one
    # line to the runner's explicit capture streams.

    def setup(self):
        VowsParallelRunner.output.stdout.write('setup\n')
        VowsParallelRunner.output.stderr.write('setup-err\n')

    def topic(self):
        VowsParallelRunner.output.stdout.write('topic\n')
        VowsParallelRunner.output.stderr.write('topic-err\n')

    def teardown(self):
        VowsParallelRunner.output.stdout.write('teardown\n')
        VowsParallelRunner.output.stderr.write('teardown-err\n')

    def vow(self, topic):
        VowsParallelRunner.output.stdout.write('vow\n')
        VowsParallelRunner.output.stderr.write('vow-err\n')

    class OutputFromSubcontext(Vows.Context):
        # Nested context: its output must land in the child's result
        # entry, not in the parent's.
        def topic(self):
            VowsParallelRunner.output.stdout.write('subcontext-topic\n')
            VowsParallelRunner.output.stderr.write('subcontext-topic-err\n')
class PrintSomeStuff(Vows.Context):
    # Fixture suite mirroring OutputSomeStuff, but using plain print()
    # instead of the explicit capture streams; only works when the runner
    # patches sys.stdout/sys.stderr.

    def setup(self):
        print('setup')
        print('setup-err', file=sys.stderr)

    def topic(self):
        print('topic')
        print('topic-err', file=sys.stderr)

    def teardown(self):
        print('teardown')
        print('teardown-err', file=sys.stderr)

    def vow(self, topic):
        print('vow')
        print('vow-err', file=sys.stderr)

    class PrintFromSubcontext(Vows.Context):
        def topic(self):
            print('subcontext-topic')
            print('subcontext-topic-err', file=sys.stderr)
| mit | 34f9a757ab0c67dcfb7172953a00692b | 34.594828 | 111 | 0.633567 | 3.851679 | false | false | false | false |
urinieto/msaf | setup.py | 1 | 2059 | from setuptools import setup, find_packages
import glob
import imp
import numpy.distutils.misc_util
# Load the version string without importing the (not-yet-installed) package.
# NOTE(review): `imp` has been deprecated since Python 3.4 — consider
# importlib.util / importlib.machinery when modernizing.
version = imp.load_source('msaf.version', 'msaf/version.py')

# MSAF configuration
setup(
    name='msaf',
    version=version.version,
    description='Python module to discover the structure of music files',
    author='Oriol Nieto',
    author_email='oriol@nyu.edu',
    url='https://github.com/urinieto/msaf',
    download_url='https://github.com/urinieto/msaf/releases',
    packages=find_packages(),
    # Ship the pre-trained OLDA model files with the package.
    package_data={'msaf': ['algorithms/olda/models/*.npy']},
    data_files=[('msaf/algorithms/olda/models',
                 glob.glob('msaf/algorithms/olda/models/*.npy'))],
    long_description="""A python module to segment audio into all its """
    """different large-scale sections and label them based on their """
    """acoustic similarity""",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6"
    ],
    keywords='audio music sound',
    license='MIT',
    install_requires=[
        'audioread',
        'cvxopt',
        'decorator',
        'enum34',
        'future',
        'jams >= 0.3.0',
        'joblib',
        'librosa >= 0.6.0',
        'mir_eval',
        'matplotlib >= 1.5',
        'numpy >= 1.8.0',
        'pandas',
        'scikit-learn >= 0.17.0',
        'scipy >= 0.13.0',
        'seaborn',  # For notebook example (but everyone should have this :-))
        'vmo >= 0.3.3'
    ],
    # Optional: high-quality resampling backend.
    extras_require={
        'resample': 'scikits.samplerate>=0.3'
    },
    include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs()
)
| mit | b70315505eacaba6d77dcad18c7c9ed3 | 32.754098 | 78 | 0.588635 | 3.730072 | false | false | false | false |
urinieto/msaf | msaf/pymf/bnmf.py | 2 | 4326 | #!/usr/bin/python
#
# Copyright (C) Christian Thurau, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF Binary Matrix Factorization [1]
BNMF(NMF) : Class for binary matrix factorization
[1]Z. Zhang, T. Li, C. H. Q. Ding, X. Zhang: Binary Matrix Factorization with
Applications. ICDM 2007
"""
import numpy as np
from .nmf import NMF
__all__ = ["BNMF"]
class BNMF(NMF):
    """
    BNMF(data, num_bases=4)

    Binary Matrix Factorization. Factorize a data matrix into two matrices s.t.
    F = | data - W*H | is minimal. H and W are restricted to binary values.

    Parameters
    ----------
    data : array_like, shape (_data_dimension, _num_samples)
        the input data
    num_bases: int, optional
        Number of bases to compute (column rank of W and row rank of H).
        4 (default)

    Attributes
    ----------
    W : "data_dimension x num_bases" matrix of basis vectors
    H : "num bases x num_samples" matrix of coefficients
    ferr : frobenius norm (after calling .factorize())

    Example
    -------
    Applying BNMF to some rather stupid data set:

    >>> import numpy as np
    >>> from bnmf import BNMF
    >>> data = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])

    Use 2 basis vectors -> W shape(data_dimension, 2).

    >>> bnmf_mdl = BNMF(data, num_bases=2)

    Set number of iterations to 5 and start computing the factorization.

    >>> bnmf_mdl.factorize(niter=5)

    The basis vectors are now stored in bnmf_mdl.W, the coefficients in bnmf_mdl.H.
    To compute coefficients for an existing set of basis vectors simply copy W
    to bnmf_mdl.W, and set compute_w to False:

    >>> data = np.array([[0.0], [1.0]])
    >>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
    >>> bnmf_mdl = BNMF(data, num_bases=2)
    >>> bnmf_mdl.W = W
    >>> bnmf_mdl.factorize(niter=10, compute_w=False)

    The result is a set of coefficients bnmf_mdl.H, s.t. data = W * bnmf_mdl.H.
    """

    # controls how fast lambda should increase:
    # this influence convergence to binary values during the update. A value
    # <1 will result in non-binary decompositions as the update rule effectively
    # is a conventional nmf update rule. Values >1 give more weight to making the
    # factorization binary with increasing iterations.
    # setting either W or H to 0 results make the resulting matrix non binary.
    _LAMB_INCREASE_W = 1.1
    _LAMB_INCREASE_H = 1.1

    def update_h(self):
        """Multiplicative update step for H (Zhang et al., ICDM 2007).

        The lambda terms push entries of H towards {0, 1}; they vanish for
        exactly binary values. The 10**-9 in the denominator guards against
        division by zero.
        """
        H1 = np.dot(self.W.T, self.data[:,:]) + 3.0*self._lamb_H*(self.H**2)
        H2 = np.dot(np.dot(self.W.T,self.W), self.H) + 2*self._lamb_H*(self.H**3) + self._lamb_H*self.H + 10**-9
        self.H *= H1/H2

        # Increase the binarization pressure for both factors after each
        # H update (see _LAMB_INCREASE_* comment above).
        self._lamb_W = self._LAMB_INCREASE_W * self._lamb_W
        self._lamb_H = self._LAMB_INCREASE_H * self._lamb_H

    def update_w(self):
        """Multiplicative update step for W (symmetric to update_h)."""
        W1 = np.dot(self.data[:,:], self.H.T) + 3.0*self._lamb_W*(self.W**2)
        W2 = np.dot(self.W, np.dot(self.H, self.H.T)) + 2.0*self._lamb_W*(self.W**3) + self._lamb_W*self.W + 10**-9
        self.W *= W1/W2

    def factorize(self, niter=10, compute_w=True, compute_h=True,
                  show_progress=False, compute_err=True):
        """ Factorize s.t. WH = data

        Parameters
        ----------
        niter : int
                number of iterations.
        show_progress : bool
                print some extra information to stdout.
        compute_h : bool
                iteratively update values for H.
        compute_w : bool
                iteratively update values for W.
        compute_err : bool
                compute Frobenius norm |data-WH| after each update and store
                it to .ferr[k].

        Updated Values
        --------------
        .W : updated values for W.
        .H : updated values for H.
        .ferr : Frobenius norm |data-WH| for each iteration.
        """
        # init some learning parameters: start with weak binarization
        # pressure; it grows by _LAMB_INCREASE_* on every H update.
        self._lamb_W = 1.0/niter
        self._lamb_H = 1.0/niter

        # Delegate the iteration loop to the NMF base class, which calls
        # our update_w/update_h overrides.
        NMF.factorize(self, niter=niter, compute_w=compute_w,
                      compute_h=compute_h, show_progress=show_progress,
                      compute_err=compute_err)
if __name__ == "__main__":
    # Run the usage examples embedded in the docstrings as doctests.
    import doctest
    doctest.testmod()
| mit | dd073ea577e9e7e512b4d60b6354e851 | 33.062992 | 116 | 0.58368 | 3.255079 | false | false | false | false |
urinieto/msaf | examples/compute_features.py | 1 | 3574 | #!/usr/bin/env python
"""
This script computes the features for a single or multiple audio files.
It uses the default parameters. These parameters can be changed the
`.msafrc` config file.
Examples:
Single file mode:
>> ./compute_features.py path_to_audio.mp3 -o my_features.json
Collection mode:
Run on 12 cores on the specified dataset:
>> ./compute_features.py path_to_dataset/ -j 12
"""
import argparse
from joblib import Parallel, delayed
import logging
import os
import time
# Local stuff
import msaf
from msaf.features import Features
def compute_all_features(file_struct, framesync):
    """Computes all features for the given file.

    Parameters
    ----------
    file_struct : msaf.io.FileStruct
        File paths (audio, features, ...) of the track to process.
    framesync : bool
        If True, compute frame-synchronous features (instead of
        beat-synchronous ones).
    """
    # Iterate over every feature type registered with msaf (pcp, mfcc, ...).
    for feature_id in msaf.features_registry:
        logging.info("Computing %s for file %s" % (feature_id,
                                                   file_struct.audio_file))
        # Accessing `.features` triggers the actual computation; the value
        # is discarded here — presumably the Features object persists the
        # result to file_struct's features file (TODO confirm in msaf).
        feats = Features.select_features(feature_id, file_struct, False, framesync)
        feats.features
def process(in_path, out_file, n_jobs, framesync):
    """Computes the features for the selected dataset or file.

    If ``in_path`` is a file, processes it alone (single file mode) and
    returns None; otherwise treats ``in_path`` as a dataset directory and
    processes every track in parallel, returning the list produced by
    joblib.Parallel.
    """
    if os.path.isfile(in_path):
        # Single file mode
        # Get (if they exist) or compute the features, writing them to the
        # explicitly requested output file.
        file_struct = msaf.io.FileStruct(in_path)
        file_struct.features_file = out_file
        compute_all_features(file_struct, framesync)
    else:
        # Collection mode: `out_file` is ignored; each track uses its
        # dataset-relative features path.
        file_structs = msaf.io.get_dataset_files(in_path)

        # Call in parallel
        return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)(
            file_struct, framesync) for file_struct in file_structs)
def main():
    """Main function to parse the arguments and call the main process."""
    parser = argparse.ArgumentParser(
        description="Extracts a set of features from a given dataset "
        "or audio file and saves them into the 'features' folder of "
        "the dataset or the specified single file.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("in_path",
                        action="store",
                        help="Input dataset dir or audio file")
    # Only meaningful in collection mode (parallel extraction).
    parser.add_argument("-j",
                        action="store",
                        dest="n_jobs",
                        type=int,
                        help="Number of jobs (only for collection mode)",
                        default=4)
    # Only meaningful in single file mode.
    parser.add_argument("-o",
                        action="store",
                        dest="out_file",
                        type=str,
                        help="Output file (only for single file mode)",
                        default="out.json")
    # NOTE(review): `ds_name` is parsed but never forwarded to process()
    # below — confirm whether dataset filtering was meant to be wired in.
    parser.add_argument("-d",
                        action="store",
                        dest="ds_name",
                        default="*",
                        help="The prefix of the dataset to use "
                        "(e.g. Isophonics, SALAMI)")
    parser.add_argument("-fs",
                        action="store_true",
                        dest="framesync",
                        help="Use frame-synchronous features",
                        default=False)
    args = parser.parse_args()
    start_time = time.time()

    # Setup the logger
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
                        level=logging.INFO)

    # Run the main process
    process(args.in_path, out_file=args.out_file, n_jobs=args.n_jobs, framesync=args.framesync)

    # Done!
    logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
if __name__ == '__main__':
    # Script entry point.
    main()
| mit | d7b1df74881008dd5ad0dd66f5326b23 | 34.386139 | 95 | 0.570229 | 4.249703 | false | false | false | false |
fonttools/fonttools | Tests/voltLib/parser_test.py | 3 | 47859 | from fontTools.voltLib import ast
from fontTools.voltLib.error import VoltLibError
from fontTools.voltLib.parser import Parser
from io import StringIO
import unittest
class ParserTest(unittest.TestCase):
    def __init__(self, methodName):
        """Set up the test case and normalize assertRaisesRegex's name."""
        unittest.TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        # On Python 2 (where only the old spelling exists) alias the new
        # name so the test bodies can use assertRaisesRegex everywhere.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp
def assertSubEqual(self, sub, glyph_ref, replacement_ref):
glyphs = [[g.glyph for g in v] for v in sub.mapping.keys()]
replacement = [[g.glyph for g in v] for v in sub.mapping.values()]
self.assertEqual(glyphs, glyph_ref)
self.assertEqual(replacement, replacement_ref)
def test_def_glyph_base(self):
[def_glyph] = self.parse(
'DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
(".notdef", 0, None, "BASE", None))
def test_def_glyph_base_with_unicode(self):
[def_glyph] = self.parse(
'DEF_GLYPH "space" ID 3 UNICODE 32 TYPE BASE END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("space", 3, [0x0020], "BASE", None))
def test_def_glyph_base_with_unicodevalues(self):
[def_glyph] = self.parse_(
'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009" '
'TYPE BASE END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("CR", 2, [0x0009], "BASE", None))
def test_def_glyph_base_with_mult_unicodevalues(self):
[def_glyph] = self.parse(
'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009,U+000D" '
'TYPE BASE END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("CR", 2, [0x0009, 0x000D], "BASE", None))
def test_def_glyph_base_with_empty_unicodevalues(self):
[def_glyph] = self.parse_(
'DEF_GLYPH "i.locl" ID 269 UNICODEVALUES "" '
'TYPE BASE END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("i.locl", 269, None, "BASE", None))
def test_def_glyph_base_2_components(self):
[def_glyph] = self.parse(
'DEF_GLYPH "glyphBase" ID 320 TYPE BASE COMPONENTS 2 END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("glyphBase", 320, None, "BASE", 2))
def test_def_glyph_ligature_2_components(self):
[def_glyph] = self.parse(
'DEF_GLYPH "f_f" ID 320 TYPE LIGATURE COMPONENTS 2 END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("f_f", 320, None, "LIGATURE", 2))
def test_def_glyph_mark(self):
[def_glyph] = self.parse(
'DEF_GLYPH "brevecomb" ID 320 TYPE MARK END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("brevecomb", 320, None, "MARK", None))
def test_def_glyph_component(self):
[def_glyph] = self.parse(
'DEF_GLYPH "f.f_f" ID 320 TYPE COMPONENT END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("f.f_f", 320, None, "COMPONENT", None))
def test_def_glyph_no_type(self):
[def_glyph] = self.parse(
'DEF_GLYPH "glyph20" ID 20 END_GLYPH'
).statements
self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
def_glyph.type, def_glyph.components),
("glyph20", 20, None, None, None))
def test_def_glyph_case_sensitive(self):
def_glyphs = self.parse(
'DEF_GLYPH "A" ID 3 UNICODE 65 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "a" ID 4 UNICODE 97 TYPE BASE END_GLYPH'
).statements
self.assertEqual((def_glyphs[0].name, def_glyphs[0].id,
def_glyphs[0].unicode, def_glyphs[0].type,
def_glyphs[0].components),
("A", 3, [0x41], "BASE", None))
self.assertEqual((def_glyphs[1].name, def_glyphs[1].id,
def_glyphs[1].unicode, def_glyphs[1].type,
def_glyphs[1].components),
("a", 4, [0x61], "BASE", None))
def test_def_group_glyphs(self):
[def_group] = self.parse(
'DEF_GROUP "aaccented"\n'
' ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" '
'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" '
'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n'
'END_GROUP'
).statements
self.assertEqual((def_group.name, def_group.enum.glyphSet()),
("aaccented",
("aacute", "abreve", "acircumflex", "adieresis",
"ae", "agrave", "amacron", "aogonek", "aring",
"atilde")))
def test_def_group_groups(self):
[group1, group2, test_group] = self.parse(
'DEF_GROUP "Group1"\n'
' ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "Group2"\n'
' ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "TestGroup"\n'
' ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
'END_GROUP'
).statements
groups = [g.group for g in test_group.enum.enum]
self.assertEqual((test_group.name, groups),
("TestGroup", ["Group1", "Group2"]))
def test_def_group_groups_not_yet_defined(self):
[group1, test_group1, test_group2, test_group3, group2] = \
self.parse(
'DEF_GROUP "Group1"\n'
' ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "TestGroup1"\n'
' ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "TestGroup2"\n'
' ENUM GROUP "Group2" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "TestGroup3"\n'
' ENUM GROUP "Group2" GROUP "Group1" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "Group2"\n'
' ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n'
'END_GROUP'
).statements
groups = [g.group for g in test_group1.enum.enum]
self.assertEqual(
(test_group1.name, groups),
("TestGroup1", ["Group1", "Group2"]))
groups = [g.group for g in test_group2.enum.enum]
self.assertEqual(
(test_group2.name, groups),
("TestGroup2", ["Group2"]))
groups = [g.group for g in test_group3.enum.enum]
self.assertEqual(
(test_group3.name, groups),
("TestGroup3", ["Group2", "Group1"]))
# def test_def_group_groups_undefined(self):
# with self.assertRaisesRegex(
# VoltLibError,
# r'Group "Group2" is used but undefined.'):
# [group1, test_group, group2] = self.parse(
# 'DEF_GROUP "Group1"\n'
# 'ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n'
# 'END_GROUP\n'
# 'DEF_GROUP "TestGroup"\n'
# 'ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n'
# 'END_GROUP\n'
# ).statements
def test_def_group_glyphs_and_group(self):
[def_group1, def_group2] = self.parse(
'DEF_GROUP "aaccented"\n'
' ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" '
'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" '
'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "KERN_lc_a_2ND"\n'
' ENUM GLYPH "a" GROUP "aaccented" END_ENUM\n'
'END_GROUP'
).statements
items = def_group2.enum.enum
self.assertEqual((def_group2.name, items[0].glyphSet(), items[1].group),
("KERN_lc_a_2ND", ("a",), "aaccented"))
def test_def_group_range(self):
def_group = self.parse(
'DEF_GLYPH "a" ID 163 UNICODE 97 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "agrave" ID 194 UNICODE 224 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "aacute" ID 195 UNICODE 225 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "acircumflex" ID 196 UNICODE 226 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "atilde" ID 197 UNICODE 227 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "c" ID 165 UNICODE 99 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "ccaron" ID 209 UNICODE 269 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "ccedilla" ID 210 UNICODE 231 TYPE BASE END_GLYPH\n'
'DEF_GLYPH "cdotaccent" ID 210 UNICODE 267 TYPE BASE END_GLYPH\n'
'DEF_GROUP "KERN_lc_a_2ND"\n'
' ENUM RANGE "a" TO "atilde" GLYPH "b" RANGE "c" TO "cdotaccent" '
'END_ENUM\n'
'END_GROUP'
).statements[-1]
self.assertEqual((def_group.name, def_group.enum.glyphSet()),
("KERN_lc_a_2ND",
("a", "agrave", "aacute", "acircumflex", "atilde",
"b", "c", "ccaron", "ccedilla", "cdotaccent")))
def test_group_duplicate(self):
self.assertRaisesRegex(
VoltLibError,
'Glyph group "dupe" already defined, '
'group names are case insensitive',
self.parse, 'DEF_GROUP "dupe"\n'
'ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "dupe"\n'
'ENUM GLYPH "x" END_ENUM\n'
'END_GROUP'
)
def test_group_duplicate_case_insensitive(self):
self.assertRaisesRegex(
VoltLibError,
'Glyph group "Dupe" already defined, '
'group names are case insensitive',
self.parse, 'DEF_GROUP "dupe"\n'
'ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
'END_GROUP\n'
'DEF_GROUP "Dupe"\n'
'ENUM GLYPH "x" END_ENUM\n'
'END_GROUP'
)
def test_script_without_langsys(self):
[script] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'END_SCRIPT'
).statements
self.assertEqual((script.name, script.tag, script.langs),
("Latin", "latn", []))
def test_langsys_normal(self):
[def_script] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n\n'
'END_LANGSYS\n'
'DEF_LANGSYS NAME "Moldavian" TAG "MOL "\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
self.assertEqual((def_script.name, def_script.tag),
("Latin",
"latn"))
def_lang = def_script.langs[0]
self.assertEqual((def_lang.name, def_lang.tag),
("Romanian",
"ROM "))
def_lang = def_script.langs[1]
self.assertEqual((def_lang.name, def_lang.tag),
("Moldavian",
"MOL "))
def test_langsys_no_script_name(self):
[langsys] = self.parse(
'DEF_SCRIPT TAG "latn"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
self.assertEqual((langsys.name, langsys.tag),
(None,
"latn"))
lang = langsys.langs[0]
self.assertEqual((lang.name, lang.tag),
("Default",
"dflt"))
def test_langsys_no_script_tag_fails(self):
with self.assertRaisesRegex(
VoltLibError,
r'.*Expected "TAG"'):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Latin"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
def test_langsys_duplicate_script(self):
with self.assertRaisesRegex(
VoltLibError,
'Script "DFLT" already defined, '
'script tags are case insensitive'):
[langsys1, langsys2] = self.parse(
'DEF_SCRIPT NAME "Default" TAG "DFLT"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
'END_LANGSYS\n'
'END_SCRIPT\n'
'DEF_SCRIPT TAG "DFLT"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
def test_langsys_duplicate_lang(self):
with self.assertRaisesRegex(
VoltLibError,
'Language "dflt" already defined in script "DFLT", '
'language tags are case insensitive'):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Default" TAG "DFLT"\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
'END_LANGSYS\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
def test_langsys_lang_in_separate_scripts(self):
[langsys1, langsys2] = self.parse(
'DEF_SCRIPT NAME "Default" TAG "DFLT"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
'END_LANGSYS\n'
'DEF_LANGSYS NAME "Default" TAG "ROM "\n\n'
'END_LANGSYS\n'
'END_SCRIPT\n'
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Default" TAG "dflt"\n\n'
'END_LANGSYS\n'
'DEF_LANGSYS NAME "Default" TAG "ROM "\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
self.assertEqual((langsys1.langs[0].tag, langsys1.langs[1].tag),
("dflt", "ROM "))
self.assertEqual((langsys2.langs[0].tag, langsys2.langs[1].tag),
("dflt", "ROM "))
def test_langsys_no_lang_name(self):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS TAG "dflt"\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
self.assertEqual((langsys.name, langsys.tag),
("Latin",
"latn"))
lang = langsys.langs[0]
self.assertEqual((lang.name, lang.tag),
(None,
"dflt"))
def test_langsys_no_langsys_tag_fails(self):
with self.assertRaisesRegex(
VoltLibError,
r'.*Expected "TAG"'):
[langsys] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Default"\n\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
def test_feature(self):
[def_script] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n\n'
'DEF_FEATURE NAME "Fractions" TAG "frac"\n'
' LOOKUP "fraclookup"\n'
'END_FEATURE\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
def_feature = def_script.langs[0].features[0]
self.assertEqual((def_feature.name, def_feature.tag,
def_feature.lookups),
("Fractions",
"frac",
["fraclookup"]))
[def_script] = self.parse(
'DEF_SCRIPT NAME "Latin" TAG "latn"\n\n'
'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n\n'
'DEF_FEATURE NAME "Kerning" TAG "kern"\n'
' LOOKUP "kern1" LOOKUP "kern2"\n'
'END_FEATURE\n'
'END_LANGSYS\n'
'END_SCRIPT'
).statements
def_feature = def_script.langs[0].features[0]
self.assertEqual((def_feature.name, def_feature.tag,
def_feature.lookups),
("Kerning",
"kern",
["kern1", "kern2"]))
def test_lookup_duplicate(self):
with self.assertRaisesRegex(
VoltLibError,
'Lookup "dupe" already defined, '
'lookup names are case insensitive',
):
[lookup1, lookup2] = self.parse(
'DEF_LOOKUP "dupe"\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
'END_SUB\n'
'END_SUBSTITUTION\n'
'DEF_LOOKUP "dupe"\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "b"\n'
'WITH GLYPH "b.alt"\n'
'END_SUB\n'
'END_SUBSTITUTION\n'
).statements
def test_lookup_duplicate_insensitive_case(self):
with self.assertRaisesRegex(
VoltLibError,
'Lookup "Dupe" already defined, '
'lookup names are case insensitive',
):
[lookup1, lookup2] = self.parse(
'DEF_LOOKUP "dupe"\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
'END_SUB\n'
'END_SUBSTITUTION\n'
'DEF_LOOKUP "Dupe"\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "b"\n'
'WITH GLYPH "b.alt"\n'
'END_SUB\n'
'END_SUBSTITUTION\n'
).statements
def test_lookup_name_starts_with_letter(self):
with self.assertRaisesRegex(
VoltLibError,
r'Lookup name "\\lookupname" must start with a letter'
):
[lookup] = self.parse(
'DEF_LOOKUP "\\lookupname"\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "a"\n'
'WITH GLYPH "a.alt"\n'
'END_SUB\n'
'END_SUBSTITUTION\n'
).statements
def test_lookup_comments(self):
[lookup] = self.parse(
'DEF_LOOKUP "test" PROCESS_BASE PROCESS_MARKS ALL DIRECTION LTR\n'
'COMMENTS "Hello\\nWorld"\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "a"\n'
'WITH GLYPH "b"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(lookup.name, "test")
self.assertEqual(lookup.comments, "Hello\nWorld")
def test_substitution_empty(self):
with self.assertRaisesRegex(
VoltLibError,
r'Expected SUB'):
[lookup] = self.parse(
'DEF_LOOKUP "empty_substitution" PROCESS_BASE PROCESS_MARKS '
'ALL DIRECTION LTR\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'END_SUBSTITUTION'
).statements
def test_substitution_invalid_many_to_many(self):
with self.assertRaisesRegex(
VoltLibError,
r'Invalid substitution type'):
[lookup] = self.parse(
'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS '
'ALL DIRECTION LTR\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "f" GLYPH "i"\n'
'WITH GLYPH "f.alt" GLYPH "i.alt"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
def test_substitution_invalid_reverse_chaining_single(self):
with self.assertRaisesRegex(
VoltLibError,
r'Invalid substitution type'):
[lookup] = self.parse(
'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS '
'ALL DIRECTION LTR REVERSAL\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "f" GLYPH "i"\n'
'WITH GLYPH "f_i"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
def test_substitution_invalid_mixed(self):
with self.assertRaisesRegex(
VoltLibError,
r'Invalid substitution type'):
[lookup] = self.parse(
'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS '
'ALL DIRECTION LTR\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "fi"\n'
'WITH GLYPH "f" GLYPH "i"\n'
'END_SUB\n'
'SUB GLYPH "f" GLYPH "l"\n'
'WITH GLYPH "f_l"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
def test_substitution_single(self):
[lookup] = self.parse(
'DEF_LOOKUP "smcp" PROCESS_BASE PROCESS_MARKS ALL '
'DIRECTION LTR\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "a"\n'
'WITH GLYPH "a.sc"\n'
'END_SUB\n'
'SUB GLYPH "b"\n'
'WITH GLYPH "b.sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(lookup.name, "smcp")
self.assertSubEqual(lookup.sub, [["a"], ["b"]], [["a.sc"], ["b.sc"]])
def test_substitution_single_in_context(self):
[group, lookup] = self.parse(
'DEF_GROUP "Denominators"\n'
' ENUM GLYPH "one.dnom" GLYPH "two.dnom" END_ENUM\n'
'END_GROUP\n'
'DEF_LOOKUP "fracdnom" PROCESS_BASE PROCESS_MARKS ALL '
'DIRECTION LTR\n'
'IN_CONTEXT\n'
' LEFT ENUM GROUP "Denominators" GLYPH "fraction" END_ENUM\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "one"\n'
'WITH GLYPH "one.dnom"\n'
'END_SUB\n'
'SUB GLYPH "two"\n'
'WITH GLYPH "two.dnom"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
context = lookup.context[0]
self.assertEqual(lookup.name, "fracdnom")
self.assertEqual(context.ex_or_in, "IN_CONTEXT")
self.assertEqual(len(context.left), 1)
self.assertEqual(len(context.left[0]), 1)
self.assertEqual(len(context.left[0][0].enum), 2)
self.assertEqual(context.left[0][0].enum[0].group, "Denominators")
self.assertEqual(context.left[0][0].enum[1].glyph, "fraction")
self.assertEqual(context.right, [])
self.assertSubEqual(lookup.sub, [["one"], ["two"]],
[["one.dnom"], ["two.dnom"]])
def test_substitution_single_in_contexts(self):
[group, lookup] = self.parse(
'DEF_GROUP "Hebrew"\n'
' ENUM GLYPH "uni05D0" GLYPH "uni05D1" END_ENUM\n'
'END_GROUP\n'
'DEF_LOOKUP "HebrewCurrency" PROCESS_BASE PROCESS_MARKS ALL '
'DIRECTION LTR\n'
'IN_CONTEXT\n'
' RIGHT GROUP "Hebrew"\n'
' RIGHT GLYPH "one.Hebr"\n'
'END_CONTEXT\n'
'IN_CONTEXT\n'
' LEFT GROUP "Hebrew"\n'
' LEFT GLYPH "one.Hebr"\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "dollar"\n'
'WITH GLYPH "dollar.Hebr"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
context1 = lookup.context[0]
context2 = lookup.context[1]
self.assertEqual(lookup.name, "HebrewCurrency")
self.assertEqual(context1.ex_or_in, "IN_CONTEXT")
self.assertEqual(context1.left, [])
self.assertEqual(len(context1.right), 2)
self.assertEqual(len(context1.right[0]), 1)
self.assertEqual(len(context1.right[1]), 1)
self.assertEqual(context1.right[0][0].group, "Hebrew")
self.assertEqual(context1.right[1][0].glyph, "one.Hebr")
self.assertEqual(context2.ex_or_in, "IN_CONTEXT")
self.assertEqual(len(context2.left), 2)
self.assertEqual(len(context2.left[0]), 1)
self.assertEqual(len(context2.left[1]), 1)
self.assertEqual(context2.left[0][0].group, "Hebrew")
self.assertEqual(context2.left[1][0].glyph, "one.Hebr")
self.assertEqual(context2.right, [])
def test_substitution_skip_base(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
'END_GROUP\n'
'DEF_LOOKUP "SomeSub" SKIP_BASE PROCESS_MARKS ALL '
'DIRECTION LTR\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(
(lookup.name, lookup.process_base),
("SomeSub", False))
def test_substitution_process_base(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
'END_GROUP\n'
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL '
'DIRECTION LTR\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(
(lookup.name, lookup.process_base),
("SomeSub", True))
def test_substitution_process_marks(self):
[group, lookup] = self.parse(
'DEF_GROUP "SomeMarks"\n'
' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
'END_GROUP\n'
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS "SomeMarks"\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(
(lookup.name, lookup.process_marks),
("SomeSub", 'SomeMarks'))
def test_substitution_process_marks_all(self):
[lookup] = self.parse(
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(
(lookup.name, lookup.process_marks),
("SomeSub", True))
def test_substitution_process_marks_none(self):
[lookup] = self.parse_(
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS "NONE"\n'
'IN_CONTEXT\n'
'END_CONTEXT\n'
'AS_SUBSTITUTION\n'
'SUB GLYPH "A"\n'
'WITH GLYPH "A.c2sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
).statements
self.assertEqual(
(lookup.name, lookup.process_marks),
("SomeSub", False))
def test_substitution_process_marks_bad(self):
with self.assertRaisesRegex(
VoltLibError,
'Expected ALL, NONE, MARK_GLYPH_SET or an ID'):
self.parse(
'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" '
'END_ENUM END_GROUP\n'
'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS SomeMarks '
'AS_SUBSTITUTION\n'
'SUB GLYPH "A" WITH GLYPH "A.c2sc"\n'
'END_SUB\n'
'END_SUBSTITUTION'
)
def test_substitution_skip_marks(self):
    # SKIP_MARKS must be parsed into process_marks == False.
    [group, lookup] = self.parse(
        'DEF_GROUP "SomeMarks"\n'
        ' ENUM GLYPH "marka" GLYPH "markb" END_ENUM\n'
        'END_GROUP\n'
        'DEF_LOOKUP "SomeSub" PROCESS_BASE SKIP_MARKS DIRECTION LTR\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "A"\n'
        'WITH GLYPH "A.c2sc"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(
        (lookup.name, lookup.process_marks),
        ("SomeSub", False))
def test_substitution_mark_attachment(self):
    # PROCESS_MARKS with a quoted group name stores the group name as the
    # mark-attachment filter on process_marks.
    [group, lookup] = self.parse(
        'DEF_GROUP "SomeMarks"\n'
        ' ENUM GLYPH "acutecmb" GLYPH "gravecmb" END_ENUM\n'
        'END_GROUP\n'
        'DEF_LOOKUP "SomeSub" PROCESS_BASE '
        'PROCESS_MARKS "SomeMarks" DIRECTION RTL\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "A"\n'
        'WITH GLYPH "A.c2sc"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(
        (lookup.name, lookup.process_marks),
        ("SomeSub", "SomeMarks"))
def test_substitution_mark_glyph_set(self):
    # PROCESS_MARKS MARK_GLYPH_SET stores the group name on the separate
    # mark_glyph_set attribute (not on process_marks).
    [group, lookup] = self.parse(
        'DEF_GROUP "SomeMarks"\n'
        ' ENUM GLYPH "acutecmb" GLYPH "gravecmb" END_ENUM\n'
        'END_GROUP\n'
        'DEF_LOOKUP "SomeSub" PROCESS_BASE '
        'PROCESS_MARKS MARK_GLYPH_SET "SomeMarks" DIRECTION RTL\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "A"\n'
        'WITH GLYPH "A.c2sc"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(
        (lookup.name, lookup.mark_glyph_set),
        ("SomeSub", "SomeMarks"))
def test_substitution_process_all_marks(self):
    # PROCESS_MARKS ALL with an (unused) mark group present still parses
    # into process_marks == True.
    [group, lookup] = self.parse(
        'DEF_GROUP "SomeMarks"\n'
        ' ENUM GLYPH "acutecmb" GLYPH "gravecmb" END_ENUM\n'
        'END_GROUP\n'
        'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION RTL\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "A"\n'
        'WITH GLYPH "A.c2sc"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(
        (lookup.name, lookup.process_marks),
        ("SomeSub", True))
def test_substitution_no_reversal(self):
    # Without the REVERSAL keyword, lookup.reversal stays None even when
    # a right context is present.
    # TODO: check right context with no reversal
    [lookup] = self.parse(
        'DEF_LOOKUP "Lookup" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION LTR\n'
        'IN_CONTEXT\n'
        ' RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "a"\n'
        'WITH GLYPH "a.alt"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(
        (lookup.name, lookup.reversal),
        ("Lookup", None)
    )
def test_substitution_reversal(self):
    # The REVERSAL keyword must set lookup.reversal to True.
    # Only the last statement (the lookup) is inspected; the two groups
    # merely provide the referenced glyph classes.
    lookup = self.parse(
        'DEF_GROUP "DFLT_Num_standardFigures"\n'
        ' ENUM GLYPH "zero" GLYPH "one" GLYPH "two" END_ENUM\n'
        'END_GROUP\n'
        'DEF_GROUP "DFLT_Num_numerators"\n'
        ' ENUM GLYPH "zero.numr" GLYPH "one.numr" GLYPH "two.numr" END_ENUM\n'
        'END_GROUP\n'
        'DEF_LOOKUP "RevLookup" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION LTR REVERSAL\n'
        'IN_CONTEXT\n'
        ' RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GROUP "DFLT_Num_standardFigures"\n'
        'WITH GROUP "DFLT_Num_numerators"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements[-1]
    self.assertEqual(
        (lookup.name, lookup.reversal),
        ("RevLookup", True)
    )
def test_substitution_single_to_multiple(self):
    # One-to-many substitution (glyph decomposition), e.g. for ccmp.
    [lookup] = self.parse(
        'DEF_LOOKUP "ccmp" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION LTR\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "aacute"\n'
        'WITH GLYPH "a" GLYPH "acutecomb"\n'
        'END_SUB\n'
        'SUB GLYPH "agrave"\n'
        'WITH GLYPH "a" GLYPH "gravecomb"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(lookup.name, "ccmp")
    self.assertSubEqual(lookup.sub, [["aacute"], ["agrave"]],
                        [["a", "acutecomb"], ["a", "gravecomb"]])
def test_substitution_multiple_to_single(self):
    # Many-to-one substitution (ligature formation), e.g. for liga.
    [lookup] = self.parse(
        'DEF_LOOKUP "liga" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION LTR\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB GLYPH "f" GLYPH "i"\n'
        'WITH GLYPH "f_i"\n'
        'END_SUB\n'
        'SUB GLYPH "f" GLYPH "t"\n'
        'WITH GLYPH "f_t"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    self.assertEqual(lookup.name, "liga")
    self.assertSubEqual(lookup.sub, [["f", "i"], ["f", "t"]],
                        [["f_i"], ["f_t"]])
def test_substitution_reverse_chaining_single(self):
    # Reverse chaining single substitution using glyph RANGEs on both
    # sides, with an ENUM (glyph + range) as the right context.
    [lookup] = self.parse(
        'DEF_LOOKUP "numr" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION LTR REVERSAL\n'
        'IN_CONTEXT\n'
        ' RIGHT ENUM '
        'GLYPH "fraction" '
        'RANGE "zero.numr" TO "nine.numr" '
        'END_ENUM\n'
        'END_CONTEXT\n'
        'AS_SUBSTITUTION\n'
        'SUB RANGE "zero" TO "nine"\n'
        'WITH RANGE "zero.numr" TO "nine.numr"\n'
        'END_SUB\n'
        'END_SUBSTITUTION'
    ).statements
    # The mapping keys/values are sequences of Range objects; compare
    # their (start, end) pairs.
    mapping = lookup.sub.mapping
    glyphs = [[(r.start, r.end) for r in v] for v in mapping.keys()]
    replacement = [[(r.start, r.end) for r in v] for v in mapping.values()]
    self.assertEqual(lookup.name, "numr")
    self.assertEqual(glyphs, [[('zero', 'nine')]])
    self.assertEqual(replacement, [[('zero.numr', 'nine.numr')]])
    # Check the right context: one position holding one enum with a
    # plain glyph followed by a range.
    self.assertEqual(len(lookup.context[0].right), 1)
    self.assertEqual(len(lookup.context[0].right[0]), 1)
    enum = lookup.context[0].right[0][0]
    self.assertEqual(len(enum.enum), 2)
    self.assertEqual(enum.enum[0].glyph, "fraction")
    self.assertEqual((enum.enum[1].start, enum.enum[1].end),
                     ('zero.numr', 'nine.numr'))
# GPOS
# ATTACH_CURSIVE
# ATTACH
# ADJUST_PAIR
# ADJUST_SINGLE
def test_position_empty(self):
    # An AS_POSITION block with no positioning rule must be rejected.
    # Note: the parse() call always raises here, so its result is never
    # produced; the previous `[lookup] = ...` unpacking was dead code and
    # has been removed.
    with self.assertRaisesRegex(
            VoltLibError,
            'Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE'):
        self.parse(
            'DEF_LOOKUP "empty_position" PROCESS_BASE PROCESS_MARKS ALL '
            'DIRECTION LTR\n'
            'EXCEPT_CONTEXT\n'
            ' LEFT GLYPH "glyph"\n'
            'END_CONTEXT\n'
            'AS_POSITION\n'
            'END_POSITION'
        )
def test_position_attach(self):
    # ATTACH (mark-to-base positioning): verifies the lookup's coverage,
    # the per-anchor coverage_to lists, and the four DEF_ANCHOR records.
    [lookup, anchor1, anchor2, anchor3, anchor4] = self.parse(
        'DEF_LOOKUP "anchor_top" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION RTL\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_POSITION\n'
        'ATTACH GLYPH "a" GLYPH "e"\n'
        'TO GLYPH "acutecomb" AT ANCHOR "top" '
        'GLYPH "gravecomb" AT ANCHOR "top"\n'
        'END_ATTACH\n'
        'END_POSITION\n'
        'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb COMPONENT 1 '
        'AT POS DX 0 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "MARK_top" ON 121 GLYPH gravecomb COMPONENT 1 '
        'AT POS DX 0 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "top" ON 31 GLYPH a COMPONENT 1 '
        'AT POS DX 210 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "top" ON 35 GLYPH e COMPONENT 1 '
        'AT POS DX 215 DY 450 END_POS END_ANCHOR'
    ).statements
    pos = lookup.pos
    coverage = [g.glyph for g in pos.coverage]
    coverage_to = [[[g.glyph for g in e], a] for (e, a) in pos.coverage_to]
    self.assertEqual(
        (lookup.name, coverage, coverage_to),
        ("anchor_top", ["a", "e"],
         [[["acutecomb"], "top"], [["gravecomb"], "top"]])
    )
    # Anchor pos tuples are (adv, dx, dy, adv_device, dx_device, dy_device).
    self.assertEqual(
        (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component,
         anchor1.locked, anchor1.pos),
        ("MARK_top", 120, "acutecomb", 1, False, (None, 0, 450, {}, {},
                                                  {}))
    )
    self.assertEqual(
        (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component,
         anchor2.locked, anchor2.pos),
        ("MARK_top", 121, "gravecomb", 1, False, (None, 0, 450, {}, {},
                                                  {}))
    )
    self.assertEqual(
        (anchor3.name, anchor3.gid, anchor3.glyph_name, anchor3.component,
         anchor3.locked, anchor3.pos),
        ("top", 31, "a", 1, False, (None, 210, 450, {}, {}, {}))
    )
    self.assertEqual(
        (anchor4.name, anchor4.gid, anchor4.glyph_name, anchor4.component,
         anchor4.locked, anchor4.pos),
        ("top", 35, "e", 1, False, (None, 215, 450, {}, {}, {}))
    )
def test_position_attach_cursive(self):
    # ATTACH_CURSIVE: exit/enter coverages are lists of glyph lists.
    [lookup] = self.parse(
        'DEF_LOOKUP "SomeLookup" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION RTL\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_POSITION\n'
        'ATTACH_CURSIVE\nEXIT GLYPH "a" GLYPH "b"\nENTER GLYPH "c"\n'
        'END_ATTACH\n'
        'END_POSITION'
    ).statements
    exit = [[g.glyph for g in v] for v in lookup.pos.coverages_exit]
    enter = [[g.glyph for g in v] for v in lookup.pos.coverages_enter]
    self.assertEqual(
        (lookup.name, exit, enter),
        ("SomeLookup", [["a", "b"]], [["c"]])
    )
def test_position_adjust_pair(self):
    # ADJUST_PAIR (pair kerning): adjust_pair maps (first_index,
    # second_index) to a pair of value records; each record is
    # (adv, dx, dy, adv_device, dx_device, dy_device).
    [lookup] = self.parse(
        'DEF_LOOKUP "kern1" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION RTL\n'
        'IN_CONTEXT\n'
        'END_CONTEXT\n'
        'AS_POSITION\n'
        'ADJUST_PAIR\n'
        ' FIRST GLYPH "A"\n'
        ' SECOND GLYPH "V"\n'
        ' 1 2 BY POS ADV -30 END_POS POS END_POS\n'
        ' 2 1 BY POS ADV -30 END_POS POS END_POS\n\n'
        'END_ADJUST\n'
        'END_POSITION'
    ).statements
    coverages_1 = [[g.glyph for g in v] for v in lookup.pos.coverages_1]
    coverages_2 = [[g.glyph for g in v] for v in lookup.pos.coverages_2]
    self.assertEqual(
        (lookup.name, coverages_1, coverages_2,
         lookup.pos.adjust_pair),
        ("kern1", [["A"]], [["V"]],
         {(1, 2): ((-30, None, None, {}, {}, {}),
                   (None, None, None, {}, {}, {})),
          (2, 1): ((-30, None, None, {}, {}, {}),
                   (None, None, None, {}, {}, {}))})
    )
def test_position_adjust_single(self):
    # ADJUST_SINGLE: adjust_single is a list of (coverage, value-record)
    # pairs; value records are (adv, dx, dy, adv_device, dx_device,
    # dy_device).
    [lookup] = self.parse(
        'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL '
        'DIRECTION LTR\n'
        'IN_CONTEXT\n'
        # ' LEFT GLYPH "leftGlyph"\n'
        # ' RIGHT GLYPH "rightGlyph"\n'
        'END_CONTEXT\n'
        'AS_POSITION\n'
        'ADJUST_SINGLE'
        ' GLYPH "glyph1" BY POS ADV 0 DX 123 END_POS'
        ' GLYPH "glyph2" BY POS ADV 0 DX 456 END_POS\n'
        'END_ADJUST\n'
        'END_POSITION'
    ).statements
    pos = lookup.pos
    adjust = [[[g.glyph for g in a], b] for (a, b) in pos.adjust_single]
    self.assertEqual(
        (lookup.name, adjust),
        ("TestLookup",
         [[["glyph1"], (0, 123, None, {}, {}, {})],
          [["glyph2"], (0, 456, None, {}, {}, {})]])
    )
def test_def_anchor(self):
    # Basic DEF_ANCHOR parsing: name, glyph id, glyph name, component,
    # locked flag and the position value record.
    [anchor1, anchor2, anchor3] = self.parse(
        'DEF_ANCHOR "top" ON 120 GLYPH a '
        'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb '
        'COMPONENT 1 AT POS DX 0 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "bottom" ON 120 GLYPH a '
        'COMPONENT 1 AT POS DX 250 DY 0 END_POS END_ANCHOR'
    ).statements
    self.assertEqual(
        (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component,
         anchor1.locked, anchor1.pos),
        ("top", 120, "a", 1,
         False, (None, 250, 450, {}, {}, {}))
    )
    self.assertEqual(
        (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component,
         anchor2.locked, anchor2.pos),
        ("MARK_top", 120, "acutecomb", 1,
         False, (None, 0, 450, {}, {}, {}))
    )
    self.assertEqual(
        (anchor3.name, anchor3.gid, anchor3.glyph_name, anchor3.component,
         anchor3.locked, anchor3.pos),
        ("bottom", 120, "a", 1,
         False, (None, 250, 0, {}, {}, {}))
    )
def test_def_anchor_multi_component(self):
    # The same anchor name may appear once per ligature component.
    [anchor1, anchor2] = self.parse(
        'DEF_ANCHOR "top" ON 120 GLYPH a '
        'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "top" ON 120 GLYPH a '
        'COMPONENT 2 AT POS DX 250 DY 450 END_POS END_ANCHOR'
    ).statements
    self.assertEqual(
        (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component),
        ("top", 120, "a", 1)
    )
    self.assertEqual(
        (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component),
        ("top", 120, "a", 2)
    )
def test_def_anchor_duplicate(self):
    # Redefining an anchor on the same glyph/component is an error;
    # anchor-name comparison is case insensitive.
    self.assertRaisesRegex(
        VoltLibError,
        'Anchor "dupe" already defined, '
        'anchor names are case insensitive',
        self.parse,
        'DEF_ANCHOR "dupe" ON 120 GLYPH a '
        'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n'
        'DEF_ANCHOR "dupe" ON 120 GLYPH a '
        'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR'
    )
def test_def_anchor_locked(self):
    # The LOCKED keyword must set anchor.locked to True.
    [anchor] = self.parse(
        'DEF_ANCHOR "top" ON 120 GLYPH a '
        'COMPONENT 1 LOCKED AT POS DX 250 DY 450 END_POS END_ANCHOR'
    ).statements
    self.assertEqual(
        (anchor.name, anchor.gid, anchor.glyph_name, anchor.component,
         anchor.locked, anchor.pos),
        ("top", 120, "a", 1,
         True, (None, 250, 450, {}, {}, {}))
    )
def test_anchor_adjust_device(self):
    # ADJUST_BY <delta> AT <ppem> pairs populate the dy device-adjustment
    # dict (last element of the pos tuple) as {ppem: delta}.
    [anchor] = self.parse(
        'DEF_ANCHOR "MARK_top" ON 123 GLYPH diacglyph '
        'COMPONENT 1 AT POS DX 0 DY 456 ADJUST_BY 12 AT 34 '
        'ADJUST_BY 56 AT 78 END_POS END_ANCHOR'
    ).statements
    self.assertEqual(
        (anchor.name, anchor.pos),
        ("MARK_top", (None, 0, 456, {}, {}, {34: 12, 78: 56}))
    )
def test_ppem(self):
    # The three PPEM settings parse into (name, value) setting statements.
    [grid_ppem, pres_ppem, ppos_ppem] = self.parse(
        'GRID_PPEM 20\n'
        'PRESENTATION_PPEM 72\n'
        'PPOSITIONING_PPEM 144'
    ).statements
    self.assertEqual(
        ((grid_ppem.name, grid_ppem.value),
         (pres_ppem.name, pres_ppem.value),
         (ppos_ppem.name, ppos_ppem.value)),
        (("GRID_PPEM", 20), ("PRESENTATION_PPEM", 72),
         ("PPOSITIONING_PPEM", 144))
    )
def test_compiler_flags(self):
    # Bare compiler flags parse as settings with a True value.
    [setting1, setting2] = self.parse(
        'COMPILER_USEEXTENSIONLOOKUPS\n'
        'COMPILER_USEPAIRPOSFORMAT2'
    ).statements
    self.assertEqual(
        ((setting1.name, setting1.value),
         (setting2.name, setting2.value)),
        (("COMPILER_USEEXTENSIONLOOKUPS", True),
         ("COMPILER_USEPAIRPOSFORMAT2", True))
    )
def test_cmap(self):
    # CMAP_FORMAT takes three numbers (platform, encoding, format) and
    # parses into a setting whose value is that triple.
    [cmap_format1, cmap_format2, cmap_format3] = self.parse(
        'CMAP_FORMAT 0 3 4\n'
        'CMAP_FORMAT 1 0 6\n'
        'CMAP_FORMAT 3 1 4'
    ).statements
    self.assertEqual(
        ((cmap_format1.name, cmap_format1.value),
         (cmap_format2.name, cmap_format2.value),
         (cmap_format3.name, cmap_format3.value)),
        (("CMAP_FORMAT", (0, 3, 4)),
         ("CMAP_FORMAT", (1, 0, 6)),
         ("CMAP_FORMAT", (3, 1, 4)))
    )
def test_do_not_touch_cmap(self):
    # DO_NOT_TOUCH_CMAP is a boolean flag and may be followed by
    # CMAP_FORMAT settings; all four parse as independent statements.
    [option1, option2, option3, option4] = self.parse(
        'DO_NOT_TOUCH_CMAP\n'
        'CMAP_FORMAT 0 3 4\n'
        'CMAP_FORMAT 1 0 6\n'
        'CMAP_FORMAT 3 1 4'
    ).statements
    self.assertEqual(
        ((option1.name, option1.value),
         (option2.name, option2.value),
         (option3.name, option3.value),
         (option4.name, option4.value)),
        (("DO_NOT_TOUCH_CMAP", True),
         ("CMAP_FORMAT", (0, 3, 4)),
         ("CMAP_FORMAT", (1, 0, 6)),
         ("CMAP_FORMAT", (3, 1, 4)))
    )
def test_stop_at_end(self):
    # Parsing must stop at the END keyword and ignore trailing NUL bytes
    # (VOLT files are often NUL-padded).  Uses parse_() directly because
    # the padding would break the round-trip check in parse().
    doc = self.parse_(
        'DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH END\0\0\0\0'
    )
    [def_glyph] = doc.statements
    self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode,
                      def_glyph.type, def_glyph.components),
                     (".notdef", 0, None, "BASE", None))
    self.assertEqual(str(doc),
                     '\nDEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH END\n')
def parse_(self, text):
    # Parse `text` without the round-trip check performed by parse().
    return Parser(StringIO(text)).parse()
def parse(self, text):
    # Parse `text` and additionally assert that writing the parsed
    # statements back out (str()) reproduces the input exactly.
    doc = self.parse_(text)
    self.assertEqual('\n'.join(str(s) for s in doc.statements), text)
    # Re-parses instead of returning `doc`, presumably so the returned
    # document is unaffected by the str() round-trip above -- confirm
    # before simplifying.
    return Parser(StringIO(text)).parse()
if __name__ == "__main__":
    # Run the test suite when this file is executed directly; exit with
    # a status reflecting the test result.
    import sys
    sys.exit(unittest.main())
| mit | 21cee13f47d76e90aad8a9d92e6f86d9 | 37.752227 | 82 | 0.504022 | 3.472573 | false | true | false | false |
fonttools/fonttools | Lib/fontTools/voltLib/lexer.py | 3 | 3417 | from fontTools.voltLib.error import VoltLibError
class Lexer(object):
    """Tokenizer for Microsoft VOLT project files.

    Iterating over a Lexer yields ``(token_type, token, location)``
    triples, where ``location`` is a ``(filename, line, column)`` tuple.
    NEWLINE tokens are produced by the low-level scanner but filtered out
    by the iterator protocol.
    """

    # Token types.
    NUMBER = "NUMBER"
    STRING = "STRING"
    NAME = "NAME"
    NEWLINE = "NEWLINE"

    # Character classes used by the scanner.
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_DIGIT_ = "0123456789"
    CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz"
    CHAR_UNDERSCORE_ = "_"
    CHAR_PERIOD_ = "."
    CHAR_NAME_START_ = CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + \
        CHAR_UNDERSCORE_
    CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_

    def __init__(self, text, filename):
        # `filename` is only used when reporting token locations/errors.
        self.filename_ = filename
        self.line_ = 1          # current line number (1-based)
        self.pos_ = 0           # absolute offset of the scan position
        self.line_start_ = 0    # offset where the current line starts
        self.text_ = text
        self.text_length_ = len(text)

    def __iter__(self):
        return self

    def next(self):  # Python 2
        # Backwards-compatibility shim for the Python 2 iterator protocol.
        return self.__next__()

    def __next__(self):  # Python 3
        # Skip NEWLINE tokens so callers only see meaningful tokens.
        while True:
            token_type, token, location = self.next_()
            if token_type not in {Lexer.NEWLINE}:
                return (token_type, token, location)

    def location_(self):
        """Return a (filename, line, column) tuple for the scan position."""
        column = self.pos_ - self.line_start_ + 1
        return (self.filename_ or "<volt>", self.line_, column)

    def next_(self):
        """Scan and return the next raw token, including NEWLINE tokens.

        Raises StopIteration at end of input and VoltLibError on an
        unterminated string or an unexpected character.
        """
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        next_char = text[start + 1] if start + 1 < limit else None

        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            # Treat "\r\n" as a single newline.
            self.pos_ += (2 if next_char == "\n" else 1)
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == '"':
            # Double-quoted string; may not span lines.
            self.pos_ += 1
            self.scan_until_('"\r\n')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                # Token value excludes the surrounding quotes.
                return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
            else:
                raise VoltLibError("Expected '\"' to terminate string",
                                   location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start:self.pos_]
            return (Lexer.NAME, token, location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
        if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
            # Negative integer literal.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
        raise VoltLibError("Unexpected character: '%s'" % cur_char,
                           location)

    def scan_over_(self, valid):
        """Advance the scan position past any run of `valid` characters."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        """Advance the scan position up to the first `stop_at` character."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p
| mit | 74957969af72fd2220164c5d601108a2 | 33.515152 | 78 | 0.522973 | 3.654545 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/svgLib/path/shapes.py | 3 | 5331 | import re
def _prefer_non_zero(*args):
for arg in args:
if arg != 0:
return arg
return 0.
def _ntos(n):
# %f likes to add unnecessary 0's, %g isn't consistent about # decimals
return ('%.3f' % n).rstrip('0').rstrip('.')
def _strip_xml_ns(tag):
# ElementTree API doesn't provide a way to ignore XML namespaces in tags
# so we here strip them ourselves: cf. https://bugs.python.org/issue18304
return tag.split('}', 1)[1] if '}' in tag else tag
def _transform(raw_value):
# TODO assumes a 'matrix' transform.
# No other transform functions are supported at the moment.
# https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform
# start simple: if you aren't exactly matrix(...) then no love
match = re.match(r'matrix\((.*)\)', raw_value)
if not match:
raise NotImplementedError
matrix = tuple(float(p) for p in re.split(r'\s+|,', match.group(1)))
if len(matrix) != 6:
raise ValueError('wrong # of terms in %s' % raw_value)
return matrix
class PathBuilder(object):
    """Converts SVG shape elements (rect, circle, line, ...) into SVG
    path strings.

    Each parsed element appends one path string to ``self.paths`` and a
    parallel entry (a 6-tuple matrix or None) to ``self.transforms``.
    """

    def __init__(self):
        self.paths = []       # one "d" path string per parsed element
        self.transforms = []  # parallel: matrix 6-tuple or None

    def _start_path(self, initial_path=''):
        self.paths.append(initial_path)
        self.transforms.append(None)

    def _end_path(self):
        # Close the current subpath.
        self._add('z')

    def _add(self, path_snippet):
        # Append a snippet to the current (last) path, space-separated.
        path = self.paths[-1]
        if path:
            path += ' ' + path_snippet
        else:
            path = path_snippet
        self.paths[-1] = path

    def _move(self, c, x, y):
        self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))

    def M(self, x, y):
        # Absolute moveto.
        self._move('M', x, y)

    def m(self, x, y):
        # Relative moveto.
        self._move('m', x, y)

    def _arc(self, c, rx, ry, x, y, large_arc):
        # Elliptical arc; rotation is fixed at 0 and sweep at 1.
        self._add('%s%s,%s 0 %d 1 %s,%s' % (c, _ntos(rx), _ntos(ry), large_arc,
                                            _ntos(x), _ntos(y)))

    def A(self, rx, ry, x, y, large_arc=0):
        # Absolute arc.
        self._arc('A', rx, ry, x, y, large_arc)

    def a(self, rx, ry, x, y, large_arc=0):
        # Relative arc.
        self._arc('a', rx, ry, x, y, large_arc)

    def _vhline(self, c, x):
        self._add('%s%s' % (c, _ntos(x)))

    def H(self, x):
        # Absolute horizontal lineto.
        self._vhline('H', x)

    def h(self, x):
        # Relative horizontal lineto.
        self._vhline('h', x)

    def V(self, y):
        # Absolute vertical lineto.
        self._vhline('V', y)

    def v(self, y):
        # Relative vertical lineto.
        self._vhline('v', y)

    def _line(self, c, x, y):
        self._add('%s%s,%s' % (c, _ntos(x), _ntos(y)))

    def L(self, x, y):
        # Absolute lineto.
        self._line('L', x, y)

    def l(self, x, y):
        # Relative lineto.
        self._line('l', x, y)

    def _parse_line(self, line):
        # <line x1 y1 x2 y2>; coordinates default to 0 when absent.
        x1 = float(line.attrib.get('x1', 0))
        y1 = float(line.attrib.get('y1', 0))
        x2 = float(line.attrib.get('x2', 0))
        y2 = float(line.attrib.get('y2', 0))
        self._start_path()
        self.M(x1, y1)
        self.L(x2, y2)

    def _parse_rect(self, rect):
        # <rect>, including rounded corners via rx/ry (each defaults to
        # the other when only one is given).
        x = float(rect.attrib.get('x', 0))
        y = float(rect.attrib.get('y', 0))
        w = float(rect.attrib.get('width'))
        h = float(rect.attrib.get('height'))
        rx = float(rect.attrib.get('rx', 0))
        ry = float(rect.attrib.get('ry', 0))

        rx = _prefer_non_zero(rx, ry)
        ry = _prefer_non_zero(ry, rx)
        # TODO there are more rules for adjusting rx, ry

        # Trace clockwise from the top edge, arcing around each corner.
        self._start_path()
        self.M(x + rx, y)
        self.H(x + w - rx)
        if rx > 0:
            self.A(rx, ry, x + w, y + ry)
        self.V(y + h - ry)
        if rx > 0:
            self.A(rx, ry, x + w - rx, y + h)
        self.H(x + rx)
        if rx > 0:
            self.A(rx, ry, x, y + h - ry)
        self.V(y + ry)
        if rx > 0:
            self.A(rx, ry, x + rx, y)
        self._end_path()

    def _parse_path(self, path):
        # <path>: the "d" attribute already is a path string.
        if 'd' in path.attrib:
            self._start_path(initial_path=path.attrib['d'])

    def _parse_polygon(self, poly):
        # <polygon>: points become a moveto + implicit linetos, closed.
        if 'points' in poly.attrib:
            self._start_path('M' + poly.attrib['points'])
            self._end_path()

    def _parse_polyline(self, poly):
        # <polyline>: like polygon but the path is left open.
        if 'points' in poly.attrib:
            self._start_path('M' + poly.attrib['points'])

    def _parse_circle(self, circle):
        cx = float(circle.attrib.get('cx', 0))
        cy = float(circle.attrib.get('cy', 0))
        r = float(circle.attrib.get('r'))

        # arc doesn't seem to like being a complete shape, draw two halves
        self._start_path()
        self.M(cx - r, cy)
        self.A(r, r, cx + r, cy, large_arc=1)
        self.A(r, r, cx - r, cy, large_arc=1)

    def _parse_ellipse(self, ellipse):
        cx = float(ellipse.attrib.get('cx', 0))
        cy = float(ellipse.attrib.get('cy', 0))
        rx = float(ellipse.attrib.get('rx'))
        ry = float(ellipse.attrib.get('ry'))

        # arc doesn't seem to like being a complete shape, draw two halves
        self._start_path()
        self.M(cx - rx, cy)
        self.A(rx, ry, cx + rx, cy, large_arc=1)
        self.A(rx, ry, cx - rx, cy, large_arc=1)

    def add_path_from_element(self, el):
        """Convert one SVG element to a path; return True if handled.

        Dispatches to ``_parse_<tagname>`` based on the (namespace-
        stripped, lowercased) tag; unknown tags are ignored.
        """
        tag = _strip_xml_ns(el.tag)
        parse_fn = getattr(self, '_parse_%s' % tag.lower(), None)
        if not callable(parse_fn):
            return False
        parse_fn(el)
        if 'transform' in el.attrib:
            self.transforms[-1] = _transform(el.attrib['transform'])
        return True
| mit | b8790f423299552750da8af1380d6777 | 28.453039 | 79 | 0.513975 | 3.015271 | false | false | false | false |
fonttools/fonttools | Tests/colorLib/unbuilder_test.py | 3 | 9001 | from fontTools.ttLib.tables import otTables as ot
from fontTools.colorLib.builder import buildColrV1
from fontTools.colorLib.unbuilder import unbuildColrV1
import pytest
# Input for the round-trip tests below: a mapping of base glyph name to a
# COLRv1 paint graph expressed as plain dicts/lists, covering every paint
# format that buildColrV1/unbuildColrV1 support.
TEST_COLOR_GLYPHS = {
    "glyph00010": {
        "Format": int(ot.PaintFormat.PaintColrLayers),
        "Layers": [
            {
                "Format": int(ot.PaintFormat.PaintGlyph),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintSolid),
                    "PaletteIndex": 2,
                    "Alpha": 0.5,
                },
                "Glyph": "glyph00011",
            },
            {
                "Format": int(ot.PaintFormat.PaintGlyph),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintVarLinearGradient),
                    "ColorLine": {
                        "Extend": "repeat",
                        "ColorStop": [
                            {
                                "StopOffset": 0.0,
                                "PaletteIndex": 3,
                                "Alpha": 1.0,
                                "VarIndexBase": 0,
                            },
                            {
                                "StopOffset": 0.5,
                                "PaletteIndex": 4,
                                "Alpha": 1.0,
                                "VarIndexBase": 1,
                            },
                            {
                                "StopOffset": 1.0,
                                "PaletteIndex": 5,
                                "Alpha": 1.0,
                                "VarIndexBase": 2,
                            },
                        ],
                    },
                    "x0": 1,
                    "y0": 2,
                    "x1": -3,
                    "y1": -4,
                    "x2": 5,
                    "y2": 6,
                    "VarIndexBase": 0xFFFFFFFF,
                },
                "Glyph": "glyph00012",
            },
            {
                "Format": int(ot.PaintFormat.PaintGlyph),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintVarTransform),
                    "Paint": {
                        "Format": int(ot.PaintFormat.PaintRadialGradient),
                        "ColorLine": {
                            "Extend": "pad",
                            "ColorStop": [
                                {
                                    "StopOffset": 0,
                                    "PaletteIndex": 6,
                                    "Alpha": 1.0,
                                },
                                {
                                    "StopOffset": 1.0,
                                    "PaletteIndex": 7,
                                    "Alpha": 0.4,
                                },
                            ],
                        },
                        "x0": 7,
                        "y0": 8,
                        "r0": 9,
                        "x1": 10,
                        "y1": 11,
                        "r1": 12,
                    },
                    "Transform": {
                        "xx": -13.0,
                        "yx": 14.0,
                        "xy": 15.0,
                        "yy": -17.0,
                        "dx": 18.0,
                        "dy": 19.0,
                        "VarIndexBase": 3,
                    },
                },
                "Glyph": "glyph00013",
            },
            {
                "Format": int(ot.PaintFormat.PaintVarTranslate),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintRotate),
                    "Paint": {
                        "Format": int(ot.PaintFormat.PaintVarSkew),
                        "Paint": {
                            "Format": int(ot.PaintFormat.PaintGlyph),
                            "Paint": {
                                "Format": int(ot.PaintFormat.PaintSolid),
                                "PaletteIndex": 2,
                                "Alpha": 0.5,
                            },
                            "Glyph": "glyph00011",
                        },
                        "xSkewAngle": -11.0,
                        "ySkewAngle": 5.0,
                        "VarIndexBase": 4,
                    },
                    "angle": 45.0,
                },
                "dx": 257.0,
                "dy": 258.0,
                "VarIndexBase": 5,
            },
        ],
    },
    "glyph00014": {
        "Format": int(ot.PaintFormat.PaintComposite),
        "SourcePaint": {
            "Format": int(ot.PaintFormat.PaintColrGlyph),
            "Glyph": "glyph00010",
        },
        "CompositeMode": "src_over",
        "BackdropPaint": {
            "Format": int(ot.PaintFormat.PaintTransform),
            "Paint": {
                "Format": int(ot.PaintFormat.PaintColrGlyph),
                "Glyph": "glyph00010",
            },
            "Transform": {
                "xx": 1.0,
                "yx": 0.0,
                "xy": 0.0,
                "yy": 1.0,
                "dx": 300.0,
                "dy": 0.0,
            },
        },
    },
    "glyph00015": {
        "Format": int(ot.PaintFormat.PaintGlyph),
        "Paint": {
            "Format": int(ot.PaintFormat.PaintSweepGradient),
            "ColorLine": {
                "Extend": "pad",
                "ColorStop": [
                    {
                        "StopOffset": 0.0,
                        "PaletteIndex": 3,
                        "Alpha": 1.0,
                    },
                    {
                        "StopOffset": 1.0,
                        "PaletteIndex": 5,
                        "Alpha": 1.0,
                    },
                ],
            },
            "centerX": 259,
            "centerY": 300,
            "startAngle": 45.0,
            "endAngle": 135.0,
        },
        "Glyph": "glyph00011",
    },
    "glyph00016": {
        "Format": int(ot.PaintFormat.PaintColrLayers),
        "Layers": [
            {
                "Format": int(ot.PaintFormat.PaintGlyph),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintVarSolid),
                    "PaletteIndex": 2,
                    "Alpha": 0.5,
                    "VarIndexBase": 6,
                },
                "Glyph": "glyph00011",
            },
            {
                "Format": int(ot.PaintFormat.PaintGlyph),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintVarLinearGradient),
                    "ColorLine": {
                        "Extend": "repeat",
                        "ColorStop": [
                            {
                                "StopOffset": 0.0,
                                "PaletteIndex": 3,
                                "Alpha": 1.0,
                                "VarIndexBase": 7,
                            },
                            {
                                "StopOffset": 0.5,
                                "PaletteIndex": 4,
                                "Alpha": 1.0,
                                "VarIndexBase": 8,
                            },
                            {
                                "StopOffset": 1.0,
                                "PaletteIndex": 5,
                                "Alpha": 1.0,
                                "VarIndexBase": 9,
                            },
                        ],
                    },
                    "x0": 1,
                    "y0": 2,
                    "x1": -3,
                    "y1": -4,
                    "x2": 5,
                    "y2": 6,
                    "VarIndexBase": 0xFFFFFFFF,
                },
                "Glyph": "glyph00012",
            },
        ],
    },
    # When PaintColrLayers contains more than 255 layers, we build a tree
    # of nested PaintColrLayers of max 255 items (NumLayers field is a uint8).
    # Below we test that unbuildColrV1 restores a flat list of layers without
    # nested PaintColrLayers.
    "glyph00017": {
        "Format": int(ot.PaintFormat.PaintColrLayers),
        "Layers": [
            {
                "Format": int(ot.PaintFormat.PaintGlyph),
                "Paint": {
                    "Format": int(ot.PaintFormat.PaintSolid),
                    "PaletteIndex": i,
                    "Alpha": 1.0,
                },
                # Fixed: this was a plain string literal missing the f
                # prefix, so all 256 layers referenced the same literal
                # glyph name instead of glyph00018..glyph00273.
                "Glyph": f"glyph{str(18 + i).zfill(5)}",
            }
            for i in range(256)
        ],
    },
}
def test_unbuildColrV1():
    """Round-trip: unbuilding a built COLRv1 table yields the original input."""
    built_layers, built_base_glyphs = buildColrV1(TEST_COLOR_GLYPHS)
    round_tripped = unbuildColrV1(built_layers, built_base_glyphs)
    assert round_tripped == TEST_COLOR_GLYPHS
def test_unbuildColrV1_noLayers():
    """unbuildColrV1 must tolerate a missing (None) LayerList."""
    _, base_glyphs_v1 = buildColrV1(TEST_COLOR_GLYPHS)
    # Just looking to see we don't crash
    unbuildColrV1(None, base_glyphs_v1)
| mit | 92606569015b6bb94ac05ad3b11ed79a | 34.160156 | 78 | 0.313965 | 4.899837 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/merge/tables.py | 3 | 8568 | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib, cffLib
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.cmap import computeMegaCmap
from fontTools.merge.util import *
import logging
log = logging.getLogger("fontTools.merge")
# Per-table merge policies.  A ``mergeMap`` maps attribute names of the
# table object to a reducer applied across the per-font attribute values;
# the '*' entry is the fallback for attributes not listed explicitly.
# Reducers (equal, first, max, sum, bitwise_or, mergeBits, ...) come from
# fontTools.merge.util.

ttLib.getTableClass('maxp').mergeMap = {
    '*': max,
    'tableTag': equal,
    'tableVersion': equal,
    'numGlyphs': sum,
    'maxStorage': first,
    'maxFunctionDefs': first,
    'maxInstructionDefs': first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

# head.flags is merged bit by bit: keys are bit numbers, values are the
# reducer for that bit; 'size' is the width of the flags field.
headFlagsMergeBitMap = {
    'size': 16,
    '*': bitwise_or,
    1: bitwise_and, # Baseline at y = 0
    2: bitwise_and, # lsb at x = 0
    3: bitwise_and, # Force ppem to integer values. FIXME?
    5: bitwise_and, # Font is vertical
    6: lambda bit: 0, # Always set to zero
    11: bitwise_and, # Font data is 'lossless'
    13: bitwise_and, # Optimized for ClearType
    14: bitwise_and, # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0, # Always set to zero
}

ttLib.getTableClass('head').mergeMap = {
    'tableTag': equal,
    'tableVersion': max,
    'fontRevision': max,
    'checkSumAdjustment': lambda lst: 0, # We need *something* here
    'magicNumber': equal,
    'flags': mergeBits(headFlagsMergeBitMap),
    'unitsPerEm': equal,
    'created': current_time,
    'modified': current_time,
    # Bounding box: union of all input boxes.
    'xMin': min,
    'yMin': min,
    'xMax': max,
    'yMax': max,
    'macStyle': first,
    'lowestRecPPEM': max,
    'fontDirectionHint': lambda lst: 2,
    'indexToLocFormat': first,
    'glyphDataFormat': equal,
}

ttLib.getTableClass('hhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceWidthMax': max,
    'minLeftSideBearing': min,
    'minRightSideBearing': min,
    'xMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfHMetrics': recalculate,
}

ttLib.getTableClass('vhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceHeightMax': max,
    'minTopSideBearing': min,
    'minBottomSideBearing': min,
    'yMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfVMetrics': recalculate,
}

# OS/2.fsType embedding-permission bits; combined via mergeOs2FsType below
# so the merged font carries the least restrictive common permissions.
os2FsTypeMergeBitMap = {
    'size': 16,
    '*': lambda bit: 0,
    1: bitwise_or, # no embedding permitted
    2: bitwise_and, # allow previewing and printing documents
    3: bitwise_and, # allow editing documents
    8: bitwise_or, # no subsetting permitted
    9: bitwise_or, # no embedding of outlines permitted
}
def mergeOs2FsType(lst):
    """Merge the OS/2.fsType (embedding permissions) fields of all fonts.

    Normalizes each input value to its least restrictive equivalent form,
    then combines them bit-wise via os2FsTypeMergeBitMap so the result is
    the least restrictive permission set common to all inputs.
    Relevant bits: 1 = no embedding, 2 = preview & print, 3 = editable,
    8 = no subsetting, 9 = bitmap-embedding only.
    """
    lst = list(lst)
    if all(item == 0 for item in lst):
        # All inputs are installable-embedding (no restrictions).
        return 0

    # Compute least restrictive logic for each fsType value
    for i in range(len(lst)):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if lst[i] & 0x000C:
            lst[i] &= ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif lst[i] & 0x0008:
            lst[i] |= 0x0004
        # set bits 2 and 3 if everything is allowed
        elif lst[i] == 0:
            lst[i] = 0x000C

    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType
ttLib.getTableClass('OS/2').mergeMap = {
    '*': first,
    'tableTag': equal,
    'version': max,
    'xAvgCharWidth': first, # Will be recalculated at the end on the merged font
    'fsType': mergeOs2FsType, # Will be overwritten
    'panose': first, # FIXME: should really be the first Latin font
    # Unicode/code-page coverage: union across all fonts.
    'ulUnicodeRange1': bitwise_or,
    'ulUnicodeRange2': bitwise_or,
    'ulUnicodeRange3': bitwise_or,
    'ulUnicodeRange4': bitwise_or,
    'fsFirstCharIndex': min,
    'fsLastCharIndex': max,
    'sTypoAscender': max,
    'sTypoDescender': min,
    'sTypoLineGap': max,
    'usWinAscent': max,
    'usWinDescent': max,
    # Version 1
    # onlyExisting() skips the attribute for fonts whose OS/2 version
    # predates the field.
    'ulCodePageRange1': onlyExisting(bitwise_or),
    'ulCodePageRange2': onlyExisting(bitwise_or),
    # Version 2, 3, 4
    'sxHeight': onlyExisting(max),
    'sCapHeight': onlyExisting(max),
    'usDefaultChar': onlyExisting(first),
    'usBreakChar': onlyExisting(first),
    'usMaxContext': onlyExisting(max),
    # version 5
    'usLowerOpticalPointSize': onlyExisting(min),
    'usUpperOpticalPointSize': onlyExisting(max),
}
@add_method(ttLib.getTableClass('OS/2'))
def merge(self, m, tables):
    """Merge OS/2 tables via mergeMap, then sanitize the fsType bits for
    the merged table's version."""
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self
ttLib.getTableClass('post').mergeMap = {
    '*': first,
    'tableTag': equal,
    'formatType': max,
    'isFixedPitch': min,
    'minMemType42': max,
    'maxMemType42': lambda lst: 0,
    'minMemType1': max,
    'maxMemType1': lambda lst: 0,
    # Glyph-name mapping: union of all fonts' mappings (format 2 only).
    'mapping': onlyExisting(sumDicts),
    'extraNames': lambda lst: [],
}

# Metrics tables: concatenate the per-glyph metrics dicts.
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
    'tableTag': equal,
    'metrics': sumDicts,
}

ttLib.getTableClass('name').mergeMap = {
    'tableTag': equal,
    'names': first, # FIXME? Does mixing name records make sense?
}

# loca is derived from glyf; recompute on the merged font.
ttLib.getTableClass('loca').mergeMap = {
    '*': recalculate,
    'tableTag': equal,
}

ttLib.getTableClass('glyf').mergeMap = {
    'tableTag': equal,
    'glyphs': sumDicts,
    'glyphOrder': sumLists,
}
@add_method(ttLib.getTableClass('glyf'))
def merge(self, m, tables):
    """Merge 'glyf' tables: strip hints from all but the first font and
    make sure composite glyphs are expanded before the dicts are summed."""
    for i,table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)
# Hinting programs and gasp can't be meaningfully combined; keep the first
# font's tables wholesale (matches glyf.merge dropping other fonts' hints).
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable
@add_method(ttLib.getTableClass('CFF '))
def merge(self, m, tables):
    """Merge 'CFF ' tables into the first one.

    All fonts are desubroutinized first so charstrings can be moved
    between fonts without carrying subroutine indexes along.  The first
    font's Private dict is shared by all merged charstrings.  CID-keyed
    fonts (with an FDSelect) are not supported.

    Changes vs. the previous revision: the unused local ``gs`` was
    removed, and the inner enumerate index no longer shadows the outer
    font index ``i``.
    """
    if any(hasattr(table, "FDSelect") for table in tables):
        raise NotImplementedError(
            "Merging CID-keyed CFF tables is not supported yet"
        )

    for table in tables:
        table.cff.desubroutinize()

    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    storedNamesStrings = []
    glyphOrderStrings = []
    glyphOrder = set(newfont.getGlyphOrder())

    # Split the first font's string index into glyph names and other
    # stored strings, so glyph-name strings can be kept in glyph order.
    for name in newfont.strings.strings:
        if name not in glyphOrder:
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)

    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    log.debug("FONT 0 CharStrings: %d.", len(newcs))

    for i, table in enumerate(tables[1:], start=1):
        font = table.cff[0]
        # Share the first font's Private dict with all merged glyphs.
        font.Private = private
        fontGlyphOrder = set(font.getGlyphOrder())
        for name in font.strings.strings:
            if name in fontGlyphOrder:
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        log.debug("Font %d CharStrings: %d.", i, len(cs))
        chrset.extend(font.charset)
        if newcs.charStringsAreIndexed:
            # Append index slots for the incoming glyphs; the actual
            # charstrings are assigned in the loop below.
            for j, name in enumerate(cs.charStrings, start=len(newcs)):
                newcs.charStrings[name] = j
                newcs.charStringsIndex.items.append(None)
        for name in cs.charStrings:
            newcs[name] = cs[name]

    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings

    return newcff
@add_method(ttLib.getTableClass('cmap'))
def merge(self, m, tables):
	"""Build the merged cmap from the mega-cmap computed across all fonts.

	Always emits a (3,1) format-4 BMP subtable; adds a (3,10) format-12
	subtable only when codepoints beyond the BMP are present.
	"""
	# TODO Handle format=14.
	if not hasattr(m, 'cmap'):
		computeMegaCmap(m, tables)
	megaCmap = m.cmap
	bmpCmap = {codepoint: glyphID for codepoint, glyphID in megaCmap.items() if codepoint <= 0xFFFF}
	module = ttLib.getTableModule('cmap')
	def makeSubtable(formatNumber, platEncID, mapping):
		subtable = module.cmap_classes[formatNumber](formatNumber)
		subtable.platformID = 3
		subtable.platEncID = platEncID
		subtable.language = 0
		subtable.cmap = mapping
		return subtable
	# subtables are ordered by platform then encoding: the format-4 (3,1)
	# subtable precedes the optional format-12 (3,10) subtable
	self.tables = [makeSubtable(4, 1, bmpCmap)]
	if len(bmpCmap) != len(megaCmap):
		# supplementary-plane codepoints present: format-12 required.
		self.tables.append(makeSubtable(12, 10, megaCmap))
	self.tableVersion = 0
	self.numSubTables = len(self.tables)
	return self
| mit | 659929c5d52e7ca28ab7213b56e47440 | 26.549839 | 99 | 0.709267 | 2.804583 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/tables/sbixStrike.py | 3 | 5381 | from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from .sbixGlyph import Glyph
import struct
sbixStrikeHeaderFormat = """
>
ppem: H # The PPEM for which this strike was designed (e.g., 9,
# 12, 24)
resolution: H # The screen resolution (in dpi) for which this strike
# was designed (e.g., 72)
"""
sbixGlyphDataOffsetFormat = """
>
glyphDataOffset: L # Offset from the beginning of the strike data record
# to data for the individual glyph
"""
sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat)
sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat)
class Strike(object):
	"""A single sbix strike: a set of bitmap glyphs designed for one
	PPEM size and screen resolution.

	``decompile`` parses the raw strike data into ``self.glyphs`` (glyph
	name -> ``Glyph``); ``compile`` serializes the glyphs back to binary.
	"""
	def __init__(self, rawdata=None, ppem=0, resolution=72):
		# raw binary strike data, consumed (and deleted) by decompile()
		self.data = rawdata
		# PPEM this strike was designed for (e.g. 9, 12, 24)
		self.ppem = ppem
		# screen resolution in dpi (e.g. 72)
		self.resolution = resolution
		# glyph name -> Glyph, populated by decompile()
		self.glyphs = {}
	def decompile(self, ttFont):
		"""Parse ``self.data`` into individual glyph records.

		Raises TTLibError when there is no data or the data is shorter
		than the fixed strike header.
		"""
		if self.data is None:
			from fontTools import ttLib
			raise ttLib.TTLibError("No table data to decompile")
		if len(self.data) < sbixStrikeHeaderFormatSize:
			from fontTools import ttLib
			# BUGFIX: the original raised a (class, message) tuple and applied
			# "%" to it; construct the exception with the formatted message.
			raise ttLib.TTLibError(
				"Strike header too short: Expected %x, got %x."
				% (sbixStrikeHeaderFormatSize, len(self.data))
			)
		# read Strike header from raw data
		sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self)
		# the first glyph-data offset tells us how long the offset array is,
		# and therefore how many glyphs the strike holds
		firstGlyphDataOffset, = struct.unpack(
			">L",
			self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize],
		)
		self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1
		# ^ -1 because there's one more offset than glyphs
		# build offset list for single glyph data offsets
		self.glyphDataOffsets = []
		for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs
			start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
			current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize])
			self.glyphDataOffsets.append(current_offset)
		# iterate through offset list and slice raw data into glyph data records
		for i in range(self.numGlyphs):
			current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i)
			current_glyph.decompile(ttFont)
			self.glyphs[current_glyph.glyphName] = current_glyph
		# drop intermediate parsing state
		del self.glyphDataOffsets
		del self.numGlyphs
		del self.data
	def compile(self, ttFont):
		"""Serialize ``self.glyphs`` into binary strike data in ``self.data``.

		Every glyph in the font's glyph order gets a record; glyphs with no
		bitmap get an empty record so the offset array stays aligned.
		"""
		self.glyphDataOffsets = b""
		self.bitmapData = b""
		glyphOrder = ttFont.getGlyphOrder()
		# first glyph starts right after the header and the offset array
		currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
		for glyphName in glyphOrder:
			if glyphName in self.glyphs:
				# we have glyph data for this glyph
				current_glyph = self.glyphs[glyphName]
			else:
				# must add empty glyph data record for this glyph
				current_glyph = Glyph(glyphName=glyphName)
			current_glyph.compile(ttFont)
			current_glyph.glyphDataOffset = currentGlyphDataOffset
			self.bitmapData += current_glyph.rawdata
			currentGlyphDataOffset += len(current_glyph.rawdata)
			self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph)
		# add last "offset", really the end address of the last glyph data record
		dummy = Glyph()
		dummy.glyphDataOffset = currentGlyphDataOffset
		self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
		# pack header
		self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
		# add offsets and image data after header
		self.data += self.glyphDataOffsets + self.bitmapData
	def toXML(self, xmlWriter, ttFont):
		"""Write this strike and its glyphs (in glyph order) as TTX XML."""
		xmlWriter.begintag("strike")
		xmlWriter.newline()
		xmlWriter.simpletag("ppem", value=self.ppem)
		xmlWriter.newline()
		xmlWriter.simpletag("resolution", value=self.resolution)
		xmlWriter.newline()
		glyphOrder = ttFont.getGlyphOrder()
		for i in range(len(glyphOrder)):
			if glyphOrder[i] in self.glyphs:
				self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont)
				# TODO: what if there are more glyph data records than (glyf table) glyphs?
		xmlWriter.endtag("strike")
		xmlWriter.newline()
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild strike attributes and glyphs from TTX XML elements."""
		if name in ["ppem", "resolution"]:
			setattr(self, name, safeEval(attrs["value"]))
		elif name == "glyph":
			if "graphicType" in attrs:
				myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
			else:
				myFormat = None
			if "glyphname" in attrs:
				myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
			elif "name" in attrs:
				myGlyphName = safeEval("'''" + attrs["name"] + "'''")
			else:
				from fontTools import ttLib
				raise ttLib.TTLibError("Glyph must have a glyph name.")
			if "originOffsetX" in attrs:
				myOffsetX = safeEval(attrs["originOffsetX"])
			else:
				myOffsetX = 0
			if "originOffsetY" in attrs:
				myOffsetY = safeEval(attrs["originOffsetY"])
			else:
				myOffsetY = 0
			current_glyph = Glyph(
				glyphName=myGlyphName,
				graphicType=myFormat,
				originOffsetX=myOffsetX,
				originOffsetY=myOffsetY,
			)
			for element in content:
				if isinstance(element, tuple):
					name, attrs, content = element
					current_glyph.fromXML(name, attrs, content, ttFont)
					current_glyph.compile(ttFont)
			self.glyphs[current_glyph.glyphName] = current_glyph
		else:
			from fontTools import ttLib
			raise ttLib.TTLibError("can't handle '%s' element" % name)
| mit | 08305a2973000e500976a1b4de470acf | 35.358108 | 109 | 0.727746 | 3.235719 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ufoLib/validators.py | 3 | 26095 | """Various low level data validators."""
import calendar
from io import open
import fs.base
import fs.osfs
from collections.abc import Mapping
from fontTools.ufoLib.utils import numberTypes
# -------
# Generic
# -------
def isDictEnough(value):
	"""
	Some objects will likely come in that aren't
	dicts but are dict-ish enough.
	"""
	if isinstance(value, Mapping):
		return True
	# duck-typing fallback: anything exposing the three dict accessors passes
	return all(hasattr(value, attr) for attr in ("keys", "values", "items"))
def genericTypeValidator(value, typ):
	"""
	Generic. (Added at version 2.)
	"""
	return isinstance(value, typ)

def genericIntListValidator(values, validValues):
	"""
	Generic. (Added at version 2.)
	"""
	if not isinstance(values, (list, tuple)):
		return False
	# every value must come from the allowed set
	if not set(values).issubset(set(validValues)):
		return False
	return all(isinstance(value, int) for value in values)

def genericNonNegativeIntValidator(value):
	"""
	Generic. (Added at version 3.)
	"""
	return isinstance(value, int) and value >= 0

def genericNonNegativeNumberValidator(value):
	"""
	Generic. (Added at version 3.)
	"""
	return isinstance(value, numberTypes) and value >= 0

def genericDictValidator(value, prototype):
	"""
	Generic. (Added at version 3.)

	*prototype* maps key -> (type, required).
	"""
	# not a dict
	if not isinstance(value, Mapping):
		return False
	# missing required keys
	for key, (typ, required) in prototype.items():
		if required and key not in value:
			return False
	# unknown keys
	if any(key not in prototype for key in value.keys()):
		return False
	# incorrect types (None is allowed for optional keys)
	for key, v in value.items():
		prototypeType, required = prototype[key]
		if v is None and not required:
			continue
		if not isinstance(v, prototypeType):
			return False
	return True
# --------------
# fontinfo.plist
# --------------
# Data Validators
def fontInfoStyleMapStyleNameValidator(value):
	"""
	Version 2+.

	The style map style name is restricted to the four standard values.
	"""
	return value in ("regular", "italic", "bold", "bold italic")
def fontInfoOpenTypeGaspRangeRecordsValidator(value):
	"""
	Version 3+.

	Each record needs a non-negative rangeMaxPPEM and a list of known
	behaviors; records must be sorted by ascending rangeMaxPPEM.
	"""
	if not isinstance(value, list):
		return False
	if not value:
		return True
	validBehaviors = [0, 1, 2, 3]
	dictPrototype = dict(rangeMaxPPEM=(int, True), rangeGaspBehavior=(list, True))
	previousPPEM = None
	for rangeRecord in value:
		if not genericDictValidator(rangeRecord, dictPrototype):
			return False
		ppem = rangeRecord["rangeMaxPPEM"]
		behavior = rangeRecord["rangeGaspBehavior"]
		if not genericNonNegativeIntValidator(ppem):
			return False
		if not genericIntListValidator(behavior, validBehaviors):
			return False
		# PPEM values must appear in non-decreasing order
		if previousPPEM is not None and ppem < previousPPEM:
			return False
		previousPPEM = ppem
	return True
def fontInfoOpenTypeHeadCreatedValidator(value):
	"""
	Version 2+.

	Accepts only strings of the exact form "YYYY/MM/DD HH:MM:SS" whose
	fields form a valid calendar date and time of day.
	"""
	# format: 0000/00/00 00:00:00
	if not isinstance(value, str):
		return False
	# basic shape: 19 chars, one space separating date and time
	if len(value) != 19 or value.count(" ") != 1:
		return False
	datePart, timePart = value.split(" ")
	if datePart.count("/") != 2 or timePart.count(":") != 2:
		return False
	dateFields = datePart.split("/")
	timeFields = timePart.split(":")
	# zero-padded field widths are mandatory
	if [len(field) for field in dateFields] != [4, 2, 2]:
		return False
	if [len(field) for field in timeFields] != [2, 2, 2]:
		return False
	try:
		year, month, day = (int(field) for field in dateFields)
		hour, minute, second = (int(field) for field in timeFields)
	except ValueError:
		return False
	# calendar-aware date validation (handles leap years)
	if not 1 <= month <= 12:
		return False
	if not 1 <= day <= calendar.monthrange(year, month)[1]:
		return False
	if not 0 <= hour <= 23:
		return False
	if not 0 <= minute <= 59:
		return False
	if not 0 <= second <= 59:
		return False
	return True
def fontInfoOpenTypeNameRecordsValidator(value):
	"""
	Version 3+.
	"""
	if not isinstance(value, list):
		return False
	# every record must carry all five required fields with correct types
	dictPrototype = dict(nameID=(int, True), platformID=(int, True), encodingID=(int, True), languageID=(int, True), string=(str, True))
	return all(genericDictValidator(nameRecord, dictPrototype) for nameRecord in value)
def fontInfoOpenTypeOS2WeightClassValidator(value):
	"""
	Version 2+.

	Weight class must be a non-negative integer.
	"""
	return isinstance(value, int) and value >= 0

def fontInfoOpenTypeOS2WidthClassValidator(value):
	"""
	Version 2+.

	Width class must be an integer in the range 1-9.
	"""
	return isinstance(value, int) and 1 <= value <= 9
def fontInfoVersion2OpenTypeOS2PanoseValidator(values):
	"""
	Version 2.

	Panose is a sequence of exactly ten integers.
	"""
	if not isinstance(values, (list, tuple)):
		return False
	if len(values) != 10:
		return False
	# XXX further validation?
	return all(isinstance(value, int) for value in values)

def fontInfoVersion3OpenTypeOS2PanoseValidator(values):
	"""
	Version 3+.

	Same as version 2, but the ten integers must be non-negative.
	"""
	if not isinstance(values, (list, tuple)):
		return False
	if len(values) != 10:
		return False
	# XXX further validation?
	return all(isinstance(value, int) and value >= 0 for value in values)

def fontInfoOpenTypeOS2FamilyClassValidator(values):
	"""
	Version 2+.

	Family class is a (classID, subclassID) pair with classID in 0-14 and
	subclassID in 0-15.
	"""
	if not isinstance(values, (list, tuple)) or len(values) != 2:
		return False
	if not all(isinstance(value, int) for value in values):
		return False
	classID, subclassID = values
	return 0 <= classID <= 14 and 0 <= subclassID <= 15
def fontInfoPostscriptBluesValidator(values):
	"""
	Version 2+.

	BlueValues: at most 7 pairs (14 numbers), count must be even.
	"""
	if not isinstance(values, (list, tuple)):
		return False
	if len(values) > 14 or len(values) % 2:
		return False
	return all(isinstance(value, numberTypes) for value in values)

def fontInfoPostscriptOtherBluesValidator(values):
	"""
	Version 2+.

	OtherBlues: at most 5 pairs (10 numbers), count must be even.
	"""
	if not isinstance(values, (list, tuple)):
		return False
	if len(values) > 10 or len(values) % 2:
		return False
	return all(isinstance(value, numberTypes) for value in values)

def fontInfoPostscriptStemsValidator(values):
	"""
	Version 2+.

	Stem snap lists hold at most 12 numbers.
	"""
	if not isinstance(values, (list, tuple)):
		return False
	if len(values) > 12:
		return False
	return all(isinstance(value, numberTypes) for value in values)
def fontInfoPostscriptWindowsCharacterSetValidator(value):
	"""
	Version 2+.

	Valid character set identifiers are the integers 1 through 20.
	"""
	return value in range(1, 21)
# shared prototype for WOFF metadata text records
_woffTextRecordDictPrototype = {"text" : (str, True), "language" : (str, False), "dir" : (str, False), "class" : (str, False)}

def _woffDirAttributeIsValid(record):
	# "dir" is optional, but when present it must be "ltr" or "rtl"
	return "dir" not in record or record.get("dir") in ("ltr", "rtl")

def fontInfoWOFFMetadataUniqueIDValidator(value):
	"""
	Version 3+.
	"""
	return genericDictValidator(value, dict(id=(str, True)))

def fontInfoWOFFMetadataVendorValidator(value):
	"""
	Version 3+.
	"""
	dictPrototype = {"name" : (str, True), "url" : (str, False), "dir" : (str, False), "class" : (str, False)}
	if not genericDictValidator(value, dictPrototype):
		return False
	return _woffDirAttributeIsValid(value)

def fontInfoWOFFMetadataCreditsValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, dict(credits=(list, True))):
		return False
	# at least one credit record is required
	if not len(value["credits"]):
		return False
	creditPrototype = {"name" : (str, True), "url" : (str, False), "role" : (str, False), "dir" : (str, False), "class" : (str, False)}
	for credit in value["credits"]:
		if not genericDictValidator(credit, creditPrototype):
			return False
		if not _woffDirAttributeIsValid(credit):
			return False
	return True

def fontInfoWOFFMetadataDescriptionValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, dict(url=(str, False), text=(list, True))):
		return False
	return all(fontInfoWOFFMetadataTextValue(text) for text in value["text"])

def fontInfoWOFFMetadataLicenseValidator(value):
	"""
	Version 3+.
	"""
	dictPrototype = dict(url=(str, False), text=(list, False), id=(str, False))
	if not genericDictValidator(value, dictPrototype):
		return False
	return all(fontInfoWOFFMetadataTextValue(text) for text in value.get("text", []))

def fontInfoWOFFMetadataTrademarkValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, dict(text=(list, True))):
		return False
	return all(fontInfoWOFFMetadataTextValue(text) for text in value["text"])

def fontInfoWOFFMetadataCopyrightValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, dict(text=(list, True))):
		return False
	return all(fontInfoWOFFMetadataTextValue(text) for text in value["text"])

def fontInfoWOFFMetadataLicenseeValidator(value):
	"""
	Version 3+.
	"""
	dictPrototype = {"name" : (str, True), "dir" : (str, False), "class" : (str, False)}
	if not genericDictValidator(value, dictPrototype):
		return False
	return _woffDirAttributeIsValid(value)

def fontInfoWOFFMetadataTextValue(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, _woffTextRecordDictPrototype):
		return False
	return _woffDirAttributeIsValid(value)

def fontInfoWOFFMetadataExtensionsValidator(value):
	"""
	Version 3+.
	"""
	# must be a non-empty list of extension records
	if not isinstance(value, list) or not value:
		return False
	return all(fontInfoWOFFMetadataExtensionValidator(extension) for extension in value)

def fontInfoWOFFMetadataExtensionValidator(value):
	"""
	Version 3+.
	"""
	dictPrototype = dict(names=(list, False), items=(list, True), id=(str, False))
	if not genericDictValidator(value, dictPrototype):
		return False
	for name in value.get("names", []):
		if not fontInfoWOFFMetadataExtensionNameValidator(name):
			return False
	return all(fontInfoWOFFMetadataExtensionItemValidator(item) for item in value["items"])

def fontInfoWOFFMetadataExtensionItemValidator(value):
	"""
	Version 3+.
	"""
	dictPrototype = dict(id=(str, False), names=(list, True), values=(list, True))
	if not genericDictValidator(value, dictPrototype):
		return False
	if not all(fontInfoWOFFMetadataExtensionNameValidator(name) for name in value["names"]):
		return False
	return all(fontInfoWOFFMetadataExtensionValueValidator(val) for val in value["values"])

def fontInfoWOFFMetadataExtensionNameValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, _woffTextRecordDictPrototype):
		return False
	return _woffDirAttributeIsValid(value)

def fontInfoWOFFMetadataExtensionValueValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, _woffTextRecordDictPrototype):
		return False
	return _woffDirAttributeIsValid(value)
# ----------
# Guidelines
# ----------
_guidelineDictPrototype = dict(
	x=((int, float), False), y=((int, float), False), angle=((int, float), False),
	name=(str, False), color=(str, False), identifier=(str, False)
)

def guidelinesValidator(value, identifiers=None):
	"""
	Version 3+.

	Validates each guideline and enforces identifier uniqueness; the
	caller-supplied *identifiers* set, when given, is updated in place.
	"""
	if not isinstance(value, list):
		return False
	seenIdentifiers = identifiers if identifiers is not None else set()
	for guide in value:
		if not guidelineValidator(guide):
			return False
		identifier = guide.get("identifier")
		if identifier is None:
			continue
		if identifier in seenIdentifiers:
			return False
		seenIdentifiers.add(identifier)
	return True

def guidelineValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, _guidelineDictPrototype):
		return False
	x = value.get("x")
	y = value.get("y")
	angle = value.get("angle")
	# at least one of x / y must be present
	if x is None and y is None:
		return False
	if x is None or y is None:
		# a single-coordinate guideline may not carry an angle
		if angle is not None:
			return False
	elif angle is None:
		# a two-coordinate guideline must carry an angle
		return False
	# angle must be between 0 and 360
	if angle is not None and not (0 <= angle <= 360):
		return False
	# identifier must be 1 or more valid characters
	identifier = value.get("identifier")
	if identifier is not None and not identifierValidator(identifier):
		return False
	# color must follow the proper format
	color = value.get("color")
	if color is not None and not colorValidator(color):
		return False
	return True
# -------
# Anchors
# -------
_anchorDictPrototype = dict(
	x=((int, float), False), y=((int, float), False),
	name=(str, False), color=(str, False),
	identifier=(str, False)
)

def anchorsValidator(value, identifiers=None):
	"""
	Version 3+.

	Validates each anchor and enforces identifier uniqueness; the
	caller-supplied *identifiers* set, when given, is updated in place.
	"""
	if not isinstance(value, list):
		return False
	seenIdentifiers = identifiers if identifiers is not None else set()
	for anchor in value:
		if not anchorValidator(anchor):
			return False
		identifier = anchor.get("identifier")
		if identifier is None:
			continue
		if identifier in seenIdentifiers:
			return False
		seenIdentifiers.add(identifier)
	return True

def anchorValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, _anchorDictPrototype):
		return False
	# both coordinates are mandatory for anchors
	if value.get("x") is None or value.get("y") is None:
		return False
	# identifier must be 1 or more valid characters
	identifier = value.get("identifier")
	if identifier is not None and not identifierValidator(identifier):
		return False
	# color must follow the proper format
	color = value.get("color")
	if color is not None and not colorValidator(color):
		return False
	return True
# ----------
# Identifier
# ----------
def identifierValidator(value):
	"""
	Version 3+.

	Identifiers are 1-100 characters of printable ASCII (0x20-0x7E).

	>>> identifierValidator("a")
	True
	>>> identifierValidator("")
	False
	>>> identifierValidator("a" * 101)
	False
	"""
	if not isinstance(value, str):
		return False
	if not 0 < len(value) <= 100:
		return False
	return all(0x20 <= ord(character) <= 0x7E for character in value)
# -----
# Color
# -----
def colorValidator(value):
	"""
	Version 3+.

	A color is a comma-separated string of four numbers (int or float),
	each between 0 and 1 inclusive; whitespace around components is
	allowed.

	>>> colorValidator("0,0,0,0")
	True
	>>> colorValidator("1, 1, 1, 1")
	True
	>>> colorValidator(".5,.5,.5,.5")
	True
	>>> colorValidator("2,0,0,0")
	False
	>>> colorValidator("1,1,1,1a")
	False
	>>> colorValidator("1 1 1 1")
	False
	"""
	if not isinstance(value, str):
		return False
	components = value.split(",")
	if len(components) != 4:
		return False
	for component in components:
		component = component.strip()
		# accept ints first, then floats, mirroring plist number parsing
		number = None
		for convert in (int, float):
			try:
				number = convert(component)
			except ValueError:
				continue
			break
		if number is None:
			return False
		if not 0 <= number <= 1:
			return False
	return True
# -----
# image
# -----
# the 8-byte magic number that starts every PNG file
pngSignature = b"\x89PNG\r\n\x1a\n"

_imageDictPrototype = dict(
	fileName=(str, True),
	xScale=((int, float), False), xyScale=((int, float), False),
	yxScale=((int, float), False), yScale=((int, float), False),
	xOffset=((int, float), False), yOffset=((int, float), False),
	color=(str, False)
)

def imageValidator(value):
	"""
	Version 3+.
	"""
	if not genericDictValidator(value, _imageDictPrototype):
		return False
	# fileName must be one or more characters
	if not value["fileName"]:
		return False
	# color must follow the proper format when present
	color = value.get("color")
	return color is None or colorValidator(color)

def pngValidator(path=None, data=None, fileObj=None):
	"""
	Version 3+.

	This checks the signature of the image data.
	"""
	assert path is not None or data is not None or fileObj is not None
	if path is not None:
		with open(path, "rb") as f:
			signature = f.read(8)
	elif data is not None:
		signature = data[:8]
	else:
		# read from the current position, then restore it
		pos = fileObj.tell()
		signature = fileObj.read(8)
		fileObj.seek(pos)
	if signature != pngSignature:
		return False, "Image does not begin with the PNG signature."
	return True, None
# -------------------
# layercontents.plist
# -------------------
def layerContentsValidator(value, ufoPathOrFileSystem):
	"""
	Check the validity of layercontents.plist.
	Version 3+.

	Returns a (valid, errorMessage) tuple; errorMessage is None on success.
	"""
	if isinstance(ufoPathOrFileSystem, fs.base.FS):
		fileSystem = ufoPathOrFileSystem
	else:
		fileSystem = fs.osfs.OSFS(ufoPathOrFileSystem)
	# NOTE(review): message reads "in not in" -- typo preserved for compatibility
	bogusFileMessage = "layercontents.plist in not in the correct format."
	# file isn't in the right format
	if not isinstance(value, list):
		return False, bogusFileMessage
	# work through each entry
	usedLayerNames = set()
	usedDirectories = set()
	contents = {}
	for entry in value:
		# layer entry in the incorrect format
		if not isinstance(entry, list):
			return False, bogusFileMessage
		if not len(entry) == 2:
			return False, bogusFileMessage
		for i in entry:
			if not isinstance(i, str):
				return False, bogusFileMessage
		layerName, directoryName = entry
		# check directory naming: only "glyphs" or "glyphs.*" is allowed
		if directoryName != "glyphs":
			if not directoryName.startswith("glyphs."):
				return False, "Invalid directory name (%s) in layercontents.plist." % directoryName
		if len(layerName) == 0:
			return False, "Empty layer name in layercontents.plist."
		# directory doesn't exist
		if not fileSystem.exists(directoryName):
			return False, "A glyphset does not exist at %s." % directoryName
		# default layer name is reserved for the "glyphs" directory
		if layerName == "public.default" and directoryName != "glyphs":
			return False, "The name public.default is being used by a layer that is not the default."
		# check usage: layer names and directories must be unique
		if layerName in usedLayerNames:
			return False, "The layer name %s is used by more than one layer." % layerName
		usedLayerNames.add(layerName)
		if directoryName in usedDirectories:
			return False, "The directory %s is used by more than one layer." % directoryName
		usedDirectories.add(directoryName)
		# store
		contents[layerName] = directoryName
	# missing default layer
	foundDefault = "glyphs" in contents.values()
	if not foundDefault:
		return False, "The required default glyph set is not in the UFO."
	return True, None
# ------------
# groups.plist
# ------------
def groupsValidator(value):
	"""
	Check the validity of the groups.
	Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).

	Only kerning groups ("public.kern1.*" / "public.kern2.*") get
	membership validation: a glyph may occur in at most one group per
	kerning side.

	>>> groups = {"A" : ["A", "A"], "A2" : ["A"]}
	>>> groupsValidator(groups)
	(True, None)
	>>> groups = {"public.kern1.A" : ["A"], "public.kern2.A" : ["A"]}
	>>> groupsValidator(groups)
	(True, None)
	>>> valid, msg = groupsValidator({"" : ["A"]})
	>>> (valid, msg)
	(False, 'A group has an empty name.')
	>>> valid, msg = groupsValidator({"public.kern1." : ["A"]})
	>>> valid
	False
	>>> valid, msg = groupsValidator({"public.kern1.A1" : ["A"], "public.kern1.A2" : ["A"]})
	>>> valid
	False
	"""
	bogusFormatMessage = "The group data is not in the correct format."
	if not isDictEnough(value):
		return False, bogusFormatMessage
	firstSideMapping = {}
	secondSideMapping = {}
	for groupName, glyphList in value.items():
		if not isinstance(groupName, (str)):
			return False, bogusFormatMessage
		if not isinstance(glyphList, (list, tuple)):
			return False, bogusFormatMessage
		if not groupName:
			return False, "A group has an empty name."
		if not groupName.startswith("public."):
			# ordinary groups carry no membership restrictions
			continue
		if not groupName.startswith("public.kern1.") and not groupName.startswith("public.kern2."):
			# unknown public.* name. silently skip.
			continue
		if len("public.kernN.") == len(groupName):
			return False, "The group data contains a kerning group with an incomplete name."
		sideMapping = firstSideMapping if groupName.startswith("public.kern1.") else secondSideMapping
		for glyphName in glyphList:
			if not isinstance(glyphName, str):
				return False, "The group data %s contains an invalid member." % groupName
			if glyphName in sideMapping:
				return False, "The glyph \"%s\" occurs in too many kerning groups." % glyphName
			sideMapping[glyphName] = groupName
	return True, None
# -------------
# kerning.plist
# -------------
def kerningValidator(data):
	"""
	Check the validity of the kerning data structure.
	Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).

	Kerning is a two-level mapping: first glyph/group name -> (second
	glyph/group name -> numeric value).

	>>> kerning = {"A" : {"B" : 100}}
	>>> kerningValidator(kerning)
	(True, None)
	>>> valid, msg = kerningValidator({"A" : ["B"]})
	>>> valid
	False
	>>> valid, msg = kerningValidator({"A" : {"B" : "100"}})
	>>> valid
	False
	"""
	bogusFormatMessage = "The kerning data is not in the correct format."
	if not isinstance(data, Mapping):
		return False, bogusFormatMessage
	for first, secondDict in data.items():
		if not isinstance(first, str) or not isinstance(secondDict, Mapping):
			return False, bogusFormatMessage
		for second, kerningValue in secondDict.items():
			if not isinstance(second, str) or not isinstance(kerningValue, numberTypes):
				return False, bogusFormatMessage
	return True, None
# -------------
# lib.plist/lib
# -------------
_bogusLibFormatMessage = "The lib data is not in the correct format: %s"

def fontLibValidator(value):
	"""
	Check the validity of the lib.
	Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).

	Keys must be strings; "public.glyphOrder" additionally must be a
	list/tuple of glyph-name strings.

	>>> fontLibValidator({"foo" : "bar"})
	(True, None)
	>>> fontLibValidator({"public.glyphOrder" : ["A", "C", "B"]})
	(True, None)
	>>> valid, msg = fontLibValidator("hello")
	>>> valid
	False
	>>> valid, msg = fontLibValidator({1: "hello"})
	>>> print(msg)
	The lib key is not properly formatted: expected str, found int: 1
	>>> valid, msg = fontLibValidator({"public.glyphOrder" : ["A", 1, "B"]})
	>>> valid
	False
	"""
	if not isDictEnough(value):
		reason = "expected a dictionary, found %s" % type(value).__name__
		return False, _bogusLibFormatMessage % reason
	for key, libValue in value.items():
		if not isinstance(key, str):
			return False, (
				"The lib key is not properly formatted: expected str, found %s: %r" %
				(type(key).__name__, key))
		if key != "public.glyphOrder":
			continue
		# public.glyphOrder must be a sequence of glyph-name strings
		bogusGlyphOrderMessage = "public.glyphOrder is not properly formatted: %s"
		if not isinstance(libValue, (list, tuple)):
			reason = "expected list or tuple, found %s" % type(libValue).__name__
			return False, bogusGlyphOrderMessage % reason
		for glyphName in libValue:
			if not isinstance(glyphName, str):
				reason = "expected str, found %s" % type(glyphName).__name__
				return False, bogusGlyphOrderMessage % reason
	return True, None
# --------
# GLIF lib
# --------
def glyphLibValidator(value):
	"""
	Check the validity of the lib.
	Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).

	Keys must be strings; "public.markColor" additionally must be a valid
	UFO color string.

	>>> glyphLibValidator({"foo" : "bar"})
	(True, None)
	>>> glyphLibValidator({"public.markColor" : "1,0,0,0.5"})
	(True, None)
	>>> valid, msg = glyphLibValidator({"public.markColor" : 1})
	>>> print(msg)
	public.markColor is not properly formatted.
	"""
	if not isDictEnough(value):
		reason = "expected a dictionary, found %s" % type(value).__name__
		return False, _bogusLibFormatMessage % reason
	for key, libValue in value.items():
		if not isinstance(key, str):
			reason = "key (%s) should be a string" % key
			return False, _bogusLibFormatMessage % reason
		if key == "public.markColor" and not colorValidator(libValue):
			return False, "public.markColor is not properly formatted."
	return True, None
if __name__ == "__main__":
	import doctest
	# run the doctests embedded in the validator docstrings above
	doctest.testmod()
| mit | 872d7696a770c1e013b4e1ec0bf62038 | 23.617925 | 133 | 0.682353 | 3.190488 | false | false | false | false |
fonttools/fonttools | Snippets/layout-features.py | 3 | 1696 | #! /usr/bin/env python3
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otTables
import sys

# Command-line script: print the script/language/feature/lookup structure
# of the GSUB and GPOS tables of a font (binary font file or .ttx XML dump).
if len(sys.argv) != 2:
    print("usage: layout-features.py fontfile.ttf")
    sys.exit(1)
fontfile = sys.argv[1]
if fontfile.rsplit(".", 1)[-1] == "ttx":
    # a .ttx extension means an XML dump rather than a binary font
    font = TTFont()
    font.importXML(fontfile)
else:
    font = TTFont(fontfile)

for tag in ('GSUB', 'GPOS'):
    if not tag in font: continue
    print("Table:", tag)
    table = font[tag].table
    if not table.ScriptList or not table.FeatureList: continue
    featureRecords = table.FeatureList.FeatureRecord
    for script in table.ScriptList.ScriptRecord:
        print(" Script:", script.ScriptTag)
        if not script.Script:
            print (" Null script.")
            continue
        # The default language system has no LangSysRecord of its own;
        # synthesize one so it is listed alongside the named languages.
        languages = list(script.Script.LangSysRecord)
        if script.Script.DefaultLangSys:
            defaultlangsys = otTables.LangSysRecord()
            defaultlangsys.LangSysTag = "default"
            defaultlangsys.LangSys = script.Script.DefaultLangSys
            languages.insert(0, defaultlangsys)
        for langsys in languages:
            print(" Language:", langsys.LangSysTag)
            if not langsys.LangSys:
                print (" Null language.")
                continue
            features = [featureRecords[index] for index in langsys.LangSys.FeatureIndex]
            # 0xFFFF means "no required feature"; anything else indexes one.
            if langsys.LangSys.ReqFeatureIndex != 0xFFFF:
                record = featureRecords[langsys.LangSys.ReqFeatureIndex]
                requiredfeature = otTables.FeatureRecord()
                requiredfeature.FeatureTag = 'required(%s)' % record.FeatureTag
                requiredfeature.Feature = record.Feature
                features.insert(0, requiredfeature)
            for feature in features:
                print(" Feature:", feature.FeatureTag)
                lookups = feature.Feature.LookupListIndex
                print(" Lookups:", ','.join(str(l) for l in lookups))
| mit | ebe355a297a273ee6b7a8ad0f67213ee | 33.612245 | 79 | 0.723467 | 3.249042 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ufoLib/plistlib.py | 3 | 1498 | """DEPRECATED - This module is kept here only as a backward compatibility shim
for the old ufoLib.plistlib module, which was moved to fontTools.misc.plistlib.
Please use the latter instead.
"""
from fontTools.misc.plistlib import dump, dumps, load, loads
from fontTools.misc.textTools import tobytes
# The following functions were part of the old py2-like ufoLib.plistlib API.
# They are kept only for backward compatiblity.
from fontTools.ufoLib.utils import deprecated
@deprecated("Use 'fontTools.misc.plistlib.load' instead")
def readPlist(path_or_file):
    """Read a plist from a file path or an open (binary) file object."""
    file_obj = path_or_file
    opened_here = isinstance(path_or_file, str)
    if opened_here:
        file_obj = open(path_or_file, "rb")
    try:
        return load(file_obj, use_builtin_types=False)
    finally:
        # only close what we opened ourselves
        if opened_here:
            file_obj.close()
@deprecated("Use 'fontTools.misc.plistlib.dump' instead")
def writePlist(value, path_or_file):
    """Write *value* as a plist to a file path or an open (binary) file object."""
    file_obj = path_or_file
    opened_here = isinstance(path_or_file, str)
    if opened_here:
        file_obj = open(path_or_file, "wb")
    try:
        dump(value, file_obj, use_builtin_types=False)
    finally:
        # only close what we opened ourselves
        if opened_here:
            file_obj.close()
@deprecated("Use 'fontTools.misc.plistlib.loads' instead")
def readPlistFromString(data):
    """Parse a plist from a (unicode or bytes) string."""
    encoded = tobytes(data, encoding="utf-8")
    return loads(encoded, use_builtin_types=False)
@deprecated("Use 'fontTools.misc.plistlib.dumps' instead")
def writePlistToString(value):
    """Serialize *value* to a plist string."""
    serialized = dumps(value, use_builtin_types=False)
    return serialized
| mit | 5ceebcaafcbb9bc826836a964a883b72 | 31.565217 | 79 | 0.699599 | 3.404545 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/tables/F__e_a_t.py | 3 | 4996 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
from . import DefaultTable
from . import grUtils
import struct
Feat_hdr_format='''
>
version: 16.16F
'''
class table_F__e_a_t(DefaultTable.DefaultTable):
    """The ``Feat`` table is used exclusively by the Graphite shaping engine
    to store features and possible settings specified in GDL. Graphite features
    determine what rules are applied to transform a glyph stream.
    Not to be confused with ``feat``, or the OpenType Layout tables
    ``GSUB``/``GPOS``."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # maps 4-char feature tag -> Feature object
        self.features = {}

    def decompile(self, data, ttFont):
        (_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        numFeats, = struct.unpack('>H', data[:2])
        data = data[8:]
        allfeats = []
        maxsetting = 0
        for i in range(numFeats):
            if self.version >= 2.0:
                # version 2+ feature records are 16 bytes with a 32-bit id
                (fid, nums, _, offset, flags, lid) = struct.unpack(">LHHLHH",
                    data[16*i:16*(i+1)])
                # convert byte offset into the settings array into an index
                offset = int((offset - 12 - 16 * numFeats) / 4)
            else:
                # version 1 feature records are 12 bytes with a 16-bit id
                (fid, nums, offset, flags, lid) = struct.unpack(">HHLHH",
                    data[12*i:12*(i+1)])
                offset = int((offset - 12 - 12 * numFeats) / 4)
            allfeats.append((fid, nums, offset, flags, lid))
            maxsetting = max(maxsetting, offset + nums)
        data = data[16*numFeats:]
        allsettings = []
        for i in range(maxsetting):
            if len(data) >= 4 * (i + 1):
                (val, lid) = struct.unpack(">HH", data[4*i:4*(i+1)])
                allsettings.append((val, lid))
        for i,f in enumerate(allfeats):
            (fid, nums, offset, flags, lid) = f
            fobj = Feature()
            fobj.flags = flags
            fobj.label = lid
            self.features[grUtils.num2tag(fid)] = fobj
            fobj.settings = {}
            fobj.default = None
            fobj.index = i
            for i in range(offset, offset + nums):
                if i >= len(allsettings): continue
                (vid, vlid) = allsettings[i]
                fobj.settings[vid] = vlid
                # the first setting listed for a feature acts as its default
                if fobj.default is None:
                    fobj.default = vid

    def compile(self, ttFont):
        fdat = b""
        vdat = b""
        offset = 0
        for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
            fnum = grUtils.tag2num(f)
            if self.version >= 2.0:
                fdat += struct.pack(">LHHLHH", grUtils.tag2num(f), len(v.settings),
                    0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label)
            elif fnum > 65535: # self healing for alphabetic ids
                # alphabetic feature ids do not fit in 16 bits; upgrade the
                # table version and redo the whole compile
                self.version = 2.0
                return self.compile(ttFont)
            else:
                fdat += struct.pack(">HHLHH", grUtils.tag2num(f), len(v.settings),
                    offset * 4 + 12 + 12 * len(self.features), v.flags, v.label)
            # write the default setting first, remaining ones by (value, label)
            for s, l in sorted(v.settings.items(), key=lambda x:(-1, x[1]) if x[0] == v.default else x):
                vdat += struct.pack(">HH", s, l)
            offset += len(v.settings)
        hdr = sstruct.pack(Feat_hdr_format, self)
        return hdr + struct.pack('>HHL', len(self.features), 0, 0) + fdat + vdat

    def toXML(self, writer, ttFont):
        writer.simpletag('version', version=self.version)
        writer.newline()
        for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
            writer.begintag('feature', fid=f, label=v.label, flags=v.flags,
                default=(v.default if v.default else 0))
            writer.newline()
            for s, l in sorted(v.settings.items()):
                writer.simpletag('setting', value=s, label=l)
                writer.newline()
            writer.endtag('feature')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'version':
            self.version = float(safeEval(attrs['version']))
        elif name == 'feature':
            fid = attrs['fid']
            fobj = Feature()
            fobj.flags = int(safeEval(attrs['flags']))
            fobj.label = int(safeEval(attrs['label']))
            fobj.default = int(safeEval(attrs.get('default','0')))
            fobj.index = len(self.features)
            self.features[fid] = fobj
            fobj.settings = {}
            for element in content:
                if not isinstance(element, tuple): continue
                tag, a, c = element
                if tag == 'setting':
                    fobj.settings[int(safeEval(a['value']))] = int(safeEval(a['label']))
class Feature(object):
    """Plain record object for a single Graphite feature; attributes
    (``flags``, ``label``, ``settings``, ``default``, ``index``) are
    assigned dynamically by ``table_F__e_a_t``."""
| mit | 22e4d2f4e922d8130ddcd5d25dcfdf5a | 40.983193 | 104 | 0.527222 | 3.796353 | false | false | false | false |
fonttools/fonttools | Tests/ttLib/removeOverlaps_test.py | 3 | 1714 | import logging
import pytest
pathops = pytest.importorskip("pathops")
from fontTools.ttLib.removeOverlaps import _simplify, _round_path
def test_pathops_simplify_bug_workaround(caplog):
    # Paths extracted from Noto Sans Ethiopic instance that fails skia-pathops
    # https://github.com/google/fonts/issues/3365
    # https://bugs.chromium.org/p/skia/issues/detail?id=11958
    path = pathops.Path()
    path.moveTo(550.461, 0)
    path.lineTo(550.461, 366.308)
    path.lineTo(713.229, 366.308)
    path.lineTo(713.229, 0)
    path.close()
    path.moveTo(574.46, 0)
    path.lineTo(574.46, 276.231)
    path.lineTo(737.768, 276.231)
    path.quadTo(820.075, 276.231, 859.806, 242.654)
    path.quadTo(899.537, 209.077, 899.537, 144.154)
    path.quadTo(899.537, 79, 853.46, 39.5)
    path.quadTo(807.383, 0, 712.383, 0)
    path.close()
    # check that it fails without workaround
    with pytest.raises(pathops.PathOpsError):
        pathops.simplify(path)
    # check our workaround works (but with a warning)
    with caplog.at_level(logging.DEBUG, logger="fontTools.ttLib.removeOverlaps"):
        result = _simplify(path, debugGlyphName="a")
    assert "skia-pathops failed to simplify 'a' with float coordinates" in caplog.text
    # NOTE(review): the expected output has mostly-integer coordinates — the
    # workaround appears to retry simplification with rounded coordinates.
    expected = pathops.Path()
    expected.moveTo(550, 0)
    expected.lineTo(550, 366)
    expected.lineTo(713, 366)
    expected.lineTo(713, 276)
    expected.lineTo(738, 276)
    expected.quadTo(820, 276, 860, 243)
    expected.quadTo(900, 209, 900, 144)
    expected.quadTo(900, 79, 853, 40)
    expected.quadTo(807.242, 0.211, 713, 0.001)
    expected.lineTo(713, 0)
    expected.close()
    # compare after rounding the result to 3 decimals to sidestep float noise
    assert expected == _round_path(result, round=lambda v: round(v, 3))
| mit | 55ddc12d8b68867ffb113bce9f2a0d33 | 32.607843 | 86 | 0.683781 | 2.875839 | false | true | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/tables/_h_e_a_d.py | 3 | 3965 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
log = logging.getLogger(__name__)
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
    """Font header table: global font metadata (version, bbox, timestamps)."""

    # tables whose recalculation feeds values used here
    dependencies = ['maxp', 'loca', 'CFF ', 'CFF2']

    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(headFormat, data, self)
        if rest:
            # this is quite illegal, but there seem to be fonts out there that do this
            log.warning("extra bytes at the end of 'head' table")
            assert rest == b"\0\0"

        # For timestamp fields, ignore the top four bytes. Some fonts have
        # bogus values there. Since till 2038 those bytes only can be zero,
        # ignore them.
        #
        # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
        for stamp in 'created', 'modified':
            value = getattr(self, stamp)
            if value > 0xFFFFFFFF:
                log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
                value &= 0xFFFFFFFF
                setattr(self, stamp, value)
            if value < 0x7C259DC0: # January 1, 1970 00:00:00
                # head timestamps count from the Mac epoch (1904); a value
                # below the 1970 mark is treated as a unix timestamp and shifted
                log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp)
                value += 0x7C259DC0
                setattr(self, stamp, value)

    def compile(self, ttFont):
        if ttFont.recalcBBoxes:
            # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
            if 'CFF ' in ttFont:
                topDict = ttFont['CFF '].cff.topDictIndex[0]
                self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
            elif 'CFF2' in ttFont:
                # CFF2 has no FontBBox; compute it as the union of all
                # charstring bounds
                topDict = ttFont['CFF2'].cff.topDictIndex[0]
                charStrings = topDict.CharStrings
                fontBBox = None
                for charString in charStrings.values():
                    bounds = charString.calcBounds(charStrings)
                    if bounds is not None:
                        if fontBBox is not None:
                            fontBBox = unionRect(fontBBox, bounds)
                        else:
                            fontBBox = bounds
                if fontBBox is not None:
                    self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
        if ttFont.recalcTimestamp:
            self.modified = timestampNow()
        data = sstruct.pack(headFormat, self)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        _, names, fixes = sstruct.getformat(headFormat)
        for name in names:
            value = getattr(self, name)
            if name in fixes:
                # fixed-point fields are written as decimal strings
                value = floatToFixedToStr(value, precisionBits=fixes[name])
            elif name in ("created", "modified"):
                value = timestampToString(value)
            elif name in ("magicNumber", "checkSumAdjustment"):
                if value < 0:
                    value = value + 0x100000000
                value = hex(value)
                if value[-1:] == "L":
                    value = value[:-1]
            elif name in ("macStyle", "flags"):
                # bitfields are dumped as binary strings for readability
                value = num2binary(value, 16)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        fixes = sstruct.getformat(headFormat)[2]
        if name in fixes:
            value = strToFixedToFloat(value, precisionBits=fixes[name])
        elif name in ("created", "modified"):
            value = timestampFromString(value)
        elif name in ("macStyle", "flags"):
            value = binary2num(value)
        else:
            # safeEval handles ints and the hex strings written by toXML
            value = safeEval(value)
        setattr(self, name, value)
| mit | 3623da5904a786216bd52ec04edd0c57 | 32.888889 | 89 | 0.67541 | 3.136867 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/merge/layout.py | 3 | 13314 | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
from fontTools import ttLib
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.ttLib.tables import otTables
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.util import *
import logging
log = logging.getLogger("fontTools.merge")
def mergeLookupLists(lst):
    # TODO Do smarter merge.
    # For now simply concatenate the per-font lookup index lists.
    return sumLists(lst)
def mergeFeatures(lst):
    """Merge several otTables.Feature objects into a single new Feature."""
    assert lst
    merged = otTables.Feature()
    merged.FeatureParams = None
    indexLists = [l.LookupListIndex for l in lst if l.LookupListIndex]
    merged.LookupListIndex = mergeLookupLists(indexLists)
    merged.LookupCount = len(merged.LookupListIndex)
    return merged
def mergeFeatureLists(lst):
    """Group FeatureRecords by feature tag and merge each group."""
    byTag = {}
    for featureList in lst:
        for record in featureList:
            byTag.setdefault(record.FeatureTag, []).append(record.Feature)
    merged = []
    # emit records in deterministic (sorted-by-tag) order
    for tag in sorted(byTag):
        rec = otTables.FeatureRecord()
        rec.FeatureTag = tag
        rec.Feature = mergeFeatures(byTag[tag])
        merged.append(rec)
    return merged
def mergeLangSyses(lst):
    """Merge several LangSys tables; their feature lists are merged per tag."""
    assert lst

    # TODO Support merging ReqFeatureIndex
    assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)

    merged = otTables.LangSys()
    merged.LookupOrder = None
    merged.ReqFeatureIndex = 0xFFFF
    featureLists = [l.FeatureIndex for l in lst if l.FeatureIndex]
    merged.FeatureIndex = mergeFeatureLists(featureLists)
    merged.FeatureCount = len(merged.FeatureIndex)
    return merged
def mergeScripts(lst):
    """Merge several Script tables; language systems with the same tag are merged."""
    assert lst
    if len(lst) == 1:
        return lst[0]

    # group LangSys tables by their language tag
    langSyses = {}
    for scriptTable in lst:
        for lsr in scriptTable.LangSysRecord:
            langSyses.setdefault(lsr.LangSysTag, []).append(lsr.LangSys)
    lsrecords = []
    for tag, langSys_list in sorted(langSyses.items()):
        lsr = otTables.LangSysRecord()
        lsr.LangSys = mergeLangSyses(langSys_list)
        lsr.LangSysTag = tag
        lsrecords.append(lsr)

    merged = otTables.Script()
    merged.LangSysRecord = lsrecords
    merged.LangSysCount = len(lsrecords)
    # the default language systems are merged separately
    dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
    if dfltLangSyses:
        merged.DefaultLangSys = mergeLangSyses(dfltLangSyses)
    else:
        merged.DefaultLangSys = None
    return merged
def mergeScriptRecords(lst):
    """Merge lists of ScriptRecords, grouping them by script tag."""
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.ScriptTag, []).append(record.Script)
    merged = []
    # emit records in deterministic (sorted-by-tag) order
    for tag in sorted(byTag):
        rec = otTables.ScriptRecord()
        rec.ScriptTag = tag
        rec.Script = mergeScripts(byTag[tag])
        merged.append(rec)
    return merged
# Per-field merge strategies for the OpenType layout structures. Each
# mergeMap maps an attribute name to the function that combines the values
# of that attribute across the fonts being merged ('*' is the catch-all).
otTables.ScriptList.mergeMap = {
    'ScriptCount': lambda lst: None, # TODO
    'ScriptRecord': mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    'BaseScriptCount': lambda lst: None, # TODO
    # TODO: Merge duplicate entries
    'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
}

otTables.FeatureList.mergeMap = {
    'FeatureCount': sum,
    'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    'LookupCount': sum,
    'Lookup': sumLists,
}

otTables.Coverage.mergeMap = {
    'Format': min,
    'glyphs': sumLists,
}

otTables.ClassDef.mergeMap = {
    'Format': min,
    'classDefs': sumDicts,
}

otTables.LigCaretList.mergeMap = {
    'Coverage': mergeObjects,
    'LigGlyphCount': sum,
    'LigGlyph': sumLists,
}

otTables.AttachList.mergeMap = {
    'Coverage': mergeObjects,
    'GlyphCount': sum,
    'AttachPoint': sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    'MarkSetTableFormat': equal,
    'MarkSetCount': sum,
    'Coverage': sumLists,
}

otTables.Axis.mergeMap = {
    '*': mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    'BaseTagCount': sum,
    'BaselineTag': sumLists,
}

# top-level layout tables: merge every sub-structure recursively and keep
# the highest version number seen
otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
    '*': mergeObjects,
    'Version': max,
}

ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
    'tableTag': onlyExisting(equal), # XXX clean me up
    'table': mergeObjects,
}
@add_method(ttLib.getTableClass('GSUB'))
def merge(self, m, tables):
    """Merge GSUB tables. Glyphs that had to be renamed to resolve duplicates
    are mapped back via a synthesized 'locl' feature with a SingleSubst lookup,
    added to every script/language system of the affected font's GSUB."""
    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups: continue
        if table is None or table is NotImplemented:
            log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
            continue

        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == 'DFLT': continue # XXX
            for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
                if langsys is None: continue # XXX Create!
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    # no 'locl' feature yet: create it once and register it
                    # with this language system
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = 'locl'
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                        table.table.FeatureList.FeatureRecord.append(synthFeature)
                        table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature
                    langsys.FeatureIndex.append(feature)
                    langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)

                # build the substitution lookup lazily, the first time we
                # actually need it
                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1

                # prepend the synthesized lookup unless it is already first
                if feature.Feature.LookupListIndex[:1] != [synthLookup]:
                    feature.Feature.LookupListIndex[:0] = [synthLookup]
                    feature.Feature.LookupCount += 1

    DefaultTable.merge(self, m, tables)
    return self
@add_method(otTables.SingleSubst,
            otTables.MultipleSubst,
            otTables.AlternateSubst,
            otTables.LigatureSubst,
            otTables.ReverseChainSingleSubst,
            otTables.SinglePos,
            otTables.PairPos,
            otTables.CursivePos,
            otTables.MarkBasePos,
            otTables.MarkLigPos,
            otTables.MarkMarkPos)
def mapLookups(self, lookupMap):
    # These subtable types contain no nested lookup references, so
    # remapping lookup indices is a no-op for them.
    pass
# Copied and trimmed down from subset.py
@add_method(otTables.ContextSubst,
            otTables.ChainContextSubst,
            otTables.ContextPos,
            otTables.ChainContextPos)
def __merge_classify_context(self):
    # Returns a (cached) helper that knows the attribute names used by this
    # particular contextual subtable class/format combination.

    class ContextHelper(object):
        def __init__(self, klass, Format):
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
            else:
                Chain = ''
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleSet = ChainTyp+'RuleSet'
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleSet = ChainTyp+'ClassSet'

    if self.Format not in [1, 2, 3]:
        return None # Don't shoot the messenger; let it go

    # cache helpers per class and format, so they are built only once
    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]
@add_method(otTables.ContextSubst,
            otTables.ChainContextSubst,
            otTables.ContextPos,
            otTables.ChainContextPos)
def mapLookups(self, lookupMap):
    # Remap nested lookup references in contextual subtables. Formats 1 and
    # 2 keep them in rule sets; format 3 keeps them directly on the subtable.
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format
@add_method(otTables.ExtensionSubst,
            otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    # Extension subtables only wrap another subtable; delegate to it.
    if self.Format == 1:
        self.ExtSubTable.mapLookups(lookupMap)
    else:
        assert 0, "unknown format: %s" % self.Format

@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    # Remap lookup references in every subtable of this lookup.
    for st in self.SubTable:
        if not st: continue
        st.mapLookups(lookupMap)

@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    # Remap lookup references throughout the whole lookup list.
    for l in self.Lookup:
        if not l: continue
        l.mapLookups(lookupMap)
@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
    # 0x0010 is the USE_MARK_FILTERING_SET bit of LookupFlag (OpenType spec);
    # only then does the lookup carry a MarkFilteringSet to remap.
    if self.LookupFlag & 0x0010:
        self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]

@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
    for l in self.Lookup:
        if not l: continue
        l.mapMarkFilteringSets(markFilteringSetMap)

@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]

@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    for f in self.FeatureRecord:
        if not f or not f.Feature: continue
        f.Feature.mapLookups(lookupMap)

@add_method(otTables.DefaultLangSys,
            otTables.LangSys)
def mapFeatures(self, featureMap):
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    # 65535 (0xFFFF) means "no required feature"
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]

@add_method(otTables.Script)
def mapFeatures(self, featureMap):
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for l in self.LangSysRecord:
        if not l or not l.LangSys: continue
        l.LangSys.mapFeatures(featureMap)

@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    for s in self.ScriptRecord:
        if not s or not s.Script: continue
        s.Script.mapFeatures(featureMap)
def layoutPreMerge(font):
    """Replace index-based cross-references in GSUB/GPOS with direct object
    references, so structures can be merged by object identity."""
    # Map indices to references
    GDEF = font.get('GDEF')
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')

    for t in [GSUB, GPOS]:
        if not t: continue

        if t.table.LookupList:
            lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)

            # mark glyph sets exist only from GDEF version 1.2 on
            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

        if t.table.FeatureList and t.table.ScriptList:
            featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)

    # TODO FeatureParams nameIDs
def layoutPostMerge(font):
    """Inverse of layoutPreMerge: turn object references back into indices,
    pruning features and lookups that are no longer referenced."""
    # Map references back to indices
    GDEF = font.get('GDEF')
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')

    for t in [GSUB, GPOS]:
        if not t: continue

        if t.table.FeatureList and t.table.ScriptList:

            # Collect unregistered (new) features.
            featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            # Record used features.
            featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s

            # Remove unused features
            t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]

            # Map back to indices.
            featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

        if t.table.LookupList:

            # Collect unregistered (new) lookups.
            lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            # Record used lookups.
            lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s

            # Remove unused lookups
            t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]

            # Map back to indices.
            lookupMap = NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

            # mark glyph sets exist only from GDEF version 1.2 on
            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

    # TODO FeatureParams nameIDs
| mit | 6253b945582bab9d531bb768b9f1147a | 27.570815 | 141 | 0.726754 | 2.975861 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/tables/_c_v_a_r.py | 3 | 3299 | from . import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytesjoin
from fontTools.ttLib.tables.TupleVariation import \
compileTupleVariationStore, decompileTupleVariationStore, TupleVariation
# https://www.microsoft.com/typography/otspec/cvar.htm
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html
CVAR_HEADER_FORMAT = """
> # big endian
majorVersion: H
minorVersion: H
tupleVariationCount: H
offsetToData: H
"""
CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT)
class table__c_v_a_r(DefaultTable.DefaultTable):
    """Variations on the values of the 'cvt ' table, for variable fonts."""

    dependencies = ["cvt ", "fvar"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.majorVersion, self.minorVersion = 1, 0
        # list of TupleVariation objects over the 'cvt ' values
        self.variations = []

    def compile(self, ttFont, useSharedPoints=False):
        # only variations that actually change something are stored
        tupleVariationCount, tuples, data = compileTupleVariationStore(
            variations=[v for v in self.variations if v.hasImpact()],
            pointCount=len(ttFont["cvt "].values),
            axisTags=[axis.axisTag for axis in ttFont["fvar"].axes],
            sharedTupleIndices={},
            useSharedPoints=useSharedPoints)
        header = {
            "majorVersion": self.majorVersion,
            "minorVersion": self.minorVersion,
            "tupleVariationCount": tupleVariationCount,
            "offsetToData": CVAR_HEADER_SIZE + len(tuples),
        }
        return b''.join([
            sstruct.pack(CVAR_HEADER_FORMAT, header),
            tuples,
            data
        ])

    def decompile(self, data, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        header = {}
        sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header)
        self.majorVersion = header["majorVersion"]
        self.minorVersion = header["minorVersion"]
        # only major version 1 is supported
        assert self.majorVersion == 1, self.majorVersion
        self.variations = decompileTupleVariationStore(
            tableTag=self.tableTag, axisTags=axisTags,
            tupleVariationCount=header["tupleVariationCount"],
            pointCount=len(ttFont["cvt "].values), sharedTuples=None,
            data=data, pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"])

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.majorVersion = int(attrs.get("major", "1"))
            self.minorVersion = int(attrs.get("minor", "0"))
        elif name == "tuple":
            # one delta slot per 'cvt ' value; None means "no change"
            valueCount = len(ttFont["cvt "].values)
            var = TupleVariation({}, [None] * valueCount)
            self.variations.append(var)
            for tupleElement in content:
                if isinstance(tupleElement, tuple):
                    tupleName, tupleAttrs, tupleContent = tupleElement
                    var.fromXML(tupleName, tupleAttrs, tupleContent)

    def toXML(self, writer, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        writer.simpletag("version",
            major=self.majorVersion, minor=self.minorVersion)
        writer.newline()
        for var in self.variations:
            var.toXML(writer, axisTags)
| mit | d76de193ddd5412d952c2c27e87689ec | 39.231707 | 81 | 0.633222 | 3.840512 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/designspaceLib/__init__.py | 1 | 120195 | from __future__ import annotations
import collections
import copy
import itertools
import math
import os
import posixpath
from io import BytesIO, StringIO
from textwrap import indent
from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union, cast
from fontTools.misc import etree as ET
from fontTools.misc import plistlib
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.textTools import tobytes, tostr
"""
designSpaceDocument
- read and write designspace files
"""
__all__ = [
'AxisDescriptor',
'AxisLabelDescriptor',
'BaseDocReader',
'BaseDocWriter',
'DesignSpaceDocument',
'DesignSpaceDocumentError',
'DiscreteAxisDescriptor',
'InstanceDescriptor',
'LocationLabelDescriptor',
'RangeAxisSubsetDescriptor',
'RuleDescriptor',
'SourceDescriptor',
'ValueAxisSubsetDescriptor',
'VariableFontDescriptor',
]
# ElementTree allows to find namespace-prefixed elements, but not attributes
# so we have to do it ourselves for 'xml:lang'
XML_NS = "{http://www.w3.org/XML/1998/namespace}"
XML_LANG = XML_NS + "lang"
def posix(path):
    """Normalize paths using forward slash to work also on Windows."""
    parts = path.split(os.path.sep)
    new_path = posixpath.join(*parts)
    if path.startswith('/'):
        # the join above loses the leading slash of absolute paths
        return '/' + new_path
    if path.startswith(r'\\'):
        # the join above loses the leading slashes of UNC path mounts
        return '//' + new_path
    return new_path


def posixpath_property(private_name):
    """Generate a property that stores a path, normalized to forward slashes."""

    def getter(self):
        # plain attribute read of the backing slot
        return getattr(self, private_name)

    def setter(self, value):
        # normalize to forward slashes before storing
        if value is not None:
            value = posix(value)
        setattr(self, private_name, value)

    return property(getter, setter)
class DesignSpaceDocumentError(Exception):
    """Error raised while reading or processing a designspace document."""

    def __init__(self, msg, obj=None):
        self.msg = msg
        self.obj = obj

    def __str__(self):
        # append the offending object, if any, to the message
        if self.obj is not None:
            return str(self.msg) + ": %r" % self.obj
        return str(self.msg)
class AsDictMixin(object):
    """Mixin that recursively converts public instance attributes to a dict."""

    def asdict(self):
        result = {}
        for name, val in self.__dict__.items():
            if name.startswith("_"):
                # private attributes are not serialized
                continue
            if hasattr(val, "asdict"):
                val = val.asdict()
            elif isinstance(val, list):
                # convert list items that themselves support asdict()
                val = [
                    item.asdict() if hasattr(item, "asdict") else item
                    for item in val
                ]
            result[name] = val
        return result
class SimpleDescriptor(AsDictMixin):
    """ Containers for a bunch of attributes"""

    # XXX this is ugly. The 'print' is inappropriate here; arguably this
    # should return True/False instead of printing mismatches.
    def compare(self, other):
        """Print every attribute (from ``self._attrs``) whose value differs
        between *self* and *other*.

        Uses a plain ``!=`` comparison rather than ``assert`` so the check
        still runs when Python is invoked with ``-O`` (which strips asserts).
        """
        # test if this object contains the same data as the other
        for attr in self._attrs:
            if getattr(self, attr) != getattr(other, attr):
                print("failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr))

    def __repr__(self):
        attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
        attrs = indent('\n'.join(attrs), ' ')
        return f"{self.__class__.__name__}(\n{attrs}\n)"
class SourceDescriptor(SimpleDescriptor):
    """Simple container for data related to the source
    .. code:: python
        doc = DesignSpaceDocument()
        s1 = SourceDescriptor()
        s1.path = masterPath1
        s1.name = "master.ufo1"
        s1.font = defcon.Font("master.ufo1")
        s1.location = dict(weight=0)
        s1.familyName = "MasterFamilyName"
        s1.styleName = "MasterStyleNameOne"
        s1.localisedFamilyName = dict(fr="Caractère")
        s1.mutedGlyphNames.append("A")
        s1.mutedGlyphNames.append("Z")
        doc.addSource(s1)
    """
    flavor = "source"
    # Attributes taking part in compare()/__repr__ (see SimpleDescriptor).
    # NOTE(review): `copyInfo` is accepted by __init__ below and stored, but
    # is not listed here, so it is invisible to compare()/__repr__ — confirm
    # this is intentional for the deprecated copy* fields.
    _attrs = ['filename', 'path', 'name', 'layerName',
              'location', 'copyLib',
              'copyGroups', 'copyFeatures',
              'muteKerning', 'muteInfo',
              'mutedGlyphNames',
              'familyName', 'styleName', 'localisedFamilyName']
    # Both stored with POSIX separators via posixpath_property (defined above).
    filename = posixpath_property("_filename")
    path = posixpath_property("_path")
    def __init__(
        self,
        *,
        filename=None,
        path=None,
        font=None,
        name=None,
        location=None,
        designLocation=None,
        layerName=None,
        familyName=None,
        styleName=None,
        localisedFamilyName=None,
        copyLib=False,
        copyInfo=False,
        copyGroups=False,
        copyFeatures=False,
        muteKerning=False,
        muteInfo=False,
        mutedGlyphNames=None,
    ):
        self.filename = filename
        """string. A relative path to the source file, **as it is in the document**.
        MutatorMath + VarLib.
        """
        self.path = path
        """The absolute path, calculated from filename."""
        self.font = font
        """Any Python object. Optional. Points to a representation of this
        source font that is loaded in memory, as a Python object (e.g. a
        ``defcon.Font`` or a ``fontTools.ttFont.TTFont``).
        The default document reader will not fill-in this attribute, and the
        default writer will not use this attribute. It is up to the user of
        ``designspaceLib`` to either load the resource identified by
        ``filename`` and store it in this field, or write the contents of
        this field to the disk and make ```filename`` point to that.
        """
        self.name = name
        """string. Optional. Unique identifier name for this source.
        MutatorMath + Varlib.
        """
        # An explicit `designLocation` wins over the deprecated `location`
        # alias; both absent yields an empty dict.
        self.designLocation = designLocation if designLocation is not None else location or {}
        """dict. Axis values for this source, in design space coordinates.
        MutatorMath + Varlib.
        This may be only part of the full design location.
        See :meth:`getFullDesignLocation()`
        .. versionadded:: 5.0
        """
        self.layerName = layerName
        """string. The name of the layer in the source to look for
        outline data. Default ``None`` which means ``foreground``.
        """
        self.familyName = familyName
        """string. Family name of this source. Though this data
        can be extracted from the font, it can be efficient to have it right
        here.
        Varlib.
        """
        self.styleName = styleName
        """string. Style name of this source. Though this data
        can be extracted from the font, it can be efficient to have it right
        here.
        Varlib.
        """
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name strings, keyed by
        language code.
        If present, will be used to build localized names for all instances.
        .. versionadded:: 5.0
        """
        self.copyLib = copyLib
        """bool. Indicates if the contents of the font.lib need to
        be copied to the instances.
        MutatorMath.
        .. deprecated:: 5.0
        """
        self.copyInfo = copyInfo
        """bool. Indicates if the non-interpolating font.info needs
        to be copied to the instances.
        MutatorMath.
        .. deprecated:: 5.0
        """
        self.copyGroups = copyGroups
        """bool. Indicates if the groups need to be copied to the
        instances.
        MutatorMath.
        .. deprecated:: 5.0
        """
        self.copyFeatures = copyFeatures
        """bool. Indicates if the feature text needs to be
        copied to the instances.
        MutatorMath.
        .. deprecated:: 5.0
        """
        self.muteKerning = muteKerning
        """bool. Indicates if the kerning data from this source
        needs to be muted (i.e. not be part of the calculations).
        MutatorMath only.
        """
        self.muteInfo = muteInfo
        """bool. Indicated if the interpolating font.info data for
        this source needs to be muted.
        MutatorMath only.
        """
        self.mutedGlyphNames = mutedGlyphNames or []
        """list. Glyphnames that need to be muted in the
        instances.
        MutatorMath only.
        """
    @property
    def location(self):
        """dict. Axis values for this source, in design space coordinates.
        MutatorMath + Varlib.
        .. deprecated:: 5.0
            Use the more explicit alias for this property :attr:`designLocation`.
        """
        return self.designLocation
    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}
    def setFamilyName(self, familyName, languageCode="en"):
        """Setter for :attr:`localisedFamilyName`
        .. versionadded:: 5.0
        """
        self.localisedFamilyName[languageCode] = tostr(familyName)
    def getFamilyName(self, languageCode="en"):
        """Getter for :attr:`localisedFamilyName`
        .. versionadded:: 5.0
        """
        return self.localisedFamilyName.get(languageCode)
    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this source, from its
        :attr:`designLocation` and the document's axis defaults.
        .. versionadded:: 5.0
        """
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            else:
                # Axis not mentioned: fall back to the axis default, mapped
                # from user to design coordinates.
                result[axis.name] = axis.map_forward(axis.default)
        return result
class RuleDescriptor(SimpleDescriptor):
    """Represents the rule descriptor element: a set of glyph substitutions to
    trigger conditionally in some parts of the designspace.
    .. code:: python
        r1 = RuleDescriptor()
        r1.name = "unique.rule.name"
        r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
        r1.conditionSets.append([dict(...), dict(...)])
        r1.subs.append(("a", "a.alt"))
    .. code:: xml
        <!-- optional: list of substitution rules -->
        <rules>
            <rule name="vertical.bars">
                <conditionset>
                    <condition minimum="250.000000" maximum="750.000000" name="weight"/>
                    <condition minimum="100" name="width"/>
                    <condition minimum="10" maximum="40" name="optical"/>
                </conditionset>
                <sub name="cent" with="cent.alt"/>
                <sub name="dollar" with="dollar.alt"/>
            </rule>
        </rules>
    """
    _attrs = ['name', 'conditionSets', 'subs']  # what do we need here
    def __init__(self, *, name=None, conditionSets=None, subs=None):
        self.name = name
        """string. Unique name for this rule. Can be used to reference this rule data."""
        # list of lists of dict(name='aaaa', minimum=0, maximum=1000)
        # Semantics (see evaluateRule below): condition sets are OR-ed
        # together; conditions within one set are AND-ed.
        self.conditionSets = conditionSets or []
        """a list of conditionsets.
        - Each conditionset is a list of conditions.
        - Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
        """
        # list of substitutions stored as tuples of glyphnames ("a", "a.alt")
        self.subs = subs or []
        """list of substitutions.
        - Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
        - Note: By default, rules are applied first, before other text
          shaping/OpenType layout, as they are part of the
          `Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
        See ref:`rules-element` § Attributes.
        """
def evaluateRule(rule, location):
    """Return True if any of the rule's conditionsets matches the given location."""
    for conditionSet in rule.conditionSets:
        if evaluateConditions(conditionSet, location):
            return True
    return False
def evaluateConditions(conditions, location):
    """Return True if all the conditions matches the given location.
    - If a condition has no minimum, check for < maximum.
    - If a condition has no maximum, check for > minimum.
    """
    for condition in conditions:
        value = location[condition['name']]
        hasMinimum = condition.get('minimum') is not None
        hasMaximum = condition.get('maximum') is not None
        if not hasMinimum:
            # Open-ended on the low side: only the maximum bounds the value.
            if value > condition['maximum']:
                return False
        elif not hasMaximum:
            # Open-ended on the high side: only the minimum bounds the value.
            if value < condition['minimum']:
                return False
        elif not (condition['minimum'] <= value <= condition['maximum']):
            return False
    return True
def processRules(rules, location, glyphNames):
    """Apply these rules at this location to these glyphnames.
    Return a new list of glyphNames with substitutions applied.
    - rule order matters
    """
    for rule in rules:
        if not evaluateRule(rule, location):
            continue
        renamed = []
        for name in glyphNames:
            replacement = name
            # First matching substitution wins for a given glyph name.
            for src, dst in rule.subs:
                if src == name:
                    replacement = dst
                    break
            renamed.append(replacement)
        glyphNames = renamed
    return glyphNames
# Type aliases for axis locations:
# - AnisotropicLocationDict allows a per-axis (horizontal, vertical) tuple
#   for anisotropic interpolation.
# - SimpleLocationDict maps each axis name to a single value.
# NOTE(review): these names are referenced by annotations in classes defined
# above (e.g. SourceDescriptor.location setter); that only resolves if
# `from __future__ import annotations` is in effect at the top of this file,
# or if the original module ordering differs — verify.
AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
SimpleLocationDict = Dict[str, float]
class InstanceDescriptor(SimpleDescriptor):
    """Simple container for data related to the instance
    .. code:: python
        i2 = InstanceDescriptor()
        i2.path = instancePath2
        i2.familyName = "InstanceFamilyName"
        i2.styleName = "InstanceStyleName"
        i2.name = "instance.ufo2"
        # anisotropic location
        i2.designLocation = dict(weight=500, width=(400,300))
        i2.postScriptFontName = "InstancePostscriptName"
        i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
        i2.styleMapStyleName = "InstanceStyleMapStyleName"
        i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
        doc.addInstance(i2)
    """
    flavor = "instance"
    _defaultLanguageCode = "en"
    # Attributes taking part in compare()/__repr__ (see SimpleDescriptor).
    _attrs = ['filename',
              'path',
              'name',
              'locationLabel',
              'designLocation',
              'userLocation',
              'familyName',
              'styleName',
              'postScriptFontName',
              'styleMapFamilyName',
              'styleMapStyleName',
              'localisedFamilyName',
              'localisedStyleName',
              'localisedStyleMapFamilyName',
              'localisedStyleMapStyleName',
              'glyphs',
              'kerning',
              'info',
              'lib']
    # Both stored with POSIX separators via posixpath_property (defined above).
    filename = posixpath_property("_filename")
    path = posixpath_property("_path")
    def __init__(
        self,
        *,
        filename=None,
        path=None,
        font=None,
        name=None,
        location=None,
        locationLabel=None,
        designLocation=None,
        userLocation=None,
        familyName=None,
        styleName=None,
        postScriptFontName=None,
        styleMapFamilyName=None,
        styleMapStyleName=None,
        localisedFamilyName=None,
        localisedStyleName=None,
        localisedStyleMapFamilyName=None,
        localisedStyleMapStyleName=None,
        glyphs=None,
        kerning=True,
        info=True,
        lib=None,
    ):
        self.filename = filename
        """string. Relative path to the instance file, **as it is
        in the document**. The file may or may not exist.
        MutatorMath + VarLib.
        """
        self.path = path
        """string. Absolute path to the instance file, calculated from
        the document path and the string in the filename attr. The file may
        or may not exist.
        MutatorMath.
        """
        self.font = font
        """Same as :attr:`SourceDescriptor.font`
        .. seealso:: :attr:`SourceDescriptor.font`
        """
        self.name = name
        """string. Unique identifier name of the instance, used to
        identify it if it needs to be referenced from elsewhere in the
        document.
        """
        self.locationLabel = locationLabel
        """Name of a :class:`LocationLabelDescriptor`. If
        provided, the instance should have the same location as the
        LocationLabel.
        .. seealso::
            :meth:`getFullDesignLocation`
            :meth:`getFullUserLocation`
        .. versionadded:: 5.0
        """
        # An explicit `designLocation` wins over the deprecated `location`
        # alias; both absent yields an empty dict.
        self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None else (location or {})
        """dict. Axis values for this instance, in design space coordinates.
        MutatorMath + Varlib.
        .. seealso:: This may be only part of the full location. See:
            :meth:`getFullDesignLocation`
            :meth:`getFullUserLocation`
        .. versionadded:: 5.0
        """
        self.userLocation: SimpleLocationDict = userLocation or {}
        """dict. Axis values for this instance, in user space coordinates.
        MutatorMath + Varlib.
        .. seealso:: This may be only part of the full location. See:
            :meth:`getFullDesignLocation`
            :meth:`getFullUserLocation`
        .. versionadded:: 5.0
        """
        self.familyName = familyName
        """string. Family name of this instance.
        MutatorMath + Varlib.
        """
        self.styleName = styleName
        """string. Style name of this instance.
        MutatorMath + Varlib.
        """
        self.postScriptFontName = postScriptFontName
        """string. Postscript fontname for this instance.
        MutatorMath + Varlib.
        """
        self.styleMapFamilyName = styleMapFamilyName
        """string. StyleMap familyname for this instance.
        MutatorMath + Varlib.
        """
        self.styleMapStyleName = styleMapStyleName
        """string. StyleMap stylename for this instance.
        MutatorMath + Varlib.
        """
        self.localisedFamilyName = localisedFamilyName or {}
        """dict. A dictionary of localised family name
        strings, keyed by language code.
        """
        self.localisedStyleName = localisedStyleName or {}
        """dict. A dictionary of localised stylename
        strings, keyed by language code.
        """
        self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
        """A dictionary of localised style map
        familyname strings, keyed by language code.
        """
        self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
        """A dictionary of localised style map
        stylename strings, keyed by language code.
        """
        self.glyphs = glyphs or {}
        """dict for special master definitions for glyphs. If glyphs
        need special masters (to record the results of executed rules for
        example).
        MutatorMath.
        .. deprecated:: 5.0
            Use rules or sparse sources instead.
        """
        self.kerning = kerning
        """ bool. Indicates if this instance needs its kerning
        calculated.
        MutatorMath.
        .. deprecated:: 5.0
        """
        self.info = info
        """bool. Indicated if this instance needs the interpolating
        font.info calculated.
        .. deprecated:: 5.0
        """
        self.lib = lib or {}
        """Custom data associated with this instance."""
    @property
    def location(self):
        """dict. Axis values for this instance.
        MutatorMath + Varlib.
        .. deprecated:: 5.0
            Use the more explicit alias for this property :attr:`designLocation`.
        """
        return self.designLocation
    @location.setter
    def location(self, location: Optional[AnisotropicLocationDict]):
        self.designLocation = location or {}
    def setStyleName(self, styleName, languageCode="en"):
        """These methods give easier access to the localised names."""
        self.localisedStyleName[languageCode] = tostr(styleName)
    def getStyleName(self, languageCode="en"):
        return self.localisedStyleName.get(languageCode)
    def setFamilyName(self, familyName, languageCode="en"):
        self.localisedFamilyName[languageCode] = tostr(familyName)
    def getFamilyName(self, languageCode="en"):
        return self.localisedFamilyName.get(languageCode)
    def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"):
        self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName)
    def getStyleMapStyleName(self, languageCode="en"):
        return self.localisedStyleMapStyleName.get(languageCode)
    def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"):
        self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)
    def getStyleMapFamilyName(self, languageCode="en"):
        return self.localisedStyleMapFamilyName.get(languageCode)
    def clearLocation(self, axisName: Optional[str] = None):
        """Clear all location-related fields. Ensures that
        :attr:``designLocation`` and :attr:``userLocation`` are dictionaries
        (possibly empty if clearing everything).
        In order to update the location of this instance wholesale, a user
        should first clear all the fields, then change the field(s) for which
        they have data.
        .. code:: python
            instance.clearLocation()
            instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
            instance.userLocation = {'Opsz': 16}
        In order to update a single axis location, the user should only clear
        that axis, then edit the values:
        .. code:: python
            instance.clearLocation('Weight')
            instance.designLocation['Weight'] = (34, 36.5)
        Args:
            axisName: if provided, only clear the location for that axis.
        .. versionadded:: 5.0
        """
        self.locationLabel = None
        if axisName is None:
            self.designLocation = {}
            self.userLocation = {}
        else:
            # Defensive: these fields should already be dicts after
            # __init__, but guard against callers having set them to None.
            if self.designLocation is None:
                self.designLocation = {}
            if axisName in self.designLocation:
                del self.designLocation[axisName]
            if self.userLocation is None:
                self.userLocation = {}
            if axisName in self.userLocation:
                del self.userLocation[axisName]
    def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]:
        """Get the :class:`LocationLabelDescriptor` instance that matches
        this instances's :attr:`locationLabel`.
        Raises if the named label can't be found.
        .. versionadded:: 5.0
        """
        if self.locationLabel is None:
            return None
        label = doc.getLocationLabel(self.locationLabel)
        if label is None:
            raise DesignSpaceDocumentError(
                'InstanceDescriptor.getLocationLabelDescriptor(): '
                f'unknown location label `{self.locationLabel}` in instance `{self.name}`.'
            )
        return label
    def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
        """Get the complete design location of this instance, by combining data
        from the various location fields, default axis values and mappings, and
        top-level location labels.
        The source of truth for this instance's location is determined for each
        axis independently by taking the first not-None field in this list:
        - ``locationLabel``: the location along this axis is the same as the
          matching STAT format 4 label. No anisotropy.
        - ``designLocation[axisName]``: the explicit design location along this
          axis, possibly anisotropic.
        - ``userLocation[axisName]``: the explicit user location along this
          axis. No anisotropy.
        - ``axis.default``: default axis value. No anisotropy.
        .. versionadded:: 5.0
        """
        label = self.getLocationLabelDescriptor(doc)
        if label is not None:
            # A location label overrides everything else, for all axes.
            return doc.map_forward(label.userLocation)  # type: ignore
        result: AnisotropicLocationDict = {}
        for axis in doc.axes:
            if axis.name in self.designLocation:
                result[axis.name] = self.designLocation[axis.name]
            elif axis.name in self.userLocation:
                result[axis.name] = axis.map_forward(self.userLocation[axis.name])
            else:
                result[axis.name] = axis.map_forward(axis.default)
        return result
    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location for this instance.
        .. seealso:: :meth:`getFullDesignLocation`
        .. versionadded:: 5.0
        """
        return doc.map_backward(self.getFullDesignLocation(doc))
def tagForAxisName(name):
    """Return a ``(tag, labelNames)`` pair for the given axis name.

    Known registered axis names (case-insensitive) map to their standard
    four-letter OpenType tags; any other name is truncated to four
    characters, or padded with ``*`` when shorter.
    """
    registered = {
        'weight': ('wght', dict(en='Weight')),
        'width': ('wdth', dict(en='Width')),
        'optical': ('opsz', dict(en='Optical Size')),
        'slant': ('slnt', dict(en='Slant')),
        'italic': ('ital', dict(en='Italic')),
    }
    key = name.lower()
    if key in registered:
        return registered[key]
    tag = name[:4] if len(name) >= 4 else name.ljust(4, "*")
    return tag, dict(en=name)
class AbstractAxisDescriptor(SimpleDescriptor):
    """Shared base for continuous (:class:`AxisDescriptor`) and discrete
    (:class:`DiscreteAxisDescriptor`) axes: tag, name, UI labels, mapping
    and STAT metadata common to both.
    """
    flavor = "axis"
    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        # opentype tag for this axis
        self.tag = tag
        """string. Four letter tag for this axis. Some might be
        registered at the `OpenType
        specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__.
        Privately-defined axis tags must begin with an uppercase letter and
        use only uppercase letters or digits.
        """
        # name of the axis used in locations
        self.name = name
        """string. Name of the axis as it is used in the location dicts.
        MutatorMath + Varlib.
        """
        # names for UI purposes, if this is not a standard axis,
        self.labelNames = labelNames or {}
        """dict. When defining a non-registered axis, it will be
        necessary to define user-facing readable names for the axis. Keyed by
        xml:lang code. Values are required to be ``unicode`` strings, even if
        they only contain ASCII characters.
        """
        self.hidden = hidden
        """bool. Whether this axis should be hidden in user interfaces.
        """
        self.map = map or []
        """list of input / output values that can describe a warp of user space
        to design space coordinates. If no map values are present, it is assumed
        user space is the same as design space, as in [(minimum, minimum),
        (maximum, maximum)].
        Varlib.
        """
        self.axisOrdering = axisOrdering
        """STAT table field ``axisOrdering``.
        See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_
        .. versionadded:: 5.0
        """
        self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
        """STAT table entries for Axis Value Tables format 1, 2, 3.
        See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_
        .. versionadded:: 5.0
        """
class AxisDescriptor(AbstractAxisDescriptor):
    """ Simple container for the axis data.
    Add more localisations?
    .. code:: python
        a1 = AxisDescriptor()
        a1.minimum = 1
        a1.maximum = 1000
        a1.default = 400
        a1.name = "weight"
        a1.tag = "wght"
        a1.labelNames['fa-IR'] = "قطر"
        a1.labelNames['en'] = "Wéíght"
        a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
        a1.axisOrdering = 1
        a1.axisLabels = [
            AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
        ]
        doc.addAxis(a1)
    """
    _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels']
    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        minimum=None,
        default=None,
        maximum=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.minimum = minimum
        """number. The minimum value for this axis in user space.
        MutatorMath + Varlib.
        """
        self.maximum = maximum
        """number. The maximum value for this axis in user space.
        MutatorMath + Varlib.
        """
        self.default = default
        """number. The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.
        MutatorMath + Varlib.
        """
    def serialize(self):
        # output to a dict, used in testing
        return dict(
            tag=self.tag,
            name=self.name,
            labelNames=self.labelNames,
            maximum=self.maximum,
            minimum=self.minimum,
            default=self.default,
            hidden=self.hidden,
            map=self.map,
            axisOrdering=self.axisOrdering,
            axisLabels=self.axisLabels,
        )
    def map_forward(self, v):
        """Maps value from axis mapping's input (user) to output (design)."""
        # Local import — presumably to avoid an import cycle or defer the
        # cost of loading varLib — TODO confirm.
        from fontTools.varLib.models import piecewiseLinearMap
        if not self.map:
            # No mapping: user space and design space coincide.
            return v
        return piecewiseLinearMap(v, {k: v for k, v in self.map})
    def map_backward(self, v):
        """Maps value from axis mapping's output (design) to input (user)."""
        from fontTools.varLib.models import piecewiseLinearMap
        if isinstance(v, tuple):
            # Anisotropic (horizontal, vertical) value: use the first
            # (horizontal) component.
            v = v[0]
        if not self.map:
            return v
        # Invert the mapping; if several inputs map to the same output, the
        # last pair in self.map wins in this inverted dict.
        return piecewiseLinearMap(v, {v: k for k, v in self.map})
class DiscreteAxisDescriptor(AbstractAxisDescriptor):
    """Container for discrete axis data.
    Use this for axes that do not interpolate. The main difference from a
    continuous axis is that a continuous axis has a ``minimum`` and ``maximum``,
    while a discrete axis has a list of ``values``.
    Example: an Italic axis with 2 stops, Roman and Italic, that are not
    compatible. The axis still allows to bind together the full font family,
    which is useful for the STAT table, however it can't become a variation
    axis in a VF.
    .. code:: python
        a2 = DiscreteAxisDescriptor()
        a2.values = [0, 1]
        a2.default = 0
        a2.name = "Italic"
        a2.tag = "ITAL"
        a2.labelNames['fr'] = "Italique"
        a2.map = [(0, 0), (1, -11)]
        a2.axisOrdering = 2
        a2.axisLabels = [
            AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
        ]
        doc.addAxis(a2)
    .. versionadded:: 5.0
    """
    flavor = "axis"
    _attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels')
    def __init__(
        self,
        *,
        tag=None,
        name=None,
        labelNames=None,
        values=None,
        default=None,
        hidden=False,
        map=None,
        axisOrdering=None,
        axisLabels=None,
    ):
        super().__init__(
            tag=tag,
            name=name,
            labelNames=labelNames,
            hidden=hidden,
            map=map,
            axisOrdering=axisOrdering,
            axisLabels=axisLabels,
        )
        self.default: float = default
        """The default value for this axis, i.e. when a new location is
        created, this is the value this axis will get in user space.
        However, this default value is less important than in continuous axes:
        - it doesn't define the "neutral" version of outlines from which
          deltas would apply, as this axis does not interpolate.
        - it doesn't provide the reference glyph set for the designspace, as
          fonts at each value can have different glyph sets.
        """
        self.values: List[float] = values or []
        """List of possible values for this axis. Contrary to continuous axes,
        only the values in this list can be taken by the axis, nothing in-between.
        """
    def map_forward(self, value):
        """Maps value from axis mapping's input to output.
        Returns value unchanged if no mapping entry is found.
        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        # Linear scan; returns the output of the first pair whose input
        # matches, otherwise the value itself.
        return next((v for k, v in self.map if k == value), value)
    def map_backward(self, value):
        """Maps value from axis mapping's output to input.
        Returns value unchanged if no mapping entry is found.
        Note: for discrete axes, each value must have its mapping entry, if
        you intend that value to be mapped.
        """
        if isinstance(value, tuple):
            # Anisotropic (horizontal, vertical) value: use the first
            # (horizontal) component.
            value = value[0]
        return next((k for k, v in self.map if v == value), value)
class AxisLabelDescriptor(SimpleDescriptor):
    """Container for axis label data.
    Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_
    The STAT format of the Axis value depends on which field are filled-in,
    see :meth:`getFormat`
    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames')
    def __init__(
        self,
        *,
        name,
        userValue,
        userMinimum=None,
        userMaximum=None,
        elidable=False,
        olderSibling=False,
        linkedUserValue=None,
        labelNames=None,
    ):
        self.userMinimum: Optional[float] = userMinimum
        """STAT field ``rangeMinValue`` (format 2)."""
        self.userValue: float = userValue
        """STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
        self.userMaximum: Optional[float] = userMaximum
        """STAT field ``rangeMaxValue`` (format 2)."""
        self.name: str = name
        """Label for this axis location, STAT field ``valueNameID``."""
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.linkedUserValue: Optional[float] = linkedUserValue
        """STAT field ``linkedValue`` (format 3)."""
        self.labelNames: MutableMapping[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        ``xml:lang`` code.
        """
    def getFormat(self) -> int:
        """Determine which format of STAT Axis value to use to encode this label.
        =========== ========= =========== =========== ===============
        STAT Format userValue userMinimum userMaximum linkedUserValue
        =========== ========= =========== =========== ===============
        1           ✅        ❌          ❌          ❌
        2           ✅        ✅          ✅          ❌
        3           ✅        ❌          ❌          ✅
        =========== ========= =========== =========== ===============
        """
        # Precedence: a linked value forces format 3; any range bound forces
        # format 2; otherwise the simple format 1.
        if self.linkedUserValue is not None:
            return 3
        if self.userMinimum is not None or self.userMaximum is not None:
            return 2
        return 1
    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name
class LocationLabelDescriptor(SimpleDescriptor):
    """Container for location label data.
    Analogue of OpenType's STAT data for a free-floating location (format 4).
    All values are user values.
    See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_
    .. versionadded:: 5.0
    """
    flavor = "label"
    _attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')
    def __init__(
        self,
        *,
        name,
        userLocation,
        elidable=False,
        olderSibling=False,
        labelNames=None,
    ):
        self.name: str = name
        """Label for this named location, STAT field ``valueNameID``."""
        self.userLocation: SimpleLocationDict = userLocation or {}
        """Location in user coordinates along each axis.
        If an axis is not mentioned, it is assumed to be at its default location.
        .. seealso:: This may be only part of the full location. See:
            :meth:`getFullUserLocation`
        """
        self.elidable: bool = elidable
        """STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.olderSibling: bool = olderSibling
        """STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
        See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
        """
        self.labelNames: Dict[str, str] = labelNames or {}
        """User-facing translations of this location's label. Keyed by
        xml:lang code.
        """
    @property
    def defaultName(self) -> str:
        """Return the English name from :attr:`labelNames` or the :attr:`name`."""
        return self.labelNames.get("en") or self.name
    def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
        """Get the complete user location of this label, by combining data
        from the explicit user location and default axis values.
        .. versionadded:: 5.0
        """
        # Axes absent from userLocation fall back to their defaults.
        return {
            axis.name: self.userLocation.get(axis.name, axis.default)
            for axis in doc.axes
        }
class VariableFontDescriptor(SimpleDescriptor):
    """Container for variable fonts, sub-spaces of the Designspace.
    Use-cases:
    - From a single DesignSpace with discrete axes, define 1 variable font
      per value on the discrete axes. Before version 5, you would have needed
      1 DesignSpace per such variable font, and a lot of data duplication.
    - From a big variable font with many axes, define subsets of that variable
      font that only include some axes and freeze other axes at a given location.
    .. versionadded:: 5.0
    """
    flavor = "variable-font"
    _attrs = ('filename', 'axisSubsets', 'lib')
    # Stored with POSIX separators via posixpath_property (defined above).
    filename = posixpath_property("_filename")
    def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
        self.name: str = name
        """string, required. Name of this variable to identify it during the
        build process and from other parts of the document, and also as a
        filename in case the filename property is empty.
        VarLib.
        """
        # Annotation widened to Optional[str]: the default is None and the
        # docstring below explicitly allows the filename to be unspecified.
        self.filename: Optional[str] = filename
        """string, optional. Relative path to the variable font file, **as it is
        in the document**. The file may or may not exist.
        If not specified, the :attr:`name` will be used as a basename for the file.
        """
        self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
        """Axis subsets to include in this variable font.
        If an axis is not mentioned, assume that we only want the default
        location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
        """
        self.lib: MutableMapping[str, Any] = lib or {}
        """Custom data associated with this variable font."""
class RangeAxisSubsetDescriptor(SimpleDescriptor):
    """Subset of a continuous axis to include in a variable font.
    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')
    # Infinite defaults mean "keep the full range of the referenced axis".
    def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` to subset."""
        self.userMinimum: float = userMinimum
        """New minimum value of the axis in the target variable font.
        If not specified, assume the same minimum value as the full axis.
        (default = ``-math.inf``)
        """
        self.userDefault: Optional[float] = userDefault
        """New default value of the axis in the target variable font.
        If not specified, assume the same default value as the full axis.
        (default = ``None``)
        """
        self.userMaximum: float = userMaximum
        """New maximum value of the axis in the target variable font.
        If not specified, assume the same maximum value as the full axis.
        (default = ``math.inf``)
        """
class ValueAxisSubsetDescriptor(SimpleDescriptor):
    """Single value of a discrete or continuous axis to use in a variable font.
    .. versionadded:: 5.0
    """
    flavor = "axis-subset"
    _attrs = ('name', 'userValue')
    def __init__(self, *, name, userValue):
        self.name: str = name
        """Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
        to "snapshot" or "freeze".
        """
        self.userValue: float = userValue
        """Value in user coordinates at which to freeze the given axis."""
class BaseDocWriter(object):
_whiteSpace = " "
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
    @classmethod
    def getAxisDecriptor(cls):
        # Factory hook: subclasses override the *DescriptorClass class
        # attributes to make the writer produce custom descriptor types.
        # NOTE(review): "Decriptor" (missing 's') is a long-standing public
        # API typo; the name must be kept for backward compatibility.
        return cls.axisDescriptorClass()
    @classmethod
    def getSourceDescriptor(cls):
        # Return a new, empty source descriptor of the configured class.
        return cls.sourceDescriptorClass()
    @classmethod
    def getInstanceDescriptor(cls):
        # Return a new, empty instance descriptor of the configured class.
        return cls.instanceDescriptorClass()
    @classmethod
    def getRuleDescriptor(cls):
        # Return a new, empty rule descriptor of the configured class.
        return cls.ruleDescriptorClass()
    def __init__(self, documentPath, documentObject: DesignSpaceDocument):
        # Writer that serializes `documentObject` as designspace XML to
        # `documentPath` when write() is called.
        self.path = documentPath
        self.documentObject = documentObject
        # Resolve the document format version up-front so write() can stamp
        # it on the root element.
        self.effectiveFormatTuple = self._getEffectiveFormatTuple()
        self.root = ET.Element("designspace")
def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple)
if self.documentObject.axes or self.documentObject.elidedFallbackName is not None:
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
for labelObject in self.documentObject.locationLabels:
self._addLocationLabel(labelsElement, labelObject)
self.root.append(labelsElement)
if self.documentObject.rules:
if getattr(self.documentObject, "rulesProcessingLast", False):
attributes = {"processing": "last"}
else:
attributes = {}
self.root.append(ET.Element("rules", attributes))
for ruleObject in self.documentObject.rules:
self._addRule(ruleObject)
if self.documentObject.sources:
self.root.append(ET.Element("sources"))
for sourceObject in self.documentObject.sources:
self._addSource(sourceObject)
if self.documentObject.variableFonts:
variableFontsElement = ET.Element("variable-fonts")
for variableFont in self.documentObject.variableFonts:
self._addVariableFont(variableFontsElement, variableFont)
self.root.append(variableFontsElement)
if self.documentObject.instances:
self.root.append(ET.Element("instances"))
for instanceObject in self.documentObject.instances:
self._addInstance(instanceObject)
if self.documentObject.lib:
self._addLib(self.root, self.documentObject.lib, 2)
tree = ET.ElementTree(self.root)
tree.write(
self.path,
encoding=encoding,
method='xml',
xml_declaration=xml_declaration,
pretty_print=pretty,
)
def _getEffectiveFormatTuple(self):
"""Try to use the version specified in the document, or a sufficiently
recent version to be able to encode what the document contains.
"""
minVersion = self.documentObject.formatTuple
if (
any(
hasattr(axis, 'values') or
axis.axisOrdering is not None or
axis.axisLabels
for axis in self.documentObject.axes
) or
self.documentObject.locationLabels or
any(
source.localisedFamilyName
for source in self.documentObject.sources
) or
self.documentObject.variableFonts or
any(
instance.locationLabel or
instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
return minVersion
def _makeLocationElement(self, locationObject, name=None):
""" Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib['name'] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = dimensionName
if type(dimensionValue) == tuple:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0])
dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
return ("%f" % num).rstrip('0').rstrip('.')
def _addRule(self, ruleObject):
# if none of the conditions have minimum or maximum values, do not add the rule.
ruleElement = ET.Element('rule')
if ruleObject.name is not None:
ruleElement.attrib['name'] = ruleObject.name
for conditions in ruleObject.conditionSets:
conditionsetElement = ET.Element('conditionset')
for cond in conditions:
if cond.get('minimum') is None and cond.get('maximum') is None:
# neither is defined, don't add this condition
continue
conditionElement = ET.Element('condition')
conditionElement.attrib['name'] = cond.get('name')
if cond.get('minimum') is not None:
conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum'))
if cond.get('maximum') is not None:
conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum'))
conditionsetElement.append(conditionElement)
if len(conditionsetElement):
ruleElement.append(conditionsetElement)
for sub in ruleObject.subs:
subElement = ET.Element('sub')
subElement.attrib['name'] = sub[0]
subElement.attrib['with'] = sub[1]
ruleElement.append(subElement)
if len(ruleElement):
self.root.findall('.rules')[0].append(ruleElement)
def _addAxis(self, axisObject):
axisElement = ET.Element('axis')
axisElement.attrib['tag'] = axisObject.tag
axisElement.attrib['name'] = axisObject.name
self._addLabelNames(axisElement, axisObject.labelNames)
if axisObject.map:
for inputValue, outputValue in axisObject.map:
mapElement = ET.Element('map')
mapElement.attrib['input'] = self.intOrFloat(inputValue)
mapElement.attrib['output'] = self.intOrFloat(outputValue)
axisElement.append(mapElement)
if axisObject.axisOrdering or axisObject.axisLabels:
labelsElement = ET.Element('labels')
if axisObject.axisOrdering is not None:
labelsElement.attrib['ordering'] = str(axisObject.axisOrdering)
for label in axisObject.axisLabels:
self._addAxisLabel(labelsElement, label)
axisElement.append(labelsElement)
if hasattr(axisObject, "minimum"):
axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum)
axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum)
elif hasattr(axisObject, "values"):
axisElement.attrib['values'] = " ".join(self.intOrFloat(v) for v in axisObject.values)
axisElement.attrib['default'] = self.intOrFloat(axisObject.default)
if axisObject.hidden:
axisElement.attrib['hidden'] = "1"
self.root.findall('.axes')[0].append(axisElement)
def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue)
if label.userMinimum is not None:
labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum)
if label.userMaximum is not None:
labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum)
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
if label.linkedUserValue is not None:
labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue)
self._addLabelNames(labelElement, label.labelNames)
axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
languageElement = ET.Element('labelname')
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
self._addLabelNames(labelElement, label.labelNames)
self._addLocationElement(labelElement, userLocation=label.userLocation)
parentElement.append(labelElement)
def _addLocationElement(
self,
parentElement,
*,
designLocation: AnisotropicLocationDict = None,
userLocation: SimpleLocationDict = None
):
locElement = ET.Element("location")
for axis in self.documentObject.axes:
if designLocation is not None and axis.name in designLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = designLocation[axis.name]
if isinstance(value, tuple):
dimElement.attrib['xvalue'] = self.intOrFloat(value[0])
dimElement.attrib['yvalue'] = self.intOrFloat(value[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(value)
locElement.append(dimElement)
elif userLocation is not None and axis.name in userLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = userLocation[axis.name]
dimElement.attrib['uservalue'] = self.intOrFloat(value)
locElement.append(dimElement)
if len(locElement) > 0:
parentElement.append(locElement)
def _addInstance(self, instanceObject):
instanceElement = ET.Element('instance')
if instanceObject.name is not None:
instanceElement.attrib['name'] = instanceObject.name
if instanceObject.locationLabel is not None:
instanceElement.attrib['location'] = instanceObject.locationLabel
if instanceObject.familyName is not None:
instanceElement.attrib['familyname'] = instanceObject.familyName
if instanceObject.styleName is not None:
instanceElement.attrib['stylename'] = instanceObject.styleName
# add localisations
if instanceObject.localisedStyleName:
languageCodes = list(instanceObject.localisedStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedStyleNameElement = ET.Element('stylename')
localisedStyleNameElement.attrib[XML_LANG] = code
localisedStyleNameElement.text = instanceObject.getStyleName(code)
instanceElement.append(localisedStyleNameElement)
if instanceObject.localisedFamilyName:
languageCodes = list(instanceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = instanceObject.getFamilyName(code)
instanceElement.append(localisedFamilyNameElement)
if instanceObject.localisedStyleMapStyleName:
languageCodes = list(instanceObject.localisedStyleMapStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapStyleNameElement = ET.Element('stylemapstylename')
localisedStyleMapStyleNameElement.attrib[XML_LANG] = code
localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code)
instanceElement.append(localisedStyleMapStyleNameElement)
if instanceObject.localisedStyleMapFamilyName:
languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname')
localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code
localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code)
instanceElement.append(localisedStyleMapFamilyNameElement)
if self.effectiveFormatTuple >= (5, 0):
if instanceObject.locationLabel is None:
self._addLocationElement(
instanceElement,
designLocation=instanceObject.designLocation,
userLocation=instanceObject.userLocation
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
if instanceObject.location is not None:
locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location)
instanceElement.append(locationElement)
if instanceObject.filename is not None:
instanceElement.attrib['filename'] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName
if instanceObject.styleMapFamilyName is not None:
instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName
if instanceObject.styleMapStyleName is not None:
instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
if instanceElement.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
instanceElement.append(glyphsElement)
glyphsElement = instanceElement.findall('.glyphs')[0]
for glyphName, data in sorted(instanceObject.glyphs.items()):
glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data)
glyphsElement.append(glyphElement)
if instanceObject.kerning:
kerningElement = ET.Element('kerning')
instanceElement.append(kerningElement)
if instanceObject.info:
infoElement = ET.Element('info')
instanceElement.append(infoElement)
self._addLib(instanceElement, instanceObject.lib, 4)
self.root.findall('.instances')[0].append(instanceElement)
def _addSource(self, sourceObject):
sourceElement = ET.Element("source")
if sourceObject.filename is not None:
sourceElement.attrib['filename'] = sourceObject.filename
if sourceObject.name is not None:
if sourceObject.name.find("temp_master") != 0:
# do not save temporary source names
sourceElement.attrib['name'] = sourceObject.name
if sourceObject.familyName is not None:
sourceElement.attrib['familyname'] = sourceObject.familyName
if sourceObject.styleName is not None:
sourceElement.attrib['stylename'] = sourceObject.styleName
if sourceObject.layerName is not None:
sourceElement.attrib['layer'] = sourceObject.layerName
if sourceObject.localisedFamilyName:
languageCodes = list(sourceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
sourceElement.append(localisedFamilyNameElement)
if sourceObject.copyLib:
libElement = ET.Element('lib')
libElement.attrib['copy'] = "1"
sourceElement.append(libElement)
if sourceObject.copyGroups:
groupsElement = ET.Element('groups')
groupsElement.attrib['copy'] = "1"
sourceElement.append(groupsElement)
if sourceObject.copyFeatures:
featuresElement = ET.Element('features')
featuresElement.attrib['copy'] = "1"
sourceElement.append(featuresElement)
if sourceObject.copyInfo or sourceObject.muteInfo:
infoElement = ET.Element('info')
if sourceObject.copyInfo:
infoElement.attrib['copy'] = "1"
if sourceObject.muteInfo:
infoElement.attrib['mute'] = "1"
sourceElement.append(infoElement)
if sourceObject.muteKerning:
kerningElement = ET.Element("kerning")
kerningElement.attrib["mute"] = '1'
sourceElement.append(kerningElement)
if sourceObject.mutedGlyphNames:
for name in sourceObject.mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
glyphElement.attrib["mute"] = '1'
sourceElement.append(glyphElement)
if self.effectiveFormatTuple >= (5, 0):
self._addLocationElement(sourceElement, designLocation=sourceObject.location)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location)
sourceElement.append(locationElement)
self.root.findall('.sources')[0].append(sourceElement)
def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None:
vfElement = ET.Element('variable-font')
vfElement.attrib['name'] = vf.name
if vf.filename is not None:
vfElement.attrib['filename'] = vf.filename
if vf.axisSubsets:
subsetsElement = ET.Element('axis-subsets')
for subset in vf.axisSubsets:
subsetElement = ET.Element('axis-subset')
subsetElement.attrib['name'] = subset.name
# Mypy doesn't support narrowing union types via hasattr()
# https://mypy.readthedocs.io/en/stable/type_narrowing.html
# TODO(Python 3.10): use TypeGuard
if hasattr(subset, "userMinimum"):
subset = cast(RangeAxisSubsetDescriptor, subset)
if subset.userMinimum != -math.inf:
subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum)
if subset.userMaximum != math.inf:
subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum)
if subset.userDefault is not None:
subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault)
elif hasattr(subset, "userValue"):
subset = cast(ValueAxisSubsetDescriptor, subset)
subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue)
subsetsElement.append(subsetElement)
vfElement.append(subsetsElement)
self._addLib(vfElement, vf.lib, 4)
parentElement.append(vfElement)
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
if not data:
return
libElement = ET.Element('lib')
libElement.append(plistlib.totree(data, indent_level=indent_level))
parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
glyphElement = ET.Element('glyph')
if data.get('mute'):
glyphElement.attrib['mute'] = "1"
if data.get('unicodes') is not None:
glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')])
if data.get('instanceLocation') is not None:
locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation'))
glyphElement.append(locationElement)
if glyphName is not None:
glyphElement.attrib['name'] = glyphName
if data.get('note') is not None:
noteElement = ET.Element('note')
noteElement.text = data.get('note')
glyphElement.append(noteElement)
if data.get('masters') is not None:
mastersElement = ET.Element("masters")
for m in data.get('masters'):
masterElement = ET.Element("master")
if m.get('glyphName') is not None:
masterElement.attrib['glyphname'] = m.get('glyphName')
if m.get('font') is not None:
masterElement.attrib['source'] = m.get('font')
if m.get('location') is not None:
locationElement, m['location'] = self._makeLocationElement(m.get('location'))
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
return glyphElement
class BaseDocReader(LogMixin):
    """Read a designspace XML document into a :class:`DesignSpaceDocument`.

    The ``*DescriptorClass`` class attributes name the descriptor classes
    instantiated while reading; subclasses may override them.
    """
    # Descriptor classes used to build the objects read from XML.
    axisDescriptorClass = AxisDescriptor
    discreteAxisDescriptorClass = DiscreteAxisDescriptor
    axisLabelDescriptorClass = AxisLabelDescriptor
    locationLabelDescriptorClass = LocationLabelDescriptor
    ruleDescriptorClass = RuleDescriptor
    sourceDescriptorClass = SourceDescriptor
    variableFontsDescriptorClass = VariableFontDescriptor
    valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
    rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
    instanceDescriptorClass = InstanceDescriptor
    def __init__(self, documentPath, documentObject):
        """Parse the XML file at ``documentPath`` up front; the individual
        ``read*`` methods then walk the parsed tree into ``documentObject``.
        """
        self.path = documentPath
        self.documentObject = documentObject
        tree = ET.parse(self.path)
        self.root = tree.getroot()
        # Default to format 3.0 when the root carries no "format" attribute.
        self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
        self._axes = []
        self.rules = []
        self.sources = []
        self.instances = []
        # axis name -> default value; filled by readAxes() and used to
        # validate location dimensions in readLocationElement().
        self.axisDefaults = {}
        self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
f = BytesIO(tobytes(string, encoding="utf-8"))
self = cls(f, documentObject)
self.path = None
return self
def read(self):
self.readAxes()
self.readLabels()
self.readRules()
self.readVariableFonts()
self.readSources()
self.readInstances()
self.readLib()
    def readRules(self):
        """Read the <rules> element and its <rule> children into
        ``documentObject.rules``.
        """
        # we also need to read any conditions that are outside of a condition set.
        rules = []
        rulesElement = self.root.find(".rules")
        if rulesElement is not None:
            processingValue = rulesElement.attrib.get("processing", "first")
            if processingValue not in {"first", "last"}:
                raise DesignSpaceDocumentError(
                    "<rules> processing attribute value is not valid: %r, "
                    "expected 'first' or 'last'" % processingValue)
            self.documentObject.rulesProcessingLast = processingValue == "last"
        for ruleElement in self.root.findall(".rules/rule"):
            ruleObject = self.ruleDescriptorClass()
            ruleName = ruleObject.name = ruleElement.attrib.get("name")
            # read any stray conditions outside a condition set
            externalConditions = self._readConditionElements(
                ruleElement,
                ruleName,
            )
            if externalConditions:
                ruleObject.conditionSets.append(externalConditions)
                self.log.info(
                    "Found stray rule conditions outside a conditionset. "
                    "Wrapped them in a new conditionset."
                )
            # read the conditionsets
            for conditionSetElement in ruleElement.findall('.conditionset'):
                conditionSet = self._readConditionElements(
                    conditionSetElement,
                    ruleName,
                )
                if conditionSet is not None:
                    ruleObject.conditionSets.append(conditionSet)
            # substitutions: each <sub> maps one glyph name to another
            for subElement in ruleElement.findall('.sub'):
                a = subElement.attrib['name']
                b = subElement.attrib['with']
                ruleObject.subs.append((a, b))
            rules.append(ruleObject)
        self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
cds = []
for conditionElement in parentElement.findall('.condition'):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
cd['minimum'] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
cd['minimum'] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
cd['maximum'] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
cd['maximum'] = None
cd['name'] = conditionElement.attrib.get("name")
# # test for things
if cd.get('minimum') is None and cd.get('maximum') is None:
raise DesignSpaceDocumentError(
"condition missing required minimum or maximum in rule" +
(" '%s'" % ruleName if ruleName is not None else ""))
cds.append(cd)
return cds
    def readAxes(self):
        """Read the <axes> element into ``documentObject.axes`` and record
        each axis' default in ``self.axisDefaults``.
        """
        # read the axes elements, including the warp map.
        axesElement = self.root.find(".axes")
        if axesElement is not None and 'elidedfallbackname' in axesElement.attrib:
            self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname']
        axisElements = self.root.findall(".axes/axis")
        if not axisElements:
            return
        for axisElement in axisElements:
            # A values="" attribute (format 5.0+) marks a discrete axis;
            # otherwise the axis is continuous with a minimum/maximum range.
            if self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib:
                axisObject = self.discreteAxisDescriptorClass()
                axisObject.values = [float(s) for s in axisElement.attrib["values"].split(" ")]
            else:
                axisObject = self.axisDescriptorClass()
                axisObject.minimum = float(axisElement.attrib.get("minimum"))
                axisObject.maximum = float(axisElement.attrib.get("maximum"))
            axisObject.default = float(axisElement.attrib.get("default"))
            axisObject.name = axisElement.attrib.get("name")
            if axisElement.attrib.get('hidden', False):
                axisObject.hidden = True
            axisObject.tag = axisElement.attrib.get("tag")
            for mapElement in axisElement.findall('map'):
                a = float(mapElement.attrib['input'])
                b = float(mapElement.attrib['output'])
                axisObject.map.append((a, b))
            for labelNameElement in axisElement.findall('labelname'):
                # Note: elementtree reads the "xml:lang" attribute name as
                # '{http://www.w3.org/XML/1998/namespace}lang'
                for key, lang in labelNameElement.items():
                    if key == XML_LANG:
                        axisObject.labelNames[lang] = tostr(labelNameElement.text)
            labelElement = axisElement.find(".labels")
            if labelElement is not None:
                if "ordering" in labelElement.attrib:
                    axisObject.axisOrdering = int(labelElement.attrib["ordering"])
                for label in labelElement.findall(".label"):
                    axisObject.axisLabels.append(self.readAxisLabel(label))
            self.documentObject.axes.append(axisObject)
            # Remember the default so location dimensions can be validated.
            self.axisDefaults[axisObject.name] = axisObject.default
def readAxisLabel(self, element: ET.Element):
xml_attrs = {'userminimum', 'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
valueStr = element.get("uservalue")
if valueStr is None:
raise DesignSpaceDocumentError("label element must have a uservalue attribute.")
value = float(valueStr)
minimumStr = element.get("userminimum")
minimum = float(minimumStr) if minimumStr is not None else None
maximumStr = element.get("usermaximum")
maximum = float(maximumStr) if maximumStr is not None else None
linkedValueStr = element.get("linkeduservalue")
linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
elidable = True if element.get("elidable") == "true" else False
olderSibling = True if element.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in element.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
return self.axisLabelDescriptorClass(
name=name,
userValue=value,
userMinimum=minimum,
userMaximum=maximum,
elidable=elidable,
olderSibling=olderSibling,
linkedUserValue=linkedValue,
labelNames=labelNames,
)
def readLabels(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'elidable', 'oldersibling'}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"Label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = labelElement.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
raise DesignSpaceDocumentError(f'<label> element "{name}" must only have user locations (using uservalue="").')
elidable = True if labelElement.get("elidable") == "true" else False
olderSibling = True if labelElement.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in labelElement.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
locationLabel = self.locationLabelDescriptorClass(
name=name,
userLocation=userLocation,
elidable=elidable,
olderSibling=olderSibling,
labelNames=labelNames,
)
self.documentObject.locationLabels.append(locationLabel)
def readVariableFonts(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'filename'}
for variableFontElement in self.root.findall(".variable-fonts/variable-font"):
unknown_attrs = set(variableFontElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}")
name = variableFontElement.get("name")
if name is None:
raise DesignSpaceDocumentError("variable-font element must have a name attribute.")
filename = variableFontElement.get("filename")
axisSubsetsElement = variableFontElement.find(".axis-subsets")
if axisSubsetsElement is None:
raise DesignSpaceDocumentError("variable-font element must contain an axis-subsets element.")
axisSubsets = []
for axisSubset in axisSubsetsElement.iterfind(".axis-subset"):
axisSubsets.append(self.readAxisSubset(axisSubset))
lib = None
libElement = variableFontElement.find(".lib")
if libElement is not None:
lib = plistlib.fromtree(libElement[0])
variableFont = self.variableFontsDescriptorClass(
name=name,
filename=filename,
axisSubsets=axisSubsets,
lib=lib,
)
self.documentObject.variableFonts.append(variableFont)
def readAxisSubset(self, element: ET.Element):
if "uservalue" in element.attrib:
xml_attrs = {'name', 'uservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
userValueStr = element.get("uservalue")
if userValueStr is None:
raise DesignSpaceDocumentError(
"The axis-subset element for a discrete subset must have a uservalue attribute."
)
userValue = float(userValueStr)
return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue)
else:
xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
userMinimum = element.get("userminimum")
userDefault = element.get("userdefault")
userMaximum = element.get("usermaximum")
if userMinimum is not None and userDefault is not None and userMaximum is not None:
return self.rangeAxisSubsetDescriptorClass(
name=name,
userMinimum=float(userMinimum),
userDefault=float(userDefault),
userMaximum=float(userMaximum),
)
if all(v is None for v in (userMinimum, userDefault, userMaximum)):
return self.rangeAxisSubsetDescriptorClass(name=name)
raise DesignSpaceDocumentError(
"axis-subset element must have min/max/default values or none at all."
)
    def readSources(self):
        """Read every <source> element into ``documentObject.sources``."""
        for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")):
            filename = sourceElement.attrib.get('filename')
            # Resolve the source path relative to the document's own path.
            if filename is not None and self.path is not None:
                sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))
            else:
                sourcePath = None
            sourceName = sourceElement.attrib.get('name')
            if sourceName is None:
                # add a temporary source name
                sourceName = "temp_master.%d" % (sourceCount)
            sourceObject = self.sourceDescriptorClass()
            sourceObject.path = sourcePath  # absolute path to the ufo source
            sourceObject.filename = filename  # path as it is stored in the document
            sourceObject.name = sourceName
            familyName = sourceElement.attrib.get("familyname")
            if familyName is not None:
                sourceObject.familyName = familyName
            styleName = sourceElement.attrib.get("stylename")
            if styleName is not None:
                sourceObject.styleName = styleName
            for familyNameElement in sourceElement.findall('familyname'):
                # Localised family names; elementtree reads "xml:lang" as
                # '{http://www.w3.org/XML/1998/namespace}lang'
                for key, lang in familyNameElement.items():
                    if key == XML_LANG:
                        familyName = familyNameElement.text
                        sourceObject.setFamilyName(familyName, lang)
            designLocation, userLocation = self.locationFromElement(sourceElement)
            # Sources are located in design coordinates only.
            if userLocation:
                raise DesignSpaceDocumentError(f'<source> element "{sourceName}" must only have design locations (using xvalue="").')
            sourceObject.location = designLocation
            layerName = sourceElement.attrib.get('layer')
            if layerName is not None:
                sourceObject.layerName = layerName
            # copy/mute flags, encoded as child elements with copy="1" /
            # mute="1" attributes.
            for libElement in sourceElement.findall('.lib'):
                if libElement.attrib.get('copy') == '1':
                    sourceObject.copyLib = True
            for groupsElement in sourceElement.findall('.groups'):
                if groupsElement.attrib.get('copy') == '1':
                    sourceObject.copyGroups = True
            for infoElement in sourceElement.findall(".info"):
                if infoElement.attrib.get('copy') == '1':
                    sourceObject.copyInfo = True
                if infoElement.attrib.get('mute') == '1':
                    sourceObject.muteInfo = True
            for featuresElement in sourceElement.findall(".features"):
                if featuresElement.attrib.get('copy') == '1':
                    sourceObject.copyFeatures = True
            for glyphElement in sourceElement.findall(".glyph"):
                glyphName = glyphElement.attrib.get('name')
                if glyphName is None:
                    continue
                if glyphElement.attrib.get('mute') == '1':
                    sourceObject.mutedGlyphNames.append(glyphName)
            for kerningElement in sourceElement.findall(".kerning"):
                if kerningElement.attrib.get('mute') == '1':
                    sourceObject.muteKerning = True
            self.documentObject.sources.append(sourceObject)
def locationFromElement(self, element):
"""Read a nested ``<location>`` element inside the given ``element``.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
elementLocation = (None, None)
for locationElement in element.findall('.location'):
elementLocation = self.readLocationElement(locationElement)
break
return elementLocation
def readLocationElement(self, locationElement):
"""Read a ``<location>`` element.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
if self._strictAxisNames and not self.documentObject.axes:
raise DesignSpaceDocumentError("No axes defined")
userLoc = {}
designLoc = {}
for dimensionElement in locationElement.findall(".dimension"):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
# In case the document contains no axis definitions,
self.log.warning("Location with undefined axis: \"%s\".", dimName)
continue
userValue = xValue = yValue = None
try:
userValue = dimensionElement.attrib.get('uservalue')
if userValue is not None:
userValue = float(userValue)
except ValueError:
self.log.warning("ValueError in readLocation userValue %3.3f", userValue)
try:
xValue = dimensionElement.attrib.get('xvalue')
if xValue is not None:
xValue = float(xValue)
except ValueError:
self.log.warning("ValueError in readLocation xValue %3.3f", xValue)
try:
yValue = dimensionElement.attrib.get('yvalue')
if yValue is not None:
yValue = float(yValue)
except ValueError:
self.log.warning("ValueError in readLocation yValue %3.3f", yValue)
if userValue is None == xValue is None:
raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"')
if yValue is not None:
if xValue is None:
raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}"" with yvalue="{yValue}"')
designLoc[dimName] = (xValue, yValue)
elif xValue is not None:
designLoc[dimName] = xValue
else:
userLoc[dimName] = userValue
return designLoc, userLoc
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
instanceElements = self.root.findall('.instances/instance')
for instanceElement in instanceElements:
self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)
    def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True):
        """Build an instance descriptor from one ``<instance>`` element and
        append it to ``self.documentObject.instances``.

        The ``makeGlyphs``/``makeKerning``/``makeInfo`` flags are accepted
        but not consulted in this method — presumably kept for call-site
        compatibility; verify before removing.
        """
        filename = instanceElement.attrib.get('filename')
        # Resolve the stored (document-relative) filename against the
        # document's own location, when both are known.
        if filename is not None and self.documentObject.path is not None:
            instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename)
        else:
            instancePath = None
        instanceObject = self.instanceDescriptorClass()
        instanceObject.path = instancePath # absolute path to the instance
        instanceObject.filename = filename # path as it is stored in the document
        name = instanceElement.attrib.get("name")
        if name is not None:
            instanceObject.name = name
        familyname = instanceElement.attrib.get('familyname')
        if familyname is not None:
            instanceObject.familyName = familyname
        stylename = instanceElement.attrib.get('stylename')
        if stylename is not None:
            instanceObject.styleName = stylename
        postScriptFontName = instanceElement.attrib.get('postscriptfontname')
        if postScriptFontName is not None:
            instanceObject.postScriptFontName = postScriptFontName
        styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname')
        if styleMapFamilyName is not None:
            instanceObject.styleMapFamilyName = styleMapFamilyName
        styleMapStyleName = instanceElement.attrib.get('stylemapstylename')
        if styleMapStyleName is not None:
            instanceObject.styleMapStyleName = styleMapStyleName
        # read localised names
        # Child elements carrying an xml:lang attribute add per-language
        # variants of the names set from the attributes above.
        for styleNameElement in instanceElement.findall('stylename'):
            for key, lang in styleNameElement.items():
                if key == XML_LANG:
                    styleName = styleNameElement.text
                    instanceObject.setStyleName(styleName, lang)
        for familyNameElement in instanceElement.findall('familyname'):
            for key, lang in familyNameElement.items():
                if key == XML_LANG:
                    familyName = familyNameElement.text
                    instanceObject.setFamilyName(familyName, lang)
        for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'):
            for key, lang in styleMapStyleNameElement.items():
                if key == XML_LANG:
                    styleMapStyleName = styleMapStyleNameElement.text
                    instanceObject.setStyleMapStyleName(styleMapStyleName, lang)
        for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'):
            for key, lang in styleMapFamilyNameElement.items():
                if key == XML_LANG:
                    styleMapFamilyName = styleMapFamilyNameElement.text
                    instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang)
        # An instance is placed either by an explicit (nested or attribute)
        # location or by a named location label — never both.
        designLocation, userLocation = self.locationFromElement(instanceElement)
        locationLabel = instanceElement.attrib.get('location')
        if (designLocation or userLocation) and locationLabel is not None:
            raise DesignSpaceDocumentError('instance element must have at most one of the location="..." attribute or the nested location element')
        instanceObject.locationLabel = locationLabel
        instanceObject.userLocation = userLocation or {}
        instanceObject.designLocation = designLocation or {}
        for glyphElement in instanceElement.findall('.glyphs/glyph'):
            self.readGlyphElement(glyphElement, instanceObject)
        for infoElement in instanceElement.findall("info"):
            self.readInfoElement(infoElement, instanceObject)
        for libElement in instanceElement.findall('lib'):
            self.readLibElement(libElement, instanceObject)
        self.documentObject.instances.append(instanceObject)
    def readLibElement(self, libElement, instanceObject):
        """Read the lib element for the given instance.
        ``libElement[0]`` is the plist ``<dict>`` child; it is parsed with
        ``plistlib.fromtree`` (NOTE(review): presumably
        ``fontTools.misc.plistlib`` — the stdlib module has no ``fromtree``).
        """
        instanceObject.lib = plistlib.fromtree(libElement[0])
    def readInfoElement(self, infoElement, instanceObject):
        """ Read the info element."""
        # The mere presence of an <info> element flags the instance as
        # wanting font.info data; the element content is not inspected here.
        instanceObject.info = True
    def readGlyphElement(self, glyphElement, instanceObject):
        """
        Read the glyph element, which could look like either one of these:
        .. code-block:: xml
            <glyph name="b" unicode="0x62"/>
            <glyph name="b"/>
            <glyph name="b">
                <master location="location-token-bbb" source="master-token-aaa2"/>
                <master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/>
                <note>
                    This is an instance from an anisotropic interpolation.
                </note>
            </glyph>
        The parsed data is stored as a plain dict under
        ``instanceObject.glyphs[glyphName]``.
        """
        glyphData = {}
        glyphName = glyphElement.attrib.get('name')
        if glyphName is None:
            raise DesignSpaceDocumentError("Glyph object without name attribute")
        mute = glyphElement.attrib.get("mute")
        if mute == "1":
            glyphData['mute'] = True
        # unicode
        # The attribute holds space-separated hexadecimal codepoints.
        unicodes = glyphElement.attrib.get('unicode')
        if unicodes is not None:
            try:
                unicodes = [int(u, 16) for u in unicodes.split(" ")]
                glyphData['unicodes'] = unicodes
            except ValueError:
                raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes)
        # Only the first <note> is kept.
        for noteElement in glyphElement.findall('.note'):
            glyphData['note'] = noteElement.text
            break
        designLocation, userLocation = self.locationFromElement(glyphElement)
        if userLocation:
            raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").')
        if designLocation is not None:
            glyphData['instanceLocation'] = designLocation
        # Per-master glyph overrides; created lazily so the 'masters' key is
        # only present when at least one <master> element exists.
        glyphSources = None
        for masterElement in glyphElement.findall('.masters/master'):
            fontSourceName = masterElement.attrib.get('source')
            designLocation, userLocation = self.locationFromElement(masterElement)
            if userLocation:
                raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").')
            masterGlyphName = masterElement.attrib.get('glyphname')
            if masterGlyphName is None:
                # if we don't read a glyphname, use the one we have
                masterGlyphName = glyphName
            d = dict(font=fontSourceName,
                location=designLocation,
                glyphName=masterGlyphName)
            if glyphSources is None:
                glyphSources = []
            glyphSources.append(d)
        if glyphSources is not None:
            glyphData['masters'] = glyphSources
        instanceObject.glyphs[glyphName] = glyphData
    def readLib(self):
        """Read the lib element for the whole document.
        ``libElement[0]`` is the plist ``<dict>`` payload, stored on
        ``documentObject.lib``.
        """
        for libElement in self.root.findall(".lib"):
            self.documentObject.lib = plistlib.fromtree(libElement[0])
class DesignSpaceDocument(LogMixin, AsDictMixin):
"""The DesignSpaceDocument object can read and write ``.designspace`` data.
It imports the axes, sources, variable fonts and instances to very basic
**descriptor** objects that store the data in attributes. Data is added to
the document by creating such descriptor objects, filling them with data
and then adding them to the document. This makes it easy to integrate this
object in different contexts.
The **DesignSpaceDocument** object can be subclassed to work with
different objects, as long as they have the same attributes. Reader and
Writer objects can be subclassed as well.
**Note:** Python attribute names are usually camelCased, the
corresponding `XML <document-xml-structure>`_ attributes are usually
all lowercase.
.. code:: python
from fontTools.designspaceLib import DesignSpaceDocument
doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
doc.formatVersion
doc.elidedFallbackName
doc.axes
doc.locationLabels
doc.rules
doc.rulesProcessingLast
doc.sources
doc.variableFonts
doc.instances
doc.lib
"""
    def __init__(self, readerClass=None, writerClass=None):
        """Set up an empty document.

        ``readerClass`` / ``writerClass`` default to :class:`BaseDocReader`
        and :class:`BaseDocWriter`; pass subclasses to customize parsing or
        serialization (the descriptor classes are looked up on them).
        """
        self.path = None
        """String, optional. When the document is read from the disk, this is
        the full path that was given to :meth:`read` or :meth:`fromfile`.
        """
        self.filename = None
        """String, optional. When the document is read from the disk, this is
        its original file name, i.e. the last part of its path.
        When the document is produced by a Python script and still only exists
        in memory, the producing script can write here an indication of a
        possible "good" filename, in case one wants to save the file somewhere.
        """
        self.formatVersion: Optional[str] = None
        """Format version for this document, as a string. E.g. "4.0" """
        self.elidedFallbackName: Optional[str] = None
        """STAT Style Attributes Header field ``elidedFallbackNameID``.
        See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_
        .. versionadded:: 5.0
        """
        self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
        """List of this document's axes."""
        self.locationLabels: List[LocationLabelDescriptor] = []
        """List of this document's STAT format 4 labels.
        .. versionadded:: 5.0"""
        self.rules: List[RuleDescriptor] = []
        """List of this document's rules."""
        self.rulesProcessingLast: bool = False
        """This flag indicates whether the substitution rules should be applied
        before or after other glyph substitution features.
        - False: before
        - True: after.
        Default is False. For new projects, you probably want True. See
        the following issues for more information:
        `fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__
        `fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__
        If you want to use a different feature altogether, e.g. ``calt``,
        use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``
        .. code:: xml
            <lib>
                <dict>
                    <key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
                    <string>calt</string>
                </dict>
            </lib>
        """
        self.sources: List[SourceDescriptor] = []
        """List of this document's sources."""
        self.variableFonts: List[VariableFontDescriptor] = []
        """List of this document's variable fonts.
        .. versionadded:: 5.0"""
        self.instances: List[InstanceDescriptor] = []
        """List of this document's instances."""
        self.lib: Dict = {}
        """User defined, custom data associated with the whole document.
        Use reverse-DNS notation to identify your own data.
        Respect the data stored by others.
        """
        self.default: Optional[str] = None
        """Name of the default master.
        This attribute is updated by the :meth:`findDefault`
        """
        # Fall back to the stock reader/writer when no override is given.
        if readerClass is not None:
            self.readerClass = readerClass
        else:
            self.readerClass = BaseDocReader
        if writerClass is not None:
            self.writerClass = writerClass
        else:
            self.writerClass = BaseDocWriter
@classmethod
def fromfile(cls, path, readerClass=None, writerClass=None):
"""Read a designspace file from ``path`` and return a new instance of
:class:.
"""
self = cls(readerClass=readerClass, writerClass=writerClass)
self.read(path)
return self
@classmethod
def fromstring(cls, string, readerClass=None, writerClass=None):
self = cls(readerClass=readerClass, writerClass=writerClass)
reader = self.readerClass.fromstring(string, self)
reader.read()
if self.sources:
self.findDefault()
return self
def tostring(self, encoding=None):
"""Returns the designspace as a string. Default encoding ``utf-8``."""
if encoding is str or (
encoding is not None and encoding.lower() == "unicode"
):
f = StringIO()
xml_declaration = False
elif encoding is None or encoding == "utf-8":
f = BytesIO()
encoding = "UTF-8"
xml_declaration = True
else:
raise ValueError("unsupported encoding: '%s'" % encoding)
writer = self.writerClass(f, self)
writer.write(encoding=encoding, xml_declaration=xml_declaration)
return f.getvalue()
def read(self, path):
"""Read a designspace file from ``path`` and populates the fields of
``self`` with the data.
"""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
reader = self.readerClass(path, self)
reader.read()
if self.sources:
self.findDefault()
def write(self, path):
"""Write this designspace to ``path``."""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
self.updatePaths()
writer = self.writerClass(path, self)
writer.write()
    def _posixRelativePath(self, otherPath):
        # Return otherPath relative to this document's directory, with
        # forward slashes (via the module-level `posix` helper).
        # NOTE(review): assumes self.path is set — verify at call sites.
        relative = os.path.relpath(otherPath, os.path.dirname(self.path))
        return posix(relative)
    def updatePaths(self):
        """
        Right before we save we need to identify and respond to the following situations:
        In each descriptor, we have to do the right thing for the filename attribute.
        ::
            case 1.
            descriptor.filename == None
            descriptor.path == None
            -- action:
            write as is, descriptors will not have a filename attr.
            useless, but no reason to interfere.
            case 2.
            descriptor.filename == "../something"
            descriptor.path == None
            -- action:
            write as is. The filename attr should not be touched.
            case 3.
            descriptor.filename == None
            descriptor.path == "~/absolute/path/there"
            -- action:
            calculate the relative path for filename.
            We're not overwriting some other value for filename, it should be fine
            case 4.
            descriptor.filename == '../somewhere'
            descriptor.path == "~/absolute/path/there"
            -- action:
            there is a conflict between the given filename, and the path.
            So we know where the file is relative to the document.
            Can't guess why they're different, we just choose for path to be correct and update filename.
        Cases 1 and 2 (no path) are handled implicitly by doing nothing.
        """
        assert self.path is not None
        # Only sources and instances carry filename/path pairs.
        for descriptor in self.sources + self.instances:
            if descriptor.path is not None:
                # case 3 and 4: filename gets updated and relativized
                descriptor.filename = self._posixRelativePath(descriptor.path)
    def addSource(self, sourceDescriptor: SourceDescriptor):
        """Add the given ``sourceDescriptor`` to ``doc.sources``."""
        self.sources.append(sourceDescriptor)
    def addSourceDescriptor(self, **kwargs):
        """Instantiate a new :class:`SourceDescriptor` using the given
        ``kwargs`` and add it to ``doc.sources``.
        """
        # The concrete descriptor class comes from the writer class, so
        # custom writer subclasses can supply their own descriptor types.
        source = self.writerClass.sourceDescriptorClass(**kwargs)
        self.addSource(source)
        return source
    def addInstance(self, instanceDescriptor: InstanceDescriptor):
        """Add the given ``instanceDescriptor`` to :attr:`instances`."""
        self.instances.append(instanceDescriptor)
    def addInstanceDescriptor(self, **kwargs):
        """Instantiate a new :class:`InstanceDescriptor` using the given
        ``kwargs`` and add it to :attr:`instances`.
        """
        instance = self.writerClass.instanceDescriptorClass(**kwargs)
        self.addInstance(instance)
        return instance
    def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
        """Add the given ``axisDescriptor`` to :attr:`axes`."""
        self.axes.append(axisDescriptor)
    def addAxisDescriptor(self, **kwargs):
        """Instantiate a new :class:`AxisDescriptor` using the given
        ``kwargs`` and add it to :attr:`axes`.
        The axis will be an instance of :class:`DiscreteAxisDescriptor` if
        the ``kwargs`` provide ``values``, or a :class:`AxisDescriptor` otherwise.
        """
        # "values" (plural) is what marks a discrete axis.
        if "values" in kwargs:
            axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
        else:
            axis = self.writerClass.axisDescriptorClass(**kwargs)
        self.addAxis(axis)
        return axis
    def addRule(self, ruleDescriptor: RuleDescriptor):
        """Add the given ``ruleDescriptor`` to :attr:`rules`."""
        self.rules.append(ruleDescriptor)
    def addRuleDescriptor(self, **kwargs):
        """Instantiate a new :class:`RuleDescriptor` using the given
        ``kwargs`` and add it to :attr:`rules`.
        """
        rule = self.writerClass.ruleDescriptorClass(**kwargs)
        self.addRule(rule)
        return rule
    def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
        """Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.
        .. versionadded:: 5.0
        """
        self.variableFonts.append(variableFontDescriptor)
    def addVariableFontDescriptor(self, **kwargs):
        """Instantiate a new :class:`VariableFontDescriptor` using the given
        ``kwargs`` and add it to :attr:`variableFonts`.
        .. versionadded:: 5.0
        """
        variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
        self.addVariableFont(variableFont)
        return variableFont
    def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
        """Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`.
        .. versionadded:: 5.0
        """
        self.locationLabels.append(locationLabelDescriptor)
    def addLocationLabelDescriptor(self, **kwargs):
        """Instantiate a new :class:`LocationLabelDescriptor` using the given
        ``kwargs`` and add it to :attr:`locationLabels`.
        .. versionadded:: 5.0
        """
        locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
        self.addLocationLabel(locationLabel)
        return locationLabel
def newDefaultLocation(self):
"""Return a dict with the default location in design space coordinates."""
# Without OrderedDict, output XML would be non-deterministic.
# https://github.com/LettError/designSpaceDocument/issues/10
loc = collections.OrderedDict()
for axisDescriptor in self.axes:
loc[axisDescriptor.name] = axisDescriptor.map_forward(
axisDescriptor.default
)
return loc
def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:
"""Return the :class:`LocationLabel` that matches the given
``userLocation``, or ``None`` if no such label exists.
.. versionadded:: 5.0
"""
return next(
(label for label in self.locationLabels if label.userLocation == userLocation), None
)
def updateFilenameFromPath(self, masters=True, instances=True, force=False):
"""Set a descriptor filename attr from the path and this document path.
If the filename attribute is not None: skip it.
"""
if masters:
for descriptor in self.sources:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
if instances:
for descriptor in self.instances:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
    def newAxisDescriptor(self):
        """Ask the writer class to make us a new axisDescriptor."""
        # NOTE(review): "getAxisDecriptor" (sic) matches the writer API's
        # historical spelling; don't "fix" it here without renaming there.
        return self.writerClass.getAxisDecriptor()
    def newSourceDescriptor(self):
        """Ask the writer class to make us a new sourceDescriptor."""
        return self.writerClass.getSourceDescriptor()
    def newInstanceDescriptor(self):
        """Ask the writer class to make us a new instanceDescriptor."""
        return self.writerClass.getInstanceDescriptor()
def getAxisOrder(self):
"""Return a list of axis names, in the same order as defined in the document."""
names = []
for axisDescriptor in self.axes:
names.append(axisDescriptor.name)
return names
def getAxis(self, name: str) -> AxisDescriptor | DiscreteAxisDescriptor | None:
"""Return the axis with the given ``name``, or ``None`` if no such axis exists."""
return next((axis for axis in self.axes if axis.name == name), None)
def getAxisByTag(self, tag: str) -> AxisDescriptor | DiscreteAxisDescriptor | None:
"""Return the axis with the given ``tag``, or ``None`` if no such axis exists."""
return next((axis for axis in self.axes if axis.tag == tag), None)
def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
"""Return the top-level location label with the given ``name``, or
``None`` if no such label exists.
.. versionadded:: 5.0
"""
for label in self.locationLabels:
if label.name == name:
return label
return None
def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:
"""Map a user location to a design location.
Assume that missing coordinates are at the default location for that axis.
Note: the output won't be anisotropic, only the xvalue is set.
.. versionadded:: 5.0
"""
return {
axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
for axis in self.axes
}
def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
"""Map a design location to a user location.
Assume that missing coordinates are at the default location for that axis.
When the input has anisotropic locations, only the xvalue is used.
.. versionadded:: 5.0
"""
return {
axis.name: (
axis.map_backward(designLocation[axis.name])
if axis.name in designLocation
else axis.default
)
for axis in self.axes
}
    def findDefault(self):
        """Set and return SourceDescriptor at the default location or None.
        The default location is the set of all `default` values in user space
        of all axes.
        This function updates the document's :attr:`default` value.
        .. versionchanged:: 5.0
        Allow the default source to not specify some of the axis values, and
        they are assumed to be the default.
        See :meth:`SourceDescriptor.getFullDesignLocation()`
        """
        self.default = None
        # Convert the default location from user space to design space before comparing
        # it against the SourceDescriptor locations (always in design space).
        defaultDesignLocation = self.newDefaultLocation()
        for sourceDescriptor in self.sources:
            if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation:
                self.default = sourceDescriptor
                return sourceDescriptor
        # No source sits exactly at the default location.
        return None
def normalizeLocation(self, location):
"""Return a dict with normalized axis values."""
from fontTools.varLib.models import normalizeValue
new = {}
for axis in self.axes:
if axis.name not in location:
# skipping this dimension it seems
continue
value = location[axis.name]
# 'anisotropic' location, take first coord only
if isinstance(value, tuple):
value = value[0]
triple = [
axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum)
]
new[axis.name] = normalizeValue(value, triple)
return new
    def normalize(self):
        """
        Normalise the geometry of this designspace:
        - scale all the locations of all masters and instances to the -1 - 0 - 1 value.
        - we need the axis data to do the scaling, so we do those last.
        """
        # masters
        for item in self.sources:
            item.location = self.normalizeLocation(item.location)
        # instances
        for item in self.instances:
            # glyph masters for this instance
            # NOTE(review): this assumes every glyphData carries both
            # 'instanceLocation' and 'masters' keys — readGlyphElement only
            # sets them conditionally; verify before relying on this.
            for _, glyphData in item.glyphs.items():
                glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation'])
                for glyphMaster in glyphData['masters']:
                    glyphMaster['location'] = self.normalizeLocation(glyphMaster['location'])
            item.location = self.normalizeLocation(item.location)
        # the axes
        for axis in self.axes:
            # scale the map first
            newMap = []
            for inputValue, outputValue in axis.map:
                newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name)
                newMap.append((inputValue, newOutputValue))
            if newMap:
                axis.map = newMap
            # finally the axis values
            minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)
            maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name)
            default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)
            # and set them in the axis.minimum
            axis.minimum = minimum
            axis.maximum = maximum
            axis.default = default
        # now the rules
        # Each condition's minimum/maximum is normalized on its own axis.
        for rule in self.rules:
            newConditionSets = []
            for conditions in rule.conditionSets:
                newConditions = []
                for cond in conditions:
                    if cond.get('minimum') is not None:
                        minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name'])
                    else:
                        minimum = None
                    if cond.get('maximum') is not None:
                        maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name'])
                    else:
                        maximum = None
                    newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))
                newConditionSets.append(newConditions)
            rule.conditionSets = newConditionSets
    def loadSourceFonts(self, opener, **kwargs):
        """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts.
        Takes a callable which initializes a new font object (e.g. TTFont, or
        defcon.Font, etc.) from the SourceDescriptor.path, and sets the
        SourceDescriptor.font attribute.
        If the font attribute is already not None, it is not loaded again.
        Fonts with the same path are only loaded once and shared among SourceDescriptors.
        For example, to load UFO sources using defcon:
            designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
            designspace.loadSourceFonts(defcon.Font)
        Or to load masters as FontTools binary fonts, including extra options:
            designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)
        Args:
            opener (Callable): takes one required positional argument, the source.path,
                and an optional list of keyword arguments, and returns a new font object
                loaded from the path.
            **kwargs: extra options passed on to the opener function.
        Returns:
            List of font objects in the order they appear in the sources list.
        Raises:
            DesignSpaceDocumentError: when a source that needs loading has no path.
        """
        # we load fonts with the same source.path only once
        loaded = {}
        fonts = []
        for source in self.sources:
            if source.font is not None: # font already loaded
                fonts.append(source.font)
                continue
            if source.path in loaded:
                source.font = loaded[source.path]
            else:
                if source.path is None:
                    raise DesignSpaceDocumentError(
                        "Designspace source '%s' has no 'path' attribute"
                        % (source.name or "<Unknown>")
                    )
                source.font = opener(source.path, **kwargs)
                loaded[source.path] = source.font
            fonts.append(source.font)
        return fonts
@property
def formatTuple(self):
"""Return the formatVersion as a tuple of (major, minor).
.. versionadded:: 5.0
"""
if self.formatVersion is None:
return (5, 0)
numbers = (int(i) for i in self.formatVersion.split("."))
major = next(numbers)
minor = next(numbers, 0)
return (major, minor)
    def getVariableFonts(self) -> List[VariableFontDescriptor]:
        """Return all variable fonts defined in this document, or implicit
        variable fonts that can be built from the document's continuous axes.
        In the case of Designspace documents before version 5, the whole
        document was implicitly describing a variable font that covers the
        whole space.
        In version 5 and above documents, there can be as many variable fonts
        as there are locations on discrete axes.
        .. seealso:: :func:`splitInterpolable`
        .. versionadded:: 5.0
        """
        if self.variableFonts:
            return self.variableFonts
        variableFonts = []
        discreteAxes = []
        rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = []
        # Split axes: discrete ones enumerate VFs, continuous ones become
        # full-range subsets shared by every implicit VF.
        for axis in self.axes:
            if hasattr(axis, "values"):
                # Mypy doesn't support narrowing union types via hasattr()
                # TODO(Python 3.10): use TypeGuard
                # https://mypy.readthedocs.io/en/stable/type_narrowing.html
                axis = cast(DiscreteAxisDescriptor, axis)
                discreteAxes.append(axis) # type: ignore
            else:
                rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name))
        # One implicit VF per point of the discrete-axis cartesian product
        # (a single empty combination when there are no discrete axes).
        valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
        for values in valueCombinations:
            basename = None
            # The on-disk path, when known, overrides the stored filename.
            if self.filename is not None:
                basename = os.path.splitext(self.filename)[0] + "-VF"
            if self.path is not None:
                basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF"
            if basename is None:
                basename = "VF"
            axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)])
            variableFonts.append(VariableFontDescriptor(
                name=f"{basename}{axisNames}",
                axisSubsets=rangeAxisSubsets + [
                    ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
                    for axis, value in zip(discreteAxes, values)
                ]
            ))
        return variableFonts
    def deepcopyExceptFonts(self):
        """Allow deep-copying a DesignSpace document without deep-copying
        attached UFO fonts or TTFont objects. The :attr:`font` attribute
        is shared by reference between the original and the copy.
        .. versionadded:: 5.0
        """
        fonts = [source.font for source in self.sources]
        try:
            # Detach the fonts so deepcopy doesn't traverse them.
            for source in self.sources:
                source.font = None
            res = copy.deepcopy(self)
            # Re-attach the same font objects to the copy, by reference.
            for source, font in zip(res.sources, fonts):
                source.font = font
            return res
        finally:
            # Always restore the original document's fonts, even on error.
            for source, font in zip(self.sources, fonts):
                source.font = font
from collections import namedtuple
from fontTools.cffLib import (
maxStackLimit,
TopDictIndex,
buildOrder,
topDictOperators,
topDictOperators2,
privateDictOperators,
privateDictOperators2,
FDArrayIndex,
FontDict,
VarStoreData
)
from io import BytesIO
from fontTools.cffLib.specializer import (
specializeCommands, commandsToProgram)
from fontTools.ttLib import newTable
from fontTools import varLib
from fontTools.varLib.models import allEqual
from fontTools.misc.roundTools import roundFunc
from fontTools.misc.psCharStrings import T2CharString, T2OutlineExtractor
from fontTools.pens.t2CharStringPen import T2CharStringPen
from functools import partial
from .errors import (
VarLibCFFDictMergeError, VarLibCFFPointTypeMergeError,
VarLibCFFHintTypeMergeError,VarLibMergeError)
# Backwards compatibility
MergeDictError = VarLibCFFDictMergeError
MergeTypeError = VarLibCFFPointTypeMergeError
def addCFFVarStore(varFont, varModel, varDataList, masterSupports):
    """Build an ItemVariationStore from *masterSupports*/*varDataList* and
    attach it to the CFF2 top dict of *varFont*; when the FDArray has no
    vstore yet, also share it with each FontDict's Private dict.
    NOTE(review): *varModel* is unused in this body — presumably kept for
    call-site compatibility; verify before removing.
    """
    fvarTable = varFont['fvar']
    axisKeys = [axis.axisTag for axis in fvarTable.axes]
    varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys)
    varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList)
    topDict = varFont['CFF2'].cff.topDictIndex[0]
    topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV)
    if topDict.FDArray[0].vstore is None:
        fdArray = topDict.FDArray
        for fontDict in fdArray:
            if hasattr(fontDict, "Private"):
                fontDict.Private.vstore = topDict.VarStore
def lib_convertCFFToCFF2(cff, otFont):
    """Mutate a decompiled CFF 1.0 table object *cff* into CFF2 form:
    rewire the top dict index, ensure an FDArray exists, strip operators
    that CFF2 no longer supports, then round-trip compile/decompile so the
    charstrings are re-instantiated with CFF2 semantics.
    """
    # This assumes a decompiled CFF table.
    cff2GetGlyphOrder = cff.otFont.getGlyphOrder
    topDictData = TopDictIndex(None, cff2GetGlyphOrder, None)
    topDictData.items = cff.topDictIndex.items
    cff.topDictIndex = topDictData
    topDict = topDictData[0]
    if hasattr(topDict, 'Private'):
        privateDict = topDict.Private
    else:
        privateDict = None
    opOrder = buildOrder(topDictOperators2)
    topDict.order = opOrder
    topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
    if not hasattr(topDict, "FDArray"):
        # CFF 1.0 non-CID font: synthesize a one-element FDArray holding
        # the (possibly absent) top-level Private dict.
        fdArray = topDict.FDArray = FDArrayIndex()
        fdArray.strings = None
        fdArray.GlobalSubrs = topDict.GlobalSubrs
        topDict.GlobalSubrs.fdArray = fdArray
        charStrings = topDict.CharStrings
        if charStrings.charStringsAreIndexed:
            charStrings.charStringsIndex.fdArray = fdArray
        else:
            charStrings.fdArray = fdArray
        fontDict = FontDict()
        fontDict.setCFF2(True)
        fdArray.append(fontDict)
        fontDict.Private = privateDict
        privateOpOrder = buildOrder(privateDictOperators2)
        if privateDict is not None:
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in privateDict.rawDict:
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key
    else:
        # clean up the PrivateDicts in the fdArray
        fdArray = topDict.FDArray
        privateOpOrder = buildOrder(privateDictOperators2)
        for fontDict in fdArray:
            fontDict.setCFF2(True)
            for key in list(fontDict.rawDict.keys()):
                if key not in fontDict.order:
                    del fontDict.rawDict[key]
                    if hasattr(fontDict, key):
                        delattr(fontDict, key)
            privateDict = fontDict.Private
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in privateDict.rawDict:
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key
    # Now delete the deprecated topDict operators from CFF 1.0
    for entry in topDictOperators:
        key = entry[1]
        if key not in opOrder:
            if key in topDict.rawDict:
                del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)
    # At this point, the Subrs and Charstrings are all still T2Charstring class
    # easiest to fix this by compiling, then decompiling again
    cff.major = 2
    file = BytesIO()
    cff.compile(file, otFont, isCFF2=True)
    file.seek(0)
    cff.decompile(file, otFont, isCFF2=True)
def convertCFFtoCFF2(varFont):
    """Replace *varFont*'s 'CFF ' table with an equivalent 'CFF2' table,
    converting the cff data in place via :func:`lib_convertCFFToCFF2`.
    """
    # Convert base font to a single master CFF2 font.
    cffTable = varFont['CFF ']
    lib_convertCFFToCFF2(cffTable.cff, varFont)
    # Re-home the converted data under a fresh CFF2 table and drop CFF 1.0.
    cff2Table = newTable("CFF2")
    cff2Table.cff = cffTable.cff
    varFont['CFF2'] = cff2Table
    del varFont['CFF ']
def conv_to_int(num):
    """Return ``num`` as an ``int`` when it is a whole-number float;
    any other value is returned unchanged.
    """
    is_whole_float = isinstance(num, float) and num.is_integer()
    return int(num) if is_whole_float else num
pd_blend_fields = ("BlueValues", "OtherBlues", "FamilyBlues",
"FamilyOtherBlues", "BlueScale", "BlueShift",
"BlueFuzz", "StdHW", "StdVW", "StemSnapH",
"StemSnapV")
def get_private(regionFDArrays, fd_index, ri, fd_map):
    """Return the Private dict of region ``ri`` that corresponds to the
    default font's FontDict ``fd_index``, or ``None`` when that region has
    no matching FontDict (per ``fd_map``).
    """
    region_fd_map = fd_map[fd_index]
    if ri not in region_fd_map:
        return None
    region_font_dict = regionFDArrays[ri][region_fd_map[ri]]
    return region_font_dict.Private
def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
    """
    I step through the FontDicts in the FDArray of the varfont TopDict.
    For each varfont FontDict:

    * step through each key in FontDict.Private.
    * For each key, step through each relevant source font Private dict, and
      build a list of values to blend.

    The 'relevant' source fonts are selected by first getting the right
    submodel using ``vsindex_dict[vsindex]``. The indices of the
    ``subModel.locations`` are mapped to source font list indices by
    assuming the latter order is the same as the order of the
    ``var_model.locations``. I can then get the index of each subModel
    location in the list of ``var_model.locations``.
    """
    topDict = top_dicts[0]
    region_top_dicts = top_dicts[1:]
    # Region entries may be full TopDicts (with an FDArray) or bare FontDicts;
    # normalize both cases to a list of FDArray-like lists.
    if hasattr(region_top_dicts[0], 'FDArray'):
        regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]
    else:
        regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]
    for fd_index, font_dict in enumerate(topDict.FDArray):
        private_dict = font_dict.Private
        vsindex = getattr(private_dict, 'vsindex', 0)
        # At the moment, no PrivateDict has a vsindex key, but let's support
        # how it should work. See comment at end of
        # merge_charstrings() - still need to optimize use of vsindex.
        sub_model, _ = vsindex_dict[vsindex]
        # Map each sub-model location (after the default) back to an index in
        # the full model's region list.
        master_indices = []
        for loc in sub_model.locations[1:]:
            i = var_model.locations.index(loc) - 1
            master_indices.append(i)
        pds = [private_dict]
        last_pd = private_dict
        for ri in master_indices:
            pd = get_private(regionFDArrays, fd_index, ri, fd_map)
            # If the region font doesn't have this FontDict, just reference
            # the last one used.
            if pd is None:
                pd = last_pd
            else:
                last_pd = pd
            pds.append(pd)
        num_masters = len(pds)
        for key, value in private_dict.rawDict.items():
            dataList = []
            if key not in pd_blend_fields:
                continue
            if isinstance(value, list):
                try:
                    values = [pd.rawDict[key] for pd in pds]
                except KeyError:
                    print(
                        "Warning: {key} in default font Private dict is "
                        "missing from another font, and was "
                        "discarded.".format(key=key))
                    continue
                try:
                    values = zip(*values)
                except IndexError:
                    raise VarLibCFFDictMergeError(key, value, values)
                """
                Row 0 contains the first value from each master.
                Convert each row from absolute values to relative
                values from the previous row.
                e.g for three masters,  a list of values was:
                master 0 OtherBlues = [-217,-205]
                master 1 OtherBlues = [-234,-222]
                master 2 OtherBlues = [-188,-176]
                The call to zip() converts this to:
                [(-217, -234, -188), (-205, -222, -176)]
                and is converted finally to:
                OtherBlues = [[-217, 17.0, 46.0], [-205, 0.0, 0.0]]
                """
                prev_val_list = [0] * num_masters
                any_points_differ = False
                for val_list in values:
                    rel_list = [(val - prev_val_list[i]) for (
                        i, val) in enumerate(val_list)]
                    if (not any_points_differ) and not allEqual(rel_list):
                        any_points_differ = True
                    prev_val_list = val_list
                    deltas = sub_model.getDeltas(rel_list)
                    # For PrivateDict BlueValues, the default font
                    # values are absolute, not relative to the prior value.
                    deltas[0] = val_list[0]
                    dataList.append(deltas)
                # If there are no blend values, then
                # we can collapse the blend lists.
                if not any_points_differ:
                    dataList = [data[0] for data in dataList]
            else:
                # Scalar field (e.g. BlueScale): blend directly when the
                # master values differ, otherwise keep the common value.
                values = [pd.rawDict[key] for pd in pds]
                if not allEqual(values):
                    dataList = sub_model.getDeltas(values)
                else:
                    dataList = values[0]
            # Convert numbers with no decimal part to an int
            if isinstance(dataList, list):
                for i, item in enumerate(dataList):
                    if isinstance(item, list):
                        for j, jtem in enumerate(item):
                            dataList[i][j] = conv_to_int(jtem)
                    else:
                        dataList[i] = conv_to_int(item)
            else:
                dataList = conv_to_int(dataList)
            private_dict.rawDict[key] = dataList
def _cff_or_cff2(font):
if "CFF " in font:
return font["CFF "]
return font["CFF2"]
def getfd_map(varFont, fonts_list):
    """Since a subset source font may have fewer FontDicts in their
    FDArray than the default font, we have to match up the FontDicts in
    the different fonts. We do this with the FDSelect array, and by
    assuming that the same glyph will reference matching FontDicts in
    each source font. We return a mapping from fdIndex in the default
    font to a dictionary which maps each master list index of each
    region font to the equivalent fdIndex in the region font."""
    fd_map = {}
    default_font = fonts_list[0]
    region_fonts = fonts_list[1:]
    num_regions = len(region_fonts)
    topDict = _cff_or_cff2(default_font).cff.topDictIndex[0]
    if not hasattr(topDict, 'FDSelect'):
        # All glyphs reference only one FontDict.
        # Map the FD index for regions to index 0.
        fd_map[0] = {ri:0 for ri in range(num_regions)}
        return fd_map
    gname_mapping = {}
    default_fdSelect = topDict.FDSelect
    glyphOrder = default_font.getGlyphOrder()
    # Record which default-font FontDict each glyph uses.
    for gid, fdIndex in enumerate(default_fdSelect):
        gname_mapping[glyphOrder[gid]] = fdIndex
        if fdIndex not in fd_map:
            fd_map[fdIndex] = {}
    for ri, region_font in enumerate(region_fonts):
        region_glyphOrder = region_font.getGlyphOrder()
        region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0]
        if not hasattr(region_topDict, 'FDSelect'):
            # All the glyphs share the same FontDict. Pick any glyph.
            default_fdIndex = gname_mapping[region_glyphOrder[0]]
            fd_map[default_fdIndex][ri] = 0
        else:
            region_fdSelect = region_topDict.FDSelect
            # First glyph seen for a given default fdIndex wins; later
            # glyphs are assumed to agree.
            for gid, fdIndex in enumerate(region_fdSelect):
                default_fdIndex = gname_mapping[region_glyphOrder[gid]]
                region_map = fd_map[default_fdIndex]
                if ri not in region_map:
                    region_map[ri] = fdIndex
    return fd_map
# Result bundle of merge_charstrings(): the VarData subtables, the master
# support regions they reference, and the vsindex --> (model, keys) mapping.
CVarData = namedtuple('CVarData', 'varDataList masterSupports vsindex_dict')
def merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder):
    """Merge the region (non-default) master fonts into *varFont*'s CFF2
    table: blend the charstrings, blend the PrivateDicts, and add the
    resulting CFF2 VarStore."""
    topDict = varFont['CFF2'].cff.topDictIndex[0]
    top_dicts = [topDict] + [
        _cff_or_cff2(ttFont).cff.topDictIndex[0]
        for ttFont in ordered_fonts_list[1:]
    ]
    num_masters = len(model.mapping)
    cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model)
    fd_map = getfd_map(varFont, ordered_fonts_list)
    merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map)
    addCFFVarStore(varFont, model, cvData.varDataList,
                   cvData.masterSupports)
def _get_cs(charstrings, glyphName):
if glyphName not in charstrings:
return None
return charstrings[glyphName]
def _add_new_vsindex(model, key, masterSupports, vsindex_dict,
                     vsindex_by_key, varDataList):
    """Allocate the next vsindex for *model*, registering its supports in
    *masterSupports* (appending any not yet seen) and appending a new
    VarData subtable to *varDataList*. Returns the new vsindex."""
    varTupleIndexes = []
    for support in model.supports[1:]:
        try:
            tuple_index = masterSupports.index(support)
        except ValueError:
            # First time this support region is seen: register it.
            masterSupports.append(support)
            tuple_index = len(masterSupports) - 1
        varTupleIndexes.append(tuple_index)
    var_data = varLib.builder.buildVarData(varTupleIndexes, None, False)
    vsindex = len(vsindex_dict)
    vsindex_by_key[key] = vsindex
    vsindex_dict[vsindex] = (model, [key])
    varDataList.append(var_data)
    return vsindex
def merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):
    """Blend the charstrings from all master TopDicts into the default
    font's CharStrings index, returning a CVarData bundle describing the
    VarData subtables and vsindex mapping that were built."""
    vsindex_dict = {}
    vsindex_by_key = {}
    varDataList = []
    masterSupports = []
    default_charstrings = top_dicts[0].CharStrings
    for gid, gname in enumerate(glyphOrder):
        all_cs = [
            _get_cs(td.CharStrings, gname)
            for td in top_dicts]
        # Only the default master has this glyph: nothing to blend.
        if len([gs for gs in all_cs if gs is not None]) == 1:
            continue
        model, model_cs = masterModel.getSubModel(all_cs)
        # create the first pass CFF2 charstring, from
        # the default charstring.
        default_charstring = model_cs[0]
        var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)
        # We need to override outlineExtractor because these
        # charstrings do have widths in the 'program'; we need to drop these
        # values rather than post assertion error for them.
        default_charstring.outlineExtractor = MergeOutlineExtractor
        default_charstring.draw(var_pen)
        # Add the coordinates from all the other regions to the
        # blend lists in the CFF2 charstring.
        region_cs = model_cs[1:]
        for region_idx, region_charstring in enumerate(region_cs, start=1):
            var_pen.restart(region_idx)
            region_charstring.outlineExtractor = MergeOutlineExtractor
            region_charstring.draw(var_pen)
        # Collapse each coordinate list to a blend operator and its args.
        new_cs = var_pen.getCharString(
            private=default_charstring.private,
            globalSubrs=default_charstring.globalSubrs,
            var_model=model, optimize=True)
        default_charstrings[gname] = new_cs
        if (not var_pen.seen_moveto) or ('blend' not in new_cs.program):
            # If this is not a marking glyph, or if there are no blend
            # arguments, then we can use vsindex 0. No need to
            # check if we need a new vsindex.
            continue
        # If the charstring required a new model, create
        # a VarData table to go with, and set vsindex.
        key = tuple(v is not None for v in all_cs)
        try:
            vsindex = vsindex_by_key[key]
        except KeyError:
            vsindex = _add_new_vsindex(model, key, masterSupports, vsindex_dict,
                                       vsindex_by_key, varDataList)
        # We do not need to check for an existing new_cs.private.vsindex,
        # as we know it doesn't exist yet.
        if vsindex != 0:
            new_cs.program[:0] = [vsindex, 'vsindex']
    # If there is no variation in any of the charstrings, then vsindex_dict
    # never gets built. This could still be needed if there is variation
    # in the PrivateDict, so we will build the default data for vsindex = 0.
    if not vsindex_dict:
        key = (True,) * num_masters
        _add_new_vsindex(masterModel, key, masterSupports, vsindex_dict,
                         vsindex_by_key, varDataList)
    cvData = CVarData(varDataList=varDataList, masterSupports=masterSupports,
                      vsindex_dict=vsindex_dict)
    # XXX To do: optimize use of vsindex between the PrivateDicts and
    # charstrings
    return cvData
class CFFToCFF2OutlineExtractor(T2OutlineExtractor):
    """ This class is used to remove the initial width from the CFF
    charstring without trying to add the width to self.nominalWidthX,
    which is None. """
    def popallWidth(self, evenOdd=0):
        args = self.popall()
        if not self.gotWidth:
            # An argument-count parity mismatch means the first stack value
            # is the glyph width: drop it instead of recording it.
            if evenOdd ^ (len(args) % 2):
                args = args[1:]
            self.width = self.defaultWidthX
            self.gotWidth = 1
        return args
class MergeOutlineExtractor(CFFToCFF2OutlineExtractor):
    """ Used to extract the charstring commands - including hints - from a
    CFF charstring in order to merge it as another set of region data
    into a CFF2 variable font charstring."""

    def __init__(self, pen, localSubrs, globalSubrs,
                 nominalWidthX, defaultWidthX, private=None, blender=None):
        super().__init__(pen, localSubrs,
                         globalSubrs, nominalWidthX, defaultWidthX, private, blender)

    def countHints(self):
        # Each hint consumes two stack arguments (position, width).
        args = self.popallWidth()
        self.hintCount = self.hintCount + len(args) // 2
        return args

    def _hint_op(self, type, args):
        # Forward the hint to the merge pen instead of rendering it.
        self.pen.add_hint(type, args)

    def op_hstem(self, index):
        args = self.countHints()
        self._hint_op('hstem', args)

    def op_vstem(self, index):
        args = self.countHints()
        self._hint_op('vstem', args)

    def op_hstemhm(self, index):
        args = self.countHints()
        self._hint_op('hstemhm', args)

    def op_vstemhm(self, index):
        args = self.countHints()
        self._hint_op('vstemhm', args)

    def _get_hintmask(self, index):
        # An implicit vstemhm may precede the first hintmask; account for it
        # before sizing the mask (one bit per hint, rounded up to bytes).
        if not self.hintMaskBytes:
            args = self.countHints()
            if args:
                self._hint_op('vstemhm', args)
            self.hintMaskBytes = (self.hintCount + 7) // 8
        hintMaskBytes, index = self.callingStack[-1].getBytes(index,
                                                              self.hintMaskBytes)
        return index, hintMaskBytes

    def op_hintmask(self, index):
        index, hintMaskBytes = self._get_hintmask(index)
        self.pen.add_hintmask('hintmask', [hintMaskBytes])
        return hintMaskBytes, index

    def op_cntrmask(self, index):
        index, hintMaskBytes = self._get_hintmask(index)
        self.pen.add_hintmask('cntrmask', [hintMaskBytes])
        return hintMaskBytes, index
class CFF2CharStringMergePen(T2CharStringPen):
    """Pen to merge Type 2 CharStrings.

    The pen is first driven by the default master's charstring
    (``m_index == 0``), which records the command list; subsequent
    ``restart()``/draw passes for each region append that region's
    coordinates to the per-command argument lists.
    """
    def __init__(
            self, default_commands, glyphName, num_masters, master_idx,
            roundTolerance=0.01):
        # For roundTolerance see https://github.com/fonttools/fonttools/issues/2838
        super().__init__(
            width=None,
            glyphSet=None, CFF2=True,
            roundTolerance=roundTolerance)
        self.pt_index = 0  # index of the next command to be matched/appended
        self._commands = default_commands
        self.m_index = master_idx  # which master is currently drawing
        self.num_masters = num_masters
        self.prev_move_idx = 0
        self.seen_moveto = False
        self.glyphName = glyphName
        self.round = roundFunc(roundTolerance, round=round)

    def add_point(self, point_type, pt_coords):
        # Master 0 creates the command; other masters must match the
        # recorded operator and append their coordinates to it.
        if self.m_index == 0:
            self._commands.append([point_type, [pt_coords]])
        else:
            cmd = self._commands[self.pt_index]
            if cmd[0] != point_type:
                raise VarLibCFFPointTypeMergeError(
                    point_type,
                    self.pt_index, len(cmd[1]),
                    cmd[0], self.glyphName)
            cmd[1].append(pt_coords)
        self.pt_index += 1

    def add_hint(self, hint_type, args):
        # Same master-0-records / others-append protocol as add_point().
        if self.m_index == 0:
            self._commands.append([hint_type, [args]])
        else:
            cmd = self._commands[self.pt_index]
            if cmd[0] != hint_type:
                raise VarLibCFFHintTypeMergeError(hint_type, self.pt_index, len(cmd[1]),
                                                  cmd[0], self.glyphName)
            cmd[1].append(args)
        self.pt_index += 1

    def add_hintmask(self, hint_type, abs_args):
        # For hintmask, fonttools.cffLib.specializer.py expects
        # each of these to be represented by two sequential commands:
        # first holding only the operator name, with an empty arg list,
        # second with an empty string as the op name, and the mask arg list.
        if self.m_index == 0:
            self._commands.append([hint_type, []])
            self._commands.append(["", [abs_args]])
        else:
            cmd = self._commands[self.pt_index]
            if cmd[0] != hint_type:
                raise VarLibCFFHintTypeMergeError(hint_type, self.pt_index, len(cmd[1]),
                                                  cmd[0], self.glyphName)
            self.pt_index += 1
            cmd = self._commands[self.pt_index]
            cmd[1].append(abs_args)
        self.pt_index += 1

    def _moveTo(self, pt):
        if not self.seen_moveto:
            self.seen_moveto = True
        pt_coords = self._p(pt)
        self.add_point('rmoveto', pt_coords)
        # I set prev_move_idx here because add_point()
        # can change self.pt_index.
        self.prev_move_idx = self.pt_index - 1

    def _lineTo(self, pt):
        pt_coords = self._p(pt)
        self.add_point('rlineto', pt_coords)

    def _curveToOne(self, pt1, pt2, pt3):
        _p = self._p
        pt_coords = _p(pt1)+_p(pt2)+_p(pt3)
        self.add_point('rrcurveto', pt_coords)

    def _closePath(self):
        pass

    def _endPath(self):
        pass

    def restart(self, region_idx):
        # Rewind for the next region's draw pass.
        self.pt_index = 0
        self.m_index = region_idx
        self._p0 = (0, 0)

    def getCommands(self):
        return self._commands

    def reorder_blend_args(self, commands, get_delta_func):
        """
        We first re-order the master coordinate values.
        For a moveto to lineto, the args are now arranged as::

            [ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]

        We re-arrange this to::

            [ [master_0 x, master_1 x, master_2 x],
              [master_0 y, master_1 y, master_2 y]
            ]

        If the master values are all the same, we collapse the list to
        a single value instead of a list.

        We then convert this to::

            [ [master_0 x] + [x delta tuple] + [numBlends=1]
              [master_0 y] + [y delta tuple] + [numBlends=1]
            ]
        """
        for cmd in commands:
            # arg[i] is the set of arguments for this operator from master i.
            args = cmd[1]
            m_args = zip(*args)
            # m_args[n] is now all num_master args for the i'th argument
            # for this operation.
            cmd[1] = list(m_args)
        lastOp = None
        for cmd in commands:
            op = cmd[0]
            # masks are represented by two cmd's: first has only op names,
            # second has only args.
            if lastOp in ['hintmask', 'cntrmask']:
                coord = list(cmd[1])
                if not allEqual(coord):
                    raise VarLibMergeError("Hintmask values cannot differ between source fonts.")
                cmd[1] = [coord[0][0]]
            else:
                coords = cmd[1]
                new_coords = []
                for coord in coords:
                    if allEqual(coord):
                        new_coords.append(coord[0])
                    else:
                        # convert to deltas
                        deltas = get_delta_func(coord)[1:]
                        coord = [coord[0]] + deltas
                        coord.append(1)
                        new_coords.append(coord)
                cmd[1] = new_coords
            lastOp = op
        return commands

    def getCharString(
            self, private=None, globalSubrs=None,
            var_model=None, optimize=True):
        # Fold the per-master argument lists into CFF2 blend form, then
        # (optionally) specialize the generalized commands before compiling.
        commands = self._commands
        commands = self.reorder_blend_args(commands, partial(var_model.getDeltas, round=self.round))
        if optimize:
            commands = specializeCommands(
                commands, generalizeFirst=False,
                maxstack=maxStackLimit)
        program = commandsToProgram(commands)
        charString = T2CharString(
            program=program, private=private,
            globalSubrs=globalSubrs)
        return charString
| mit | 9a2a62e7bdc13a6fa798ed3e5dc81235 | 31.563444 | 95 | 0.706638 | 2.98078 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/feaLib/ast.py | 3 | 73554 | from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.location import FeatureLibLocation
from fontTools.misc.encodingTools import getEncoding
from fontTools.misc.textTools import byteord, tobytes
from collections import OrderedDict
import itertools
SHIFT = " " * 4  # one level of indentation used when pretty-printing feature code
# Public API of this module. Fix: NullGlyph (defined below) was missing
# from __all__, so it was silently excluded from star-imports.
__all__ = [
    "Element",
    "FeatureFile",
    "Comment",
    "GlyphName",
    "GlyphClass",
    "GlyphClassName",
    "MarkClassName",
    "AnonymousBlock",
    "Block",
    "FeatureBlock",
    "NestedBlock",
    "LookupBlock",
    "GlyphClassDefinition",
    "GlyphClassDefStatement",
    "MarkClass",
    "MarkClassDefinition",
    "AlternateSubstStatement",
    "Anchor",
    "AnchorDefinition",
    "AttachStatement",
    "AxisValueLocationStatement",
    "BaseAxis",
    "CVParametersNameStatement",
    "ChainContextPosStatement",
    "ChainContextSubstStatement",
    "CharacterStatement",
    "ConditionsetStatement",
    "CursivePosStatement",
    "ElidedFallbackName",
    "ElidedFallbackNameID",
    "Expression",
    "FeatureNameStatement",
    "FeatureReferenceStatement",
    "FontRevisionStatement",
    "HheaField",
    "IgnorePosStatement",
    "IgnoreSubstStatement",
    "IncludeStatement",
    "LanguageStatement",
    "LanguageSystemStatement",
    "LigatureCaretByIndexStatement",
    "LigatureCaretByPosStatement",
    "LigatureSubstStatement",
    "LookupFlagStatement",
    "LookupReferenceStatement",
    "MarkBasePosStatement",
    "MarkLigPosStatement",
    "MarkMarkPosStatement",
    "MultipleSubstStatement",
    "NameRecord",
    "NullGlyph",
    "OS2Field",
    "PairPosStatement",
    "ReverseChainSingleSubstStatement",
    "ScriptStatement",
    "SinglePosStatement",
    "SingleSubstStatement",
    "SizeParameters",
    "Statement",
    "STATAxisValueStatement",
    "STATDesignAxisStatement",
    "STATNameStatement",
    "SubtableStatement",
    "TableBlock",
    "ValueRecord",
    "ValueRecordDefinition",
    "VheaField",
]
def deviceToString(device):
    """Render a device table as feature-file text.

    *device* is either ``None`` (rendered as ``<device NULL>``) or an
    iterable of ``(size, value)`` integer pairs.
    """
    if device is None:
        return "<device NULL>"
    pairs = ", ".join("%d %d" % entry for entry in device)
    return "<device %s>" % pairs
# Bare names that collide with feature-file keywords; asFea() escapes such
# glyph names with a leading backslash. Idiom fix: use a set literal instead
# of set([...]) (flake8-comprehensions C405).
fea_keywords = {
    "anchor",
    "anchordef",
    "anon",
    "anonymous",
    "by",
    "contour",
    "cursive",
    "device",
    "enum",
    "enumerate",
    "excludedflt",
    "exclude_dflt",
    "feature",
    "from",
    "ignore",
    "ignorebaseglyphs",
    "ignoreligatures",
    "ignoremarks",
    "include",
    "includedflt",
    "include_dflt",
    "language",
    "languagesystem",
    "lookup",
    "lookupflag",
    "mark",
    "markattachmenttype",
    "markclass",
    "nameid",
    "null",
    "parameters",
    "pos",
    "position",
    "required",
    "righttoleft",
    "reversesub",
    "rsub",
    "script",
    "sub",
    "substitute",
    "subtable",
    "table",
    "usemarkfilteringset",
    "useextension",
    "valuerecorddef",
    "base",
    "gdef",
    "head",
    "hhea",
    "name",
    "vhea",
    "vmtx",
}
def asFea(g):
    """Render *g* as feature-file text.

    Objects that know how to render themselves (have an ``asFea`` method)
    are delegated to; a 2-tuple is treated as a glyph range; bare strings
    that collide with feature-file keywords are backslash-escaped.
    """
    renderer = getattr(g, "asFea", None)
    if renderer is not None:
        return renderer()
    if isinstance(g, tuple) and len(g) == 2:
        start, end = g  # a range
        return asFea(start) + " - " + asFea(end)
    if g.lower() in fea_keywords:
        return "\\" + g
    return g
class Element(object):
    """A base class representing "something" in a feature file."""

    def __init__(self, location=None):
        # Location of this element as a `FeatureLibLocation` object;
        # tuple-style locations are coerced.
        if location and not isinstance(location, FeatureLibLocation):
            location = FeatureLibLocation(*location)
        self.location = location

    def build(self, builder):
        # Default: elements contribute nothing to the builder.
        pass

    def asFea(self, indent=""):
        """Returns this element as a string of feature code. For block-type
        elements (such as :class:`FeatureBlock`), the `indent` string is
        added to the start of each line in the output."""
        raise NotImplementedError

    def __str__(self):
        return self.asFea()
class Statement(Element):
    """Base class for feature-file elements that act as statements."""
    pass
class Expression(Element):
    """Base class for feature-file elements that act as expressions."""
    pass
class Comment(Element):
    """A comment in a feature file."""

    def __init__(self, text, location=None):
        super().__init__(location)
        self.text = text  # raw text of the comment

    def asFea(self, indent=""):
        return self.text
class NullGlyph(Expression):
    """The NULL glyph, used in glyph deletion substitutions."""

    def __init__(self, location=None):
        super().__init__(location)

    def glyphSet(self):
        """NULL contains no glyphs; always an empty tuple."""
        return ()

    def asFea(self, indent=""):
        return "NULL"
class GlyphName(Expression):
    """A single glyph name, such as ``cedilla``."""

    def __init__(self, glyph, location=None):
        super().__init__(location)
        self.glyph = glyph  # the name itself as a string

    def glyphSet(self):
        """This glyph as a one-element tuple."""
        return (self.glyph,)

    def asFea(self, indent=""):
        return asFea(self.glyph)
class GlyphClass(Expression):
    """A glyph class, such as ``[acute cedilla grave]``."""

    def __init__(self, glyphs=None, location=None):
        Expression.__init__(self, location)
        #: The list of glyphs in this class, as :class:`GlyphName` objects.
        self.glyphs = glyphs if glyphs is not None else []
        # ``original`` preserves the compact source spellings (ranges,
        # class references) for round-tripping via asFea(); ``curr`` counts
        # how many entries of ``glyphs`` are already reflected there.
        self.original = []
        self.curr = 0

    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs)

    def asFea(self, indent=""):
        if len(self.original):
            # Flush any glyphs appended since the last range/class was
            # recorded, then emit the compact representation.
            if self.curr < len(self.glyphs):
                self.original.extend(self.glyphs[self.curr :])
                self.curr = len(self.glyphs)
            return "[" + " ".join(map(asFea, self.original)) + "]"
        else:
            return "[" + " ".join(map(asFea, self.glyphs)) + "]"

    def extend(self, glyphs):
        """Add a list of :class:`GlyphName` objects to the class."""
        self.glyphs.extend(glyphs)

    def append(self, glyph):
        """Add a single :class:`GlyphName` object to the class."""
        self.glyphs.append(glyph)

    def add_range(self, start, end, glyphs):
        """Add a range (e.g. ``A-Z``) to the class. ``start`` and ``end``
        are either :class:`GlyphName` objects or strings representing the
        start and end glyphs in the class, and ``glyphs`` is the full list of
        :class:`GlyphName` objects in the range."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append((start, end))
        self.glyphs.extend(glyphs)
        self.curr = len(self.glyphs)

    def add_cid_range(self, start, end, glyphs):
        """Add a range to the class by glyph ID. ``start`` and ``end`` are the
        initial and final IDs, and ``glyphs`` is the full list of
        :class:`GlyphName` objects in the range."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append(("\\{}".format(start), "\\{}".format(end)))
        self.glyphs.extend(glyphs)
        self.curr = len(self.glyphs)

    def add_class(self, gc):
        """Add glyphs from the given :class:`GlyphClassName` object to the
        class."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append(gc)
        self.glyphs.extend(gc.glyphSet())
        self.curr = len(self.glyphs)
class GlyphClassName(Expression):
    """A glyph class name, such as ``@FRENCH_MARKS``. This must be instantiated
    with a :class:`GlyphClassDefinition` object."""

    def __init__(self, glyphclass, location=None):
        super().__init__(location)
        assert isinstance(glyphclass, GlyphClassDefinition)
        self.glyphclass = glyphclass

    def glyphSet(self):
        """The glyphs in the referenced class, as a tuple."""
        return tuple(self.glyphclass.glyphSet())

    def asFea(self, indent=""):
        return "@" + self.glyphclass.name
class MarkClassName(Expression):
    """A mark class name, such as ``@FRENCH_MARKS`` defined with ``markClass``.
    This must be instantiated with a :class:`MarkClass` object."""

    def __init__(self, markClass, location=None):
        super().__init__(location)
        assert isinstance(markClass, MarkClass)
        self.markClass = markClass

    def glyphSet(self):
        """The glyphs in the referenced mark class, as a tuple."""
        return self.markClass.glyphSet()

    def asFea(self, indent=""):
        return "@" + self.markClass.name
class AnonymousBlock(Statement):
    """An anonymous data block."""

    def __init__(self, tag, content, location=None):
        super().__init__(location)
        self.tag = tag  # string containing the block's "tag"
        self.content = content  # block data as string

    def asFea(self, indent=""):
        # "anon TAG {\n<content>} TAG;\n\n"
        return "anon {0} {{\n{1}}} {0};\n\n".format(self.tag, self.content)
class Block(Statement):
    """A block of statements: feature, lookup, etc."""

    def __init__(self, location=None):
        super().__init__(location)
        self.statements = []  # Statements contained in the block

    def build(self, builder):
        """When handed a 'builder' object of comparable interface to
        :class:`fontTools.feaLib.builder`, walks the statements in this
        block, calling the builder callbacks."""
        for statement in self.statements:
            statement.build(builder)

    def asFea(self, indent=""):
        inner = indent + SHIFT
        rendered = [statement.asFea(indent=inner) for statement in self.statements]
        return inner + ("\n" + inner).join(rendered) + "\n"
class FeatureFile(Block):
    """The top-level element of the syntax tree, containing the whole feature
    file in its ``statements`` attribute."""

    def __init__(self):
        super().__init__(location=None)
        self.markClasses = {}  # mark class name --> ast.MarkClass

    def asFea(self, indent=""):
        rendered = [statement.asFea(indent=indent) for statement in self.statements]
        return "\n".join(rendered)
class FeatureBlock(Block):
    """A named feature block."""

    def __init__(self, name, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name, self.use_extension = name, use_extension

    def build(self, builder):
        """Call the ``start_feature`` callback on the builder object, visit
        all the statements in this feature, and then call ``end_feature``."""
        # TODO(sascha): Handle use_extension.
        builder.start_feature(self.location, self.name)
        # language exclude_dflt statements modify builder.features_
        # limit them to this block with temporary builder.features_
        features = builder.features_
        builder.features_ = {}
        Block.build(self, builder)
        # Merge the temporarily collected features back into the saved dict.
        for key, value in builder.features_.items():
            features.setdefault(key, []).extend(value)
        builder.features_ = features
        builder.end_feature()

    def asFea(self, indent=""):
        res = indent + "feature %s " % self.name.strip()
        if self.use_extension:
            res += "useExtension "
        res += "{\n"
        res += Block.asFea(self, indent=indent)
        res += indent + "} %s;\n" % self.name.strip()
        return res
class NestedBlock(Block):
    """A block inside another block, for example when found inside a
    ``cvParameters`` block."""

    def __init__(self, tag, block_name, location=None):
        super().__init__(location)
        self.tag = tag
        self.block_name = block_name

    def build(self, builder):
        Block.build(self, builder)
        # ParamUILabelNameID blocks count toward the feature's named params.
        if self.block_name == "ParamUILabelNameID":
            builder.add_to_cv_num_named_params(self.tag)

    def asFea(self, indent=""):
        header = "{}{} {{\n".format(indent, self.block_name)
        body = Block.asFea(self, indent=indent)
        footer = "{}}};\n".format(indent)
        return header + body + footer
class LookupBlock(Block):
    """A named lookup, containing ``statements``."""

    def __init__(self, name, use_extension=False, location=None):
        super().__init__(location)
        self.name, self.use_extension = name, use_extension

    def build(self, builder):
        # TODO(sascha): Handle use_extension.
        builder.start_lookup_block(self.location, self.name)
        Block.build(self, builder)
        builder.end_lookup_block()

    def asFea(self, indent=""):
        parts = ["lookup {} ".format(self.name)]
        if self.use_extension:
            parts.append("useExtension ")
        parts.append("{\n")
        parts.append(Block.asFea(self, indent=indent))
        parts.append("{}}} {};\n".format(indent, self.name))
        return "".join(parts)
class TableBlock(Block):
    """A ``table ... { }`` block."""

    def __init__(self, name, location=None):
        super().__init__(location)
        self.name = name

    def asFea(self, indent=""):
        tag = self.name.strip()
        res = "table {} {{\n".format(tag)
        res += super().asFea(indent=indent)
        res += "}} {};\n".format(tag)
        return res
class GlyphClassDefinition(Statement):
    """Example: ``@UPPERCASE = [A-Z];``."""

    def __init__(self, name, glyphs, location=None):
        super().__init__(location)
        self.name = name  # class name as a string, without initial ``@``
        self.glyphs = glyphs  # a :class:`GlyphClass` object

    def glyphSet(self):
        """The glyphs in this class, as a tuple."""
        return tuple(self.glyphs.glyphSet())

    def asFea(self, indent=""):
        return "@{} = {};".format(self.name, self.glyphs.asFea())
class GlyphClassDefStatement(Statement):
    """Example: ``GlyphClassDef @UPPERCASE, [B], [C], [D];``. The parameters
    must be either :class:`GlyphClass` or :class:`GlyphClassName` objects, or
    ``None``."""

    def __init__(
        self, baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=None
    ):
        super().__init__(location)
        self.baseGlyphs = baseGlyphs
        self.markGlyphs = markGlyphs
        self.ligatureGlyphs = ligatureGlyphs
        self.componentGlyphs = componentGlyphs

    def build(self, builder):
        """Calls the builder's ``add_glyphClassDef`` callback."""
        def glyphs_of(expr):
            # A missing class contributes an empty glyph set.
            return expr.glyphSet() if expr else tuple()

        builder.add_glyphClassDef(
            self.location,
            glyphs_of(self.baseGlyphs),
            glyphs_of(self.ligatureGlyphs),
            glyphs_of(self.markGlyphs),
            glyphs_of(self.componentGlyphs),
        )

    def asFea(self, indent=""):
        def fea_of(expr):
            return expr.asFea() if expr else ""

        # Note: the .fea order is base, ligature, mark, component.
        return "GlyphClassDef {}, {}, {}, {};".format(
            fea_of(self.baseGlyphs),
            fea_of(self.ligatureGlyphs),
            fea_of(self.markGlyphs),
            fea_of(self.componentGlyphs),
        )
class MarkClass(object):
    """One `or more` ``markClass`` statements for the same mark class.

    While glyph classes can be defined only once, the feature file format
    allows expanding mark classes with multiple definitions, each using
    different glyphs and anchors. The following are two ``MarkClassDefinitions``
    for the same ``MarkClass``::

        markClass [acute grave] <anchor 350 800> @FRENCH_ACCENTS;
        markClass [cedilla] <anchor 350 -200> @FRENCH_ACCENTS;

    The ``MarkClass`` object is therefore just a container for a list of
    :class:`MarkClassDefinition` statements.
    """

    def __init__(self, name):
        self.name = name
        self.definitions = []
        # glyph name --> the MarkClassDefinition that introduced it
        self.glyphs = OrderedDict()

    def addDefinition(self, definition):
        """Add a :class:`MarkClassDefinition` statement to this mark class."""
        assert isinstance(definition, MarkClassDefinition)
        self.definitions.append(definition)
        for glyph in definition.glyphSet():
            if glyph in self.glyphs:
                # A glyph may belong to at most one definition of the class.
                otherLoc = self.glyphs[glyph].location
                suffix = "" if otherLoc is None else f" at {otherLoc}"
                raise FeatureLibError(
                    "Glyph %s already defined%s" % (glyph, suffix),
                    definition.location,
                )
            self.glyphs[glyph] = definition

    def glyphSet(self):
        """The glyphs in this class, as a tuple."""
        return tuple(self.glyphs.keys())

    def asFea(self, indent=""):
        return "\n".join(definition.asFea() for definition in self.definitions)
class MarkClassDefinition(Statement):
    """A single ``markClass`` statement. The ``markClass`` should be a
    :class:`MarkClass` object, the ``anchor`` an :class:`Anchor` object,
    and the ``glyphs`` parameter should be a `glyph-containing object`_ .

    Example:

        .. code:: python

            mc = MarkClass("FRENCH_ACCENTS")
            mc.addDefinition( MarkClassDefinition(mc, Anchor(350, 800),
                GlyphClass([ GlyphName("acute"), GlyphName("grave") ])
            ) )
            mc.addDefinition( MarkClassDefinition(mc, Anchor(350, -200),
                GlyphClass([ GlyphName("cedilla") ])
            ) )

            mc.asFea()
            # markClass [acute grave] <anchor 350 800> @FRENCH_ACCENTS;
            # markClass [cedilla] <anchor 350 -200> @FRENCH_ACCENTS;

    """

    def __init__(self, markClass, anchor, glyphs, location=None):
        super().__init__(location)
        assert isinstance(markClass, MarkClass)
        assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression)
        self.markClass = markClass
        self.anchor = anchor
        self.glyphs = glyphs

    def glyphSet(self):
        """The glyphs in this definition, as a tuple."""
        return self.glyphs.glyphSet()

    def asFea(self, indent=""):
        return "markClass {} {} @{};".format(
            self.glyphs.asFea(), self.anchor.asFea(), self.markClass.name
        )
class AlternateSubstStatement(Statement):
    """A ``sub ... from ...`` statement.

    ``prefix``, ``glyph``, ``suffix`` and ``replacement`` should be lists of
    `glyph-containing objects`_. ``glyph`` should be a `one element list`."""

    def __init__(self, prefix, glyph, suffix, replacement, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix)
        self.replacement = replacement

    def build(self, builder):
        """Calls the builder's ``add_alternate_subst`` callback."""
        glyph = self.glyph.glyphSet()
        # Alternate substitution applies to exactly one input glyph.
        assert len(glyph) == 1, glyph
        glyph = list(glyph)[0]
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        replacement = self.replacement.glyphSet()
        builder.add_alternate_subst(self.location, prefix, glyph, suffix, replacement)

    def asFea(self, indent=""):
        res = "sub "
        if len(self.prefix) or len(self.suffix):
            # Contextual form: mark the input glyph with a trailing quote.
            if len(self.prefix):
                res += " ".join(map(asFea, self.prefix)) + " "
            res += asFea(self.glyph) + "'"  # even though we really only use 1
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            res += asFea(self.glyph)
        res += " from "
        res += asFea(self.replacement)
        res += ";"
        return res
class Anchor(Expression):
    """An ``<anchor>`` element, used inside a ``pos`` rule.

    If ``name`` is given it takes precedence over the coordinates; all
    other values should be integers.
    """

    def __init__(
        self,
        x,
        y,
        name=None,
        contourpoint=None,
        xDeviceTable=None,
        yDeviceTable=None,
        location=None,
    ):
        Expression.__init__(self, location)
        self.name = name
        self.x = x
        self.y = y
        self.contourpoint = contourpoint
        self.xDeviceTable = xDeviceTable
        self.yDeviceTable = yDeviceTable

    def asFea(self, indent=""):
        # A named anchor is emitted by reference only.
        if self.name is not None:
            return f"<anchor {self.name}>"
        fields = [f"anchor {self.x} {self.y}"]
        if self.contourpoint:
            fields.append(f"contourpoint {self.contourpoint}")
        if self.xDeviceTable or self.yDeviceTable:
            fields.append(deviceToString(self.xDeviceTable))
            fields.append(deviceToString(self.yDeviceTable))
        return "<" + " ".join(fields) + ">"
class AnchorDefinition(Statement):
    """A named ``anchorDef`` statement (2.e.viii). ``name`` should be a string."""

    def __init__(self, name, x, y, contourpoint=None, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.x = x
        self.y = y
        self.contourpoint = contourpoint

    def asFea(self, indent=""):
        pieces = [f"anchorDef {self.x} {self.y}"]
        if self.contourpoint:
            pieces.append(f"contourpoint {self.contourpoint}")
        pieces.append(f"{self.name};")
        return " ".join(pieces)
class AttachStatement(Statement):
    """A ``GDEF`` table ``Attach`` statement."""

    def __init__(self, glyphs, contourPoints, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs  # a `glyph-containing object`_
        self.contourPoints = contourPoints  # a list of integer contour points

    def build(self, builder):
        """Calls the builder's ``add_attach_points`` callback."""
        builder.add_attach_points(
            self.location, self.glyphs.glyphSet(), self.contourPoints
        )

    def asFea(self, indent=""):
        points = " ".join(str(p) for p in self.contourPoints)
        return f"Attach {self.glyphs.asFea()} {points};"
class ChainContextPosStatement(Statement):
    r"""A chained contextual positioning statement.

    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_ .

    ``lookups`` should be a list of elements representing what lookups
    to apply at each glyph position. Each element should be a
    :class:`LookupBlock` to apply a single chaining lookup at the given
    position, a list of :class:`LookupBlock`\ s to apply multiple
    lookups, or ``None`` to apply no lookup. The length of the outer
    list should equal the length of ``glyphs``; the inner lists can be
    of variable length."""

    def __init__(self, prefix, glyphs, suffix, lookups, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix
        # Normalize each entry to a list of lookups (or a falsy no-op):
        # a bare LookupBlock is wrapped in a one-element list.
        self.lookups = list(lookups)
        for i, lookup in enumerate(lookups):
            if lookup:
                try:
                    (_ for _ in lookup)  # raises TypeError when not iterable
                except TypeError:
                    self.lookups[i] = [lookup]

    def build(self, builder):
        """Calls the builder's ``add_chain_context_pos`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        glyphs = [g.glyphSet() for g in self.glyphs]
        suffix = [s.glyphSet() for s in self.suffix]
        builder.add_chain_context_pos(
            self.location, prefix, glyphs, suffix, self.lookups
        )

    def asFea(self, indent=""):
        res = "pos "
        if (
            len(self.prefix)
            or len(self.suffix)
            or any(x is not None for x in self.lookups)
        ):
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            for i, g in enumerate(self.glyphs):
                res += g.asFea() + "'"
                if self.lookups[i]:
                    for lu in self.lookups[i]:
                        res += " lookup " + lu.name
                if i < len(self.glyphs) - 1:
                    res += " "
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            # Bug fix: this branch previously read ``self.glyph`` (an
            # attribute that does not exist on this class) and raised
            # AttributeError for rules without context or lookups.
            res += " ".join(map(asFea, self.glyphs))
        res += ";"
        return res
class ChainContextSubstStatement(Statement):
    r"""A chained contextual substitution statement.

    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_ .

    ``lookups`` should be a list of elements representing what lookups
    to apply at each glyph position. Each element should be a
    :class:`LookupBlock` to apply a single chaining lookup at the given
    position, a list of :class:`LookupBlock`\ s to apply multiple
    lookups, or ``None`` to apply no lookup. The length of the outer
    list should equal the length of ``glyphs``; the inner lists can be
    of variable length."""

    def __init__(self, prefix, glyphs, suffix, lookups, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix
        # Normalize each entry to a list of lookups (or a falsy no-op):
        # a bare LookupBlock is wrapped in a one-element list.
        self.lookups = list(lookups)
        for i, lookup in enumerate(lookups):
            if lookup:
                try:
                    (_ for _ in lookup)  # raises TypeError when not iterable
                except TypeError:
                    self.lookups[i] = [lookup]

    def build(self, builder):
        """Calls the builder's ``add_chain_context_subst`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        glyphs = [g.glyphSet() for g in self.glyphs]
        suffix = [s.glyphSet() for s in self.suffix]
        builder.add_chain_context_subst(
            self.location, prefix, glyphs, suffix, self.lookups
        )

    def asFea(self, indent=""):
        res = "sub "
        if (
            len(self.prefix)
            or len(self.suffix)
            or any(x is not None for x in self.lookups)
        ):
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            for i, g in enumerate(self.glyphs):
                res += g.asFea() + "'"
                if self.lookups[i]:
                    for lu in self.lookups[i]:
                        res += " lookup " + lu.name
                if i < len(self.glyphs) - 1:
                    res += " "
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            # Bug fix: this branch previously read ``self.glyph`` (an
            # attribute that does not exist on this class) and raised
            # AttributeError for rules without context or lookups.
            res += " ".join(map(asFea, self.glyphs))
        res += ";"
        return res
class CursivePosStatement(Statement):
    """A cursive positioning statement.

    Either of the entry and exit anchors may be an :class:`Anchor` object
    or ``None``.
    """

    def __init__(self, glyphclass, entryAnchor, exitAnchor, location=None):
        Statement.__init__(self, location)
        self.glyphclass = glyphclass
        self.entryAnchor = entryAnchor
        self.exitAnchor = exitAnchor

    def build(self, builder):
        """Calls the builder object's ``add_cursive_pos`` callback."""
        builder.add_cursive_pos(
            self.location, self.glyphclass.glyphSet(), self.entryAnchor, self.exitAnchor
        )

    def asFea(self, indent=""):
        def render(anchor):
            # A missing anchor is spelled <anchor NULL> in FEA syntax.
            return anchor.asFea() if anchor else "<anchor NULL>"

        return "pos cursive {} {} {};".format(
            self.glyphclass.asFea(), render(self.entryAnchor), render(self.exitAnchor)
        )
class FeatureReferenceStatement(Statement):
    """A feature reference inside another feature. Example: ``feature salt;``"""

    def __init__(self, featureName, location=None):
        Statement.__init__(self, location)
        self.location = location
        self.featureName = featureName

    def build(self, builder):
        """Calls the builder object's ``add_feature_reference`` callback."""
        builder.add_feature_reference(self.location, self.featureName)

    def asFea(self, indent=""):
        return f"feature {self.featureName};"
class IgnorePosStatement(Statement):
    """An ``ignore pos`` statement with `one or more` contexts to ignore.

    ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)``
    tuples, each member being a `glyph-containing object`_.
    """

    def __init__(self, chainContexts, location=None):
        Statement.__init__(self, location)
        self.chainContexts = chainContexts

    def build(self, builder):
        """Calls the builder object's ``add_chain_context_pos`` callback on each
        rule context."""
        for prefix, glyphs, suffix in self.chainContexts:
            builder.add_chain_context_pos(
                self.location,
                [p.glyphSet() for p in prefix],
                [g.glyphSet() for g in glyphs],
                [s.glyphSet() for s in suffix],
                [],
            )

    def asFea(self, indent=""):
        def render(prefix, glyphs, suffix):
            # Glyphs are only marked (with ') when some context is present.
            if not prefix and not suffix:
                return " ".join(asFea(g) for g in glyphs)
            words = [asFea(g) for g in prefix]
            words.append(" ".join(g.asFea() + "'" for g in glyphs))
            words.extend(asFea(g) for g in suffix)
            return " ".join(words)

        contexts = [render(*ctx) for ctx in self.chainContexts]
        return "ignore pos " + ", ".join(contexts) + ";"
class IgnoreSubstStatement(Statement):
    """An ``ignore sub`` statement with `one or more` contexts to ignore.

    ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)``
    tuples, each member being a `glyph-containing object`_.
    """

    def __init__(self, chainContexts, location=None):
        Statement.__init__(self, location)
        self.chainContexts = chainContexts

    def build(self, builder):
        """Calls the builder object's ``add_chain_context_subst`` callback on
        each rule context."""
        for prefix, glyphs, suffix in self.chainContexts:
            builder.add_chain_context_subst(
                self.location,
                [p.glyphSet() for p in prefix],
                [g.glyphSet() for g in glyphs],
                [s.glyphSet() for s in suffix],
                [],
            )

    def asFea(self, indent=""):
        def render(prefix, glyphs, suffix):
            # Glyphs are only marked (with ') when some context is present.
            if not prefix and not suffix:
                return " ".join(asFea(g) for g in glyphs)
            words = [asFea(g) for g in prefix]
            words.append(" ".join(g.asFea() + "'" for g in glyphs))
            words.extend(asFea(g) for g in suffix)
            return " ".join(words)

        contexts = [render(*ctx) for ctx in self.chainContexts]
        return "ignore sub " + ", ".join(contexts) + ";"
class IncludeStatement(Statement):
    """An ``include()`` statement."""

    def __init__(self, filename, location=None):
        super(IncludeStatement, self).__init__(location)
        # Name of the file to include, as written in the source.
        self.filename = filename

    def build(self):
        # Deliberately unsupported: includes are resolved at parse time
        # when the parser is created with followIncludes=True.
        raise FeatureLibError(
            "Building an include statement is not implemented yet. "
            "Instead, use Parser(..., followIncludes=True) for building.",
            self.location,
        )

    def asFea(self, indent=""):
        return f"{indent}include({self.filename});"
class LanguageStatement(Statement):
    """A ``language`` statement within a feature."""

    def __init__(self, language, include_default=True, required=False, location=None):
        Statement.__init__(self, location)
        assert len(language) == 4
        self.language = language  # four-character language tag
        self.include_default = include_default  # when False, emits "exclude_dflt"
        self.required = required

    def build(self, builder):
        """Call the builder object's ``set_language`` callback."""
        builder.set_language(
            location=self.location,
            language=self.language,
            include_default=self.include_default,
            required=self.required,
        )

    def asFea(self, indent=""):
        tokens = ["language", self.language.strip()]
        if not self.include_default:
            tokens.append("exclude_dflt")
        if self.required:
            tokens.append("required")
        return " ".join(tokens) + ";"
class LanguageSystemStatement(Statement):
    """A top-level ``languagesystem`` statement."""

    def __init__(self, script, language, location=None):
        Statement.__init__(self, location)
        self.script = script
        self.language = language

    def build(self, builder):
        """Calls the builder object's ``add_language_system`` callback."""
        builder.add_language_system(self.location, self.script, self.language)

    def asFea(self, indent=""):
        return f"languagesystem {self.script} {self.language.strip()};"
class FontRevisionStatement(Statement):
    """A ``head`` table ``FontRevision`` statement.

    ``revision`` should be a number; it is formatted to three decimal places.
    """

    def __init__(self, revision, location=None):
        Statement.__init__(self, location)
        self.revision = revision

    def build(self, builder):
        builder.set_font_revision(self.location, self.revision)

    def asFea(self, indent=""):
        return f"FontRevision {self.revision:.3f};"
class LigatureCaretByIndexStatement(Statement):
    """A ``GDEF`` table ``LigatureCaretByIndex`` statement.

    ``glyphs`` should be a `glyph-containing object`_ and ``carets`` a list
    of integers.
    """

    def __init__(self, glyphs, carets, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs
        self.carets = carets

    def build(self, builder):
        """Calls the builder object's ``add_ligatureCaretByIndex_`` callback."""
        builder.add_ligatureCaretByIndex_(
            self.location, self.glyphs.glyphSet(), set(self.carets)
        )

    def asFea(self, indent=""):
        carets = " ".join(str(x) for x in self.carets)
        return f"LigatureCaretByIndex {self.glyphs.asFea()} {carets};"
class LigatureCaretByPosStatement(Statement):
    """A ``GDEF`` table ``LigatureCaretByPos`` statement.

    ``glyphs`` should be a `glyph-containing object`_ and ``carets`` a list
    of integers.
    """

    def __init__(self, glyphs, carets, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs
        self.carets = carets

    def build(self, builder):
        """Calls the builder object's ``add_ligatureCaretByPos_`` callback."""
        builder.add_ligatureCaretByPos_(
            self.location, self.glyphs.glyphSet(), set(self.carets)
        )

    def asFea(self, indent=""):
        carets = " ".join(str(x) for x in self.carets)
        return f"LigatureCaretByPos {self.glyphs.asFea()} {carets};"
class LigatureSubstStatement(Statement):
    """A ligature substitution statement (``sub f i by f_i``).

    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_; ``replacement`` is a single
    `glyph-containing object`_. When ``forceChain`` is True the rule is
    written as a chaining rule (marked glyphs) even without any context.
    """

    def __init__(self, prefix, glyphs, suffix, replacement, forceChain, location=None):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.glyphs = glyphs
        self.suffix = suffix
        self.replacement = replacement
        self.forceChain = forceChain

    def build(self, builder):
        before = [p.glyphSet() for p in self.prefix]
        targets = [g.glyphSet() for g in self.glyphs]
        after = [s.glyphSet() for s in self.suffix]
        builder.add_ligature_subst(
            self.location, before, targets, after, self.replacement, self.forceChain
        )

    def asFea(self, indent=""):
        if self.prefix or self.suffix or self.forceChain:
            words = [g.asFea() for g in self.prefix]
            words.append(" ".join(g.asFea() + "'" for g in self.glyphs))
            words.extend(g.asFea() for g in self.suffix)
            lhs = " ".join(words)
        else:
            lhs = " ".join(g.asFea() for g in self.glyphs)
        return "sub {} by {};".format(lhs, asFea(self.replacement))
class LookupFlagStatement(Statement):
    """A ``lookupflag`` statement.

    ``value`` holds the integer flag bits, excluding the
    ``MarkAttachmentType`` class and ``UseMarkFilteringSet`` values, which
    must be specified as glyph-containing objects.
    """

    def __init__(
        self, value=0, markAttachment=None, markFilteringSet=None, location=None
    ):
        Statement.__init__(self, location)
        self.value = value
        self.markAttachment = markAttachment
        self.markFilteringSet = markFilteringSet

    def build(self, builder):
        """Calls the builder object's ``set_lookup_flag`` callback."""
        markAttach = (
            self.markAttachment.glyphSet() if self.markAttachment is not None else None
        )
        markFilter = (
            self.markFilteringSet.glyphSet()
            if self.markFilteringSet is not None
            else None
        )
        builder.set_lookup_flag(self.location, self.value, markAttach, markFilter)

    def asFea(self, indent=""):
        # The four low bits map one-to-one onto these named flags.
        names = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"]
        tokens = [name for bit, name in enumerate(names) if self.value & (1 << bit)]
        if self.markAttachment is not None:
            tokens.append("MarkAttachmentType {}".format(self.markAttachment.asFea()))
        if self.markFilteringSet is not None:
            tokens.append("UseMarkFilteringSet {}".format(self.markFilteringSet.asFea()))
        return "lookupflag {};".format(" ".join(tokens) if tokens else "0")
class LookupReferenceStatement(Statement):
    """A ``lookup ...;`` statement that includes a lookup in a feature.

    ``lookup`` should be a :class:`LookupBlock` object.
    """

    def __init__(self, lookup, location=None):
        Statement.__init__(self, location)
        self.location = location
        self.lookup = lookup

    def build(self, builder):
        """Calls the builder object's ``add_lookup_call`` callback."""
        builder.add_lookup_call(self.lookup.name)

    def asFea(self, indent=""):
        return f"lookup {self.lookup.name};"
class MarkBasePosStatement(Statement):
    """A mark-to-base positioning rule.

    ``base`` should be a `glyph-containing object`_ and ``marks`` a list of
    (:class:`Anchor`, :class:`MarkClass`) tuples.
    """

    def __init__(self, base, marks, location=None):
        Statement.__init__(self, location)
        self.base = base
        self.marks = marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_base_pos`` callback."""
        builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks)

    def asFea(self, indent=""):
        lines = ["pos base {}".format(self.base.asFea())]
        lines.extend(
            indent + SHIFT + "{} mark @{}".format(anchor.asFea(), markclass.name)
            for anchor, markclass in self.marks
        )
        return "\n".join(lines) + ";"
class MarkLigPosStatement(Statement):
    """A mark-to-ligature positioning rule.

    ``ligatures`` must be a `glyph-containing object`_. ``marks`` is a list
    of lists: one entry per ligature component, each entry a list of
    (:class:`Anchor`, :class:`MarkClass`) tuples giving the attachment
    points for that component (an empty list or ``None`` means no
    attachments).

    Example::

        m1 = MarkClass("TOP_MARKS")
        m2 = MarkClass("BOTTOM_MARKS")
        # ... add definitions to mark classes...

        glyph = GlyphName("lam_meem_jeem")
        marks = [
            [ (Anchor(625,1800), m1) ], # Attachments on 1st component (lam)
            [ (Anchor(376,-378), m2) ], # Attachments on 2nd component (meem)
            [ ]                         # No attachments on the jeem
        ]
        mlp = MarkLigPosStatement(glyph, marks)

        mlp.asFea()
        # pos ligature lam_meem_jeem <anchor 625 1800> mark @TOP_MARKS
        # ligComponent <anchor 376 -378> mark @BOTTOM_MARKS;
    """

    def __init__(self, ligatures, marks, location=None):
        Statement.__init__(self, location)
        self.ligatures = ligatures
        self.marks = marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_lig_pos`` callback."""
        builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks)

    def asFea(self, indent=""):
        anchor_indent = "\n" + indent + SHIFT * 2

        def component(attachments):
            # A component without attachments is written as <anchor NULL>.
            if not attachments:
                return anchor_indent + "<anchor NULL>"
            return "".join(
                anchor_indent + "{} mark @{}".format(anchor.asFea(), markclass.name)
                for anchor, markclass in attachments
            )

        joiner = "\n" + indent + SHIFT + "ligComponent"
        return (
            "pos ligature {}".format(self.ligatures.asFea())
            + joiner.join(component(c) for c in self.marks)
            + ";"
        )
class MarkMarkPosStatement(Statement):
    """A mark-to-mark positioning rule.

    ``baseMarks`` must be a `glyph-containing object`_ and ``marks`` a list
    of (:class:`Anchor`, :class:`MarkClass`) tuples.
    """

    def __init__(self, baseMarks, marks, location=None):
        Statement.__init__(self, location)
        self.baseMarks = baseMarks
        self.marks = marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_mark_pos`` callback."""
        builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks)

    def asFea(self, indent=""):
        lines = ["pos mark {}".format(self.baseMarks.asFea())]
        lines.extend(
            indent + SHIFT + "{} mark @{}".format(anchor.asFea(), markclass.name)
            for anchor, markclass in self.marks
        )
        return "\n".join(lines) + ";"
class MultipleSubstStatement(Statement):
    """A multiple substitution statement (one glyph replaced by several).

    Args:
        prefix: a list of `glyph-containing objects`_.
        glyph: a single glyph-containing object.
        suffix: a list of glyph-containing objects.
        replacement: a list of glyph-containing objects.
        forceChain: If true, the statement is expressed as a chaining rule
            (e.g. ``sub f' i' by f_i``) even when no context is given.
    """

    def __init__(
        self, prefix, glyph, suffix, replacement, forceChain=False, location=None
    ):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.glyph = glyph
        self.suffix = suffix
        self.replacement = replacement
        self.forceChain = forceChain

    def build(self, builder):
        """Calls the builder object's ``add_multiple_subst`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        if not self.replacement and hasattr(self.glyph, "glyphSet"):
            # Deletion rule over a glyph class: register one substitution
            # per member glyph.
            targets = self.glyph.glyphSet()
        else:
            targets = [self.glyph]
        for target in targets:
            builder.add_multiple_subst(
                self.location,
                prefix,
                target,
                suffix,
                self.replacement,
                self.forceChain,
            )

    def asFea(self, indent=""):
        if self.prefix or self.suffix or self.forceChain:
            words = [asFea(g) for g in self.prefix]
            words.append(asFea(self.glyph) + "'")
            words.extend(asFea(g) for g in self.suffix)
            lhs = " ".join(words)
        else:
            lhs = asFea(self.glyph)
        # An empty replacement list means deletion, spelled with NULL.
        rhs = " ".join(asFea(g) for g in (self.replacement or [NullGlyph()]))
        return "sub {} by {};".format(lhs, rhs)
class PairPosStatement(Statement):
    """A pair positioning statement.

    ``glyphs1`` and ``glyphs2`` should be `glyph-containing objects`_.
    ``valuerecord1`` should be a :class:`ValueRecord` object;
    ``valuerecord2`` should be either a :class:`ValueRecord` object or ``None``.
    If ``enumerated`` is true, then this is expressed as an
    `enumerated pair <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_.
    """

    def __init__(
        self,
        glyphs1,
        valuerecord1,
        glyphs2,
        valuerecord2,
        enumerated=False,
        location=None,
    ):
        Statement.__init__(self, location)
        self.enumerated = enumerated
        self.glyphs1 = glyphs1
        self.valuerecord1 = valuerecord1
        self.glyphs2 = glyphs2
        self.valuerecord2 = valuerecord2

    def build(self, builder):
        """Calls a callback on the builder object:

        * If the rule is enumerated, calls ``add_specific_pair_pos`` on each
          combination of first and second glyphs.
        * If the glyphs are both single :class:`GlyphName` objects, calls
          ``add_specific_pair_pos``.
        * Else, calls ``add_class_pair_pos``.
        """
        if self.enumerated:
            pairs = list(
                itertools.product(self.glyphs1.glyphSet(), self.glyphs2.glyphSet())
            )
            for first, second in pairs:
                builder.add_specific_pair_pos(
                    self.location, first, self.valuerecord1, second, self.valuerecord2
                )
            if not pairs:
                raise FeatureLibError(
                    "Empty glyph class in positioning rule", self.location
                )
            return

        if isinstance(self.glyphs1, GlyphName) and isinstance(self.glyphs2, GlyphName):
            builder.add_specific_pair_pos(
                self.location,
                self.glyphs1.glyph,
                self.valuerecord1,
                self.glyphs2.glyph,
                self.valuerecord2,
            )
        else:
            builder.add_class_pair_pos(
                self.location,
                self.glyphs1.glyphSet(),
                self.valuerecord1,
                self.glyphs2.glyphSet(),
                self.valuerecord2,
            )

    def asFea(self, indent=""):
        keyword = "enum pos" if self.enumerated else "pos"
        if self.valuerecord2:
            # Format A: each glyph carries its own value record.
            return "{} {} {} {} {};".format(
                keyword,
                self.glyphs1.asFea(),
                self.valuerecord1.asFea(),
                self.glyphs2.asFea(),
                self.valuerecord2.asFea(),
            )
        # Format B: one value record applied to the whole pair.
        return "{} {} {} {};".format(
            keyword, self.glyphs1.asFea(), self.glyphs2.asFea(), self.valuerecord1.asFea()
        )
class ReverseChainSingleSubstStatement(Statement):
    """A reverse chaining substitution statement. You don't see those every day.

    Note the unusual argument order: ``suffix`` comes `before` ``glyphs``.
    ``old_prefix``, ``old_suffix``, ``glyphs`` and ``replacements`` should be
    lists of `glyph-containing objects`_; ``glyphs`` and ``replacements``
    should be one-item lists.
    """

    def __init__(self, old_prefix, old_suffix, glyphs, replacements, location=None):
        Statement.__init__(self, location)
        self.old_prefix = old_prefix
        self.old_suffix = old_suffix
        self.glyphs = glyphs
        self.replacements = replacements

    def build(self, builder):
        before = [p.glyphSet() for p in self.old_prefix]
        after = [s.glyphSet() for s in self.old_suffix]
        sources = self.glyphs[0].glyphSet()
        targets = self.replacements[0].glyphSet()
        if len(targets) == 1:
            # A single replacement glyph applies to every source glyph.
            targets = targets * len(sources)
        builder.add_reverse_chain_single_subst(
            self.location, before, after, dict(zip(sources, targets))
        )

    def asFea(self, indent=""):
        if self.old_prefix or self.old_suffix:
            words = [asFea(g) for g in self.old_prefix]
            words.append(" ".join(asFea(g) + "'" for g in self.glyphs))
            words.extend(asFea(g) for g in self.old_suffix)
            lhs = " ".join(words)
        else:
            lhs = " ".join(map(asFea, self.glyphs))
        rhs = " ".join(asFea(g) for g in self.replacements)
        return "rsub {} by {};".format(lhs, rhs)
class SingleSubstStatement(Statement):
    """A single substitution statement.

    Note the unusual argument order: ``prefix`` and ``suffix`` come `after`
    the replacement ``glyphs``. ``prefix``, ``suffix``, ``glyphs`` and
    ``replace`` should be lists of `glyph-containing objects`_; ``glyphs``
    and ``replace`` should be one-item lists.
    """

    def __init__(self, glyphs, replace, prefix, suffix, forceChain, location=None):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.suffix = suffix
        self.forceChain = forceChain
        self.glyphs = glyphs
        self.replacements = replace

    def build(self, builder):
        """Calls the builder object's ``add_single_subst`` callback."""
        before = [p.glyphSet() for p in self.prefix]
        after = [s.glyphSet() for s in self.suffix]
        sources = self.glyphs[0].glyphSet()
        targets = self.replacements[0].glyphSet()
        if len(targets) == 1:
            # A single replacement glyph applies to every source glyph.
            targets = targets * len(sources)
        builder.add_single_subst(
            self.location,
            before,
            after,
            OrderedDict(zip(sources, targets)),
            self.forceChain,
        )

    def asFea(self, indent=""):
        if self.prefix or self.suffix or self.forceChain:
            words = [asFea(g) for g in self.prefix]
            words.append(" ".join(asFea(g) + "'" for g in self.glyphs))
            words.extend(asFea(g) for g in self.suffix)
            lhs = " ".join(words)
        else:
            lhs = " ".join(asFea(g) for g in self.glyphs)
        rhs = " ".join(asFea(g) for g in self.replacements)
        return "sub {} by {};".format(lhs, rhs)
class ScriptStatement(Statement):
    """A ``script`` statement."""

    def __init__(self, script, location=None):
        Statement.__init__(self, location)
        self.script = script  # the script code

    def build(self, builder):
        """Calls the builder's ``set_script`` callback."""
        builder.set_script(self.location, self.script)

    def asFea(self, indent=""):
        return f"script {self.script.strip()};"
class SinglePosStatement(Statement):
    """A single position statement.

    ``prefix`` and ``suffix`` should be lists of `glyph-containing objects`_.
    ``pos`` should be a one-element list containing a
    (`glyph-containing object`_, :class:`ValueRecord`) tuple.
    """

    def __init__(self, pos, prefix, suffix, forceChain, location=None):
        Statement.__init__(self, location)
        self.pos = pos
        self.prefix = prefix
        self.suffix = suffix
        self.forceChain = forceChain

    def build(self, builder):
        """Calls the builder object's ``add_single_pos`` callback."""
        before = [p.glyphSet() for p in self.prefix]
        after = [s.glyphSet() for s in self.suffix]
        pos = [(g.glyphSet(), value) for g, value in self.pos]
        builder.add_single_pos(self.location, before, after, pos, self.forceChain)

    def asFea(self, indent=""):
        if self.prefix or self.suffix or self.forceChain:
            words = [asFea(g) for g in self.prefix]
            marked = []
            for glyph, value in self.pos:
                item = asFea(glyph) + "'"
                if value:
                    item += " " + value.asFea()
                marked.append(item)
            words.append(" ".join(marked))
            words.extend(asFea(g) for g in self.suffix)
            body = " ".join(words)
        else:
            body = " ".join(
                asFea(glyph) + " " + (value.asFea() if value else "")
                for glyph, value in self.pos
            )
        return "pos " + body + ";"
class SubtableStatement(Statement):
    """An explicit ``subtable;`` break between lookup subtables."""

    def __init__(self, location=None):
        Statement.__init__(self, location)

    def build(self, builder):
        """Calls the builder objects's ``add_subtable_break`` callback."""
        builder.add_subtable_break(self.location)

    def asFea(self, indent=""):
        return "subtable;"
class ValueRecord(Expression):
    """Represents a GPOS value record, e.g. ``<0 0 120 0>``.

    Any of the placement/advance fields may be ``None``, meaning
    "not specified". ``vertical`` selects whether a lone advance number
    (format A) is interpreted as a y-advance instead of an x-advance.
    """

    def __init__(
        self,
        xPlacement=None,
        yPlacement=None,
        xAdvance=None,
        yAdvance=None,
        xPlaDevice=None,
        yPlaDevice=None,
        xAdvDevice=None,
        yAdvDevice=None,
        vertical=False,
        location=None,
    ):
        Expression.__init__(self, location)
        self.xPlacement, self.yPlacement = (xPlacement, yPlacement)
        self.xAdvance, self.yAdvance = (xAdvance, yAdvance)
        self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice)
        self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice)
        self.vertical = vertical

    def __eq__(self, other):
        # Bug fix: yPlaDevice and yAdvDevice were previously omitted here,
        # so two records differing only in those fields compared equal while
        # hashing differently — violating the __eq__/__hash__ contract and
        # breaking dict/set use. All eight fields are now compared, matching
        # __hash__ below.
        return (
            self.xPlacement == other.xPlacement
            and self.yPlacement == other.yPlacement
            and self.xAdvance == other.xAdvance
            and self.yAdvance == other.yAdvance
            and self.xPlaDevice == other.xPlaDevice
            and self.yPlaDevice == other.yPlaDevice
            and self.xAdvDevice == other.xAdvDevice
            and self.yAdvDevice == other.yAdvDevice
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return (
            hash(self.xPlacement)
            ^ hash(self.yPlacement)
            ^ hash(self.xAdvance)
            ^ hash(self.yAdvance)
            ^ hash(self.xPlaDevice)
            ^ hash(self.yPlaDevice)
            ^ hash(self.xAdvDevice)
            ^ hash(self.yAdvDevice)
        )

    def asFea(self, indent=""):
        # A record with no fields at all is spelled <NULL>.
        if not self:
            return "<NULL>"

        x, y = self.xPlacement, self.yPlacement
        xAdvance, yAdvance = self.xAdvance, self.yAdvance
        xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice
        xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice
        vertical = self.vertical

        # Try format A (a single advance number), if possible.
        if x is None and y is None:
            if xAdvance is None and vertical:
                return str(yAdvance)
            elif yAdvance is None and not vertical:
                return str(xAdvance)

        # Make any remaining None value 0 to avoid generating invalid records.
        x = x or 0
        y = y or 0
        xAdvance = xAdvance or 0
        yAdvance = yAdvance or 0

        # Try format B (four numbers, no device tables), if possible.
        if (
            xPlaDevice is None
            and yPlaDevice is None
            and xAdvDevice is None
            and yAdvDevice is None
        ):
            return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance)

        # Last resort is format C (four numbers plus four device tables).
        return "<%s %s %s %s %s %s %s %s>" % (
            x,
            y,
            xAdvance,
            yAdvance,
            deviceToString(xPlaDevice),
            deviceToString(yPlaDevice),
            deviceToString(xAdvDevice),
            deviceToString(yAdvDevice),
        )

    def __bool__(self):
        # True when at least one field has been specified.
        return any(
            getattr(self, v) is not None
            for v in [
                "xPlacement",
                "yPlacement",
                "xAdvance",
                "yAdvance",
                "xPlaDevice",
                "yPlaDevice",
                "xAdvDevice",
                "yAdvDevice",
            ]
        )

    __nonzero__ = __bool__
class ValueRecordDefinition(Statement):
    """A named value record definition (``valueRecordDef <...> NAME;``)."""

    def __init__(self, name, value, location=None):
        Statement.__init__(self, location)
        self.name = name  # value record name, as a string
        self.value = value  # the :class:`ValueRecord` being named

    def asFea(self, indent=""):
        return f"valueRecordDef {self.value.asFea()} {self.name};"
def simplify_name_attributes(pid, eid, lid):
    """Return the shortest FEA spelling of a name record's platform triple.

    The Windows/Unicode-BMP/US-English triple (3 1 1033) is the implicit
    default and collapses to the empty string; the Macintosh/Roman/English
    triple (1 0 0) collapses to "1"; any other combination is spelled out
    in full as "pid eid lid".
    """
    if (pid, eid, lid) == (3, 1, 1033):
        return ""
    if (pid, eid, lid) == (1, 0, 0):
        return "1"
    return "{} {} {}".format(pid, eid, lid)
class NameRecord(Statement):
    """Represents a name record. (`Section 9.e. <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_)"""

    def __init__(self, nameID, platformID, platEncID, langID, string, location=None):
        Statement.__init__(self, location)
        self.nameID = nameID  #: Name ID as integer (e.g. 9 for designer's name)
        self.platformID = platformID  #: Platform ID as integer
        self.platEncID = platEncID  #: Platform encoding ID as integer
        self.langID = langID  #: Language ID as integer
        self.string = string  #: Name record value

    def build(self, builder):
        """Calls the builder object's ``add_name_record`` callback."""
        builder.add_name_record(
            self.location,
            self.nameID,
            self.platformID,
            self.platEncID,
            self.langID,
            self.string,
        )

    def asFea(self, indent=""):
        """Render as ``nameid <id> [<platform triple>] "<escaped string>";``."""

        def escape(c, escape_pattern):
            # Printable ASCII passes through verbatim; everything else is
            # written as a hex escape using escape_pattern.
            # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS
            if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C):
                return chr(c)
            else:
                return escape_pattern % c

        encoding = getEncoding(self.platformID, self.platEncID, self.langID)
        if encoding is None:
            raise FeatureLibError("Unsupported encoding", self.location)
        s = tobytes(self.string, encoding=encoding)
        if encoding == "utf_16_be":
            # Big-endian UTF-16: combine byte pairs into 16-bit code units
            # and escape non-printable units as 4-hex-digit \XXXX sequences.
            escaped_string = "".join(
                [
                    escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x")
                    for i in range(0, len(s), 2)
                ]
            )
        else:
            # Single-byte encodings: escape each byte as a 2-hex-digit \XX.
            escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s])
        # simplify_name_attributes returns "" for the default platform
        # triple, in which case it is omitted from the output entirely.
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat != "":
            plat += " "
        return 'nameid {} {}"{}";'.format(self.nameID, plat, escaped_string)
class FeatureNameStatement(NameRecord):
    """Represents a ``sizemenuname`` or ``name`` statement."""

    def build(self, builder):
        """Calls the builder object's ``add_featureName`` callback."""
        NameRecord.build(self, builder)
        builder.add_featureName(self.nameID)

    def asFea(self, indent=""):
        # The ``size`` feature uses its own keyword for menu names.
        tag = "sizemenuname" if self.nameID == "size" else "name"
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        prefix = plat + " " if plat else ""
        return '{} {}"{}";'.format(tag, prefix, self.string)
class STATNameStatement(NameRecord):
    """Represents a STAT table ``name`` statement."""

    def asFea(self, indent=""):
        platform = simplify_name_attributes(
            self.platformID, self.platEncID, self.langID
        )
        prefix = platform + " " if platform else ""
        return 'name {}"{}";'.format(prefix, self.string)
class SizeParameters(Statement):
    """A ``parameters`` statement."""

    def __init__(self, DesignSize, SubfamilyID, RangeStart, RangeEnd, location=None):
        Statement.__init__(self, location)
        self.DesignSize = DesignSize
        self.SubfamilyID = SubfamilyID
        self.RangeStart = RangeStart
        self.RangeEnd = RangeEnd

    def build(self, builder):
        """Calls the builder object's ``set_size_parameters`` callback."""
        builder.set_size_parameters(
            self.location,
            self.DesignSize,
            self.SubfamilyID,
            self.RangeStart,
            self.RangeEnd,
        )

    def asFea(self, indent=""):
        parts = ["parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID)]
        if self.RangeStart != 0 or self.RangeEnd != 0:
            # ranges are stored in points; serialized in decipoints
            parts.append(
                " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10))
            )
        parts.append(";")
        return "".join(parts)
class CVParametersNameStatement(NameRecord):
    """Represent a name statement inside a ``cvParameters`` block."""

    def __init__(
        self, nameID, platformID, platEncID, langID, string, block_name, location=None
    ):
        NameRecord.__init__(
            self, nameID, platformID, platEncID, langID, string, location=location
        )
        # Which cvParameters sub-block this name belongs to,
        # e.g. "ParamUILabelNameID".
        self.block_name = block_name

    def build(self, builder):
        """Calls the builder object's ``add_cv_parameter`` callback."""
        item = ""
        if self.block_name == "ParamUILabelNameID":
            # Multiple ParamUILabelNameID entries can occur; suffix with the
            # current per-feature count so each gets a distinct key.
            item = "_{}".format(builder.cv_num_named_params_.get(self.nameID, 0))
        builder.add_cv_parameter(self.nameID)
        # NOTE: self.nameID is rebound to an (original nameID, block name)
        # tuple before delegating to NameRecord.build.  Order matters:
        # add_cv_parameter above must see the original value first.
        self.nameID = (self.nameID, self.block_name + item)
        NameRecord.build(self, builder)

    def asFea(self, indent=""):
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat != "":
            plat += " "
        return 'name {}"{}";'.format(plat, self.string)
class CharacterStatement(Statement):
    """
    Statement used in cvParameters blocks of Character Variant features (cvXX).
    The Unicode value may be written with either decimal or hexadecimal
    notation. The value must be preceded by '0x' if it is a hexadecimal value.
    The largest Unicode value allowed is 0xFFFFFF.
    """

    def __init__(self, character, tag, location=None):
        Statement.__init__(self, location)
        self.character = character  # code point stored as a plain integer
        self.tag = tag

    def build(self, builder):
        """Calls the builder object's ``add_cv_character`` callback."""
        builder.add_cv_character(self.character, self.tag)

    def asFea(self, indent=""):
        # always serialized in hexadecimal form
        return f"Character {self.character:#x};"
class BaseAxis(Statement):
    """An axis definition, being either a ``VertAxis.BaseTagList/BaseScriptList``
    pair or a ``HorizAxis.BaseTagList/BaseScriptList`` pair."""

    def __init__(self, bases, scripts, vertical, location=None):
        Statement.__init__(self, location)
        self.bases = bases  #: A list of baseline tag names as strings
        self.scripts = scripts  #: A list of script record tuplets (script tag, default baseline tag, base coordinate)
        self.vertical = vertical  #: Boolean; VertAxis if True, HorizAxis if False

    def build(self, builder):
        """Calls the builder object's ``set_base_axis`` callback."""
        builder.set_base_axis(self.bases, self.scripts, self.vertical)

    def asFea(self, indent=""):
        axis = "Vert" if self.vertical else "Horiz"
        script_records = []
        for script_tag, default_tag, coords in self.scripts:
            script_records.append(
                "{} {} {}".format(script_tag, default_tag, " ".join(map(str, coords)))
            )
        return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format(
            axis, " ".join(self.bases), indent, axis, ", ".join(script_records)
        )
class OS2Field(Statement):
    """An entry in the ``OS/2`` table. Most ``values`` should be numbers or
    strings, apart from when the key is ``UnicodeRange``, ``CodePageRange``
    or ``Panose``, in which case it should be an array of integers."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_os2_field`` callback."""
        builder.add_os2_field(self.key, self.value)

    def asFea(self, indent=""):
        def format_int_list(values):
            return " ".join(map(str, values))

        number_fields = (
            "FSType",
            "TypoAscender",
            "TypoDescender",
            "TypoLineGap",
            "winAscent",
            "winDescent",
            "XHeight",
            "CapHeight",
            "WeightClass",
            "WidthClass",
            "LowerOpSize",
            "UpperOpSize",
        )
        range_fields = ("UnicodeRange", "CodePageRange")
        # map the lowercased key to (canonical spelling, value formatter)
        keywords = {f.lower(): [f, str] for f in number_fields}
        keywords.update({f.lower(): [f, format_int_list] for f in range_fields})
        keywords["panose"] = ["Panose", format_int_list]
        keywords["vendor"] = ["Vendor", lambda v: '"{}"'.format(v)]
        entry = keywords.get(self.key)
        if entry is not None:
            return "{} {};".format(entry[0], entry[1](self.value))
        return ""  # should raise exception
class HheaField(Statement):
    """An entry in the ``hhea`` table."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_hhea_field`` callback."""
        builder.add_hhea_field(self.key, self.value)

    def asFea(self, indent=""):
        # map the lowercased key back to its canonical capitalization
        canonical = {
            f.lower(): f for f in ("CaretOffset", "Ascender", "Descender", "LineGap")
        }
        return "{} {};".format(canonical[self.key], self.value)
class VheaField(Statement):
    """An entry in the ``vhea`` table."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_vhea_field`` callback."""
        builder.add_vhea_field(self.key, self.value)

    def asFea(self, indent=""):
        # map the lowercased key back to its canonical capitalization
        canonical = {
            f.lower(): f
            for f in ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
        }
        return "{} {};".format(canonical[self.key], self.value)
class STATDesignAxisStatement(Statement):
    """A STAT table Design Axis

    Args:
        tag (str): a 4 letter axis tag
        axisOrder (int): an int
        names (list): a list of :class:`STATNameStatement` objects
    """

    def __init__(self, tag, axisOrder, names, location=None):
        Statement.__init__(self, location)
        self.tag = tag
        self.axisOrder = axisOrder
        self.names = names
        self.location = location

    def build(self, builder):
        builder.addDesignAxis(self, self.location)

    def asFea(self, indent=""):
        inner = indent + SHIFT
        body = ("\n" + inner).join(name.asFea(indent=inner) for name in self.names)
        return f"DesignAxis {self.tag} {self.axisOrder} {{ \n" + body + "\n};"
class ElidedFallbackName(Statement):
    """STAT table ElidedFallbackName

    Args:
        names: a list of :class:`STATNameStatement` objects
    """

    def __init__(self, names, location=None):
        Statement.__init__(self, location)
        self.names = names
        self.location = location

    def build(self, builder):
        builder.setElidedFallbackName(self.names, self.location)

    def asFea(self, indent=""):
        inner = indent + SHIFT
        body = ("\n" + inner).join(name.asFea(indent=inner) for name in self.names)
        return "ElidedFallbackName { \n" + body + "\n};"
class ElidedFallbackNameID(Statement):
    """STAT table ElidedFallbackNameID

    Args:
        value: an int pointing to an existing name table name ID
    """

    def __init__(self, value, location=None):
        Statement.__init__(self, location)
        self.value = value
        self.location = location

    def build(self, builder):
        builder.setElidedFallbackName(self.value, self.location)

    def asFea(self, indent=""):
        return "ElidedFallbackNameID {};".format(self.value)
class STATAxisValueStatement(Statement):
    """A STAT table Axis Value Record

    Args:
        names (list): a list of :class:`STATNameStatement` objects
        locations (list): a list of :class:`AxisValueLocationStatement` objects
        flags (int): an int
    """

    def __init__(self, names, locations, flags, location=None):
        Statement.__init__(self, location)
        self.names = names
        self.locations = locations
        self.flags = flags

    def build(self, builder):
        builder.addAxisValueRecord(self, self.location)

    def asFea(self, indent=""):
        pieces = ["AxisValue {\n"]
        pieces.extend(loc.asFea() for loc in self.locations)
        pieces.extend(name.asFea() for name in self.names)
        pieces.append("\n")
        if self.flags:
            # flag names listed in bit order, bit 0 first
            known_flags = ["OlderSiblingFontAttribute", "ElidableAxisValueName"]
            set_flags = [
                flag
                for bit, flag in enumerate(known_flags)
                if self.flags & (1 << bit)
            ]
            pieces.append(f"flag {' '.join(set_flags)};\n")
        pieces.append("};")
        return "".join(pieces)
class AxisValueLocationStatement(Statement):
    """
    A STAT table Axis Value Location

    Args:
        tag (str): a 4 letter axis tag
        values (list): a list of ints and/or floats
    """

    def __init__(self, tag, values, location=None):
        Statement.__init__(self, location)
        self.tag = tag
        self.values = values

    def asFea(self, res=""):
        values = " ".join(str(v) for v in self.values)
        return res + f"location {self.tag} {values};\n"
class ConditionsetStatement(Statement):
    """
    A variable layout conditionset

    Args:
        name (str): the name of this conditionset
        conditions (dict): a dictionary mapping axis tags to a
            tuple of (min,max) userspace coordinates.
    """

    def __init__(self, name, conditions, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.conditions = conditions

    def build(self, builder):
        builder.add_conditionset(self.name, self.conditions)

    def asFea(self, res="", indent=""):
        lines = [indent + f"conditionset {self.name} " + "{"]
        for axis_tag, (minimum, maximum) in self.conditions.items():
            lines.append(indent + SHIFT + f"{axis_tag} {minimum} {maximum};")
        lines.append(indent + "}" + f" {self.name};")
        return res + "\n".join(lines) + "\n"
class VariationBlock(Block):
    """A variation feature block, applicable in a given set of conditions."""

    def __init__(self, name, conditionset, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name, self.conditionset, self.use_extension = (
            name,
            conditionset,
            use_extension,
        )

    def build(self, builder):
        """Call the ``start_feature`` callback on the builder object, visit
        all the statements in this feature, and then call ``end_feature``."""
        builder.start_feature(self.location, self.name)
        if (
            self.conditionset != "NULL"
            and self.conditionset not in builder.conditionsets_
        ):
            raise FeatureLibError(
                f"variation block used undefined conditionset {self.conditionset}",
                self.location,
            )
        # language exclude_dflt statements modify builder.features_
        # limit them to this block with temporary builder.features_
        features = builder.features_
        builder.features_ = {}
        Block.build(self, builder)
        # Move everything collected into the temporary dict over to the
        # per-conditionset feature_variations_ mapping, keyed the same way
        # as builder.features_.
        for key, value in builder.features_.items():
            items = builder.feature_variations_.setdefault(key, {}).setdefault(
                self.conditionset, []
            )
            items.extend(value)
            if key not in features:
                features[key] = []  # Ensure we make a feature record
        # restore the real features dict before closing the feature
        builder.features_ = features
        builder.end_feature()

    def asFea(self, indent=""):
        res = indent + "variation %s " % self.name.strip()
        res += self.conditionset + " "
        if self.use_extension:
            res += "useExtension "
        res += "{\n"
        res += Block.asFea(self, indent=indent)
        res += indent + "} %s;\n" % self.name.strip()
        return res
| mit | 63eaacb363aa396cd9c094c6387663f8 | 33.56485 | 138 | 0.578323 | 3.908704 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/tables/S_I_N_G_.py | 3 | 2661 | from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
from . import DefaultTable
SINGFormat = """
> # big endian
tableVersionMajor: H
tableVersionMinor: H
glyphletVersion: H
permissions: h
mainGID: H
unitsPerEm: H
vertAdvance: h
vertOrigin: h
uniqueName: 28s
METAMD5: 16s
nameLength: 1s
"""
# baseGlyphName is a byte string which follows the record above.
class table_S_I_N_G_(DefaultTable.DefaultTable):
	"""The SING glyphlet metadata table.

	``decompile``/``compile`` convert between the binary record described by
	``SINGFormat`` (plus the trailing base glyph name) and attribute form.
	"""

	dependencies = []

	def decompile(self, data, ttFont):
		dummy, rest = sstruct.unpack2(SINGFormat, data, self)
		self.uniqueName = self.decompileUniqueName(self.uniqueName)
		self.nameLength = byteord(self.nameLength)
		assert len(rest) == self.nameLength
		self.baseGlyphName = tostr(rest)
		# Render the 16 raw MD5 bytes as a textual list of hex values,
		# e.g. "[0x11, 0x22, ...]"; compile() eval()s this back into ints.
		rawMETAMD5 = self.METAMD5
		self.METAMD5 = "[" + ", ".join(hex(byteord(char)) for char in rawMETAMD5) + "]"

	def decompileUniqueName(self, data):
		"""Decode the NUL-padded uniqueName field to text, escaping any
		non-printable byte as a backslashed 3-digit octal number."""
		name = ""
		for char in data:
			val = byteord(char)
			if val == 0:
				break
			if (val > 31) and (val < 128):
				# printable ASCII is kept as-is.
				# Fixed: the original tested `or`, which is always true and
				# made the octal-escape branch below unreachable.
				name += chr(val)
			else:
				# Fixed: the original built the escape from oct()/zfill(),
				# discarding the zfill() result and mishandling the Python 3
				# '0o' prefix.
				name += "\\" + format(val, "03o")
		return name

	def compile(self, ttFont):
		d = self.__dict__.copy()
		d["nameLength"] = bytechr(len(self.baseGlyphName))
		d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
		# NOTE: eval of the textual list produced by decompile() (or read
		# from TTX); expected to be a list of 16 ints.
		METAMD5List = eval(self.METAMD5)
		d["METAMD5"] = b""
		for val in METAMD5List:
			d["METAMD5"] += bytechr(val)
		assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
		data = sstruct.pack(SINGFormat, d)
		data = data + tobytes(self.baseGlyphName)
		return data

	def compilecompileUniqueName(self, name, length):
		"""Return 'name' truncated (NUL-terminated) or NUL-padded to exactly
		'length' characters."""
		nameLen = len(name)
		if length <= nameLen:
			name = name[:length-1] + "\000"
		else:
			# Fixed: pad up to 'length'; the original multiplied by the
			# negative (nameLen - length), which produced no padding at all.
			name += (length - nameLen) * "\000"
		return name

	def toXML(self, writer, ttFont):
		writer.comment("Most of this table will be recalculated by the compiler")
		writer.newline()
		formatstring, names, fixes = sstruct.getformat(SINGFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
		writer.simpletag("baseGlyphName", value=self.baseGlyphName)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		value = attrs["value"]
		if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
			setattr(self, name, value)
		else:
			setattr(self, name, safeEval(value))
| mit | 136976fcdda8fca51167b4284261137f | 27.612903 | 83 | 0.680947 | 2.849036 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/misc/sstruct.py | 3 | 6054 | """sstruct.py -- SuperStruct
Higher level layer on top of the struct module, enabling to
bind names to struct elements. The interface is similar to
struct, except the objects passed and returned are not tuples
(or argument lists), but dictionaries or instances.
Just like struct, we use fmt strings to describe a data
structure, except we use one line per element. Lines are
separated by newlines or semi-colons. Each line contains
either one of the special struct characters ('@', '=', '<',
'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').
Repetitions, like the struct module offers them are not useful
in this context, except for fixed length strings (eg. 'myInt:5h'
is not allowed but 'myString:5s' is). The 'x' fmt character
(pad byte) is treated as 'special', since it is by definition
anonymous. Extra whitespace is allowed everywhere.
The sstruct module offers one feature that the "normal" struct
module doesn't: support for fixed point numbers. These are spelled
as "n.mF", where n is the number of bits before the point, and m
the number of bits after the point. Fixed point numbers get
converted to floats.
pack(fmt, object):
'object' is either a dictionary or an instance (or actually
anything that has a __dict__ attribute). If it is a dictionary,
its keys are used for names. If it is an instance, its
attributes are used to grab struct elements from. Returns
a string containing the data.
unpack(fmt, data, object=None)
If 'object' is omitted (or None), a new dictionary will be
returned. If 'object' is a dictionary, it will be used to add
struct elements to. If it is an instance (or in fact anything
that has a __dict__ attribute), an attribute will be added for
each struct element. In the latter two cases, 'object' itself
is returned.
unpack2(fmt, data, object=None)
Convenience function. Same as unpack, except data may be longer
than needed. The returned value is a tuple: (object, leftoverdata).
calcsize(fmt)
like struct.calcsize(), but uses our own fmt strings:
it returns the size of the data in bytes.
"""
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from fontTools.misc.textTools import tobytes, tostr
import struct
import re
__version__ = "1.2"
__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
class Error(Exception):
	"""Raised for malformed sstruct format strings."""
	pass
def pack(fmt, obj):
	"""Pack the named fields of 'obj' (a dict, or any object with a
	__dict__) into binary data according to the sstruct format 'fmt'."""
	formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
	if not isinstance(obj, dict):
		obj = obj.__dict__
	elements = []
	for name in names:
		value = obj[name]
		if name in fixes:
			# fixed point conversion
			value = fl2fi(value, fixes[name])
		elif isinstance(value, str):
			value = tobytes(value)
		elements.append(value)
	return struct.pack(formatstring, *elements)
def unpack(fmt, data, obj=None):
	"""Unpack binary 'data' according to 'fmt' into 'obj'.  When 'obj' is
	omitted a new dict is returned; a dict gets new keys, anything else
	gets new attributes."""
	if obj is None:
		obj = {}
	data = tobytes(data)
	formatstring, names, fixes = getformat(fmt)
	d = obj if isinstance(obj, dict) else obj.__dict__
	for name, value in zip(names, struct.unpack(formatstring, data)):
		if name in fixes:
			# fixed point conversion
			value = fi2fl(value, fixes[name])
		elif isinstance(value, bytes):
			# decode text fields when possible; keep raw bytes otherwise
			try:
				value = tostr(value)
			except UnicodeDecodeError:
				pass
		d[name] = value
	return obj
def unpack2(fmt, data, obj=None):
	"""Like unpack(), but 'data' may be longer than needed; returns a
	(object, leftoverdata) tuple."""
	length = calcsize(fmt)
	head, tail = data[:length], data[length:]
	return unpack(fmt, head, obj), tail
def calcsize(fmt):
	"""Return the size in bytes of the data described by 'fmt'."""
	formatstring, _, _ = getformat(fmt)
	return struct.calcsize(formatstring)
# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
	r"\s*"  # whitespace
	r"([A-Za-z_][A-Za-z_0-9]*)"  # name (python identifier)
	r"\s*:\s*"  # whitespace : whitespace
	r"([xcbB?hHiIlLqQfd]|"  # formatchar...
	r"[0-9]+[ps]|"  # ...formatchar...
	r"([0-9]+)\.([0-9]+)(F))"  # ...formatchar (n.mF fixed point)
	r"\s*"  # whitespace
	r"(#.*)?$"  # [comment] + end of string
)
# matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")
# struct formatchar used for a fixed point number of the given total bit size
_fixedpointmappings = {
	8: "b",
	16: "h",
	32: "l"}
# memoizes getformat() results, keyed by the raw fmt string
_formatcache = {}
def getformat(fmt, keep_pad_byte=False):
	"""Parse an sstruct format string.

	Returns a (formatstring, names, fixes) triple: 'formatstring' is a
	plain struct-module format string, 'names' lists the element names in
	order (pad bytes included only when 'keep_pad_byte' is true), and
	'fixes' maps fixed point field names to their number of fractional
	bits.  Results are cached in _formatcache per input string.
	"""
	fmt = tostr(fmt, encoding="ascii")
	try:
		formatstring, names, fixes = _formatcache[fmt]
	except KeyError:
		lines = re.split("[\n;]", fmt)
		formatstring = ""
		names = []
		fixes = {}
		for line in lines:
			if _emptyRE.match(line):
				continue
			m = _extraRE.match(line)
			if m:
				formatchar = m.group(1)
				if formatchar != 'x' and formatstring:
					raise Error("a special fmt char must be first")
			else:
				m = _elementRE.match(line)
				if not m:
					raise Error("syntax error in fmt: '%s'" % line)
				name = m.group(1)
				formatchar = m.group(2)
				if keep_pad_byte or formatchar != "x":
					names.append(name)
				if m.group(3):
					# fixed point
					before = int(m.group(3))
					after = int(m.group(4))
					bits = before + after
					if bits not in [8, 16, 32]:
						raise Error("fixed point must be 8, 16 or 32 bits long")
					formatchar = _fixedpointmappings[bits]
					assert m.group(5) == "F"
					fixes[name] = after
				formatstring = formatstring + formatchar
		_formatcache[fmt] = formatstring, names, fixes
	return formatstring, names, fixes
def _test():
	# Smoke test / usage demo: round-trips every supported field kind
	# through pack() and unpack().  Run this module directly to execute it.
	fmt = """
		# comments are allowed
		> # big endian (see documentation for struct)
		# empty lines are allowed:

		ashort: h
		along: l
		abyte: b	# a byte
		achar: c
		astr: 5s
		afloat: f; adouble: d	# multiple "statements" are allowed
		afixed: 16.16F
		abool: ?
		apad: x
	"""

	print('size:', calcsize(fmt))

	class foo(object):
		pass

	i = foo()

	i.ashort = 0x7fff
	i.along = 0x7fffffff
	i.abyte = 0x7f
	i.achar = "a"
	i.astr = "12345"
	i.afloat = 0.5
	i.adouble = 0.5
	i.afixed = 1.5
	i.abool = True

	data = pack(fmt, i)
	print('data:', repr(data))
	print(unpack(fmt, data))
	i2 = foo()
	unpack(fmt, data, i2)
	print(vars(i2))


if __name__ == "__main__":
	_test()
| mit | 8ec7752f6cc9d6a0a62d8f9e7bc22f69 | 27.027778 | 82 | 0.672118 | 3.007452 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/misc/py23.py | 3 | 2238 | """Python 2/3 compat layer leftovers."""
import decimal as _decimal
import math as _math
import warnings
from contextlib import redirect_stderr, redirect_stdout
from io import BytesIO
from io import StringIO as UnicodeIO
from types import SimpleNamespace
from .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr
# Importing fontTools.misc.py23 at all triggers this deprecation notice.
warnings.warn(
	"The py23 module has been deprecated and will be removed in a future release. "
	"Please update your code.",
	DeprecationWarning,
)


# Public names kept for backward compatibility with Python 2/3 era code.
__all__ = [
	"basestring",
	"bytechr",
	"byteord",
	"BytesIO",
	"bytesjoin",
	"open",
	"Py23Error",
	"range",
	"RecursionError",
	"round",
	"SimpleNamespace",
	"StringIO",
	"strjoin",
	"Tag",
	"tobytes",
	"tostr",
	"tounicode",
	"unichr",
	"unicode",
	"UnicodeIO",
	"xrange",
	"zip",
]
class Py23Error(NotImplementedError):
	"""Raised when a removed Python 2 compatibility shim is used."""
	pass
# Python 3 spellings re-exported under the names Python 2/3 straddling
# code used to import from this module.
RecursionError = RecursionError
StringIO = UnicodeIO

basestring = str
isclose = _math.isclose
isfinite = _math.isfinite
open = open
range = range
round = round3 = round
unichr = chr
unicode = str
zip = zip

tounicode = tostr
def xrange(*args, **kwargs):
	"""Removed Python 2 builtin; always raises, pointing callers at the
	builtin ``range`` instead."""
	raise Py23Error("'xrange' is not defined. Use 'range' instead.")
def round2(number, ndigits=None):
	"""
	Implementation of Python 2 built-in round() function.

	Rounds a number to a given precision in decimal digits (default
	0 digits). The result is a floating point number. Values are rounded
	to the closest multiple of 10 to the power minus ndigits; if two
	multiples are equally close, rounding is done away from 0.

	ndigits may be negative.

	See Python 2 documentation:
	https://docs.python.org/2/library/functions.html?highlight=round#round
	"""
	ndigits = 0 if ndigits is None else ndigits

	if ndigits < 0:
		# round to a multiple of 10**(-ndigits) using integer arithmetic
		exponent = 10 ** (-ndigits)
		quotient, remainder = divmod(number, exponent)
		round_up = remainder >= exponent // 2 and number >= 0
		if round_up:
			quotient += 1
		return float(quotient * exponent)

	# positive ndigits: quantize via Decimal with half-up rounding
	exponent = _decimal.Decimal("10") ** (-ndigits)
	d = _decimal.Decimal.from_float(number).quantize(
		exponent, rounding=_decimal.ROUND_HALF_UP
	)
	return float(d)
| mit | e12d0c3df3a655320f3054fc44724ec4 | 22.3125 | 83 | 0.655496 | 3.858621 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/tables/_c_v_t.py | 3 | 1145 | from fontTools.misc.textTools import safeEval
from . import DefaultTable
import sys
import array
class table__c_v_t(DefaultTable.DefaultTable):
	"""Control Value Table: a sequence of signed 16-bit values, stored
	big-endian in the font and exposed list-like on this object."""

	def decompile(self, data, ttFont):
		values = array.array("h")
		values.frombytes(data)
		if sys.byteorder != "big":
			values.byteswap()
		self.values = values

	def compile(self, ttFont):
		# byteswap a copy so self.values keeps native byte order
		values = self.values[:]
		if sys.byteorder != "big":
			values.byteswap()
		return values.tobytes()

	def toXML(self, writer, ttFont):
		for index, value in enumerate(self.values):
			writer.simpletag("cv", value=value, index=index)
			writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		if not hasattr(self, "values"):
			self.values = array.array("h")
		if name != "cv":
			return
		index = safeEval(attrs["index"])
		value = safeEval(attrs["value"])
		# grow the array with zeros up to the referenced index
		while len(self.values) <= index:
			self.values.append(0)
		self.values[index] = value

	def __len__(self):
		return len(self.values)

	def __getitem__(self, index):
		return self.values[index]

	def __setitem__(self, index, value):
		self.values[index] = value

	def __delitem__(self, index):
		del self.values[index]
fonttools/fonttools | Lib/fontTools/ttLib/ttFont.py | 1 | 33479 | from fontTools.config import Config
from fontTools.misc import xmlWriter
from fontTools.misc.configTools import AbstractConfig
from fontTools.misc.textTools import Tag, byteord, tostr
from fontTools.misc.loggingTools import deprecateArgument
from fontTools.ttLib import TTLibError
from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSetCFF, _TTGlyphSetGlyf
from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
from io import BytesIO, StringIO
import os
import logging
import traceback
log = logging.getLogger(__name__)
class TTFont(object):
"""Represents a TrueType font.
The object manages file input and output, and offers a convenient way of
accessing tables. Tables will be only decompiled when necessary, ie. when
they're actually accessed. This means that simple operations can be extremely fast.
Example usage::
>> from fontTools import ttLib
>> tt = ttLib.TTFont("afont.ttf") # Load an existing font file
>> tt['maxp'].numGlyphs
242
>> tt['OS/2'].achVendID
'B&H\000'
>> tt['head'].unitsPerEm
2048
For details of the objects returned when accessing each table, see :ref:`tables`.
To add a table to the font, use the :py:func:`newTable` function::
>> os2 = newTable("OS/2")
>> os2.version = 4
>> # set other attributes
>> font["OS/2"] = os2
TrueType fonts can also be serialized to and from XML format (see also the
:ref:`ttx` binary)::
>> tt.saveXML("afont.ttx")
Dumping 'LTSH' table...
Dumping 'OS/2' table...
[...]
>> tt2 = ttLib.TTFont() # Create a new font object
>> tt2.importXML("afont.ttx")
>> tt2['maxp'].numGlyphs
242
The TTFont object may be used as a context manager; this will cause the file
reader to be closed after the context ``with`` block is exited::
with TTFont(filename) as f:
# Do stuff
Args:
file: When reading a font from disk, either a pathname pointing to a file,
or a readable file object.
res_name_or_index: If running on a Macintosh, either a sfnt resource name or
an sfnt resource index number. If the index number is zero, TTLib will
autodetect whether the file is a flat file or a suitcase. (If it is a suitcase,
only the first 'sfnt' resource will be read.)
sfntVersion (str): When constructing a font object from scratch, sets the four-byte
sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create
an OpenType file, use ``OTTO``.
flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2
file.
checkChecksums (int): How checksum data should be treated. Default is 0
(no checking). Set to 1 to check and warn on wrong checksums; set to 2 to
raise an exception if any wrong checksums are found.
recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``,
``head`` bounding box values and ``hhea``/``vhea`` min/max values on save.
Also compiles the glyphs on importing, which saves memory consumption and
time.
ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation
will be ignored, and the binary data will be returned for those tables instead.
recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in
the ``head`` table on save.
fontNumber (int): The index of the font in a TrueType Collection file.
lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon
access only. If it is set to False, many data structures are loaded immediately.
The default is ``lazy=None`` which is somewhere in between.
"""
	def __init__(self, file=None, res_name_or_index=None,
			sfntVersion="\000\001\000\000", flavor=None, checkChecksums=0,
			verbose=None, recalcBBoxes=True, allowVID=NotImplemented, ignoreDecompileErrors=False,
			recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None,
			_tableCache=None, cfg={}):
		# See the class docstring for the meaning of the public arguments.
		# NOTE(review): cfg={} is a mutable default; it is only copied below,
		# never mutated, so this is currently harmless.
		for name in ("verbose", "quiet"):
			val = locals().get(name)
			if val is not None:
				deprecateArgument(name, "configure logging instead")
			setattr(self, name, val)

		self.lazy = lazy
		self.recalcBBoxes = recalcBBoxes
		self.recalcTimestamp = recalcTimestamp
		self.tables = {}
		self.reader = None
		# accept either a ready-made Config (copied) or a plain mapping of
		# overrides (wrapped in a new Config)
		self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg)
		self.ignoreDecompileErrors = ignoreDecompileErrors

		if not file:
			# creating a font from scratch: no reader, just header fields
			self.sfntVersion = sfntVersion
			self.flavor = flavor
			self.flavorData = None
			return
		if not hasattr(file, "read"):
			closeStream = True
			# assume file is a string
			if res_name_or_index is not None:
				# see if it contains 'sfnt' resources in the resource or data fork
				from . import macUtils
				if res_name_or_index == 0:
					if macUtils.getSFNTResIndices(file):
						# get the first available sfnt font.
						file = macUtils.SFNTResourceReader(file, 1)
					else:
						file = open(file, "rb")
				else:
					file = macUtils.SFNTResourceReader(file, res_name_or_index)
			else:
				file = open(file, "rb")
		else:
			# assume "file" is a readable file object
			closeStream = False
			file.seek(0)

		if not self.lazy:
			# read input file in memory and wrap a stream around it to allow overwriting
			file.seek(0)
			tmp = BytesIO(file.read())
			if hasattr(file, 'name'):
				# save reference to input file name
				tmp.name = file.name
			if closeStream:
				file.close()
			file = tmp

		self._tableCache = _tableCache
		self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
		self.sfntVersion = self.reader.sfntVersion
		self.flavor = self.reader.flavor
		self.flavorData = self.reader.flavorData
	def __enter__(self):
		# context-manager support: the reader is closed on exit, see __exit__
		return self

	def __exit__(self, type, value, traceback):
		self.close()

	def close(self):
		"""If we still have a reader object, close it."""
		if self.reader is not None:
			self.reader.close()
	def save(self, file, reorderTables=True):
		"""Save the font to disk.

		Args:
			file: Similarly to the constructor, can be either a pathname or a writable
				file object.
			reorderTables (Option[bool]): If true (the default), reorder the tables,
				sorting them by tag (recommended by the OpenType specification). If
				false, retain the original font order. If None, reorder by table
				dependency (fastest).
		"""
		if not hasattr(file, "write"):
			if self.lazy and self.reader.file.name == file:
				raise TTLibError(
					"Can't overwrite TTFont when 'lazy' attribute is True")
			createStream = True
		else:
			# assume "file" is a writable file object
			createStream = False

		# always serialize into an in-memory stream first, so unseekable
		# targets and the optional reordering pass below both work
		tmp = BytesIO()

		writer_reordersTables = self._save(tmp)

		if not (reorderTables is None or writer_reordersTables or
				(reorderTables is False and self.reader is None)):
			if reorderTables is False:
				# sort tables using the original font's order
				tableOrder = list(self.reader.keys())
			else:
				# use the recommended order from the OpenType specification
				tableOrder = None
			tmp.flush()
			tmp2 = BytesIO()
			reorderFontTables(tmp, tmp2, tableOrder)
			tmp.close()
			tmp = tmp2

		if createStream:
			# "file" is a path
			with open(file, "wb") as file:
				file.write(tmp.getvalue())
		else:
			file.write(tmp.getvalue())

		tmp.close()
	def _save(self, file, tableCache=None):
		"""Internal function, to be shared by save() and TTCollection.save()"""
		if self.recalcTimestamp and 'head' in self:
			self['head']  # make sure 'head' is loaded so the recalculation is actually done
		tags = list(self.keys())
		if "GlyphOrder" in tags:
			# GlyphOrder is a pseudo-table, never written to the binary font
			tags.remove("GlyphOrder")
		numTables = len(tags)
		# write to a temporary stream to allow saving to unseekable streams
		writer = SFNTWriter(file, numTables, self.sfntVersion, self.flavor, self.flavorData)

		done = []
		for tag in tags:
			self._writeTable(tag, writer, done, tableCache)

		writer.close()

		return writer.reordersTables()
	def saveXML(self, fileOrPath, newlinestr="\n", **kwargs):
		"""Export the font as TTX (an XML-based text file), or as a series of text
		files when splitTables is true. In the latter case, the 'fileOrPath'
		argument should be a path to a directory.
		The 'tables' argument must either be false (dump all tables) or a
		list of tables to dump. The 'skipTables' argument may be a list of tables
		to skip, but only when the 'tables' argument is false.
		"""
		writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
		# remaining keyword arguments are forwarded to _saveXML
		self._saveXML(writer, **kwargs)
		writer.close()
def _saveXML(self, writer,
             writeVersion=True,
             quiet=None, tables=None, skipTables=None, splitTables=False,
             splitGlyphs=False, disassembleInstructions=True,
             bitmapGlyphDataFormat='raw'):
    """Write the font to ``writer`` as TTX.

    ``tables``/``skipTables`` select which tables are dumped; when
    ``splitTables`` (or ``splitGlyphs``) is true each table goes to its
    own file next to ``writer.filename`` and the main file only records
    references.  ``quiet`` is deprecated; configure logging instead.
    """
    if quiet is not None:
        deprecateArgument("quiet", "configure logging instead")
    # These two attributes are read by individual table toXML() methods.
    self.disassembleInstructions = disassembleInstructions
    self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
    if not tables:
        tables = list(self.keys())
        if "GlyphOrder" not in tables:
            tables = ["GlyphOrder"] + tables
        if skipTables:
            for tag in skipTables:
                if tag in tables:
                    tables.remove(tag)
    numTables = len(tables)
    if writeVersion:
        from fontTools import version
        version = ".".join(version.split('.')[:2])  # major.minor only
        writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
                        ttLibVersion=version)
    else:
        writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
    writer.newline()
    # always splitTables if splitGlyphs is enabled
    splitTables = splitTables or splitGlyphs
    if not splitTables:
        writer.newline()
    else:
        # Per-table files share the main file's path with the table name
        # spliced in before the extension.
        path, ext = os.path.splitext(writer.filename)
        fileNameTemplate = path + ".%s" + ext
    for i in range(numTables):
        tag = tables[i]
        if splitTables:
            tablePath = fileNameTemplate % tagToIdentifier(tag)
            tableWriter = xmlWriter.XMLWriter(tablePath,
                                              newlinestr=writer.newlinestr)
            tableWriter.begintag("ttFont", ttLibVersion=version)
            tableWriter.newline()
            tableWriter.newline()
            # The main file only references the split-out table file.
            writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
            writer.newline()
        else:
            tableWriter = writer
        self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
        if splitTables:
            tableWriter.endtag("ttFont")
            tableWriter.newline()
            tableWriter.close()
    writer.endtag("ttFont")
    writer.newline()
def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
    """Dump the single table ``tag`` to ``writer``, or log a notice if
    the table is not present in the font."""
    if quiet is not None:
        deprecateArgument("quiet", "configure logging instead")
    if tag not in self:
        log.info("No '%s' table found." % tag)
        return
    # Fetch first (this may trigger lazy decompilation), then announce.
    table = self[tag]
    log.info("Dumping '%s' table..." % tag)
    xmlTag = tagToXML(tag)
    attrs = {}
    if hasattr(table, "ERROR"):
        attrs['ERROR'] = "decompilation error"
    from .tables.DefaultTable import DefaultTable
    if table.__class__ == DefaultTable:
        attrs['raw'] = True
    writer.begintag(xmlTag, **attrs)
    writer.newline()
    if tag == "glyf":
        table.toXML(writer, self, splitGlyphs=splitGlyphs)
    else:
        table.toXML(writer, self)
    writer.endtag(xmlTag)
    writer.newline()
    writer.newline()
def importXML(self, fileOrPath, quiet=None):
    """Import a TTX file (an XML-based text format), so as to recreate
    a font object.

    ``fileOrPath`` may be a readable file object or a path.  The
    ``quiet`` argument is deprecated; configure logging instead.
    """
    if quiet is not None:
        deprecateArgument("quiet", "configure logging instead")
    if "maxp" in self and "post" in self:
        # Make sure the glyph order is loaded, as it otherwise gets
        # lost if the XML doesn't contain the glyph order, yet does
        # contain the table which was originally used to extract the
        # glyph names from (ie. 'post', 'cmap' or 'CFF ').
        self.getGlyphOrder()
    from fontTools.misc import xmlReader
    reader = xmlReader.XMLReader(fileOrPath, self)
    reader.read()
def isLoaded(self, tag):
    """True when the table ``tag`` has already been decompiled and is
    held in memory; False when it is unloaded or absent."""
    return tag in self.tables
def has_key(self, tag):
    """Test if the table identified by ``tag`` is present in the font.

    As well as this method, ``tag in font`` can also be used to
    determine the presence of the table.
    """
    # Checked in the same order as loaded tables are preferred elsewhere:
    # in-memory tables, then the lazy reader, then the GlyphOrder pseudo-table.
    present = (
        self.isLoaded(tag)
        or bool(self.reader and tag in self.reader)
        or tag == "GlyphOrder"
    )
    return present

__contains__ = has_key
def keys(self):
    """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table."""
    seen = list(self.tables.keys())
    if self.reader:
        # Add tables that are still unloaded (only known to the reader).
        seen.extend(k for k in list(self.reader.keys()) if k not in seen)
    if "GlyphOrder" in seen:
        seen.remove("GlyphOrder")
    # GlyphOrder always comes first; real tables follow in spec order.
    return ["GlyphOrder"] + sortedTagList(seen)
def ensureDecompiled(self, recurse=None):
    """Decompile all the tables, even if a TTFont was opened in 'lazy' mode."""
    for tag in self.keys():
        table = self[tag]  # indexing forces the table to load and decompile
        if recurse is None:
            # Resolved on first iteration: recurse into subtables unless
            # the font was explicitly opened with lazy=False.
            recurse = self.lazy is not False
        if recurse and hasattr(table, "ensureDecompiled"):
            table.ensureDecompiled(recurse=recurse)
    # Nothing is lazy any more.
    self.lazy = False
def __len__(self):
    """Number of tables, including the GlyphOrder pseudo-table."""
    return len(list(self.keys()))
def __getitem__(self, tag):
    """Return the table object for ``tag``, loading and decompiling it
    from the reader on first access.  Raises KeyError if absent."""
    tag = Tag(tag)
    table = self.tables.get(tag)
    if table is None:
        if tag == "GlyphOrder":
            # Synthesized pseudo-table; not backed by binary data.
            table = GlyphOrder(tag)
            self.tables[tag] = table
        elif self.reader is not None:
            # Lazy path: pull the raw bytes from the reader and decompile.
            table = self._readTable(tag)
        else:
            raise KeyError("'%s' table not found" % tag)
    return table
def _readTable(self, tag):
    """Read, decompile, cache and return the table ``tag`` via the reader."""
    log.debug("Reading '%s' table from disk", tag)
    data = self.reader[tag]
    if self._tableCache is not None:
        # Shared cache (used across fonts in a TTCollection), keyed on
        # (tag, raw bytes) so identical tables are decompiled only once.
        table = self._tableCache.get((tag, data))
        if table is not None:
            return table
    tableClass = getTableClass(tag)
    table = tableClass(tag)
    # Registered before decompiling, so self[tag] stays resolvable while
    # decompile() runs.
    self.tables[tag] = table
    log.debug("Decompiling '%s' table", tag)
    try:
        table.decompile(data, self)
    except Exception:
        if not self.ignoreDecompileErrors:
            raise
        # fall back to DefaultTable, retaining the binary table data
        log.exception(
            "An exception occurred during the decompilation of the '%s' table", tag)
        from .tables.DefaultTable import DefaultTable
        file = StringIO()
        traceback.print_exc(file=file)
        table = DefaultTable(tag)
        # Keep the traceback on the object; _tableToXML reports it as
        # a 'decompilation error' attribute.
        table.ERROR = file.getvalue()
        self.tables[tag] = table
        table.decompile(data, self)  # DefaultTable just stores the raw bytes
    if self._tableCache is not None:
        self._tableCache[(tag, data)] = table
    return table
def __setitem__(self, tag, table):
    """Install ``table`` under ``tag`` (normalized to a 4-char Tag)."""
    self.tables[Tag(tag)] = table
def __delitem__(self, tag):
    """Remove the table ``tag`` from memory and from the lazy reader;
    raises KeyError when the font does not contain it at all."""
    if tag not in self:
        raise KeyError("'%s' table not found" % tag)
    # The table may live in either place (or both); clear each one.
    self.tables.pop(tag, None)
    if self.reader and tag in self.reader:
        del self.reader[tag]
def get(self, tag, default=None):
    """Returns the table for ``tag`` if it exists, else ``default``
    (None unless given), mirroring ``dict.get``."""
    try:
        return self[tag]
    except KeyError:
        return default
def setGlyphOrder(self, glyphOrder):
    """Set the glyph order

    Args:
        glyphOrder ([str]): List of glyph names in order.
    """
    self.glyphOrder = glyphOrder
    # Invalidate the cached name -> glyphID mapping, if it was built.
    try:
        del self._reverseGlyphOrderDict
    except AttributeError:
        pass
    if self.isLoaded("glyf"):
        self["glyf"].setGlyphOrder(glyphOrder)
def getGlyphOrder(self):
    """Returns a list of glyph names ordered by their position in the font.

    The result is computed once and cached on ``self.glyphOrder``.
    """
    try:
        return self.glyphOrder
    except AttributeError:
        pass
    if 'CFF ' in self:
        cff = self['CFF ']
        self.glyphOrder = cff.getGlyphOrder()
    elif 'post' in self:
        # TrueType font
        glyphOrder = self['post'].getGlyphOrder()
        if glyphOrder is None:
            #
            # No names found in the 'post' table.
            # Try to create glyph names from the unicode cmap (if available)
            # in combination with the Adobe Glyph List (AGL).
            #
            self._getGlyphNamesFromCmap()
        elif len(glyphOrder) < self['maxp'].numGlyphs:
            #
            # Not enough names found in the 'post' table.
            # Can happen when 'post' format 1 is improperly used on a font that
            # has more than 258 glyphs (the length of 'standardGlyphOrder').
            #
            log.warning("Not enough names found in the 'post' table, generating them from cmap instead")
            self._getGlyphNamesFromCmap()
        else:
            self.glyphOrder = glyphOrder
    else:
        # No name source at all: synthesize names from the cmap/AGL.
        self._getGlyphNamesFromCmap()
    return self.glyphOrder
def _getGlyphNamesFromCmap(self):
#
# This is rather convoluted, but then again, it's an interesting problem:
# - we need to use the unicode values found in the cmap table to
# build glyph names (eg. because there is only a minimal post table,
# or none at all).
# - but the cmap parser also needs glyph names to work with...
# So here's what we do:
# - make up glyph names based on glyphID
# - load a temporary cmap table based on those names
# - extract the unicode values, build the "real" glyph names
# - unload the temporary cmap table
#
if self.isLoaded("cmap"):
# Bootstrapping: we're getting called by the cmap parser
# itself. This means self.tables['cmap'] contains a partially
# loaded cmap, making it impossible to get at a unicode
# subtable here. We remove the partially loaded cmap and
# restore it later.
# This only happens if the cmap table is loaded before any
# other table that does f.getGlyphOrder() or f.getGlyphName().
cmapLoading = self.tables['cmap']
del self.tables['cmap']
else:
cmapLoading = None
# Make up glyph names based on glyphID, which will be used by the
# temporary cmap and by the real cmap in case we don't find a unicode
# cmap.
numGlyphs = int(self['maxp'].numGlyphs)
glyphOrder = [None] * numGlyphs
glyphOrder[0] = ".notdef"
for i in range(1, numGlyphs):
glyphOrder[i] = "glyph%.5d" % i
# Set the glyph order, so the cmap parser has something
# to work with (so we don't get called recursively).
self.glyphOrder = glyphOrder
# Make up glyph names based on the reversed cmap table. Because some
# glyphs (eg. ligatures or alternates) may not be reachable via cmap,
# this naming table will usually not cover all glyphs in the font.
# If the font has no Unicode cmap table, reversecmap will be empty.
if 'cmap' in self:
reversecmap = self['cmap'].buildReversed()
else:
reversecmap = {}
useCount = {}
for i in range(numGlyphs):
tempName = glyphOrder[i]
if tempName in reversecmap:
# If a font maps both U+0041 LATIN CAPITAL LETTER A and
# U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph,
# we prefer naming the glyph as "A".
glyphName = self._makeGlyphName(min(reversecmap[tempName]))
numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1
if numUses > 1:
glyphName = "%s.alt%d" % (glyphName, numUses - 1)
glyphOrder[i] = glyphName
if 'cmap' in self:
# Delete the temporary cmap table from the cache, so it can
# be parsed again with the right names.
del self.tables['cmap']
self.glyphOrder = glyphOrder
if cmapLoading:
# restore partially loaded cmap, so it can continue loading
# using the proper names.
self.tables['cmap'] = cmapLoading
@staticmethod
def _makeGlyphName(codepoint):
from fontTools import agl # Adobe Glyph List
if codepoint in agl.UV2AGL:
return agl.UV2AGL[codepoint]
elif codepoint <= 0xFFFF:
return "uni%04X" % codepoint
else:
return "u%X" % codepoint
def getGlyphNames(self):
    """Get a list of glyph names, sorted alphabetically."""
    return sorted(self.getGlyphOrder())
def getGlyphNames2(self):
"""Get a list of glyph names, sorted alphabetically,
but not case sensitive.
"""
from fontTools.misc import textTools
return textTools.caselessSort(self.getGlyphOrder())
def getGlyphName(self, glyphID):
    """Returns the name for the glyph with the given ID.

    If no name is available (the ID is outside the glyph order), a name
    of the form ``glyphXXXXX`` is synthesised, where ``XXXXX`` is the
    zero-padded glyph ID.
    """
    # Guard explicitly instead of relying on IndexError: a negative ID
    # would otherwise silently index from the *end* of the glyph order
    # and return an unrelated glyph's name.
    glyphOrder = self.getGlyphOrder()
    if 0 <= glyphID < len(glyphOrder):
        return glyphOrder[glyphID]
    return "glyph%.5d" % glyphID
def getGlyphNameMany(self, lst):
    """Converts a list of glyph IDs into a list of glyph names.

    Out-of-range IDs (including negative ones) map to synthetic
    ``glyphXXXXX`` names, mirroring getGlyphName().
    """
    glyphOrder = self.getGlyphOrder()
    cnt = len(glyphOrder)
    # 0 <= gid guards against negative IDs silently indexing from the end.
    return [glyphOrder[gid] if 0 <= gid < cnt else "glyph%.5d" % gid
            for gid in lst]
def getGlyphID(self, glyphName):
    """Returns the ID of the glyph with the given name.

    Synthetic names of the form ``glyphNNNNN`` are translated back to
    their numeric ID even when absent from the font.  Raises KeyError
    for any other unknown name.
    """
    try:
        return self.getReverseGlyphMap()[glyphName]
    except KeyError:
        if glyphName[:5] == "glyph":
            try:
                return int(glyphName[5:])
            # int() can only raise ValueError here; the original also
            # listed NameError, which is unreachable.
            except ValueError:
                raise KeyError(glyphName)
        # Previously the function fell off the end here and silently
        # returned None for unknown names; re-raise the KeyError instead.
        raise
def getGlyphIDMany(self, lst):
    """Converts a list of glyph names into a list of glyph IDs."""
    reverseMap = self.getReverseGlyphMap()
    try:
        # Fast path: every name resolves through the reverse map.
        return [reverseMap[name] for name in lst]
    except KeyError:
        # Slow path: at least one name needs the "glyphNNNNN" fallback.
        lookup = self.getGlyphID
        return [lookup(name) for name in lst]
def getReverseGlyphMap(self, rebuild=False):
    """Returns a mapping of glyph names to glyph IDs, building and
    caching it on first use (or when ``rebuild`` is true)."""
    if not rebuild and hasattr(self, "_reverseGlyphOrderDict"):
        return self._reverseGlyphOrderDict
    self._buildReverseGlyphOrderDict()
    return self._reverseGlyphOrderDict
def _buildReverseGlyphOrderDict(self):
    """(Re)build, cache and return the name -> glyph ID mapping."""
    mapping = {glyphName: glyphID
               for glyphID, glyphName in enumerate(self.getGlyphOrder())}
    self._reverseGlyphOrderDict = mapping
    return mapping
def _writeTable(self, tag, writer, done, tableCache=None):
    """Internal helper function for self.save(). Keeps track of
    inter-table dependencies.

    ``done`` is the (mutated) list of tags already written; any table
    listed in the table class's ``dependencies`` is written first.
    """
    if tag in done:
        return
    tableClass = getTableClass(tag)
    for masterTable in tableClass.dependencies:
        if masterTable not in done:
            if masterTable in self:
                # Recurse so the dependency's data is compiled first.
                self._writeTable(masterTable, writer, done, tableCache)
            else:
                # Dependency absent from this font: mark as handled anyway.
                done.append(masterTable)
    done.append(tag)
    tabledata = self.getTableData(tag)
    if tableCache is not None:
        # Deduplicate identical tables across fonts (TTCollection).
        entry = tableCache.get((Tag(tag), tabledata))
        if entry is not None:
            log.debug("reusing '%s' table", tag)
            writer.setEntry(tag, entry)
            return
    log.debug("Writing '%s' table to disk", tag)
    writer[tag] = tabledata
    if tableCache is not None:
        tableCache[(Tag(tag), tabledata)] = writer[tag]
def getTableData(self, tag):
    """Returns the binary representation of a table.

    If the table is currently loaded and in memory, the data is
    compiled to binary and returned; if it is not currently loaded, the
    binary data is read from the font file and returned.  Raises
    KeyError when the font contains no such table.
    """
    tag = Tag(tag)
    if self.isLoaded(tag):
        log.debug("Compiling '%s' table", tag)
        return self.tables[tag].compile(self)
    if self.reader and tag in self.reader:
        log.debug("Reading '%s' table from disk", tag)
        return self.reader[tag]
    raise KeyError(tag)
def getGlyphSet(self, preferCFF=True, location=None, normalized=False):
"""Return a generic GlyphSet, which is a dict-like object
mapping glyph names to glyph objects. The returned glyph objects
have a ``.draw()`` method that supports the Pen protocol, and will
have an attribute named 'width'.
If the font is CFF-based, the outlines will be taken from the ``CFF ``
or ``CFF2`` tables. Otherwise the outlines will be taken from the
``glyf`` table.
If the font contains both a ``CFF ``/``CFF2`` and a ``glyf`` table, you
can use the ``preferCFF`` argument to specify which one should be taken.
If the font contains both a ``CFF `` and a ``CFF2`` table, the latter is
taken.
If the ``location`` parameter is set, it should be a dictionary mapping
four-letter variation tags to their float values, and the returned
glyph-set will represent an instance of a variable font at that
location.
If the ``normalized`` variable is set to True, that location is
interpreted as in the normalized (-1..+1) space, otherwise it is in the
font's defined axes space.
"""
if location and "fvar" not in self:
location = None
if location and not normalized:
location = self.normalizeLocation(location)
if ("CFF " in self or "CFF2" in self) and (preferCFF or "glyf" not in self):
return _TTGlyphSetCFF(self, location)
elif "glyf" in self:
return _TTGlyphSetGlyf(self, location)
else:
raise TTLibError("Font contains no outlines")
def normalizeLocation(self, location):
"""Normalize a ``location`` from the font's defined axes space (also
known as user space) into the normalized (-1..+1) space. It applies
``avar`` mapping if the font contains an ``avar`` table.
The ``location`` parameter should be a dictionary mapping four-letter
variation tags to their float values.
Raises ``TTLibError`` if the font is not a variable font.
"""
from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap
if "fvar" not in self:
raise TTLibError("Not a variable font")
axes = {
a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in self["fvar"].axes
}
location = normalizeLocation(location, axes)
if "avar" in self:
avar = self["avar"]
avarSegments = avar.segments
mappedLocation = {}
for axisTag, value in location.items():
avarMapping = avarSegments.get(axisTag, None)
if avarMapping is not None:
value = piecewiseLinearMap(value, avarMapping)
mappedLocation[axisTag] = value
location = mappedLocation
return location
def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
"""Returns the 'best' Unicode cmap dictionary available in the font
or ``None``, if no Unicode cmap subtable is available.
By default it will search for the following (platformID, platEncID)
pairs in order::
(3, 10), # Windows Unicode full repertoire
(0, 6), # Unicode full repertoire (format 13 subtable)
(0, 4), # Unicode 2.0 full repertoire
(3, 1), # Windows Unicode BMP
(0, 3), # Unicode 2.0 BMP
(0, 2), # Unicode ISO/IEC 10646
(0, 1), # Unicode 1.1
(0, 0) # Unicode 1.0
This particular order matches what HarfBuzz uses to choose what
subtable to use by default. This order prefers the largest-repertoire
subtable, and among those, prefers the Windows-platform over the
Unicode-platform as the former has wider support.
This order can be customized via the ``cmapPreferences`` argument.
"""
return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
class GlyphOrder(object):
    """A pseudo table. The glyph order isn't in the font as a separate
    table, but it's nice to present it as such in the TTX format.
    """

    def __init__(self, tag=None):
        pass

    def toXML(self, writer, ttFont):
        order = ttFont.getGlyphOrder()
        writer.comment("The 'id' attribute is only for humans; "
                       "it is ignored when parsed.")
        writer.newline()
        for glyphID, glyphName in enumerate(order):
            writer.simpletag("GlyphID", id=glyphID, name=glyphName)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        glyphOrder = getattr(self, "glyphOrder", None)
        if glyphOrder is None:
            glyphOrder = self.glyphOrder = []
        if name == "GlyphID":
            glyphOrder.append(attrs["name"])
        # Keep the font's notion of the glyph order in sync as we parse.
        ttFont.setGlyphOrder(glyphOrder)
def getTableModule(tag):
"""Fetch the packer/unpacker module for a table.
Return None when no module is found.
"""
from . import tables
pyTag = tagToIdentifier(tag)
try:
__import__("fontTools.ttLib.tables." + pyTag)
except ImportError as err:
# If pyTag is found in the ImportError message,
# means table is not implemented. If it's not
# there, then some other module is missing, don't
# suppress the error.
if str(err).find(pyTag) >= 0:
return None
else:
raise err
else:
return getattr(tables, pyTag)
# Registry for custom table packer/unpacker classes. Keys are table
# tags, values are (moduleName, className) tuples.
# See registerCustomTableClass() and getCustomTableClass()
_customTableRegistry = {}
def registerCustomTableClass(tag, moduleName, className=None):
"""Register a custom packer/unpacker class for a table.
The 'moduleName' must be an importable module. If no 'className'
is given, it is derived from the tag, for example it will be
``table_C_U_S_T_`` for a 'CUST' tag.
The registered table class should be a subclass of
:py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable`
"""
if className is None:
className = "table_" + tagToIdentifier(tag)
_customTableRegistry[tag] = (moduleName, className)
def unregisterCustomTableClass(tag):
"""Unregister the custom packer/unpacker class for a table."""
del _customTableRegistry[tag]
def getCustomTableClass(tag):
    """Return the custom table class for tag, if one has been registered
    with 'registerCustomTableClass()'. Else return None.
    """
    try:
        moduleName, className = _customTableRegistry[tag]
    except KeyError:
        return None
    import importlib
    module = importlib.import_module(moduleName)
    return getattr(module, className)
def getTableClass(tag):
    """Fetch the packer/unpacker class for a table."""
    tableClass = getCustomTableClass(tag)
    if tableClass is not None:
        return tableClass
    module = getTableModule(tag)
    if module is None:
        # Unimplemented table: fall back to the opaque byte container.
        from .tables.DefaultTable import DefaultTable
        return DefaultTable
    return getattr(module, "table_" + tagToIdentifier(tag))
def getClassTag(klass):
    """Fetch the table tag for a class object."""
    name = klass.__name__
    assert name[:6] == 'table_'
    return identifierToTag(name[6:])  # strip the 'table_' prefix
def newTable(tag):
    """Return a new, empty instance of the table class for ``tag``."""
    return getTableClass(tag)(tag)
def _escapechar(c):
"""Helper function for tagToIdentifier()"""
import re
if re.match("[a-z0-9]", c):
return "_" + c
elif re.match("[A-Z]", c):
return c + "_"
else:
return hex(byteord(c))[2:]
def tagToIdentifier(tag):
"""Convert a table tag to a valid (but UGLY) python identifier,
as well as a filename that's guaranteed to be unique even on a
caseless file system. Each character is mapped to two characters.
Lowercase letters get an underscore before the letter, uppercase
letters get an underscore after the letter. Trailing spaces are
trimmed. Illegal characters are escaped as two hex bytes. If the
result starts with a number (as the result of a hex escape), an
extra underscore is prepended. Examples::
>>> tagToIdentifier('glyf')
'_g_l_y_f'
>>> tagToIdentifier('cvt ')
'_c_v_t'
>>> tagToIdentifier('OS/2')
'O_S_2f_2'
"""
import re
tag = Tag(tag)
if tag == "GlyphOrder":
return tag
assert len(tag) == 4, "tag should be 4 characters long"
while len(tag) > 1 and tag[-1] == ' ':
tag = tag[:-1]
ident = ""
for c in tag:
ident = ident + _escapechar(c)
if re.match("[0-9]", ident):
ident = "_" + ident
return ident
def identifierToTag(ident):
"""the opposite of tagToIdentifier()"""
if ident == "GlyphOrder":
return ident
if len(ident) % 2 and ident[0] == "_":
ident = ident[1:]
assert not (len(ident) % 2)
tag = ""
for i in range(0, len(ident), 2):
if ident[i] == "_":
tag = tag + ident[i+1]
elif ident[i+1] == "_":
tag = tag + ident[i]
else:
# assume hex
tag = tag + chr(int(ident[i:i+2], 16))
# append trailing spaces
tag = tag + (4 - len(tag)) * ' '
return Tag(tag)
def tagToXML(tag):
"""Similarly to tagToIdentifier(), this converts a TT tag
to a valid XML element name. Since XML element names are
case sensitive, this is a fairly simple/readable translation.
"""
import re
tag = Tag(tag)
if tag == "OS/2":
return "OS_2"
elif tag == "GlyphOrder":
return tag
if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
return tag.strip()
else:
return tagToIdentifier(tag)
def xmlToTag(tag):
    """The opposite of tagToXML()"""
    if tag == "OS_2":
        return Tag("OS/2")
    if len(tag) == 8:
        # Escaped form: every character was mapped to two characters.
        return identifierToTag(tag)
    # Plain form: pad with trailing spaces up to the 4-character tag size.
    return Tag(tag.ljust(4))
# Table order as recommended in the OpenType specification 1.4
TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX",
"hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf",
"kern", "name", "post", "gasp", "PCLT"]
OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post",
"CFF "]
def sortedTagList(tagList, tableOrder=None):
    """Return a sorted copy of tagList, sorted according to the OpenType
    specification, or according to a custom tableOrder. If given and not
    None, tableOrder needs to be a list of tag names.
    """
    remaining = sorted(tagList)
    if tableOrder is None:
        if "DSIG" in remaining:
            # DSIG should be last (XXX spec reference?)
            remaining.remove("DSIG")
            remaining.append("DSIG")
        tableOrder = OTFTableOrder if "CFF " in remaining else TTFTableOrder
    # Tables named in tableOrder come first, in that order; the rest
    # keep their alphabetical ordering.
    ordered = [tag for tag in tableOrder if tag in remaining]
    for tag in ordered:
        remaining.remove(tag)
    return ordered + remaining
def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False):
    """Rewrite a font file, ordering the tables as recommended by the
    OpenType specification 1.4.

    ``inFile`` and ``outFile`` are seekable binary streams; both are
    rewound before use.  ``tableOrder`` overrides the spec ordering.
    """
    inFile.seek(0)
    outFile.seek(0)
    reader = SFNTReader(inFile, checkChecksums=checkChecksums)
    writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData)
    tables = list(reader.keys())
    for tag in sortedTagList(tables, tableOrder):
        # Raw copy: tables are transferred without decompiling them.
        writer[tag] = reader[tag]
    writer.close()
def maxPowerOfTwo(x):
    """Return the highest exponent of two, so that
    (2 ** exponent) <= x. Return 0 if x is 0.
    """
    # int.bit_length() is floor(log2(x)) + 1 for x > 0, replacing the
    # original Python-level shift loop (which also never terminated for
    # negative inputs, since x >> 1 converges to -1, not 0).
    return max(x.bit_length() - 1, 0)
def getSearchRange(n, itemSize=16):
    """Calculate searchRange, entrySelector, rangeShift for the sfnt
    binary-search header fields, given ``n`` entries of ``itemSize``.
    """
    # itemSize defaults to 16, for backward compatibility
    # with upstream fonttools.
    # Largest exponent with 2**exponent <= n (0 when n == 0), computed
    # in constant time via int.bit_length() instead of a shift loop.
    exponent = max(n.bit_length() - 1, 0)
    searchRange = (2 ** exponent) * itemSize
    entrySelector = exponent
    rangeShift = max(0, n * itemSize - searchRange)
    return searchRange, entrySelector, rangeShift
| mit | bc42d5fd27dc442fda704eca22688cfd | 30.854424 | 106 | 0.697004 | 3.240321 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/ttLib/scaleUpem.py | 1 | 10099 | """Change the units-per-EM of a font.
AAT and Graphite tables are not supported. CFF/CFF2 fonts
are de-subroutinized."""
from fontTools.ttLib.ttVisitor import TTVisitor
import fontTools.ttLib as ttLib
import fontTools.ttLib.tables.otBase as otBase
import fontTools.ttLib.tables.otTables as otTables
from fontTools.cffLib import VarStoreData
import fontTools.cffLib.specializer as cffSpecializer
from fontTools.varLib import builder # for VarData.calculateNumShorts
from fontTools.misc.fixedTools import otRound
__all__ = ["scale_upem", "ScalerVisitor"]
class ScalerVisitor(TTVisitor):
def __init__(self, scaleFactor):
self.scaleFactor = scaleFactor
def scale(self, v):
return otRound(v * self.scaleFactor)
@ScalerVisitor.register_attrs(
(
(ttLib.getTableClass("head"), ("unitsPerEm", "xMin", "yMin", "xMax", "yMax")),
(ttLib.getTableClass("post"), ("underlinePosition", "underlineThickness")),
(ttLib.getTableClass("VORG"), ("defaultVertOriginY")),
(
ttLib.getTableClass("hhea"),
(
"ascent",
"descent",
"lineGap",
"advanceWidthMax",
"minLeftSideBearing",
"minRightSideBearing",
"xMaxExtent",
"caretOffset",
),
),
(
ttLib.getTableClass("vhea"),
(
"ascent",
"descent",
"lineGap",
"advanceHeightMax",
"minTopSideBearing",
"minBottomSideBearing",
"yMaxExtent",
"caretOffset",
),
),
(
ttLib.getTableClass("OS/2"),
(
"xAvgCharWidth",
"ySubscriptXSize",
"ySubscriptYSize",
"ySubscriptXOffset",
"ySubscriptYOffset",
"ySuperscriptXSize",
"ySuperscriptYSize",
"ySuperscriptXOffset",
"ySuperscriptYOffset",
"yStrikeoutSize",
"yStrikeoutPosition",
"sTypoAscender",
"sTypoDescender",
"sTypoLineGap",
"usWinAscent",
"usWinDescent",
"sxHeight",
"sCapHeight",
),
),
(
otTables.ValueRecord,
("XAdvance", "YAdvance", "XPlacement", "YPlacement"),
), # GPOS
(otTables.Anchor, ("XCoordinate", "YCoordinate")), # GPOS
(otTables.CaretValue, ("Coordinate")), # GDEF
(otTables.BaseCoord, ("Coordinate")), # BASE
(otTables.MathValueRecord, ("Value")), # MATH
(otTables.ClipBox, ("xMin", "yMin", "xMax", "yMax")), # COLR
)
)
def visit(visitor, obj, attr, value):
setattr(obj, attr, visitor.scale(value))
@ScalerVisitor.register_attr(
(ttLib.getTableClass("hmtx"), ttLib.getTableClass("vmtx")), "metrics"
)
def visit(visitor, obj, attr, metrics):
for g in metrics:
advance, lsb = metrics[g]
metrics[g] = visitor.scale(advance), visitor.scale(lsb)
@ScalerVisitor.register_attr(ttLib.getTableClass("VMTX"), "VOriginRecords")
def visit(visitor, obj, attr, VOriginRecords):
for g in VOriginRecords:
VOriginRecords[g] = visitor.scale(VOriginRecords[g])
@ScalerVisitor.register_attr(ttLib.getTableClass("glyf"), "glyphs")
def visit(visitor, obj, attr, glyphs):
for g in glyphs.values():
if g.isComposite():
for component in g.components:
component.x = visitor.scale(component.x)
component.y = visitor.scale(component.y)
else:
for attr in ("xMin", "xMax", "yMin", "yMax"):
v = getattr(g, attr, None)
if v is not None:
setattr(g, attr, visitor.scale(v))
glyf = visitor.font["glyf"]
coordinates = g.getCoordinates(glyf)[0]
for i, (x, y) in enumerate(coordinates):
coordinates[i] = visitor.scale(x), visitor.scale(y)
@ScalerVisitor.register_attr(ttLib.getTableClass("gvar"), "variations")
def visit(visitor, obj, attr, variations):
for varlist in variations.values():
for var in varlist:
coordinates = var.coordinates
for i, xy in enumerate(coordinates):
if xy is None:
continue
coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
@ScalerVisitor.register_attr(ttLib.getTableClass("kern"), "kernTables")
def visit(visitor, obj, attr, kernTables):
for table in kernTables:
kernTable = table.kernTable
for k in kernTable.keys():
kernTable[k] = visitor.scale(kernTable[k])
def _cff_scale(visitor, args):
    # Scale a list of CharString arguments in place.  Entries may be
    # numbers, byte strings (left untouched), or nested lists produced
    # by blend operators.
    for i, arg in enumerate(args):
        if not isinstance(arg, list):
            if not isinstance(arg, bytes):
                args[i] = visitor.scale(arg)
        else:
            # A blend list's last element is the blend count, not a
            # coordinate: scale the deltas, then restore the count.
            num_blends = arg[-1]
            _cff_scale(visitor, arg)
            arg[-1] = num_blends
@ScalerVisitor.register_attr(
(ttLib.getTableClass("CFF "), ttLib.getTableClass("CFF2")), "cff"
)
def visit(visitor, obj, attr, cff):
cff.desubroutinize()
topDict = cff.topDictIndex[0]
varStore = getattr(topDict, "VarStore", None)
getNumRegions = varStore.getNumRegions if varStore is not None else None
privates = set()
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
privates.add(c.private)
commands = cffSpecializer.programToCommands(
c.program, getNumRegions=getNumRegions
)
for op, args in commands:
if op == "vsindex":
continue
_cff_scale(visitor, args)
c.program[:] = cffSpecializer.commandsToProgram(commands)
# Annoying business of scaling numbers that do not matter whatsoever
for attr in (
"UnderlinePosition",
"UnderlineThickness",
"FontBBox",
"StrokeWidth",
):
value = getattr(topDict, attr, None)
if value is None:
continue
if isinstance(value, list):
_cff_scale(visitor, value)
else:
setattr(topDict, attr, visitor.scale(value))
for i in range(6):
topDict.FontMatrix[i] /= visitor.scaleFactor
for private in privates:
for attr in (
"BlueValues",
"OtherBlues",
"FamilyBlues",
"FamilyOtherBlues",
# "BlueScale",
# "BlueShift",
# "BlueFuzz",
"StdHW",
"StdVW",
"StemSnapH",
"StemSnapV",
"defaultWidthX",
"nominalWidthX",
):
value = getattr(private, attr, None)
if value is None:
continue
if isinstance(value, list):
_cff_scale(visitor, value)
else:
setattr(private, attr, visitor.scale(value))
# ItemVariationStore
@ScalerVisitor.register(otTables.VarData)
def visit(visitor, varData):
for item in varData.Item:
for i, v in enumerate(item):
item[i] = visitor.scale(v)
varData.calculateNumShorts()
# COLRv1
def _setup_scale_paint(paint, scale):
    """Turn ``paint`` (in place) into a paint that scales its child by
    ``scale``: the compact PaintScaleUniform when the factor fits in
    F2Dot14, otherwise a full affine PaintTransform."""
    # F2Dot14 represents values in [-2.0, 2.0 - 2**-14].  The previous
    # bound was written as ``2 - (1 >> 14)``, but ``1 >> 14`` is the
    # *integer* shift (== 0), so the test wrongly admitted scale == 2.0,
    # which is not representable in F2Dot14.
    if -2 <= scale <= 2 - 2 ** -14:
        paint.Format = otTables.PaintFormat.PaintScaleUniform
        paint.scale = scale
        return

    transform = otTables.Affine2x3()
    transform.populateDefaults()
    transform.xy = transform.yx = transform.dx = transform.dy = 0
    transform.xx = transform.yy = scale
    paint.Format = otTables.PaintFormat.PaintTransform
    paint.Transform = transform
@ScalerVisitor.register(otTables.BaseGlyphPaintRecord)
def visit(visitor, record):
oldPaint = record.Paint
scale = otTables.Paint()
_setup_scale_paint(scale, visitor.scaleFactor)
scale.Paint = oldPaint
record.Paint = scale
return True
@ScalerVisitor.register(otTables.Paint)
def visit(visitor, paint):
if paint.Format != otTables.PaintFormat.PaintGlyph:
return True
newPaint = otTables.Paint()
newPaint.Format = paint.Format
newPaint.Paint = paint.Paint
newPaint.Glyph = paint.Glyph
del paint.Paint
del paint.Glyph
_setup_scale_paint(paint, 1 / visitor.scaleFactor)
paint.Paint = newPaint
visitor.visit(newPaint.Paint)
return False
def scale_upem(font, new_upem):
    """Change the units-per-EM of font to the new value."""
    current_upem = font["head"].unitsPerEm
    ScalerVisitor(new_upem / current_upem).visit(font)
def main(args=None):
"""Change the units-per-EM of fonts"""
if args is None:
import sys
args = sys.argv[1:]
from fontTools.ttLib import TTFont
from fontTools.misc.cliTools import makeOutputFileName
import argparse
parser = argparse.ArgumentParser(
"fonttools ttLib.scaleUpem", description="Change the units-per-EM of fonts"
)
parser.add_argument("font", metavar="font", help="Font file.")
parser.add_argument(
"new_upem", metavar="new-upem", help="New units-per-EM integer value."
)
parser.add_argument(
"--output-file", metavar="path", default=None, help="Output file."
)
options = parser.parse_args(args)
font = TTFont(options.font)
new_upem = int(options.new_upem)
output_file = (
options.output_file
if options.output_file is not None
else makeOutputFileName(options.font, overWrite=True, suffix="-scaled")
)
scale_upem(font, new_upem)
print("Writing %s" % output_file)
font.save(output_file)
if __name__ == "__main__":
import sys
sys.exit(main())
| mit | cd56618551fa117aad0f14652b4fcde2 | 28.615836 | 86 | 0.572037 | 3.800903 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/feaLib/lexer.py | 1 | 11300 | from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
from fontTools.feaLib.location import FeatureLibLocation
import re
import os
try:
import cython
except ImportError:
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
class Lexer(object):
    """Tokenizer for OpenType feature-file (.fea) source text."""

    # Token types yielded by the lexer.
    NUMBER = "NUMBER"
    HEXADECIMAL = "HEXADECIMAL"
    OCTAL = "OCTAL"
    NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
    FLOAT = "FLOAT"
    STRING = "STRING"
    NAME = "NAME"
    FILENAME = "FILENAME"
    GLYPHCLASS = "GLYPHCLASS"
    CID = "CID"
    SYMBOL = "SYMBOL"
    COMMENT = "COMMENT"
    NEWLINE = "NEWLINE"
    ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK"

    # Character classes used by the scanner.
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
    CHAR_DIGIT_ = "0123456789"
    CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
    CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
    CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-"

    RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$")

    # Scanner modes: in MODE_FILENAME_ the next token must be a
    # parenthesized file name (presumably entered when an `include`
    # statement is parsed — confirm in the parser).
    MODE_NORMAL_ = "NORMAL"
    MODE_FILENAME_ = "FILENAME"
def __init__(self, text, filename):
self.filename_ = filename
self.line_ = 1
self.pos_ = 0
self.line_start_ = 0
self.text_ = text
self.text_length_ = len(text)
self.mode_ = Lexer.MODE_NORMAL_
def __iter__(self):
return self
def next(self): # Python 2
return self.__next__()
def __next__(self): # Python 3
while True:
token_type, token, location = self.next_()
if token_type != Lexer.NEWLINE:
return (token_type, token, location)
def location_(self):
column = self.pos_ - self.line_start_ + 1
return FeatureLibLocation(self.filename_ or "<features>", self.line_, column)
def next_(self):
self.scan_over_(Lexer.CHAR_WHITESPACE_)
location = self.location_()
start = self.pos_
text = self.text_
limit = len(text)
if start >= limit:
raise StopIteration()
cur_char = text[start]
next_char = text[start + 1] if start + 1 < limit else None
if cur_char == "\n":
self.pos_ += 1
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "\r":
self.pos_ += 2 if next_char == "\n" else 1
self.line_ += 1
self.line_start_ = self.pos_
return (Lexer.NEWLINE, None, location)
if cur_char == "#":
self.scan_until_(Lexer.CHAR_NEWLINE_)
return (Lexer.COMMENT, text[start : self.pos_], location)
if self.mode_ is Lexer.MODE_FILENAME_:
if cur_char != "(":
raise FeatureLibError("Expected '(' before file name", location)
self.scan_until_(")")
cur_char = text[self.pos_] if self.pos_ < limit else None
if cur_char != ")":
raise FeatureLibError("Expected ')' after file name", location)
self.pos_ += 1
self.mode_ = Lexer.MODE_NORMAL_
return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)
if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
if cur_char == "@":
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
glyphclass = text[start + 1 : self.pos_]
if len(glyphclass) < 1:
raise FeatureLibError("Expected glyph class name", location)
if len(glyphclass) > 63:
raise FeatureLibError(
"Glyph class names must not be longer than 63 characters", location
)
if not Lexer.RE_GLYPHCLASS.match(glyphclass):
raise FeatureLibError(
"Glyph class names must consist of letters, digits, "
"underscore, period or hyphen",
location,
)
return (Lexer.GLYPHCLASS, glyphclass, location)
if cur_char in Lexer.CHAR_NAME_START_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
token = text[start : self.pos_]
if token == "include":
self.mode_ = Lexer.MODE_FILENAME_
return (Lexer.NAME, token, location)
if cur_char == "0" and next_char in "xX":
self.pos_ += 2
self.scan_over_(Lexer.CHAR_HEXDIGIT_)
return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location)
if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_:
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.OCTAL, int(text[start : self.pos_], 8), location)
if cur_char in Lexer.CHAR_DIGIT_:
self.scan_over_(Lexer.CHAR_DIGIT_)
if self.pos_ >= limit or text[self.pos_] != ".":
return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
self.scan_over_(".")
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.FLOAT, float(text[start : self.pos_]), location)
if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
self.pos_ += 1
self.scan_over_(Lexer.CHAR_DIGIT_)
if self.pos_ >= limit or text[self.pos_] != ".":
return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
self.scan_over_(".")
self.scan_over_(Lexer.CHAR_DIGIT_)
return (Lexer.FLOAT, float(text[start : self.pos_]), location)
if cur_char in Lexer.CHAR_SYMBOL_:
self.pos_ += 1
return (Lexer.SYMBOL, cur_char, location)
if cur_char == '"':
self.pos_ += 1
self.scan_until_('"')
if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
self.pos_ += 1
# strip newlines embedded within a string
string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1])
return (Lexer.STRING, string, location)
else:
raise FeatureLibError("Expected '\"' to terminate string", location)
raise FeatureLibError("Unexpected character: %r" % cur_char, location)
def scan_over_(self, valid):
p = self.pos_
while p < self.text_length_ and self.text_[p] in valid:
p += 1
self.pos_ = p
def scan_until_(self, stop_at):
p = self.pos_
while p < self.text_length_ and self.text_[p] not in stop_at:
p += 1
self.pos_ = p
def scan_anonymous_block(self, tag):
location = self.location_()
tag = tag.strip()
self.scan_until_(Lexer.CHAR_NEWLINE_)
self.scan_over_(Lexer.CHAR_NEWLINE_)
regexp = r"}\s*" + tag + r"\s*;"
split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1)
if len(split) != 2:
raise FeatureLibError(
"Expected '} %s;' to terminate anonymous block" % tag, location
)
self.pos_ += len(split[0])
return (Lexer.ANONYMOUS_BLOCK, split[0], location)
class IncludingLexer(object):
    """A Lexer that follows include statements.

    The OpenType feature file specification states that due to
    historical reasons, relative imports should be resolved in this
    order:

    1. If the source font is UFO format, then relative to the UFO's
       font directory
    2. relative to the top-level include file
    3. relative to the parent include file

    We only support 1 (via includeDir) and 2.
    """

    def __init__(self, featurefile, *, includeDir=None):
        """Initializes an IncludingLexer.

        Behavior:
            If includeDir is passed, it will be used to determine the top-level
            include directory to use for all encountered include statements. If it is
            not passed, ``os.path.dirname(featurefile)`` will be considered the
            include directory.
        """
        # Stack of active lexers; the top of the stack is the file currently
        # being read (innermost include).
        self.lexers_ = [self.make_lexer_(featurefile)]
        self.featurefilepath = self.lexers_[0].filename_
        self.includeDir = includeDir

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        # Pull tokens from the innermost lexer; on an "include" statement,
        # push a new lexer for the referenced file and continue from there.
        while self.lexers_:
            lexer = self.lexers_[-1]
            try:
                token_type, token, location = next(lexer)
            except StopIteration:
                # Current file exhausted; resume the including file.
                self.lexers_.pop()
                continue
            if token_type is Lexer.NAME and token == "include":
                fname_type, fname_token, fname_location = lexer.next()
                if fname_type is not Lexer.FILENAME:
                    raise FeatureLibError("Expected file name", fname_location)
                # semi_type, semi_token, semi_location = lexer.next()
                # if semi_type is not Lexer.SYMBOL or semi_token != ";":
                #     raise FeatureLibError("Expected ';'", semi_location)
                if os.path.isabs(fname_token):
                    path = fname_token
                else:
                    if self.includeDir is not None:
                        curpath = self.includeDir
                    elif self.featurefilepath is not None:
                        curpath = os.path.dirname(self.featurefilepath)
                    else:
                        # if the IncludingLexer was initialized from an in-memory
                        # file-like stream, it doesn't have a 'name' pointing to
                        # its filesystem path, therefore we fall back to using the
                        # current working directory to resolve relative includes
                        curpath = os.getcwd()
                    path = os.path.join(curpath, fname_token)
                # Guard against runaway/cyclic includes.
                if len(self.lexers_) >= 5:
                    raise FeatureLibError("Too many recursive includes", fname_location)
                try:
                    self.lexers_.append(self.make_lexer_(path))
                except FileNotFoundError as err:
                    raise IncludedFeaNotFound(fname_token, fname_location) from err
            else:
                return (token_type, token, location)
        raise StopIteration()

    @staticmethod
    def make_lexer_(file_or_path):
        """Build a Lexer from either a filesystem path or an open file object.

        A file object passed in is read but not closed by this method.
        """
        if hasattr(file_or_path, "read"):
            fileobj, closing = file_or_path, False
        else:
            filename, closing = file_or_path, True
            fileobj = open(filename, "r", encoding="utf-8")
        data = fileobj.read()
        filename = getattr(fileobj, "name", None)
        if closing:
            fileobj.close()
        return Lexer(data, filename)

    def scan_anonymous_block(self, tag):
        # Delegate to the innermost (currently active) lexer.
        return self.lexers_[-1].scan_anonymous_block(tag)
class NonIncludingLexer(IncludingLexer):
    """Lexer that does not follow `include` statements, emits them as-is."""

    def __next__(self):  # Python 3
        # Read only from the top-level file; include statements come through
        # as ordinary NAME/FILENAME tokens instead of being expanded.
        return next(self.lexers_[0])
| mit | 9e38c5b91443f5bbfce83fcf0042f798 | 37.831615 | 88 | 0.548319 | 4.005672 | false | false | false | false |
fonttools/fonttools | Lib/fontTools/varLib/__init__.py | 1 | 38058 | """
Module for dealing with 'gvar'-style font variations, also known as run-time
interpolation.
The ideas here are very similar to MutatorMath. There is even code to read
MutatorMath .designspace files in the varLib.designspace module.
For now, if you run this file on a designspace file, it tries to find
ttf-interpolatable files for the masters and build a variable-font from
them. Such ttf-interpolatable and designspace files can be generated from
a Glyphs source, eg., using noto-source as an example:
$ fontmake -o ttf-interpolatable -g NotoSansArabic-MM.glyphs
Then you can make a variable-font this way:
$ fonttools varLib master_ufo/NotoSansArabic.designspace
API *will* change in near future.
"""
from typing import List
from fontTools.misc.vector import Vector
from fontTools.misc.roundTools import noRound, otRound
from fontTools.misc.textTools import Tag, tostr
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
from fontTools.ttLib.tables.ttProgram import Program
from fontTools.ttLib.tables.TupleVariation import TupleVariation
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.varLib import builder, models, varStore
from fontTools.varLib.merger import VariationMerger, COLRVariationMerger
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib.iup import iup_delta_optimize
from fontTools.varLib.featureVars import addFeatureVariations
from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor
from fontTools.designspaceLib.split import splitInterpolable, splitVariableFonts
from fontTools.varLib.stat import buildVFStatTable
from fontTools.colorLib.builder import buildColrV1
from fontTools.colorLib.unbuilder import unbuildColrV1
from functools import partial
from collections import OrderedDict, namedtuple
import os.path
import logging
from copy import deepcopy
from pprint import pformat
from .errors import VarLibError, VarLibValidationError
# Module-level logger for the varLib package.
log = logging.getLogger("fontTools.varLib")

# This is a lib key for the designspace document. The value should be
# an OpenType feature tag, to be used as the FeatureVariations feature.
# If present, the DesignSpace <rules processing="..."> flag is ignored.
FEAVAR_FEATURETAG_LIB_KEY = "com.github.fonttools.varLib.featureVarsFeatureTag"
#
# Creation routines
#
def _add_fvar(font, axes, instances: List[InstanceDescriptor]):
    """
    Add 'fvar' table to font.

    axes is an ordered dictionary of DesignspaceAxis objects.

    instances is list of dictionary objects with 'location', 'stylename',
    and possibly 'postscriptfontname' entries.
    """

    assert axes
    assert isinstance(axes, OrderedDict)

    log.info("Generating fvar")

    fvar = newTable('fvar')
    nameTable = font['name']

    # One fvar axis record per designspace axis.
    for a in axes.values():
        axis = Axis()
        axis.axisTag = Tag(a.tag)
        # TODO Skip axes that have no variation.
        axis.minValue, axis.defaultValue, axis.maxValue = a.minimum, a.default, a.maximum
        axis.axisNameID = nameTable.addMultilingualName(a.labelNames, font, minNameID=256)
        axis.flags = int(a.hidden)
        fvar.axes.append(axis)

    for instance in instances:
        # Filter out discrete axis locations
        coordinates = {name: value for name, value in instance.location.items() if name in axes}

        if "en" not in instance.localisedStyleName:
            # The plain styleName serves as the required English fallback.
            if not instance.styleName:
                raise VarLibValidationError(
                    f"Instance at location '{coordinates}' must have a default English "
                    "style name ('stylename' attribute on the instance element or a "
                    "stylename element with an 'xml:lang=\"en\"' attribute)."
                )
            localisedStyleName = dict(instance.localisedStyleName)
            localisedStyleName["en"] = tostr(instance.styleName)
        else:
            localisedStyleName = instance.localisedStyleName

        psname = instance.postScriptFontName

        inst = NamedInstance()
        inst.subfamilyNameID = nameTable.addMultilingualName(localisedStyleName)
        if psname is not None:
            psname = tostr(psname)
            inst.postscriptNameID = nameTable.addName(psname)
        # fvar coordinates are in user space: map design-space values back.
        inst.coordinates = {axes[k].tag: axes[k].map_backward(v) for k, v in coordinates.items()}
        #inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()}
        fvar.instances.append(inst)

    assert "fvar" not in font
    font['fvar'] = fvar
    return fvar
def _add_avar(font, axes):
    """
    Add 'avar' table to font.

    axes is an ordered dictionary of AxisDescriptor objects.

    Returns the avar table, or None when every axis mapping turned out to
    be the identity (in which case no table is added to the font).
    """

    assert axes
    assert isinstance(axes, OrderedDict)

    log.info("Generating avar")

    avar = newTable('avar')

    interesting = False
    for axis in axes.values():
        # Currently, some rasterizers require that the default value maps
        # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment
        # maps, even when the default normalization mapping for the axis
        # was not modified.
        # https://github.com/googlei18n/fontmake/issues/295
        # https://github.com/fonttools/fonttools/issues/1011
        # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed
        curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}
        if not axis.map:
            continue

        items = sorted(axis.map)
        keys = [item[0] for item in items]
        vals = [item[1] for item in items]

        # Current avar requirements. We don't have to enforce
        # these on the designer and can deduce some ourselves,
        # but for now just enforce them.
        if axis.minimum != min(keys):
            raise VarLibValidationError(
                f"Axis '{axis.name}': there must be a mapping for the axis minimum "
                f"value {axis.minimum} and it must be the lowest input mapping value."
            )
        if axis.maximum != max(keys):
            raise VarLibValidationError(
                f"Axis '{axis.name}': there must be a mapping for the axis maximum "
                f"value {axis.maximum} and it must be the highest input mapping value."
            )
        if axis.default not in keys:
            raise VarLibValidationError(
                f"Axis '{axis.name}': there must be a mapping for the axis default "
                f"value {axis.default}."
            )
        # No duplicate input values (output values can be >= their preceeding value).
        if len(set(keys)) != len(keys):
            raise VarLibValidationError(
                f"Axis '{axis.name}': All axis mapping input='...' values must be "
                "unique, but we found duplicates."
            )
        # Ascending values
        if sorted(vals) != vals:
            raise VarLibValidationError(
                f"Axis '{axis.name}': mapping output values must be in ascending order."
            )

        # Normalize both sides of the mapping to the [-1, 1] fvar space.
        keys_triple = (axis.minimum, axis.default, axis.maximum)
        vals_triple = tuple(axis.map_forward(v) for v in keys_triple)

        keys = [models.normalizeValue(v, keys_triple) for v in keys]
        vals = [models.normalizeValue(v, vals_triple) for v in vals]

        # An identity mapping needs no avar segment.
        if all(k == v for k, v in zip(keys, vals)):
            continue
        interesting = True

        curve.update(zip(keys, vals))

        assert 0.0 in curve and curve[0.0] == 0.0
        assert -1.0 not in curve or curve[-1.0] == -1.0
        assert +1.0 not in curve or curve[+1.0] == +1.0
        # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})

    assert "avar" not in font
    if not interesting:
        log.info("No need for avar")
        avar = None
    else:
        font['avar'] = avar
    return avar
def _add_stat(font):
    """Synthesize a minimal STAT table from the font's fvar axes.

    No-op when the font already carries a STAT table.
    """
    # Note: this function only gets called by old code that calls `build()`
    # directly. Newer code that wants to benefit from STAT data from the
    # designspace should call `build_many()`

    if "STAT" in font:
        return

    # Imported lazily to avoid a module-level dependency cycle with otlLib.
    from ..otlLib.builder import buildStatTable

    fvarTable = font['fvar']
    axes = [dict(tag=a.axisTag, name=a.axisNameID) for a in fvarTable.axes]
    buildStatTable(font, axes)
# Per-master bundle used while building gvar: the master's glyf table plus
# its horizontal metrics dict and (optional) vertical metrics dict.
_MasterData = namedtuple("_MasterData", "glyf hMetrics vMetrics")
def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True):
    """Build the 'gvar' table from per-master glyf outlines.

    tolerance is the maximum IUP-optimization error in font units;
    optimize toggles the IUP delta optimization entirely.
    """
    if tolerance < 0:
        raise ValueError("`tolerance` must be a positive number.")

    log.info("Generating gvar")
    assert "gvar" not in font
    gvar = font["gvar"] = newTable('gvar')
    glyf = font['glyf']
    defaultMasterIndex = masterModel.reverseMapping[0]

    master_datas = [_MasterData(m['glyf'],
                                m['hmtx'].metrics,
                                getattr(m.get('vmtx'), 'metrics', None))
                    for m in master_ttfs]

    for glyph in font.getGlyphOrder():
        log.debug("building gvar for glyph '%s'", glyph)
        isComposite = glyf[glyph].isComposite()

        allData = [
            m.glyf._getCoordinatesAndControls(glyph, m.hMetrics, m.vMetrics)
            for m in master_datas
        ]

        if allData[defaultMasterIndex][1].numberOfContours != 0:
            # If the default master is not empty, interpret empty non-default masters
            # as missing glyphs from a sparse master
            allData = [
                d if d is not None and d[1].numberOfContours != 0 else None
                for d in allData
            ]

        # Drop masters that don't contribute this glyph from the model.
        model, allData = masterModel.getSubModel(allData)

        allCoords = [d[0] for d in allData]
        allControls = [d[1] for d in allData]
        control = allControls[0]
        if not models.allEqual(allControls):
            # Masters disagree on point/contour/component structure.
            log.warning("glyph %s has incompatible masters; skipping" % glyph)
            continue
        del allControls

        # Update gvar
        gvar.variations[glyph] = []
        deltas = model.getDeltas(allCoords, round=partial(GlyphCoordinates.__round__, round=round))
        supports = model.supports
        assert len(deltas) == len(supports)

        # Prepare for IUP optimization
        origCoords = deltas[0]
        endPts = control.endPts

        # deltas[0]/supports[0] describe the default master; only the rest
        # become TupleVariations.
        for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])):
            if all(v == 0 for v in delta.array) and not isComposite:
                continue
            var = TupleVariation(support, delta)
            if optimize:
                delta_opt = iup_delta_optimize(delta, origCoords, endPts, tolerance=tolerance)

                if None in delta_opt:
                    """In composite glyphs, there should be one 0 entry
                    to make sure the gvar entry is written to the font.

                    This is to work around an issue with macOS 10.14 and can be
                    removed once the behaviour of macOS is changed.

                    https://github.com/fonttools/fonttools/issues/1381
                    """
                    if all(d is None for d in delta_opt):
                        delta_opt = [(0, 0)] + [None] * (len(delta_opt) - 1)

                # Use "optimized" version only if smaller...
                var_opt = TupleVariation(support, delta_opt)

                axis_tags = sorted(support.keys())  # Shouldn't matter that this is different from fvar...?

                tupleData, auxData = var.compile(axis_tags)
                unoptimized_len = len(tupleData) + len(auxData)
                tupleData, auxData = var_opt.compile(axis_tags)
                optimized_len = len(tupleData) + len(auxData)

                if optimized_len < unoptimized_len:
                    var = var_opt

            gvar.variations[glyph].append(var)
def _remove_TTHinting(font):
    """Strip all TrueType hinting from *font*.

    Deletes the hint-carrying tables, zeroes the maxp hinting budgets,
    and removes per-glyph instruction programs.
    """
    # Drop the tables that exist purely to carry hinting data.
    for hint_table in ("cvar", "cvt ", "fpgm", "prep"):
        if hint_table in font:
            del font[hint_table]

    # Reset the maxp fields that describe hinting resource usage.
    maxp = font['maxp']
    hinting_budget_fields = (
        "maxTwilightPoints",
        "maxStorage",
        "maxFunctionDefs",
        "maxInstructionDefs",
        "maxStackElements",
        "maxSizeOfInstructions",
    )
    for field in hinting_budget_fields:
        setattr(maxp, field, 0)
    maxp.maxZones = 1

    font["glyf"].removeHinting()
    # TODO: Modify gasp table to deactivate gridfitting for all ranges?
def _merge_TTHinting(font, masterModel, master_ttfs):
    """Merge TrueType hinting across masters and build 'cvar' if needed.

    If any master's fpgm/prep/glyph programs or cvt length is incompatible
    with the merged font, ALL hinting is discarded via _remove_TTHinting.
    """
    log.info("Merging TT hinting")
    assert "cvar" not in font

    # Check that the existing hinting is compatible

    # fpgm and prep table
    for tag in ("fpgm", "prep"):
        all_pgms = [m[tag].program for m in master_ttfs if tag in m]
        if not all_pgms:
            continue
        font_pgm = getattr(font.get(tag), 'program', None)
        if any(pgm != font_pgm for pgm in all_pgms):
            log.warning("Masters have incompatible %s tables, hinting is discarded." % tag)
            _remove_TTHinting(font)
            return

    # glyf table
    font_glyf = font['glyf']
    master_glyfs = [m['glyf'] for m in master_ttfs]
    for name, glyph in font_glyf.glyphs.items():
        all_pgms = [
            getattr(glyf.get(name), 'program', None)
            for glyf in master_glyfs
        ]
        if not any(all_pgms):
            continue
        glyph.expand(font_glyf)
        font_pgm = getattr(glyph, 'program', None)
        # Only programs that are present (truthy) need to match.
        if any(pgm != font_pgm for pgm in all_pgms if pgm):
            log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." % name)
            # TODO Only drop hinting from this glyph.
            _remove_TTHinting(font)
            return

    # cvt table
    all_cvs = [Vector(m["cvt "].values) if 'cvt ' in m else None
               for m in master_ttfs]

    nonNone_cvs = models.nonNone(all_cvs)
    if not nonNone_cvs:
        # There is no cvt table to make a cvar table from, we're done here.
        return

    if not models.allEqual(len(c) for c in nonNone_cvs):
        log.warning("Masters have incompatible cvt tables, hinting is discarded.")
        _remove_TTHinting(font)
        return

    variations = []
    deltas, supports = masterModel.getDeltasAndSupports(all_cvs, round=round)  # builtin round calls into Vector.__round__, which uses builtin round as we like
    # deltas[0] is the default cvt; only non-zero non-default rows matter.
    for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])):
        if all(v == 0 for v in delta):
            continue
        var = TupleVariation(support, delta)
        variations.append(var)

    # We can build the cvar table now.
    if variations:
        cvar = font["cvar"] = newTable('cvar')
        cvar.version = 1
        cvar.variations = variations
# Table-specific attribute names that let _add_VHVAR build either the
# horizontal (HVAR) or vertical (VVAR) metrics-variation table from one
# code path.
_MetricsFields = namedtuple(
    "_MetricsFields",
    ["tableTag", "metricsTag", "sb1", "sb2", "advMapping", "vOrigMapping"],
)

HVAR_FIELDS = _MetricsFields(
    tableTag="HVAR",
    metricsTag="hmtx",
    sb1="LsbMap",
    sb2="RsbMap",
    advMapping="AdvWidthMap",
    vOrigMapping=None,
)

VVAR_FIELDS = _MetricsFields(
    tableTag="VVAR",
    metricsTag="vmtx",
    sb1="TsbMap",
    sb2="BsbMap",
    advMapping="AdvHeightMap",
    vOrigMapping="VOrgMap",
)
def _add_HVAR(font, masterModel, master_ttfs, axisTags):
    """Build the HVAR (horizontal metrics variations) table."""
    _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields=HVAR_FIELDS)


def _add_VVAR(font, masterModel, master_ttfs, axisTags):
    """Build the VVAR (vertical metrics variations) table."""
    _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields=VVAR_FIELDS)
def _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields):
    """Build an HVAR or VVAR table, as selected by *tableFields*.

    tableFields is one of HVAR_FIELDS / VVAR_FIELDS and supplies the
    table-specific tags and attribute names.
    """
    tableTag = tableFields.tableTag
    assert tableTag not in font
    log.info("Generating " + tableTag)
    VHVAR = newTable(tableTag)
    tableClass = getattr(ot, tableTag)
    vhvar = VHVAR.table = tableClass()
    vhvar.Version = 0x00010000

    glyphOrder = font.getGlyphOrder()

    # Build list of source font advance widths for each glyph
    metricsTag = tableFields.metricsTag
    advMetricses = [m[metricsTag].metrics for m in master_ttfs]

    # Build list of source font vertical origin coords for each glyph
    if tableTag == 'VVAR' and 'VORG' in master_ttfs[0]:
        vOrigMetricses = [m['VORG'].VOriginRecords for m in master_ttfs]
        defaultYOrigs = [m['VORG'].defaultVertOriginY for m in master_ttfs]
        vOrigMetricses = list(zip(vOrigMetricses, defaultYOrigs))
    else:
        vOrigMetricses = None

    metricsStore, advanceMapping, vOrigMapping = _get_advance_metrics(font,
        masterModel, master_ttfs, axisTags, glyphOrder, advMetricses,
        vOrigMetricses)

    vhvar.VarStore = metricsStore
    # advanceMapping is None when the store uses direct (per-GID) indexing.
    if advanceMapping is None:
        setattr(vhvar, tableFields.advMapping, None)
    else:
        setattr(vhvar, tableFields.advMapping, advanceMapping)
    if vOrigMapping is not None:
        setattr(vhvar, tableFields.vOrigMapping, vOrigMapping)
    # Side-bearing mappings are not generated here.
    setattr(vhvar, tableFields.sb1, None)
    setattr(vhvar, tableFields.sb2, None)

    font[tableTag] = VHVAR
    return
def _get_advance_metrics(font, masterModel, master_ttfs,
                         axisTags, glyphOrder, advMetricses, vOrigMetricses=None):
    """Build the VarStore (and index mappings) for advance-metrics deltas.

    Builds both a direct (one row per glyph ID) store and an optimized
    indirect store, compiles both, and returns whichever is smaller as
    ``(metricsStore, advanceMapping, vOrigMapping)``.  advanceMapping is
    None when the direct store was chosen.
    """
    vhAdvanceDeltasAndSupports = {}
    vOrigDeltasAndSupports = {}
    for glyph in glyphOrder:
        vhAdvances = [metrics[glyph][0] if glyph in metrics else None for metrics in advMetricses]
        vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(vhAdvances, round=round)

    # A direct store only works when every glyph shares one sub-model.
    singleModel = models.allEqual(id(v[1]) for v in vhAdvanceDeltasAndSupports.values())

    if vOrigMetricses:
        singleModel = False
        for glyph in glyphOrder:
            # We need to supply a vOrigs tuple with non-None default values
            # for each glyph. vOrigMetricses contains values only for those
            # glyphs which have a non-default vOrig.
            vOrigs = [metrics[glyph] if glyph in metrics else defaultVOrig
                      for metrics, defaultVOrig in vOrigMetricses]
            vOrigDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports(vOrigs, round=round)

    directStore = None
    if singleModel:
        # Build direct mapping
        supports = next(iter(vhAdvanceDeltasAndSupports.values()))[1][1:]
        varTupleList = builder.buildVarRegionList(supports, axisTags)
        varTupleIndexes = list(range(len(supports)))
        varData = builder.buildVarData(varTupleIndexes, [], optimize=False)
        for glyphName in glyphOrder:
            varData.addItem(vhAdvanceDeltasAndSupports[glyphName][0], round=noRound)
        varData.optimize()
        directStore = builder.buildVarStore(varTupleList, [varData])

    # Build optimized indirect mapping
    storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)
    advMapping = {}
    for glyphName in glyphOrder:
        deltas, supports = vhAdvanceDeltasAndSupports[glyphName]
        storeBuilder.setSupports(supports)
        advMapping[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound)

    if vOrigMetricses:
        vOrigMap = {}
        for glyphName in glyphOrder:
            deltas, supports = vOrigDeltasAndSupports[glyphName]
            storeBuilder.setSupports(supports)
            vOrigMap[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound)

    indirectStore = storeBuilder.finish()
    mapping2 = indirectStore.optimize(use_NO_VARIATION_INDEX=False)
    # Re-map the provisional VarIdxes through the optimizer's mapping.
    advMapping = [mapping2[advMapping[g]] for g in glyphOrder]
    advanceMapping = builder.buildVarIdxMap(advMapping, glyphOrder)

    if vOrigMetricses:
        vOrigMap = [mapping2[vOrigMap[g]] for g in glyphOrder]

    useDirect = False
    vOrigMapping = None
    if directStore:
        # Compile both, see which is more compact

        writer = OTTableWriter()
        directStore.compile(writer, font)
        directSize = len(writer.getAllData())

        writer = OTTableWriter()
        indirectStore.compile(writer, font)
        advanceMapping.compile(writer, font)
        indirectSize = len(writer.getAllData())

        useDirect = directSize < indirectSize

    if useDirect:
        metricsStore = directStore
        advanceMapping = None
    else:
        metricsStore = indirectStore
        if vOrigMetricses:
            vOrigMapping = builder.buildVarIdxMap(vOrigMap, glyphOrder)

    return metricsStore, advanceMapping, vOrigMapping
def _add_MVAR(font, masterModel, master_ttfs, axisTags):
    """Build the MVAR table from per-master global metric values.

    Iterates MVAR_ENTRIES (value tag -> (table tag, attribute name)),
    stores a delta set for every value that differs between masters, and
    writes the default-master value back onto the merged font's table.
    """
    log.info("Generating MVAR")

    store_builder = varStore.OnlineVarStoreBuilder(axisTags)

    records = []
    lastTableTag = None
    fontTable = None
    tables = None
    # HACK: we need to special-case post.underlineThickness and .underlinePosition
    # and unilaterally/arbitrarily define a sentinel value to distinguish the case
    # when a post table is present in a given master simply because that's where
    # the glyph names in TrueType must be stored, but the underline values are not
    # meant to be used for building MVAR's deltas. The value of -0x8000 (-36768)
    # the minimum FWord (int16) value, was chosen for its unlikelyhood to appear
    # in real-world underline position/thickness values.
    specialTags = {"unds": -0x8000, "undo": -0x8000}

    for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]):
        # For each tag, fetch the associated table from all fonts (or not when we are
        # still looking at a tag from the same tables) and set up the variation model
        # for them.
        if tableTag != lastTableTag:
            tables = fontTable = None
            if tableTag in font:
                fontTable = font[tableTag]
                tables = []
                for master in master_ttfs:
                    if tableTag not in master or (
                        tag in specialTags
                        and getattr(master[tableTag], itemName) == specialTags[tag]
                    ):
                        # Sentinel/missing table: exclude master for this tag.
                        tables.append(None)
                    else:
                        tables.append(master[tableTag])
                model, tables = masterModel.getSubModel(tables)
                store_builder.setModel(model)
            lastTableTag = tableTag

        if tables is None:  # Tag not applicable to the master font.
            continue

        # TODO support gasp entries

        master_values = [getattr(table, itemName) for table in tables]
        if models.allEqual(master_values):
            # Constant across masters: no delta set needed.
            base, varIdx = master_values[0], None
        else:
            base, varIdx = store_builder.storeMasters(master_values)
        setattr(fontTable, itemName, base)

        if varIdx is None:
            continue
        log.info(' %s: %s.%s %s', tag, tableTag, itemName, master_values)
        rec = ot.MetricsValueRecord()
        rec.ValueTag = tag
        rec.VarIdx = varIdx
        records.append(rec)

    assert "MVAR" not in font
    if records:
        store = store_builder.finish()
        # Optimize
        mapping = store.optimize()
        for rec in records:
            rec.VarIdx = mapping[rec.VarIdx]

        MVAR = font["MVAR"] = newTable('MVAR')
        mvar = MVAR.table = ot.MVAR()
        mvar.Version = 0x00010000
        mvar.Reserved = 0
        mvar.VarStore = store
        # XXX these should not be hard-coded but computed automatically
        mvar.ValueRecordSize = 8
        mvar.ValueRecordCount = len(records)
        # Records must be sorted by tag per the MVAR spec.
        mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag)
def _add_BASE(font, masterModel, master_ttfs, axisTags):
    """Merge the masters' BASE tables and attach an ItemVariationStore."""
    log.info("Generating BASE")

    merger = VariationMerger(masterModel, axisTags, font)
    merger.mergeTables(font, master_ttfs, ['BASE'])
    store = merger.store_builder.finish()

    if not store:
        # No varying values collected; the merged BASE stays version 1.0.
        return
    base = font['BASE'].table
    assert base.Version == 0x00010000
    base.Version = 0x00010001  # bump: 1.1 adds the VarStore field
    base.VarStore = store
def _merge_OTL(font, model, master_fonts, axisTags):
    """Merge GSUB/GDEF/GPOS across masters and store their variable values
    in a GDEF version-1.3 ItemVariationStore.
    """
    log.info("Merging OpenType Layout tables")
    merger = VariationMerger(model, axisTags, font)

    merger.mergeTables(font, master_fonts, ['GSUB', 'GDEF', 'GPOS'])
    store = merger.store_builder.finish()
    if not store:
        # Nothing varies across masters; no VarStore/version bump needed.
        return
    try:
        GDEF = font['GDEF'].table
        assert GDEF.Version <= 0x00010002
    except KeyError:
        # No GDEF table yet: create an empty one to hold the VarStore.
        # NOTE(review): the original code assigned font["GDEF"] = newTable()
        # twice in a row here; the first assignment was dead and is removed.
        GDEFTable = font["GDEF"] = newTable('GDEF')
        GDEF = GDEFTable.table = ot.GDEF()
        GDEF.GlyphClassDef = None
        GDEF.AttachList = None
        GDEF.LigCaretList = None
        GDEF.MarkAttachClassDef = None
        GDEF.MarkGlyphSetsDef = None
    GDEF.Version = 0x00010003  # 1.3 is the first version with a VarStore
    GDEF.VarStore = store

    # Optimize
    varidx_map = store.optimize()

    # Rewrite all device-table variation indices through the optimizer's map.
    GDEF.remap_device_varidxes(varidx_map)
    if 'GPOS' in font:
        font['GPOS'].table.remap_device_varidxes(varidx_map)
def _add_GSUB_feature_variations(font, axes, internal_axis_supports, rules, featureTag):
    """Translate designspace <rules> into GSUB FeatureVariations.

    Condition values are normalized to the internal [-1, 1] axis space;
    an open-ended condition defaults to the full range (-1.0 or 1.0).
    """

    def normalize(name, value):
        # Normalize a single user-space value on axis *name*.
        return models.normalizeLocation(
            {name: value}, internal_axis_supports
        )[name]

    log.info("Generating GSUB FeatureVariations")

    axis_tags = {name: axis.tag for name, axis in axes.items()}

    conditional_subs = []
    for rule in rules:

        region = []
        for conditions in rule.conditionSets:
            space = {}
            for condition in conditions:
                axis_name = condition["name"]
                if condition["minimum"] is not None:
                    minimum = normalize(axis_name, condition["minimum"])
                else:
                    minimum = -1.0
                if condition["maximum"] is not None:
                    maximum = normalize(axis_name, condition["maximum"])
                else:
                    maximum = 1.0
                tag = axis_tags[axis_name]
                space[tag] = (minimum, maximum)
            region.append(space)

        subs = {k: v for k, v in rule.subs}

        conditional_subs.append((region, subs))

    addFeatureVariations(font, conditional_subs, featureTag)
_DesignSpaceData = namedtuple(
"_DesignSpaceData",
[
"axes",
"internal_axis_supports",
"base_idx",
"normalized_master_locs",
"masters",
"instances",
"rules",
"rulesProcessingLast",
"lib",
],
)
def _add_CFF2(varFont, model, master_fonts):
    """Merge the masters' CFF/CFF2 charstrings into varFont's CFF2 table.

    Converts varFont's CFF table to CFF2 first if necessary.
    """
    # Imported lazily to avoid a module-level dependency cycle.
    from .cff import merge_region_fonts
    glyphOrder = varFont.getGlyphOrder()
    if "CFF2" not in varFont:
        from .cff import convertCFFtoCFF2
        convertCFFtoCFF2(varFont)
    ordered_fonts_list = model.reorderMasters(master_fonts, model.reverseMapping)
    # re-ordering the master list simplifies building the CFF2 data item lists.
    merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder)
def _add_COLR(font, model, master_fonts, axisTags, colr_layer_reuse=True):
    """Merge the masters' COLR tables and attach a VarStore/VarIndexMap.

    colr_layer_reuse controls whether identical layer runs may be shared.
    """
    merger = COLRVariationMerger(model, axisTags, font, allowLayerReuse=colr_layer_reuse)
    merger.mergeTables(font, master_fonts)
    store = merger.store_builder.finish()

    colr = font["COLR"].table
    if store:
        mapping = store.optimize()
        colr.VarStore = store
        # don't add DeltaSetIndexMap for identity mapping
        colr.VarIndexMap = None
        varIdxes = [mapping[v] for v in merger.varIdxes]
        if any(i != varIdxes[i] for i in range(len(varIdxes))):
            colr.VarIndexMap = builder.buildDeltaSetIndexMap(varIdxes)
def load_designspace(designspace):
    """Extract, validate and normalize the data needed to build a variable font.

    `designspace` may be a DesignSpaceDocument or a path to a .designspace
    file.  Validates axes and master/instance locations, normalizes master
    locations into the internal [-1, 1] space, and locates the base master.

    Returns a `_DesignSpaceData` namedtuple.

    Raises VarLibValidationError when there are no sources or axes, an axis
    lacks a tag, a location is missing, unknown or out of range, or the base
    master is missing or duplicated.
    """
    # TODO: remove this and always assume 'designspace' is a DesignSpaceDocument,
    # never a file path, as that's already handled by caller
    if hasattr(designspace, "sources"):  # Assume a DesignspaceDocument
        ds = designspace
    else:  # Assume a file path
        ds = DesignSpaceDocument.fromfile(designspace)

    masters = ds.sources
    if not masters:
        raise VarLibValidationError("Designspace must have at least one source.")
    instances = ds.instances

    # TODO: Use fontTools.designspaceLib.tagForAxisName instead.
    standard_axis_map = OrderedDict([
        ('weight', ('wght', {'en': u'Weight'})),
        ('width', ('wdth', {'en': u'Width'})),
        ('slant', ('slnt', {'en': u'Slant'})),
        ('optical', ('opsz', {'en': u'Optical Size'})),
        ('italic', ('ital', {'en': u'Italic'})),
    ])

    # Setup axes
    if not ds.axes:
        # Fixed: was an f-string with no placeholders.
        raise VarLibValidationError("Designspace must have at least one axis.")

    axes = OrderedDict()
    for axis_index, axis in enumerate(ds.axes):
        axis_name = axis.name
        if not axis_name:
            if not axis.tag:
                raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.")
            # Fall back to the tag as the axis name.
            axis_name = axis.name = axis.tag

        if axis_name in standard_axis_map:
            # Fill in the registered tag and localized labels when missing.
            if axis.tag is None:
                axis.tag = standard_axis_map[axis_name][0]
            if not axis.labelNames:
                axis.labelNames.update(standard_axis_map[axis_name][1])
        else:
            if not axis.tag:
                raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.")
            if not axis.labelNames:
                axis.labelNames["en"] = tostr(axis_name)

        axes[axis_name] = axis
    log.info("Axes:\n%s", pformat([axis.asdict() for axis in axes.values()]))

    # Check all master and instance locations are valid and fill in defaults
    for obj in masters + instances:
        obj_name = obj.name or obj.styleName or ''
        loc = obj.getFullDesignLocation(ds)
        obj.designLocation = loc
        if loc is None:
            raise VarLibValidationError(
                f"Source or instance '{obj_name}' has no location."
            )
        for axis_name in loc.keys():
            if axis_name not in axes:
                raise VarLibValidationError(
                    f"Location axis '{axis_name}' unknown for '{obj_name}'."
                )
        for axis_name, axis in axes.items():
            # Range checking is defined in user space, so map backward first.
            v = axis.map_backward(loc[axis_name])
            if not (axis.minimum <= v <= axis.maximum):
                raise VarLibValidationError(
                    f"Source or instance '{obj_name}' has out-of-range location "
                    f"for axis '{axis_name}': is mapped to {v} but must be in "
                    f"mapped range [{axis.minimum}..{axis.maximum}] (NOTE: all "
                    "values are in user-space)."
                )

    # Normalize master locations
    internal_master_locs = [o.getFullDesignLocation(ds) for o in masters]
    log.info("Internal master locations:\n%s", pformat(internal_master_locs))

    # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar
    internal_axis_supports = {}
    for axis in axes.values():
        triple = (axis.minimum, axis.default, axis.maximum)
        internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple]
    log.info("Internal axis supports:\n%s", pformat(internal_axis_supports))

    normalized_master_locs = [
        models.normalizeLocation(m, internal_axis_supports)
        for m in internal_master_locs
    ]
    log.info("Normalized master locations:\n%s", pformat(normalized_master_locs))

    # Find base master
    base_idx = None
    for i, m in enumerate(normalized_master_locs):
        if all(v == 0 for v in m.values()):
            if base_idx is not None:
                raise VarLibValidationError(
                    "More than one base master found in Designspace."
                )
            base_idx = i
    if base_idx is None:
        raise VarLibValidationError(
            "Base master not found; no master at default location?"
        )
    log.info("Index of base master: %s", base_idx)

    return _DesignSpaceData(
        axes,
        internal_axis_supports,
        base_idx,
        normalized_master_locs,
        masters,
        instances,
        ds.rules,
        ds.rulesProcessingLast,
        ds.lib,
    )
# https://docs.microsoft.com/en-us/typography/opentype/spec/os2#uswidthclass
# Maps 'wdth' axis percentage values to the nine OS/2.usWidthClass buckets;
# intermediate values are interpolated by piecewiseLinearMap (see
# set_default_weight_width_slant below) and then rounded.
WDTH_VALUE_TO_OS2_WIDTH_CLASS = {
    50: 1,
    62.5: 2,
    75: 3,
    87.5: 4,
    100: 5,
    112.5: 6,
    125: 7,
    150: 8,
    200: 9,
}
def set_default_weight_width_slant(font, location):
    """Sync OS/2 usWeightClass/usWidthClass and post.italicAngle with the
    'wght', 'wdth' and 'slnt' values of the given default location.
    Each field is only written (and logged) when it actually changes.
    """
    if "OS/2" in font:
        os2 = font["OS/2"]
        if "wght" in location:
            weight_class = otRound(max(1, min(location["wght"], 1000)))
            if os2.usWeightClass != weight_class:
                log.info("Setting OS/2.usWeightClass = %s", weight_class)
                os2.usWeightClass = weight_class
        if "wdth" in location:
            # map 'wdth' axis (50..200) to OS/2.usWidthClass (1..9), rounding to closest
            clamped_width = min(max(location["wdth"], 50), 200)
            width_class = otRound(
                models.piecewiseLinearMap(clamped_width, WDTH_VALUE_TO_OS2_WIDTH_CLASS)
            )
            if os2.usWidthClass != width_class:
                log.info("Setting OS/2.usWidthClass = %s", width_class)
                os2.usWidthClass = width_class

    if "slnt" in location and "post" in font:
        post = font["post"]
        italic_angle = max(-90, min(location["slnt"], 90))
        if post.italicAngle != italic_angle:
            log.info("Setting post.italicAngle = %s", italic_angle)
            post.italicAngle = italic_angle
def build_many(
    designspace: DesignSpaceDocument,
    master_finder=lambda s: s,
    exclude=(),
    optimize=True,
    skip_vf=lambda vf_name: False,
    colr_layer_reuse=True,
):
    """
    Build variable fonts from a designspace file, version 5 which can define
    several VFs, or version 4 which has implicitly one VF covering the whole doc.

    If master_finder is set, it should be a callable that takes master
    filename as found in designspace file and map it to master font
    binary as to be opened (eg. .ttf or .otf).

    skip_vf can be used to skip building some of the variable fonts defined in
    the input designspace. It's a predicate that takes as argument the name
    of the variable font and returns `bool`.

    Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name
    """
    # NOTE: 'exclude' previously defaulted to a mutable list ([]); an empty
    # tuple avoids the shared-mutable-default pitfall and is backward
    # compatible ('in' tests and list(exclude) behave identically).
    res = {}
    for _location, subDoc in splitInterpolable(designspace):
        for name, vfDoc in splitVariableFonts(subDoc):
            if skip_vf(name):
                # Lazy %-style args: no string formatting unless DEBUG is on.
                log.debug("Skipping variable TTF font: %s", name)
                continue
            vf = build(
                vfDoc,
                master_finder,
                # STAT is excluded here because it is built once below from
                # the whole document, not per sub-document.
                exclude=list(exclude) + ["STAT"],
                optimize=optimize,
                colr_layer_reuse=colr_layer_reuse,
            )[0]
            if "STAT" not in exclude:
                buildVFStatTable(vf, designspace, name)
            res[name] = vf
    return res
def build(
    designspace,
    master_finder=lambda s: s,
    exclude=(),
    optimize=True,
    colr_layer_reuse=True,
):
    """
    Build variation font from a designspace file.

    If master_finder is set, it should be a callable that takes master
    filename as found in designspace file and map it to master font
    binary as to be opened (eg. .ttf or .otf).

    Returns a (vf, model, master_ttfs) tuple: the variable TTFont, the
    VariationModel used, and the list of master file paths (None entries
    for in-memory masters).
    """
    # NOTE: 'exclude' previously defaulted to a mutable list ([]); an empty
    # tuple avoids the shared-mutable-default pitfall, backward compatibly.
    if hasattr(designspace, "sources"):  # Assume a DesignspaceDocument
        pass
    else:  # Assume a file path
        designspace = DesignSpaceDocument.fromfile(designspace)

    ds = load_designspace(designspace)
    log.info("Building variable font")

    log.info("Loading master fonts")
    master_fonts = load_masters(designspace, master_finder)

    # TODO: 'master_ttfs' is unused except for return value, remove later
    master_ttfs = []
    for master in master_fonts:
        try:
            master_ttfs.append(master.reader.file.name)
        except AttributeError:
            master_ttfs.append(None)  # in-memory fonts have no path

    # Copy the base master to work from it
    vf = deepcopy(master_fonts[ds.base_idx])

    if "DSIG" in vf:
        del vf["DSIG"]

    # TODO append masters as named-instances as well; needs .designspace change.
    fvar = _add_fvar(vf, ds.axes, ds.instances)
    if 'STAT' not in exclude:
        _add_stat(vf)
    if 'avar' not in exclude:
        _add_avar(vf, ds.axes)

    # Map from axis names to axis tags...
    normalized_master_locs = [
        {ds.axes[k].tag: v for k, v in loc.items()} for loc in ds.normalized_master_locs
    ]
    # From here on, we use fvar axes only
    axisTags = [axis.axisTag for axis in fvar.axes]

    # Assume single-model for now.
    model = models.VariationModel(normalized_master_locs, axisOrder=axisTags)
    assert 0 == model.mapping[ds.base_idx]

    log.info("Building variations tables")
    if 'BASE' not in exclude and 'BASE' in vf:
        _add_BASE(vf, model, master_fonts, axisTags)
    if 'MVAR' not in exclude:
        _add_MVAR(vf, model, master_fonts, axisTags)
    if 'HVAR' not in exclude:
        _add_HVAR(vf, model, master_fonts, axisTags)
    if 'VVAR' not in exclude and 'vmtx' in vf:
        _add_VVAR(vf, model, master_fonts, axisTags)
    if 'GDEF' not in exclude or 'GPOS' not in exclude:
        _merge_OTL(vf, model, master_fonts, axisTags)
    if 'gvar' not in exclude and 'glyf' in vf:
        _add_gvar(vf, model, master_fonts, optimize=optimize)
    if 'cvar' not in exclude and 'glyf' in vf:
        _merge_TTHinting(vf, model, master_fonts)
    if 'GSUB' not in exclude and ds.rules:
        featureTag = ds.lib.get(
            FEAVAR_FEATURETAG_LIB_KEY,
            "rclt" if ds.rulesProcessingLast else "rvrn"
        )
        _add_GSUB_feature_variations(vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTag)
    if 'CFF2' not in exclude and ('CFF ' in vf or 'CFF2' in vf):
        _add_CFF2(vf, model, master_fonts)
        if "post" in vf:
            # set 'post' to format 2 to keep the glyph names dropped from CFF2
            post = vf["post"]
            if post.formatType != 2.0:
                post.formatType = 2.0
                post.extraNames = []
                post.mapping = {}
    if 'COLR' not in exclude and 'COLR' in vf and vf['COLR'].version > 0:
        _add_COLR(vf, model, master_fonts, axisTags, colr_layer_reuse)

    set_default_weight_width_slant(
        vf, location={axis.axisTag: axis.defaultValue for axis in vf["fvar"].axes}
    )

    for tag in exclude:
        if tag in vf:
            del vf[tag]

    # TODO: Only return vf for 4.0+, the rest is unused.
    return vf, model, master_ttfs
def _open_font(path, master_finder=lambda s: s):
    # load TTFont masters from given 'path': this can be either a .TTX or an
    # OpenType binary font; or if neither of these, try use the 'master_finder'
    # callable to resolve the path to a valid .TTX or OpenType font binary.
    from fontTools.ttx import guessFileType

    master_path = os.path.normpath(path)
    file_type = guessFileType(master_path)
    if file_type is None:
        # not an OpenType binary/ttx, fall back to the master finder.
        master_path = master_finder(master_path)
        file_type = guessFileType(master_path)

    if file_type in ("TTX", "OTX"):
        font = TTFont()
        font.importXML(master_path)
        return font
    if file_type in ("TTF", "OTF", "WOFF", "WOFF2"):
        return TTFont(master_path)
    raise VarLibValidationError("Invalid master path: %r" % master_path)
def load_masters(designspace, master_finder=lambda s: s):
    """Ensure every SourceDescriptor carries a TTFont in its 'font' attribute,
    opening fonts from the descriptor paths where needed.

    Paths may point to an OpenType font, a TTX file, or a UFO; for UFOs the
    given master_finder callable maps the UFO path to the corresponding
    master binary (e.g. .ttf, .otf or .ttx).

    Returns the master TTFont objects in DesignSpaceDocument order.
    """
    for master in designspace.sources:
        # Layer-based sources cannot be opened here; the caller must have
        # supplied a compiled TTFont.  This spares us from modifying
        # MasterFinder.
        if not master.layerName or master.font is not None:
            continue
        raise VarLibValidationError(
            f"Designspace source '{master.name or '<Unknown>'}' specified a "
            "layer name but lacks the required TTFont object in the 'font' "
            "attribute."
        )

    return designspace.loadSourceFonts(_open_font, master_finder=master_finder)
class MasterFinder(object):
    """Map a designspace source path to a master font binary path.

    The template may use the placeholders {fullname}, {dirname}, {basename},
    {stem} and {ext}, filled in from the absolute source path.
    """

    def __init__(self, template):
        self.template = template

    def __call__(self, src_path):
        fullname = os.path.abspath(src_path)
        dirname, basename = os.path.split(fullname)
        stem, ext = os.path.splitext(basename)
        substituted = self.template.format(
            fullname=fullname,
            dirname=dirname,
            basename=basename,
            stem=stem,
            ext=ext,
        )
        return os.path.normpath(substituted)
def main(args=None):
    """Build a variable font from a designspace file and masters"""
    # NOTE: this docstring doubles as the argparse description below, so it
    # is runtime text and must stay as-is.
    from argparse import ArgumentParser
    from fontTools import configLogger

    parser = ArgumentParser(prog='varLib', description = main.__doc__)
    parser.add_argument('designspace')
    parser.add_argument(
        '-o',
        metavar='OUTPUTFILE',
        dest='outfile',
        default=None,
        help='output file'
    )
    parser.add_argument(
        '-x',
        metavar='TAG',
        dest='exclude',
        action='append',
        default=[],
        help='exclude table'
    )
    parser.add_argument(
        '--disable-iup',
        dest='optimize',
        action='store_false',
        help='do not perform IUP optimization'
    )
    parser.add_argument(
        '--no-colr-layer-reuse',
        dest='colr_layer_reuse',
        action='store_false',
        help='do not rebuild variable COLR table to optimize COLR layer reuse',
    )
    parser.add_argument(
        '--master-finder',
        default='master_ttf_interpolatable/{stem}.ttf',
        help=(
            'templated string used for finding binary font '
            'files given the source file names defined in the '
            'designspace document. The following special strings '
            'are defined: {fullname} is the absolute source file '
            'name; {basename} is the file name without its '
            'directory; {stem} is the basename without the file '
            'extension; {ext} is the source file extension; '
            '{dirname} is the directory of the absolute file '
            'name. The default value is "%(default)s".'
        )
    )
    # -v and -q are mutually exclusive verbosity switches.
    logging_group = parser.add_mutually_exclusive_group(required=False)
    logging_group.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Run more verbosely.")
    logging_group.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="Turn verbosity off.")
    options = parser.parse_args(args)

    configLogger(level=(
        "DEBUG" if options.verbose else
        "ERROR" if options.quiet else
        "INFO"))

    designspace_filename = options.designspace
    finder = MasterFinder(options.master_finder)

    vf, _, _ = build(
        designspace_filename,
        finder,
        exclude=options.exclude,
        optimize=options.optimize,
        colr_layer_reuse=options.colr_layer_reuse,
    )

    # Default output name: <designspace basename>-VF.<ttf|otf>, flavor
    # derived from the built font's sfntVersion.
    outfile = options.outfile
    if outfile is None:
        ext = "otf" if vf.sfntVersion == "OTTO" else "ttf"
        outfile = os.path.splitext(designspace_filename)[0] + '-VF.' + ext

    log.info("Saving variation font %s", outfile)
    vf.save(outfile)
if __name__ == "__main__":
    import sys
    # With command-line arguments: build a variable font and exit with
    # main()'s status.  With no arguments: run this module's doctests.
    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest
    sys.exit(doctest.testmod().failed)
| mit | 715799c283ea712d53e096d4f521d6b9 | 31.143581 | 155 | 0.71596 | 3.066473 | false | false | false | false |
__all__ = ["FontBuilder"]
"""
This module is *experimental*, meaning it still may evolve and change.
The `FontBuilder` class is a convenient helper to construct working TTF or
OTF fonts from scratch.
Note that the various setup methods cannot be called in arbitrary order,
due to various interdependencies between OpenType tables. Here is an order
that works:
fb = FontBuilder(...)
fb.setupGlyphOrder(...)
fb.setupCharacterMap(...)
fb.setupGlyf(...) --or-- fb.setupCFF(...)
fb.setupHorizontalMetrics(...)
fb.setupHorizontalHeader()
fb.setupNameTable(...)
fb.setupOS2()
fb.addOpenTypeFeatures(...)
fb.setupPost()
fb.save(...)
Here is how to build a minimal TTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.ttGlyphPen import TTGlyphPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.qCurveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=True)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = TTGlyphPen(None)
drawTestGlyph(pen)
glyph = pen.glyph()
glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph}
fb.setupGlyf(glyphs)
metrics = {}
glyphTable = fb.font["glyf"]
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, glyphTable[gn].xMin)
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.ttf")
```
And here's how to build a minimal OTF:
```python
from fontTools.fontBuilder import FontBuilder
from fontTools.pens.t2CharStringPen import T2CharStringPen
def drawTestGlyph(pen):
pen.moveTo((100, 100))
pen.lineTo((100, 1000))
pen.curveTo((200, 900), (400, 900), (500, 1000))
pen.lineTo((500, 100))
pen.closePath()
fb = FontBuilder(1024, isTTF=False)
fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"])
fb.setupCharacterMap({32: "space", 65: "A", 97: "a"})
advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0}
familyName = "HelloTestFont"
styleName = "TotallyNormal"
version = "0.1"
nameStrings = dict(
familyName=dict(en=familyName, nl="HalloTestFont"),
styleName=dict(en=styleName, nl="TotaalNormaal"),
uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName,
fullName=familyName + "-" + styleName,
psName=familyName + "-" + styleName,
version="Version " + version,
)
pen = T2CharStringPen(600, None)
drawTestGlyph(pen)
charString = pen.getCharString()
charStrings = {
".notdef": charString,
"space": charString,
"A": charString,
"a": charString,
".null": charString,
}
fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {})
lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()}
metrics = {}
for gn, advanceWidth in advanceWidths.items():
metrics[gn] = (advanceWidth, lsb[gn])
fb.setupHorizontalMetrics(metrics)
fb.setupHorizontalHeader(ascent=824, descent=-200)
fb.setupNameTable(nameStrings)
fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200)
fb.setupPost()
fb.save("test.otf")
```
"""
from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
# Default field values for a new 'head' table; see FontBuilder.setupHead().
# 'created'/'modified' are overwritten with the current timestamp by
# FontBuilder.__init__() when building a font from scratch.
_headDefaults = dict(
    tableVersion=1.0,
    fontRevision=1.0,
    checkSumAdjustment=0,
    magicNumber=0x5F0F3CF5,
    flags=0x0003,
    unitsPerEm=1000,
    created=0,
    modified=0,
    xMin=0,
    yMin=0,
    xMax=0,
    yMax=0,
    macStyle=0,
    lowestRecPPEM=3,
    fontDirectionHint=2,
    indexToLocFormat=0,
    glyphDataFormat=0,
)

# 'maxp' defaults for a glyf-flavored (TTF) font: version 1.0 with the
# full set of glyf-related limits.
_maxpDefaultsTTF = dict(
    tableVersion=0x00010000,
    numGlyphs=0,
    maxPoints=0,
    maxContours=0,
    maxCompositePoints=0,
    maxCompositeContours=0,
    maxZones=2,
    maxTwilightPoints=0,
    maxStorage=0,
    maxFunctionDefs=0,
    maxInstructionDefs=0,
    maxStackElements=0,
    maxSizeOfInstructions=0,
    maxComponentElements=0,
    maxComponentDepth=0,
)
# 'maxp' defaults for a CFF-flavored (OTF) font: version 0.5, glyph count only.
_maxpDefaultsOTF = dict(
    tableVersion=0x00005000,
    numGlyphs=0,
)

# 'post' defaults; formatType 3.0 stores no glyph names.
_postDefaults = dict(
    formatType=3.0,
    italicAngle=0,
    underlinePosition=0,
    underlineThickness=0,
    isFixedPitch=0,
    minMemType42=0,
    maxMemType42=0,
    minMemType1=0,
    maxMemType1=0,
)

# Horizontal header ('hhea') defaults; see setupHorizontalHeader().
_hheaDefaults = dict(
    tableVersion=0x00010000,
    ascent=0,
    descent=0,
    lineGap=0,
    advanceWidthMax=0,
    minLeftSideBearing=0,
    minRightSideBearing=0,
    xMaxExtent=0,
    caretSlopeRise=1,
    caretSlopeRun=0,
    caretOffset=0,
    reserved0=0,
    reserved1=0,
    reserved2=0,
    reserved3=0,
    metricDataFormat=0,
    numberOfHMetrics=0,
)

# Vertical header ('vhea') defaults; see setupVerticalHeader().
_vheaDefaults = dict(
    tableVersion=0x00010000,
    ascent=0,
    descent=0,
    lineGap=0,
    advanceHeightMax=0,
    minTopSideBearing=0,
    minBottomSideBearing=0,
    yMaxExtent=0,
    caretSlopeRise=0,
    caretSlopeRun=0,
    reserved0=0,
    reserved1=0,
    reserved2=0,
    reserved3=0,
    reserved4=0,
    metricDataFormat=0,
    numberOfVMetrics=0,
)

# Descriptive aliases for 'name' table nameIDs, accepted by setupNameTable().
_nameIDs = dict(
    copyright=0,
    familyName=1,
    styleName=2,
    uniqueFontIdentifier=3,
    fullName=4,
    version=5,
    psName=6,
    trademark=7,
    manufacturer=8,
    designer=9,
    description=10,
    vendorURL=11,
    designerURL=12,
    licenseDescription=13,
    licenseInfoURL=14,
    # reserved = 15,
    typographicFamily=16,
    typographicSubfamily=17,
    compatibleFullName=18,
    sampleText=19,
    postScriptCIDFindfontName=20,
    wwsFamilyName=21,
    wwsSubfamilyName=22,
    lightBackgroundPalette=23,
    darkBackgroundPalette=24,
    variationsPostScriptNamePrefix=25,
)

# to insert in setupNameTable doc string:
# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1])))

# PANOSE classification defaults, embedded in _OS2Defaults below.
_panoseDefaults = dict(
    bFamilyType=0,
    bSerifStyle=0,
    bWeight=0,
    bProportion=0,
    bContrast=0,
    bStrokeVariation=0,
    bArmStyle=0,
    bLetterForm=0,
    bMidline=0,
    bXHeight=0,
)

# 'OS/2' defaults; see FontBuilder.setupOS2().
_OS2Defaults = dict(
    version=3,
    xAvgCharWidth=0,
    usWeightClass=400,
    usWidthClass=5,
    fsType=0x0004,  # default: Preview & Print embedding
    ySubscriptXSize=0,
    ySubscriptYSize=0,
    ySubscriptXOffset=0,
    ySubscriptYOffset=0,
    ySuperscriptXSize=0,
    ySuperscriptYSize=0,
    ySuperscriptXOffset=0,
    ySuperscriptYOffset=0,
    yStrikeoutSize=0,
    yStrikeoutPosition=0,
    sFamilyClass=0,
    panose=_panoseDefaults,
    ulUnicodeRange1=0,
    ulUnicodeRange2=0,
    ulUnicodeRange3=0,
    ulUnicodeRange4=0,
    achVendID="????",
    fsSelection=0,
    usFirstCharIndex=0,
    usLastCharIndex=0,
    sTypoAscender=0,
    sTypoDescender=0,
    sTypoLineGap=0,
    usWinAscent=0,
    usWinDescent=0,
    ulCodePageRange1=0,
    ulCodePageRange2=0,
    sxHeight=0,
    sCapHeight=0,
    usDefaultChar=0,  # .notdef
    usBreakChar=32,  # space
    usMaxContext=0,
    usLowerOpticalPointSize=0,
    usUpperOpticalPointSize=0,
)
class FontBuilder(object):
def __init__(self, unitsPerEm=None, font=None, isTTF=True):
"""Initialize a FontBuilder instance.
If the `font` argument is not given, a new `TTFont` will be
constructed, and `unitsPerEm` must be given. If `isTTF` is True,
the font will be a glyf-based TTF; if `isTTF` is False it will be
a CFF-based OTF.
If `font` is given, it must be a `TTFont` instance and `unitsPerEm`
must _not_ be given. The `isTTF` argument will be ignored.
"""
if font is None:
self.font = TTFont(recalcTimestamp=False)
self.isTTF = isTTF
now = timestampNow()
assert unitsPerEm is not None
self.setupHead(unitsPerEm=unitsPerEm, created=now, modified=now)
self.setupMaxp()
else:
assert unitsPerEm is None
self.font = font
self.isTTF = "glyf" in font
def save(self, file):
"""Save the font. The 'file' argument can be either a pathname or a
writable file object.
"""
self.font.save(file)
def _initTableWithValues(self, tableTag, defaults, values):
table = self.font[tableTag] = newTable(tableTag)
for k, v in defaults.items():
setattr(table, k, v)
for k, v in values.items():
setattr(table, k, v)
return table
def _updateTableWithValues(self, tableTag, values):
table = self.font[tableTag]
for k, v in values.items():
setattr(table, k, v)
def setupHead(self, **values):
"""Create a new `head` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("head", _headDefaults, values)
def updateHead(self, **values):
"""Update the head table with the fields and values passed as
keyword arguments.
"""
self._updateTableWithValues("head", values)
def setupGlyphOrder(self, glyphOrder):
"""Set the glyph order for the font."""
self.font.setGlyphOrder(glyphOrder)
    def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False):
        """Build the `cmap` table for the font. The `cmapping` argument should
        be a dict mapping unicode code points as integers to glyph names.

        The `uvs` argument, when passed, must be a list of tuples, describing
        Unicode Variation Sequences. These tuples have three elements:
            (unicodeValue, variationSelector, glyphName)
        `unicodeValue` and `variationSelector` are integer code points.
        `glyphName` may be None, to indicate this is the default variation.
        Text processors will then use the cmap to find the glyph name.
        Each Unicode Variation Sequence should be an officially supported
        sequence, but this is not policed.

        If `allowFallback` is True, a format 4 subtable that overflows is
        silently replaced by a format 12 one; otherwise a ValueError is
        raised.
        """
        subTables = []
        highestUnicode = max(cmapping) if cmapping else 0
        if highestUnicode > 0xFFFF:
            # Code points beyond the BMP need a format 12 (3, 10) subtable;
            # the (3, 1) subtable then only covers the BMP subset.
            cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000)
            subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10)
            subTables.append(subTable_3_10)
        else:
            cmapping_3_1 = cmapping
        format = 4
        subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1)
        try:
            # Compile now so a format 4 size overflow is detected early.
            subTable_3_1.compile(self.font)
        except struct.error:
            # format 4 overflowed, fall back to format 12
            if not allowFallback:
                raise ValueError(
                    "cmap format 4 subtable overflowed; sort glyph order by unicode to fix."
                )
            format = 12
            subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1)
        subTables.append(subTable_3_1)
        # Mirror the Windows subtable on the Unicode platform (0, 3).
        subTable_0_3 = buildCmapSubTable(cmapping_3_1, format, 0, 3)
        subTables.append(subTable_0_3)
        if uvs is not None:
            # Group variation sequences by selector for the format 14 subtable.
            uvsDict = {}
            for unicodeValue, variationSelector, glyphName in uvs:
                if cmapping.get(unicodeValue) == glyphName:
                    # this is a default variation
                    glyphName = None
                if variationSelector not in uvsDict:
                    uvsDict[variationSelector] = []
                uvsDict[variationSelector].append((unicodeValue, glyphName))
            uvsSubTable = buildCmapSubTable({}, 14, 0, 5)
            uvsSubTable.uvsDict = uvsDict
            subTables.append(uvsSubTable)
        self.font["cmap"] = newTable("cmap")
        self.font["cmap"].tableVersion = 0
        self.font["cmap"].tables = subTables
def setupNameTable(self, nameStrings, windows=True, mac=True):
"""Create the `name` table for the font. The `nameStrings` argument must
be a dict, mapping nameIDs or descriptive names for the nameIDs to name
record values. A value is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
By default, both Windows (platformID=3) and Macintosh (platformID=1) name
records are added, unless any of `windows` or `mac` arguments is False.
The following descriptive names are available for nameIDs:
copyright (nameID 0)
familyName (nameID 1)
styleName (nameID 2)
uniqueFontIdentifier (nameID 3)
fullName (nameID 4)
version (nameID 5)
psName (nameID 6)
trademark (nameID 7)
manufacturer (nameID 8)
designer (nameID 9)
description (nameID 10)
vendorURL (nameID 11)
designerURL (nameID 12)
licenseDescription (nameID 13)
licenseInfoURL (nameID 14)
typographicFamily (nameID 16)
typographicSubfamily (nameID 17)
compatibleFullName (nameID 18)
sampleText (nameID 19)
postScriptCIDFindfontName (nameID 20)
wwsFamilyName (nameID 21)
wwsSubfamilyName (nameID 22)
lightBackgroundPalette (nameID 23)
darkBackgroundPalette (nameID 24)
variationsPostScriptNamePrefix (nameID 25)
"""
nameTable = self.font["name"] = newTable("name")
nameTable.names = []
for nameName, nameValue in nameStrings.items():
if isinstance(nameName, int):
nameID = nameName
else:
nameID = _nameIDs[nameName]
if isinstance(nameValue, str):
nameValue = dict(en=nameValue)
nameTable.addMultilingualName(
nameValue, ttFont=self.font, nameID=nameID, windows=windows, mac=mac
)
def setupOS2(self, **values):
"""Create a new `OS/2` table and initialize it with default values,
which can be overridden by keyword arguments.
"""
self._initTableWithValues("OS/2", _OS2Defaults, values)
if "xAvgCharWidth" not in values:
assert (
"hmtx" in self.font
), "the 'hmtx' table must be setup before the 'OS/2' table"
self.font["OS/2"].recalcAvgCharWidth(self.font)
if not (
"ulUnicodeRange1" in values
or "ulUnicodeRange2" in values
or "ulUnicodeRange3" in values
or "ulUnicodeRange3" in values
):
assert (
"cmap" in self.font
), "the 'cmap' table must be setup before the 'OS/2' table"
self.font["OS/2"].recalcUnicodeRanges(self.font)
    def setupCFF(self, psName, fontInfo, charStringsDict, privateDict):
        """Create a 'CFF ' table (and mark the font as CFF-flavored by
        setting sfntVersion to 'OTTO').

        `psName` is the PostScript font name; `fontInfo` maps CFF TopDict
        field names to values; `charStringsDict` maps glyph names to
        charstring objects; `privateDict` maps CFF Private dict field names
        to values.
        """
        from .cffLib import (
            CFFFontSet,
            TopDictIndex,
            TopDict,
            CharStrings,
            GlobalSubrsIndex,
            PrivateDict,
        )

        assert not self.isTTF
        self.font.sfntVersion = "OTTO"
        fontSet = CFFFontSet()
        fontSet.major = 1
        fontSet.minor = 0
        fontSet.otFont = self.font
        fontSet.fontNames = [psName]
        fontSet.topDictIndex = TopDictIndex()

        globalSubrs = GlobalSubrsIndex()
        fontSet.GlobalSubrs = globalSubrs
        private = PrivateDict()
        for key, value in privateDict.items():
            setattr(private, key, value)
        # Plain (non-CID) CFF: no FDSelect/FDArray.
        fdSelect = None
        fdArray = None

        topDict = TopDict()
        topDict.charset = self.font.getGlyphOrder()
        topDict.Private = private
        topDict.GlobalSubrs = fontSet.GlobalSubrs
        for key, value in fontInfo.items():
            setattr(topDict, key, value)
        if "FontMatrix" not in fontInfo:
            # Derive the FontMatrix scale from the font's unitsPerEm.
            scale = 1 / self.font["head"].unitsPerEm
            topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]

        charStrings = CharStrings(
            None, topDict.charset, globalSubrs, private, fdSelect, fdArray
        )
        for glyphName, charString in charStringsDict.items():
            charString.private = private
            charString.globalSubrs = globalSubrs
            charStrings[glyphName] = charString
        topDict.CharStrings = charStrings

        fontSet.topDictIndex.append(topDict)

        self.font["CFF "] = newTable("CFF ")
        self.font["CFF "].cff = fontSet
    def setupCFF2(self, charStringsDict, fdArrayList=None, regions=None):
        """Create a 'CFF2' table (and mark the font as CFF-flavored by
        setting sfntVersion to 'OTTO').

        `charStringsDict` maps glyph names to charstring objects;
        `fdArrayList`, when given, is a list of Private dict field mappings,
        one per FontDict; `regions`, when given, is forwarded to
        setupCFF2Regions() to build the variation store.
        """
        from .cffLib import (
            CFFFontSet,
            TopDictIndex,
            TopDict,
            CharStrings,
            GlobalSubrsIndex,
            PrivateDict,
            FDArrayIndex,
            FontDict,
        )

        assert not self.isTTF
        self.font.sfntVersion = "OTTO"
        fontSet = CFFFontSet()
        fontSet.major = 2
        fontSet.minor = 0

        cff2GetGlyphOrder = self.font.getGlyphOrder
        fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None)

        globalSubrs = GlobalSubrsIndex()
        fontSet.GlobalSubrs = globalSubrs

        if fdArrayList is None:
            # Default: a single FontDict with an empty Private dict.
            fdArrayList = [{}]
        fdSelect = None
        fdArray = FDArrayIndex()
        fdArray.strings = None
        fdArray.GlobalSubrs = globalSubrs
        for privateDict in fdArrayList:
            fontDict = FontDict()
            fontDict.setCFF2(True)
            private = PrivateDict()
            for key, value in privateDict.items():
                setattr(private, key, value)
            fontDict.Private = private
            fdArray.append(fontDict)

        topDict = TopDict()
        topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
        topDict.FDArray = fdArray
        # Derive the FontMatrix scale from the font's unitsPerEm.
        scale = 1 / self.font["head"].unitsPerEm
        topDict.FontMatrix = [scale, 0, 0, scale, 0, 0]

        # All charstrings share the first FontDict's Private dict.
        private = fdArray[0].Private
        charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray)
        for glyphName, charString in charStringsDict.items():
            charString.private = private
            charString.globalSubrs = globalSubrs
            charStrings[glyphName] = charString
        topDict.CharStrings = charStrings

        fontSet.topDictIndex.append(topDict)

        self.font["CFF2"] = newTable("CFF2")
        self.font["CFF2"].cff = fontSet

        if regions:
            self.setupCFF2Regions(regions)
def setupCFF2Regions(self, regions):
from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore
from .cffLib import VarStoreData
assert "fvar" in self.font, "fvar must to be set up first"
assert "CFF2" in self.font, "CFF2 must to be set up first"
axisTags = [a.axisTag for a in self.font["fvar"].axes]
varRegionList = buildVarRegionList(regions, axisTags)
varData = buildVarData(list(range(len(regions))), None, optimize=False)
varStore = buildVarStore(varRegionList, [varData])
vstore = VarStoreData(otVarStore=varStore)
topDict = self.font["CFF2"].cff.topDictIndex[0]
topDict.VarStore = vstore
for fontDict in topDict.FDArray:
fontDict.Private.vstore = vstore
def setupGlyf(self, glyphs, calcGlyphBounds=True):
"""Create the `glyf` table from a dict, that maps glyph names
to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example
as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`.
If `calcGlyphBounds` is True, the bounds of all glyphs will be
calculated. Only pass False if your glyph objects already have
their bounding box values set.
"""
assert self.isTTF
self.font["loca"] = newTable("loca")
self.font["glyf"] = newTable("glyf")
self.font["glyf"].glyphs = glyphs
if hasattr(self.font, "glyphOrder"):
self.font["glyf"].glyphOrder = self.font.glyphOrder
if calcGlyphBounds:
self.calcGlyphBounds()
def setupFvar(self, axes, instances):
"""Adds an font variations table to the font.
Args:
axes (list): See below.
instances (list): See below.
``axes`` should be a list of axes, with each axis either supplied as
a py:class:`.designspaceLib.AxisDescriptor` object, or a tuple in the
format ```tupletag, minValue, defaultValue, maxValue, name``.
The ``name`` is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
```instances`` should be a list of instances, with each instance either
supplied as a py:class:`.designspaceLib.InstanceDescriptor` object, or a
dict with keys ``location`` (mapping of axis tags to float values),
``stylename`` and (optionally) ``postscriptfontname``.
The ``stylename`` is either a string, or a dict, mapping language codes
to strings, to allow localized name table entries.
"""
addFvar(self.font, axes, instances)
def setupAvar(self, axes):
"""Adds an axis variations table to the font.
Args:
axes (list): A list of py:class:`.designspaceLib.AxisDescriptor` objects.
"""
from .varLib import _add_avar
_add_avar(self.font, OrderedDict(enumerate(axes))) # Only values are used
def setupGvar(self, variations):
gvar = self.font["gvar"] = newTable("gvar")
gvar.version = 1
gvar.reserved = 0
gvar.variations = variations
def calcGlyphBounds(self):
"""Calculate the bounding boxes of all glyphs in the `glyf` table.
This is usually not called explicitly by client code.
"""
glyphTable = self.font["glyf"]
for glyph in glyphTable.glyphs.values():
glyph.recalcBounds(glyphTable)
def setupHorizontalMetrics(self, metrics):
    """Create a new `hmtx` table, for horizontal metrics.

    The `metrics` argument must be a dict, mapping glyph names to
    `(width, leftSidebearing)` tuples.
    """
    self.setupMetrics("hmtx", metrics)
def setupVerticalMetrics(self, metrics):
    """Create a new `vmtx` table, for vertical metrics.

    The `metrics` argument must be a dict, mapping glyph names to
    `(height, topSidebearing)` tuples.
    """
    self.setupMetrics("vmtx", metrics)
def setupMetrics(self, tableTag, metrics):
    """See `setupHorizontalMetrics()` and `setupVerticalMetrics()`."""
    assert tableTag in ("hmtx", "vmtx")
    mtxTable = self.font[tableTag] = newTable(tableTag)
    # Metric values are stored as integers in the font, so round both the
    # advance and the sidebearing of every entry.
    mtxTable.metrics = {
        glyphName: (int(round(advance)), int(round(sidebearing)))
        for glyphName, (advance, sidebearing) in metrics.items()
    }
def setupHorizontalHeader(self, **values):
    """Create a new `hhea` table and initialize it with default values,
    which can be overridden by keyword arguments.
    """
    self._initTableWithValues("hhea", _hheaDefaults, values)
def setupVerticalHeader(self, **values):
    """Create a new `vhea` table and initialize it with default values,
    which can be overridden by keyword arguments.
    """
    self._initTableWithValues("vhea", _vheaDefaults, values)
def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None):
    """Create a new `VORG` table. The `verticalOrigins` argument must be
    a dict, mapping glyph names to vertical origin values.

    The `defaultVerticalOrigin` argument should be the most common vertical
    origin value. If omitted, this value will be derived from the actual
    values in the `verticalOrigins` argument.
    """
    if defaultVerticalOrigin is None:
        from collections import Counter

        # Use the most frequent vertical origin as the table-wide default.
        # Counter.most_common() breaks ties by first insertion order, which
        # matches the stable reverse sort used previously.
        defaultVerticalOrigin = Counter(verticalOrigins.values()).most_common(1)[
            0
        ][0]
    self._initTableWithValues(
        "VORG",
        {},
        dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin),
    )
    vorgTable = self.font["VORG"]
    vorgTable.majorVersion = 1
    vorgTable.minorVersion = 0
    # Record every per-glyph origin; the table itself decides which entries
    # differ from the default when compiled.
    for gn in verticalOrigins:
        vorgTable[gn] = verticalOrigins[gn]
def setupPost(self, keepGlyphNames=True, **values):
    """Create a new `post` table and initialize it with default values,
    which can be overridden by keyword arguments.
    """
    postTable = self._initTableWithValues("post", _postDefaults, values)
    # Store glyph names (format 2.0) only for TTF or CFF2 fonts and only
    # when requested; otherwise emit a nameless format 3.0 table.
    if keepGlyphNames and (self.isTTF or "CFF2" in self.font):
        postTable.formatType = 2.0
        postTable.extraNames = []
        postTable.mapping = {}
    else:
        postTable.formatType = 3.0
def setupMaxp(self):
    """Create a new `maxp` table. This is called implicitly by FontBuilder
    itself and is usually not called by client code.
    """
    # TTF and OTF fonts need different maxp defaults.
    defaults = _maxpDefaultsTTF if self.isTTF else _maxpDefaultsOTF
    self._initTableWithValues("maxp", defaults, {})
def setupDummyDSIG(self):
    """This adds an empty DSIG table to the font to make some MS applications
    happy. This does not properly sign the font.
    """
    # An empty signature record set: version 1, no flags, zero signatures.
    self._initTableWithValues(
        "DSIG",
        {},
        dict(ulVersion=1, usFlag=0, usNumSigs=0, signatureRecords=[]),
    )
def addOpenTypeFeatures(self, features, filename=None, tables=None):
    """Add OpenType features to the font from a string containing
    Feature File syntax.

    The `filename` argument is used in error messages and to determine
    where to look for "include" files.

    The optional `tables` argument can be a list of OTL tables tags to
    build, allowing the caller to only build selected OTL tables. See
    `fontTools.feaLib` for details.
    """
    from .feaLib.builder import addOpenTypeFeaturesFromString

    addOpenTypeFeaturesFromString(
        self.font, features, filename=filename, tables=tables
    )
def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"):
    """Add conditional substitutions to a Variable Font.

    See `fontTools.varLib.featureVars.addFeatureVariations`.

    Raises:
        KeyError: If the font has no ``fvar`` table yet; call
            :meth:`setupFvar` first.
    """
    from .varLib import featureVars

    if "fvar" not in self.font:
        raise KeyError("'fvar' table is missing; can't add FeatureVariations.")
    featureVars.addFeatureVariations(
        self.font, conditionalSubstitutions, featureTag=featureTag
    )
def setupCOLR(
    self,
    colorLayers,
    version=None,
    varStore=None,
    varIndexMap=None,
    clipBoxes=None,
    allowLayerReuse=True,
):
    """Build new COLR table using color layers dictionary.

    All arguments are forwarded to the builder; see
    `fontTools.colorLib.builder.buildCOLR` for their meaning.
    """
    from fontTools.colorLib.builder import buildCOLR

    glyphMap = self.font.getReverseGlyphMap()
    self.font["COLR"] = buildCOLR(
        colorLayers,
        version=version,
        glyphMap=glyphMap,
        varStore=varStore,
        varIndexMap=varIndexMap,
        clipBoxes=clipBoxes,
        allowLayerReuse=allowLayerReuse,
    )
def setupCPAL(
    self,
    palettes,
    paletteTypes=None,
    paletteLabels=None,
    paletteEntryLabels=None,
):
    """Build new CPAL table using list of palettes.

    Optionally build CPAL v1 table using paletteTypes, paletteLabels and
    paletteEntryLabels.

    Cf. `fontTools.colorLib.builder.buildCPAL`.
    """
    from fontTools.colorLib.builder import buildCPAL

    # The name table is passed along so palette labels can be stored as
    # name records when a v1 table is requested.
    self.font["CPAL"] = buildCPAL(
        palettes,
        paletteTypes=paletteTypes,
        paletteLabels=paletteLabels,
        paletteEntryLabels=paletteEntryLabels,
        nameTable=self.font.get("name"),
    )
def setupStat(self, axes, locations=None, elidedFallbackName=2):
    """Build a new 'STAT' table.

    See `fontTools.otlLib.builder.buildStatTable` for details about
    the arguments.
    """
    from .otlLib.builder import buildStatTable

    buildStatTable(self.font, axes, locations, elidedFallbackName)
def buildCmapSubTable(cmapping, format, platformID, platEncID):
    """Build a single cmap subtable of the given *format*.

    Note: *format* shadows the builtin, but the name is part of the public
    signature and must stay.
    """
    sub = cmap_classes[format](format)
    sub.cmap = cmapping
    sub.platformID = platformID
    sub.platEncID = platEncID
    sub.language = 0
    return sub
def addFvar(font, axes, instances):
    """Build an ``fvar`` table from axis and instance descriptions and
    attach it to *font*, adding the required localized ``name`` records.

    *axes* items are either ``(tag, min, default, max, name)`` tuples or
    AxisDescriptor-like objects; *instances* are dicts (keys ``location``,
    ``stylename``, optional ``postscriptfontname``) or
    InstanceDescriptor-like objects.  Names may be plain strings or dicts
    mapping language codes to strings.
    """
    from .ttLib.tables._f_v_a_r import Axis, NamedInstance

    # NOTE(review): `assert` is stripped under -O; consider raising
    # ValueError for empty `axes` instead.
    assert axes

    fvar = newTable("fvar")
    nameTable = font["name"]

    for axis_def in axes:
        axis = Axis()

        if isinstance(axis_def, tuple):
            (
                axis.axisTag,
                axis.minValue,
                axis.defaultValue,
                axis.maxValue,
                name,
            ) = axis_def
        else:
            (axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = (
                axis_def.tag,
                axis_def.minimum,
                axis_def.default,
                axis_def.maximum,
                axis_def.name,
            )

        # A bare string becomes an English-only localized name.
        if isinstance(name, str):
            name = dict(en=name)

        axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font)
        fvar.axes.append(axis)

    for instance in instances:
        if isinstance(instance, dict):
            coordinates = instance["location"]
            name = instance["stylename"]
            psname = instance.get("postscriptfontname")
        else:
            coordinates = instance.location
            name = instance.localisedStyleName or instance.styleName
            psname = instance.postScriptFontName

        if isinstance(name, str):
            name = dict(en=name)

        inst = NamedInstance()
        inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font)
        if psname is not None:
            inst.postscriptNameID = nameTable.addName(psname)
        inst.coordinates = coordinates
        fvar.instances.append(inst)

    font["fvar"] = fvar
| mit | 3aac6baf4bd5fbae11feeebc75fe51fc | 31.563674 | 104 | 0.622195 | 3.734706 | false | false | false | false |
watchdogpolska/feder | feder/main/filters.py | 1 | 1091 | from django_filters import DateRangeFilter, FilterSet
from django.conf import settings
from django.utils.timezone import now
class InitialFilterSet(FilterSet):
    """FilterSet that folds ``initial`` values into the submitted data.

    Values present in ``data`` take precedence over the ``initial``
    defaults, so query parameters always win.
    """

    def __init__(self, data=None, *args, **kwargs):
        initial = kwargs.pop("initial", {})
        if data is None:
            data = {}
        # Start from the defaults, then let submitted values override them.
        merged = dict(initial)
        merged.update(data.copy().items())
        super().__init__(merged, *args, **kwargs)
class MinYearRangeFilter(DateRangeFilter):
    """DateRangeFilter extended with one choice per calendar year, from the
    current year down to ``settings.MIN_FILTER_YEAR``.

    Selecting a year choice filters the queryset to records whose field
    falls within that year.
    """

    def __init__(self, choices=None, filters=None, *args, **kwargs):
        years = range(now().year, settings.MIN_FILTER_YEAR - 1, -1)
        if choices is None:
            choices = DateRangeFilter.choices + [(str(year), year) for year in years]
        if filters is None:
            filters = dict(DateRangeFilter.filters)
            for year in years:
                # Bind the loop variable as a default argument so each choice
                # filters by its own year.  Previously the lambda used
                # ``now().year``, so every year choice incorrectly filtered
                # by the *current* year.
                filters[str(year)] = lambda qs, name, year=year: qs.filter(
                    **{
                        "%s__year" % name: year,
                    }
                )
        super().__init__(choices=choices, filters=filters, *args, **kwargs)
| mit | 808fadda75abcbadbf17fa07907441ad | 37.964286 | 85 | 0.566453 | 4.070896 | false | false | false | false |
watchdogpolska/feder | feder/virus_scan/migrations/0001_initial.py | 1 | 3489 | # Generated by Django 2.2.7 on 2019-11-30 05:25
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import model_utils.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration for the virus_scan app: creates the
    # Request model plus an index on its generic (content_type, object_id)
    # pair.  Generated code — avoid editing by hand; create a new migration
    # for schema changes instead.

    initial = True

    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
    ]

    operations = [
        migrations.CreateModel(
            name="Request",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # created/modified are maintained automatically by
                # django-model-utils field classes.
                (
                    "created",
                    model_utils.fields.AutoCreatedField(
                        default=django.utils.timezone.now,
                        editable=False,
                        verbose_name="created",
                    ),
                ),
                (
                    "modified",
                    model_utils.fields.AutoLastModifiedField(
                        default=django.utils.timezone.now,
                        editable=False,
                        verbose_name="modified",
                    ),
                ),
                # object_id + content_type (below) presumably form a generic
                # relation to the scanned object — confirm against the model.
                ("object_id", models.PositiveIntegerField()),
                ("field_name", models.CharField(max_length=50)),
                (
                    "engine_name",
                    models.CharField(
                        blank=True, max_length=20, verbose_name="Engine name"
                    ),
                ),
                (
                    "engine_id",
                    models.CharField(
                        blank=True, max_length=100, verbose_name="External ID"
                    ),
                ),
                (
                    "engine_report",
                    jsonfield.fields.JSONField(
                        blank=True, verbose_name="Engine result"
                    ),
                ),
                (
                    "engine_link",
                    models.CharField(
                        blank=True, max_length=150, verbose_name="Engine result URL"
                    ),
                ),
                (
                    "status",
                    models.IntegerField(
                        choices=[
                            (0, "Created"),
                            (1, "Queued"),
                            (2, "Infected"),
                            (3, "Not detected"),
                            (4, "Failed"),
                        ],
                        default=0,
                    ),
                ),
                (
                    "content_type",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contenttypes.ContentType",
                    ),
                ),
            ],
            options={
                "verbose_name": "Request of virus scan",
                "verbose_name_plural": "Requests of virus scan",
                "ordering": ["created"],
            },
        ),
        migrations.AddIndex(
            model_name="request",
            index=models.Index(
                fields=["content_type", "object_id"],
                name="virus_scan__content_e045f6_idx",
            ),
        ),
    ]
| mit | 07fb4455e6d26bc00e24dc724551e90f | 31.607477 | 84 | 0.3683 | 5.766942 | false | false | false | false |
watchdogpolska/feder | feder/monitorings/filters.py | 1 | 1117 | import django_filters
from django.utils.translation import gettext_lazy as _
from .models import Monitoring
from feder.teryt.filters import (
DisabledWhenVoivodeshipFilter,
DisabledWhenCountyFilter,
DisabledWhenCommunityFilter,
)
from feder.cases.filters import CaseReportFilter
class MonitoringFilter(django_filters.FilterSet):
    """Filter monitorings by name, creation date and territorial unit."""

    created = django_filters.DateRangeFilter(label=_("Creation date"))
    voivodeship = DisabledWhenVoivodeshipFilter()
    county = DisabledWhenCountyFilter()
    community = DisabledWhenCommunityFilter()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Turn the auto-generated "name" filter into a case-insensitive
        # substring match and give it a translated label.
        name_filter = self.filters["name"]
        name_filter.lookup_expr = "icontains"
        name_filter.label = _("Name")

    class Meta:
        model = Monitoring
        fields = ["name", "created"]
class MonitoringCaseReportFilter(CaseReportFilter):
    """CaseReportFilter scoped to a single monitoring.

    The "monitoring" filter is redundant in this context, so it is removed
    both from the instance and from the declared field list.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filters.pop("monitoring")

    class Meta(CaseReportFilter.Meta):
        fields = [
            name for name in CaseReportFilter.Meta.fields if name != "monitoring"
        ]
| mit | 6c54c35349cc98b5a4ed997e46acbecd | 30.914286 | 82 | 0.693823 | 3.946996 | false | false | false | false |
ucfopen/canvasapi | canvasapi/enrollment.py | 1 | 1951 | from canvasapi.canvas_object import CanvasObject
from canvasapi.util import combine_kwargs
class Enrollment(CanvasObject):
    def __str__(self):
        return f"{self.type} ({self.id})"

    def deactivate(self, task, **kwargs):
        """
        Delete, conclude, or deactivate an enrollment.

        The following tasks can be performed on an enrollment: conclude, \
        delete, inactivate, deactivate.

        :calls: `DELETE /api/v1/courses/:course_id/enrollments/:id \
        <https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.destroy>`_

        :param task: The task to perform on the enrollment.
        :type task: str

        :rtype: :class:`canvasapi.enrollment.Enrollment`
        """
        ALLOWED_TASKS = ["conclude", "delete", "inactivate", "deactivate"]

        # Validate up front so an invalid task never reaches the API.
        if task not in ALLOWED_TASKS:
            raise ValueError(
                "{} is not a valid task. Please use one of the following: {}".format(
                    task, ",".join(ALLOWED_TASKS)
                )
            )

        kwargs["task"] = task

        response = self._requester.request(
            "DELETE",
            f"courses/{self.course_id}/enrollments/{self.id}",
            _kwargs=combine_kwargs(**kwargs),
        )
        return Enrollment(self._requester, response.json())

    def reactivate(self, **kwargs):
        """
        Activate an inactive enrollment.

        :calls: `PUT /api/v1/courses/:course_id/enrollments/:id/reactivate \
        <https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.reactivate>`_

        :rtype: :class:`canvasapi.enrollment.Enrollment`
        """
        response = self._requester.request(
            "PUT",
            f"courses/{self.course_id}/enrollments/{self.id}/reactivate",
            _kwargs=combine_kwargs(**kwargs),
        )
        return Enrollment(self._requester, response.json())
| mit | 17931612081bf7d6a38583f4647f33a8 | 34.472727 | 101 | 0.597642 | 4.142251 | false | false | false | false |
ucfopen/canvasapi | canvasapi/assignment.py | 1 | 23017 | from canvasapi.canvas_object import CanvasObject
from canvasapi.exceptions import CanvasException, RequiredFieldMissing
from canvasapi.grade_change_log import GradeChangeEvent
from canvasapi.paginated_list import PaginatedList
from canvasapi.peer_review import PeerReview
from canvasapi.progress import Progress
from canvasapi.submission import Submission
from canvasapi.upload import FileOrPathLike, Uploader
from canvasapi.user import User, UserDisplay
from canvasapi.util import combine_kwargs, obj_or_id
class Assignment(CanvasObject):
def __init__(self, requester, attributes):
    """Initialize the assignment from an API payload.

    If the payload contains an ``overrides`` list, each raw override dict
    is wrapped in an :class:`AssignmentOverride` object.
    """
    super(Assignment, self).__init__(requester, attributes)

    if "overrides" in attributes:
        self.overrides = [
            AssignmentOverride(requester, override)
            for override in attributes["overrides"]
        ]
def __str__(self):
    """Human-readable representation: ``"<name> (<id>)"``."""
    return f"{self.name} ({self.id})"
def create_override(self, **kwargs):
    """
    Create an override for this assignment.

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/overrides \
    <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.create>`_

    :rtype: :class:`canvasapi.assignment.AssignmentOverride`
    """
    response = self._requester.request(
        "POST",
        f"courses/{self.course_id}/assignments/{self.id}/overrides",
        _kwargs=combine_kwargs(**kwargs),
    )
    # Carry the course id along so the override can build its own URLs.
    payload = response.json()
    payload.update(course_id=self.course_id)
    return AssignmentOverride(self._requester, payload)
def delete(self, **kwargs):
    """
    Delete this assignment.

    :calls: `DELETE /api/v1/courses/:course_id/assignments/:id \
    <https://canvas.instructure.com/doc/api/assignments.html#method.assignments.destroy>`_

    :rtype: :class:`canvasapi.assignment.Assignment`
    """
    response = self._requester.request(
        "DELETE",
        f"courses/{self.course_id}/assignments/{self.id}",
        _kwargs=combine_kwargs(**kwargs),
    )
    return Assignment(self._requester, response.json())
def edit(self, **kwargs):
    """
    Modify this assignment.

    Also refreshes this object's attributes in place when the response
    contains a full assignment payload.

    :calls: `PUT /api/v1/courses/:course_id/assignments/:id \
    <https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update>`_

    :rtype: :class:`canvasapi.assignment.Assignment`
    """
    response = self._requester.request(
        "PUT",
        "courses/{}/assignments/{}".format(self.course_id, self.id),
        _kwargs=combine_kwargs(**kwargs),
    )

    # The presence of "name" is used as a marker that the payload is a real
    # assignment (presumably absent on error responses — TODO confirm).
    if "name" in response.json():
        super(Assignment, self).set_attributes(response.json())

    return Assignment(self._requester, response.json())
def get_grade_change_events(self, **kwargs):
    """
    Return the grade change events recorded for this assignment.

    :calls: `/api/v1/audit/grade_change/assignments/:assignment_id \
    <https://canvas.instructure.com/doc/api/grade_change_log.html#method.grade_change_audit_api.for_assignment>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.grade_change_log.GradeChangeEvent`
    """
    return PaginatedList(
        GradeChangeEvent,
        self._requester,
        "GET",
        f"audit/grade_change/assignments/{self.id}",
        _root="events",
        _kwargs=combine_kwargs(**kwargs),
    )
def get_gradeable_students(self, **kwargs):
    """
    List the students eligible to submit this assignment.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/gradeable_students \
    <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.gradeable_students>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.user.UserDisplay`
    """
    return PaginatedList(
        UserDisplay,
        self._requester,
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/gradeable_students",
        {"course_id": self.course_id},
        _kwargs=combine_kwargs(**kwargs),
    )
def get_override(self, override, **kwargs):
    """
    Get a single assignment override with the given override id.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/overrides/:id \
    <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.show>`_

    :param override: The object or ID of the override to get
    :type override: :class:`canvasapi.assignment.AssignmentOverride` or int

    :rtype: :class:`canvasapi.assignment.AssignmentOverride`
    """
    override_id = obj_or_id(override, "override", (AssignmentOverride,))

    response = self._requester.request(
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/overrides/{override_id}",
        _kwargs=combine_kwargs(**kwargs),
    )
    # Carry the course id along so the override can build its own URLs.
    payload = response.json()
    payload.update(course_id=self.course_id)
    return AssignmentOverride(self._requester, payload)
def get_overrides(self, **kwargs):
    """
    Get a paginated list of overrides for this assignment that target
    sections/groups/students visible to the current user.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/overrides \
    <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.assignment.AssignmentOverride`
    """
    return PaginatedList(
        AssignmentOverride,
        self._requester,
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/overrides",
        {"course_id": self.course_id},
        _kwargs=combine_kwargs(**kwargs),
    )
def get_peer_reviews(self, **kwargs):
    """
    Get all Peer Reviews for this assignment.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/peer_reviews \
    <https://canvas.instructure.com/doc/api/peer_reviews.html#method.peer_reviews_api.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.peer_review.PeerReview`
    """
    return PaginatedList(
        PeerReview,
        self._requester,
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/peer_reviews",
        _kwargs=combine_kwargs(**kwargs),
    )
def get_provisional_grades_status(self, student_id, **kwargs):
    """
    Tell whether the student's submission needs one or more provisional grades.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/provisional_grades/
        status \
    <https://canvas.instructure.com/doc/api/all_resources.html#method.provisional_grades.status>`_

    :param student_id: The object or ID of the related student
    :type student_id: :class:`canvasapi.user.User` or int

    :rtype: bool
    """
    kwargs["student_id"] = obj_or_id(student_id, "student_id", (User,))

    # Named "response" (the local used to be "request") for consistency
    # with the rest of this module.
    response = self._requester.request(
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/provisional_grades/status",
        _kwargs=combine_kwargs(**kwargs),
    )
    return response.json().get("needs_provisional_grade")
def get_students_selected_for_moderation(self, **kwargs):
    """
    Get the students selected for moderation on this assignment.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/moderated_students \
    <https://canvas.instructure.com/doc/api/moderated_grading.html#method.moderation_set.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.user.User`
    """
    return PaginatedList(
        User,
        self._requester,
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/moderated_students",
        _kwargs=combine_kwargs(**kwargs),
    )
def get_submission(self, user, **kwargs):
    """
    Get a single submission for this assignment, based on user id.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id \
    <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.show>`_

    :param user: The object or ID of the related user
    :type user: :class:`canvasapi.user.User` or int

    :rtype: :class:`canvasapi.submission.Submission`
    """
    user_id = obj_or_id(user, "user", (User,))

    response = self._requester.request(
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/submissions/{user_id}",
        _kwargs=combine_kwargs(**kwargs),
    )
    # Carry the course id along so the submission can build its own URLs.
    payload = response.json()
    payload.update(course_id=self.course_id)
    return Submission(self._requester, payload)
def get_submissions(self, **kwargs):
    """
    Get all existing submissions for this assignment.

    :calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions \
    <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.submission.Submission`
    """
    return PaginatedList(
        Submission,
        self._requester,
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}/submissions",
        {"course_id": self.course_id},
        _kwargs=combine_kwargs(**kwargs),
    )
def publish_provisional_grades(self, **kwargs):
    """
    Publish the selected provisional grade for all submissions to an assignment.

    Use the “Select provisional grade” endpoint to choose which provisional grade to publish
    for a particular submission.

    Students not in the moderation set will have their one
    and only provisional grade published.

    WARNING: This is irreversible. This will overwrite existing grades in the gradebook.

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/provisional_grades
        /publish \
    <https://canvas.instructure.com/doc/api/all_resources.html#method.provisional_grades.publish>`_

    :rtype: dict
    """
    response = self._requester.request(
        "POST",
        f"courses/{self.course_id}/assignments/{self.id}/provisional_grades/publish",
        _kwargs=combine_kwargs(**kwargs),
    )
    return response.json()
def select_students_for_moderation(self, **kwargs):
    """
    Select student(s) for moderation on this assignment.

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/moderated_students \
    <https://canvas.instructure.com/doc/api/moderated_grading.html#method.moderation_set.create>`_

    :returns: The list of users that were selected
    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.user.User`
    """
    return PaginatedList(
        User,
        self._requester,
        "POST",
        f"courses/{self.course_id}/assignments/{self.id}/moderated_students",
        _kwargs=combine_kwargs(**kwargs),
    )
def selected_provisional_grade(self, provisional_grade_id, **kwargs):
    """
    Choose which provisional grade the student should receive for a submission.

    The caller must be the final grader for the assignment
    or an admin with :select_final_grade rights.

    :calls: `PUT /api/v1/courses/:course_id/assignments/:assignment_id/provisional_grades/
        :provisonal_grade_id/select \
    <https://canvas.instructure.com/doc/api/all_resources.html#method.provisional_grades.select>`_

    :param provisional_grade_id: ID of the provisional grade
    :type provisional_grade_id: int

    :rtype: dict
    """
    response = self._requester.request(
        "PUT",
        f"courses/{self.course_id}/assignments/{self.id}"
        f"/provisional_grades/{provisional_grade_id}/select",
        _kwargs=combine_kwargs(**kwargs),
    )
    return response.json()
def set_extensions(self, assignment_extensions, **kwargs):
    """
    Set extensions for student assignment submissions

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/extensions \
    <https://canvas.instructure.com/doc/api/assignment_extensions.html#method.assignment_extensions.create>`_

    :param assignment_extensions: list of dictionaries representing extensions
    :type assignment_extensions: list
    :rtype: list of :class:`canvasapi.assignment.AssignmentExtension`

    :raises ValueError: If `assignment_extensions` is empty, not a list, or
        contains non-dict entries.
    :raises RequiredFieldMissing: If any extension dict lacks `user_id`.

    Example Usage:

    >>> assignment.set_extensions([
    ...     {
    ...         'user_id': 3,
    ...         'extra_attempts': 2
    ...     },
    ...     {
    ...         'user_id': 2,
    ...         'extra_attempts': 2
    ...     }
    ... ])
    """
    # Validate the payload shape before touching the network.
    if not isinstance(assignment_extensions, list) or not assignment_extensions:
        raise ValueError("Param `assignment_extensions` must be a non-empty list.")

    if any(not isinstance(extension, dict) for extension in assignment_extensions):
        raise ValueError(
            "Param `assignment_extensions` must only contain dictionaries"
        )

    if any("user_id" not in extension for extension in assignment_extensions):
        raise RequiredFieldMissing(
            "Dictionaries in `assignment_extensions` must contain key `user_id`"
        )

    kwargs["assignment_extensions"] = assignment_extensions
    response = self._requester.request(
        "POST",
        "courses/{}/assignments/{}/extensions".format(self.course_id, self.id),
        _kwargs=combine_kwargs(**kwargs),
    )
    # The API wraps the created extensions in an "assignment_extensions" key.
    extension_list = response.json()["assignment_extensions"]
    return [
        AssignmentExtension(self._requester, extension)
        for extension in extension_list
    ]
def show_provisonal_grades_for_student(self, anonymous_id, **kwargs):
    """
    Tell whether an anonymous student's submission needs a provisional grade.

    NOTE: the method name's "provisonal" misspelling is part of the public
    API and must not be changed.

    :call: `GET /api/v1/courses/:course_id/assignments/:assignment_id/
        anonymous_provisional_grades/status \
    <https://canvas.instructure.com/doc/api/all_resources.html#method.anonymous_provisional_grades.status>`_

    :param anonymous_id: The ID of the student to show the status for
    :type anonymous_id: :class:`canvasapi.user.User` or int

    :rtype: dict
    """
    kwargs["anonymous_id"] = obj_or_id(anonymous_id, "anonymous_id", (User,))

    response = self._requester.request(
        "GET",
        f"courses/{self.course_id}/assignments/{self.id}"
        f"/anonymous_provisional_grades/status",
        _kwargs=combine_kwargs(**kwargs),
    )
    return response.json().get("needs_provisional_grade")
def submissions_bulk_update(self, **kwargs):
    """
    Update the grading and comments on multiple student's assignment
    submissions in an asynchronous job.

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/ \
        submissions/update_grades \
    <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.bulk_update>`_

    :rtype: :class:`canvasapi.progress.Progress`
    """
    response = self._requester.request(
        "POST",
        f"courses/{self.course_id}/assignments/{self.id}/submissions/update_grades",
        _kwargs=combine_kwargs(**kwargs),
    )
    return Progress(self._requester, response.json())
def submit(self, submission, file=None, **kwargs):
    """
    Makes a submission for an assignment.

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/submissions \
    <https://canvas.instructure.com/doc/api/submissions.html#method.submissions.create>`_

    :param submission: The attributes of the submission.
    :type submission: dict
    :param file: A file to upload with the submission. (Optional,
        defaults to `None`. Submission type must be `online_upload`)
    :type file: file or str

    :rtype: :class:`canvasapi.submission.Submission`

    :raises RequiredFieldMissing: If `submission` has no `submission_type`.
    :raises ValueError: If a file is given but the submission type is not
        `online_upload`.
    :raises CanvasException: If the file upload step fails.
    """
    if isinstance(submission, dict) and "submission_type" in submission:
        kwargs["submission"] = submission
    else:
        raise RequiredFieldMissing(
            "Dictionary with key 'submission_type' is required."
        )

    if file:
        if submission.get("submission_type") != "online_upload":
            raise ValueError(
                "To upload a file, `submission['submission_type']` must be `online_upload`."
            )

        # Upload first; only submit if the upload succeeded, attaching the
        # uploaded file's id to the submission payload.
        upload_response = self.upload_to_submission(file, **kwargs)
        if upload_response[0]:
            kwargs["submission"]["file_ids"] = [upload_response[1]["id"]]
        else:
            raise CanvasException("File upload failed. Not submitting.")

    response = self._requester.request(
        "POST",
        "courses/{}/assignments/{}/submissions".format(self.course_id, self.id),
        _kwargs=combine_kwargs(**kwargs),
    )
    response_json = response.json()
    # Carry the course id along so the submission can build its own URLs.
    response_json.update(course_id=self.course_id)

    return Submission(self._requester, response_json)
def upload_to_submission(self, file: FileOrPathLike, user="self", **kwargs):
    """
    Upload a file to a submission.

    :calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/ \
        submissions/:user_id/files \
    <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.create_file>`_

    :param file: The file or path of the file to upload.
    :type file: FileLike
    :param user: The object or ID of the related user, or 'self' for the
        current user. Defaults to 'self'.
    :type user: :class:`canvasapi.user.User`, int, or str

    :returns: True if the file uploaded successfully, False otherwise, \
        and the JSON response from the API.
    :rtype: tuple
    """
    user_id = obj_or_id(user, "user", (User,))
    endpoint = (
        f"courses/{self.course_id}/assignments/{self.id}"
        f"/submissions/{user_id}/files"
    )
    return Uploader(self._requester, endpoint, file, **kwargs).start()
class AssignmentExtension(CanvasObject):
    def __str__(self):
        return f"{self.assignment_id} ({self.user_id})"
class AssignmentGroup(CanvasObject):
    def __str__(self):
        return f"{self.name} ({self.id})"

    def delete(self, **kwargs):
        """
        Delete this assignment group.

        :calls: `DELETE /api/v1/courses/:course_id/assignment_groups/:assignment_group_id \
        <https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.destroy>`_

        :rtype: :class:`canvasapi.assignment.AssignmentGroup`
        """
        response = self._requester.request(
            "DELETE",
            f"courses/{self.course_id}/assignment_groups/{self.id}",
            _kwargs=combine_kwargs(**kwargs),
        )
        return AssignmentGroup(self._requester, response.json())

    def edit(self, **kwargs):
        """
        Modify this assignment group; also refreshes this object's
        attributes in place when the response contains a full payload.

        :calls: `PUT /api/v1/courses/:course_id/assignment_groups/:assignment_group_id \
        <https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.update>`_

        :rtype: :class:`canvasapi.assignment.AssignmentGroup`
        """
        response = self._requester.request(
            "PUT",
            f"courses/{self.course_id}/assignment_groups/{self.id}",
            _kwargs=combine_kwargs(**kwargs),
        )

        if "name" in response.json():
            super(AssignmentGroup, self).set_attributes(response.json())

        return AssignmentGroup(self._requester, response.json())
class AssignmentOverride(CanvasObject):
    def __str__(self):
        return f"{self.title} ({self.id})"

    def delete(self, **kwargs):
        """
        Delete this assignment override.

        :calls: `DELETE /api/v1/courses/:course_id/assignments/:assignment_id/overrides/:id
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.destroy>`_

        :returns: The previous content of the now-deleted assignment override.
        :rtype: :class:`canvasapi.assignment.AssignmentGroup`
        """
        response = self._requester.request(
            "DELETE",
            f"courses/{self.course_id}/assignments/{self.assignment_id}"
            f"/overrides/{self.id}",
            _kwargs=combine_kwargs(**kwargs),
        )
        # Carry the course id along so the returned override can build URLs.
        payload = response.json()
        payload.update(course_id=self.course_id)
        return AssignmentOverride(self._requester, payload)

    def edit(self, **kwargs):
        """
        Update this assignment override.

        Note: All current overridden values must be supplied if they are to
        be retained.

        :calls: `PUT /api/v1/courses/:course_id/assignments/:assignment_id/overrides/:id
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.update>`_

        :rtype: :class:`canvasapi.assignment.AssignmentOverride`
        """
        response = self._requester.request(
            "PUT",
            f"courses/{self.course_id}/assignments/{self.assignment_id}"
            f"/overrides/{self.id}",
            _kwargs=combine_kwargs(**kwargs),
        )
        payload = response.json()
        payload.update(course_id=self.course_id)

        # Only refresh this object in place when the payload looks like a
        # full override (has "title").
        if "title" in payload:
            super(AssignmentOverride, self).set_attributes(payload)

        return self
| mit | d1d047cfd3ba1ab71795407d27f9caed | 36.788177 | 117 | 0.605484 | 4.235002 | false | false | false | false |
ucfopen/canvasapi | tests/test_sis_import.py | 1 | 1659 | import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.progress import Progress
from canvasapi.sis_import import SisImport
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestSisImportGroup(unittest.TestCase):
    # The class-level Mocker decorator injects the mock adapter `m` into
    # every test method; setUp uses its own short-lived mocker to build the
    # fixture objects.
    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        with requests_mock.Mocker() as m:
            requires = {
                "account": ["get_by_id", "get_role"],
                "sis_import": ["get_by_id"],
            }
            register_uris(requires, m)

            self.account = self.canvas.get_account(1)
            self.sis_import = self.account.get_sis_import(2)

    # abort()
    def test_abort_sis_import(self, m):
        register_uris({"sis_import": ["abort_sis_import"]}, m)

        aborted_sis_import = self.sis_import.abort()

        self.assertIsInstance(aborted_sis_import, SisImport)

        # NOTE(review): when progress >= 100 this assertion is vacuously
        # True — presumably intentional (a completed import cannot be
        # aborted), but worth confirming.
        self.assertTrue(
            aborted_sis_import.workflow_state == "aborted"
            if aborted_sis_import.progress < 100
            else True
        )

    # restore_states()
    def test_restore_states(self, m):
        register_uris({"sis_import": ["restore_sis_import_states"]}, m)

        restore_state_progress = self.sis_import.restore_states()

        self.assertIsInstance(restore_state_progress, Progress)
        self.assertEqual(restore_state_progress.context_id, self.sis_import.id)
        self.assertEqual(restore_state_progress.context_type, "SisBatch")
        self.assertEqual(restore_state_progress.tag, "sis_batch_state_restore")
| mit | c29e4c5beeecb1d8c12bf2543485db6f | 31.18 | 79 | 0.625075 | 3.876168 | false | true | false | false |
ucfopen/canvasapi | canvasapi/quiz.py | 1 | 32609 | from canvasapi.canvas_object import CanvasObject
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.paginated_list import PaginatedList
from canvasapi.quiz_group import QuizGroup
from canvasapi.submission import Submission
from canvasapi.user import User
from canvasapi.util import combine_kwargs, obj_or_id
class Quiz(CanvasObject):
    def __str__(self):
        # Render as "<title> (<id>)".
        return "{} ({})".format(self.title, self.id)
def broadcast_message(self, conversations, **kwargs):
"""
Send a message to unsubmitted or submitted users for the quiz.
:calls: `POST /api/v1/courses/:course_id/quizzes/:id/submission_users/message \
<https://canvas.instructure.com/doc/api/quiz_submission_user_list.html#method.quizzes/quiz_submission_users.message>`_
:param conversations: A dictionary representing a Conversation.
Requires `'body'`, `'recipients'`, and `'subject'` keys.
:type conversations: dict
:returns: True if the message was created, False otherwize
:rtype: bool
"""
required_key_list = ["body", "recipients", "subject"]
required_keys_present = all((x in conversations for x in required_key_list))
if isinstance(conversations, dict) and required_keys_present:
kwargs["conversations"] = conversations
else:
raise RequiredFieldMissing(
(
"conversations must be a dictionary with keys "
"'body', 'recipients', and 'subject'."
)
)
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/submission_users/message".format(
self.course_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
return response.status_code == 201
def create_question(self, **kwargs):
"""
Create a new quiz question for this quiz.
:calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/questions \
<https://canvas.instructure.com/doc/api/quiz_questions.html#method.quizzes/quiz_questions.create>`_
:rtype: :class:`canvasapi.quiz.QuizQuestion`
"""
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/questions".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.course_id})
return QuizQuestion(self._requester, response_json)
def create_question_group(self, quiz_groups, **kwargs):
"""
Create a new question group for the given quiz id
:calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/groups \
<https://canvas.instructure.com/doc/api/quiz_question_groups.html#method.quizzes/quiz_groups.create>`_
:param quiz_groups: The name, pick count, question points,
and/or assessment question bank id.
All of these parameters are optional, but at least one must exist
(even if empty) to receive a response.
The request expects a list, but will only create 1 question group per request.
:type quiz_groups: list[dict]
:returns: `QuizGroup` object
:rtype: :class:`canvasapi.quiz_group.QuizGroup`
"""
if not isinstance(quiz_groups, list) or not quiz_groups:
raise ValueError("Param `quiz_groups` must be a non-empty list.")
if not isinstance(quiz_groups[0], dict):
raise ValueError("Param `quiz_groups must contain a dictionary")
param_list = [
"name",
"pick_count",
"question_points",
"assessment_question_bank_id",
]
if not any(param in quiz_groups[0] for param in param_list):
raise RequiredFieldMissing("quiz_groups must contain at least 1 parameter.")
kwargs["quiz_groups"] = quiz_groups
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/groups".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json["quiz_groups"][0].update({"course_id": self.id})
return QuizGroup(self._requester, response_json.get("quiz_groups")[0])
def create_report(self, report_type, **kwargs):
"""
Create and return a new report for this quiz. If a previously generated report
matches the arguments and is still current (i.e. there have been no new submissions),
it will be returned.
:calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/reports \
<https://canvas.instructure.com/doc/api/quiz_reports.html#method.quizzes/quiz_reports.create>`_
:param report_type: The type of report, either student_analysis or item_analysis
:type report_type: str
:returns: `QuizReport` object
:rtype: :class:`canvasapi.quiz.QuizReport`
"""
if report_type not in ["student_analysis", "item_analysis"]:
raise ValueError(
"Param `report_type` must be a either 'student_analysis' or 'item_analysis'"
)
kwargs["quiz_report"] = {"report_type": report_type}
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/reports".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.course_id})
return QuizReport(self._requester, response_json)
def create_submission(self, **kwargs):
"""
Start taking a Quiz by creating a QuizSubmission can be used to answer
questions and submit answers.
:calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/submissions \
<https://canvas.instructure.com/doc/api/quiz_submissions.html#method.quizzes/quiz_submissions_api.create>`_
:rtype: :class:`canvasapi.quiz.QuizSubmission`
"""
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/submissions".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()["quiz_submissions"][0]
response_json.update({"course_id": self.course_id})
return QuizSubmission(self._requester, response_json)
def delete(self, **kwargs):
"""
Delete this quiz.
:calls: `DELETE /api/v1/courses/:course_id/quizzes/:id \
<https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.destroy>`_
:rtype: :class:`canvasapi.quiz.Quiz`
"""
response = self._requester.request(
"DELETE",
"courses/{}/quizzes/{}".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
quiz_json = response.json()
quiz_json.update({"course_id": self.course_id})
return Quiz(self._requester, quiz_json)
def edit(self, **kwargs):
"""
Modify this quiz.
:calls: `PUT /api/v1/courses/:course_id/quizzes/:id \
<https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.update>`_
:returns: The updated quiz.
:rtype: :class:`canvasapi.quiz.Quiz`
"""
response = self._requester.request(
"PUT",
"courses/{}/quizzes/{}".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
quiz_json = response.json()
quiz_json.update({"course_id": self.course_id})
return Quiz(self._requester, quiz_json)
    def get_all_quiz_reports(self, **kwargs):
        """
        Get a list of all quiz reports for this quiz
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/reports \
        <https://canvas.instructure.com/doc/api/quiz_reports.html#method.quizzes/quiz_reports.index>`_
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.quiz.QuizReport`
        """
        # Pagination is delegated to PaginatedList.
        return PaginatedList(
            QuizReport,
            self._requester,
            "GET",
            "courses/{}/quizzes/{}/reports".format(self.course_id, self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
    def get_question(self, question, **kwargs):
        """
        Get a single quiz question by ID.
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/questions/:id \
        <https://canvas.instructure.com/doc/api/quiz_questions.html#method.quizzes/quiz_questions.show>`_
        :param question: The object or ID of the quiz question to retrieve.
        :type question: int, str or :class:`canvasapi.quiz.QuizQuestion`
        :rtype: :class:`canvasapi.quiz.QuizQuestion`
        """
        # Accept either a QuizQuestion object or a bare id.
        question_id = obj_or_id(question, "question", (QuizQuestion,))
        response = self._requester.request(
            "GET",
            "courses/{}/quizzes/{}/questions/{}".format(
                self.course_id, self.id, question_id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()
        response_json.update({"course_id": self.course_id})
        return QuizQuestion(self._requester, response_json)
    def get_questions(self, **kwargs):
        """
        List all questions for a quiz.
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/questions \
        <https://canvas.instructure.com/doc/api/quiz_questions.html#method.quizzes/quiz_questions.index>`_
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.quiz.QuizQuestion`
        """
        return PaginatedList(
            QuizQuestion,
            self._requester,
            "GET",
            "courses/{}/quizzes/{}/questions".format(self.course_id, self.id),
            # Extra attributes merged into every yielded QuizQuestion.
            {"course_id": self.course_id},
            _kwargs=combine_kwargs(**kwargs),
        )
def get_quiz_group(self, id, **kwargs):
"""
Get details of the quiz group with the given id
:calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/groups/:id \
<https://canvas.instructure.com/doc/api/quiz_question_groups.html#method.quizzes/quiz_groups.show>`_
:param id: The ID of the question group.
:type id: int
:returns: `QuizGroup` object
:rtype: :class:`canvasapi.quiz_group.QuizGroup`
"""
response = self._requester.request(
"GET",
"courses/{}/quizzes/{}/groups/{}".format(self.course_id, self.id, id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.course_id})
return QuizGroup(self._requester, response_json)
def get_quiz_report(self, id, **kwargs):
"""
Returns the data for a single quiz report.
:calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/reports/:id \
<https://canvas.instructure.com/doc/api/quiz_reports.html#method.quizzes/quiz_reports.show>`_
:param id: The ID of the quiz report you want to retrieve, or the report object
:type id: int or :class:`canvasapi.quiz.QuizReport`
:returns: `QuizReport` object
:rtype: :class:`canvasapi.quiz.QuizReport`
"""
id = obj_or_id(id, "id", (QuizReport,))
response = self._requester.request(
"GET",
"courses/{}/quizzes/{}/reports/{}".format(self.course_id, self.id, id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.course_id})
return QuizReport(self._requester, response_json)
    def get_quiz_submission(self, quiz_submission, **kwargs):
        """
        Get a single quiz submission.
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/submissions/:id \
        <https://canvas.instructure.com/doc/api/quiz_submissions.html#method.quizzes/quiz_submissions_api.show>`_
        :param quiz_submission: The object or ID of the quiz submission to retrieve.
        :type quiz_submission: int, string, :class:`canvasapi.quiz.QuizSubmission`
        :rtype: :class:`canvasapi.quiz.QuizSubmission`
        """
        quiz_submission_id = obj_or_id(
            quiz_submission, "quiz_submission", (QuizSubmission,)
        )
        response = self._requester.request(
            "GET",
            "courses/{}/quizzes/{}/submissions/{}".format(
                self.course_id, self.id, quiz_submission_id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()["quiz_submissions"][0]
        response_json.update({"course_id": self.course_id})
        # The payload may carry companion "quizzes"/"submissions"/"users"
        # arrays (presumably when sideloading is requested — e.g. include[]);
        # wrap the first element of each into a typed attribute.
        if len(response.json().get("quizzes", [])) > 0:
            response_json.update(
                {"quiz": Quiz(self._requester, response.json()["quizzes"][0])}
            )
        if len(response.json().get("submissions", [])) > 0:
            response_json.update(
                {
                    "submission": Submission(
                        self._requester, response.json()["submissions"][0]
                    )
                }
            )
        if len(response.json().get("users", [])) > 0:
            response_json.update(
                {"user": User(self._requester, response.json()["users"][0])}
            )
        return QuizSubmission(self._requester, response_json)
    def get_statistics(self, **kwargs):
        """
        Get statistics for all quiz versions, or the latest quiz version.
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/statistics \
        <https://canvas.instructure.com/doc/api/quiz_statistics.html#method.quizzes/quiz_statistics.index>`_
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.quiz.QuizStatistic`
        """
        return PaginatedList(
            QuizStatistic,
            self._requester,
            "GET",
            "courses/{}/quizzes/{}/statistics".format(self.course_id, self.id),
            {"course_id": self.course_id},
            # _root: key of the response payload that holds the records.
            _root="quiz_statistics",
            _kwargs=combine_kwargs(**kwargs),
        )
    def get_submissions(self, **kwargs):
        """
        Get a list of all submissions for this quiz.
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/submissions \
        <https://canvas.instructure.com/doc/api/quiz_submissions.html#method.quizzes/quiz_submissions_api.index>`_
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.quiz.QuizSubmission`
        """
        return PaginatedList(
            QuizSubmission,
            self._requester,
            "GET",
            "courses/{}/quizzes/{}/submissions".format(self.course_id, self.id),
            {"course_id": self.course_id},
            # _root: key of the response payload that holds the records.
            _root="quiz_submissions",
            _kwargs=combine_kwargs(**kwargs),
        )
def set_extensions(self, quiz_extensions, **kwargs):
"""
Set extensions for student quiz submissions.
:calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/extensions
<https://canvas.instructure.com/doc/api/quiz_extensions.html#method.quizzes/quiz_extensions.create>`_
:param quiz_extensions: List of dictionaries representing extensions.
:type quiz_extensions: list
:rtype: list of :class:`canvasapi.quiz.QuizExtension`
Example Usage:
>>> quiz.set_extensions([
... {
... 'user_id': 1,
... 'extra_time': 60,
... 'extra_attempts': 1
... },
... {
... 'user_id': 2,
... 'extra_attempts': 3
... },
... {
... 'user_id': 3,
... 'extra_time': 20
... }
... ])
"""
if not isinstance(quiz_extensions, list) or not quiz_extensions:
raise ValueError("Param `quiz_extensions` must be a non-empty list.")
if any(not isinstance(extension, dict) for extension in quiz_extensions):
raise ValueError("Param `quiz_extensions` must only contain dictionaries")
if any("user_id" not in extension for extension in quiz_extensions):
raise RequiredFieldMissing(
"Dictionaries in `quiz_extensions` must contain key `user_id`"
)
kwargs["quiz_extensions"] = quiz_extensions
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/extensions".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
extension_list = response.json()["quiz_extensions"]
return [
QuizExtension(self._requester, extension) for extension in extension_list
]
class QuizStatistic(CanvasObject):
    """A single statistics record for a quiz (see :func:`Quiz.get_statistics`)."""
    def __str__(self):
        return "Quiz Statistic {}".format(self.id)
class QuizSubmission(CanvasObject):
    def __str__(self):
        # Render as "Quiz <quiz_id> - User <user_id> (<submission id>)".
        return "Quiz {} - User {} ({})".format(self.quiz_id, self.user_id, self.id)
    def answer_submission_questions(self, validation_token=None, **kwargs):
        """
        Provide or update an answer to one or more quiz questions.
        :calls: `POST /api/v1/quiz_submissions/:quiz_submission_id/questions \
        <https://canvas.instructure.com/doc/api/quiz_submission_questions.html#method.quizzes/quiz_submission_questions.answer>`_
        :param validation_token: (Optional) The unique validation token for this quiz submission.
            If one is not provided, canvasapi will attempt to use `self.validation_token`.
        :type validation_token: str
        :returns: A list of quiz submission questions.
        :rtype: list of :class:`canvasapi.quiz.QuizSubmissionQuestion`
        :raises RequiredFieldMissing: if no token is passed and none is set on self.
        """
        try:
            # Explicit argument wins; fall back to the attribute set at creation.
            kwargs["validation_token"] = validation_token or self.validation_token
        except AttributeError:
            raise RequiredFieldMissing(
                "`validation_token` not set on this QuizSubmission, must be passed"
                " as a function argument."
            )
        # Only the latest attempt for a quiz submission can be updated, and Canvas
        # automatically returns the latest attempt with every quiz submission response,
        # so we can just use that.
        kwargs["attempt"] = self.attempt
        response = self._requester.request(
            "POST",
            "quiz_submissions/{}/questions".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        questions = list()
        # Thread submission id / token / attempt through each returned record so
        # the QuizSubmissionQuestion objects can issue follow-up requests.
        for question in response.json().get("quiz_submission_questions", []):
            question.update(
                {
                    "quiz_submission_id": self.id,
                    "validation_token": kwargs["validation_token"],
                    "attempt": self.attempt,
                }
            )
            questions.append(QuizSubmissionQuestion(self._requester, question))
        return questions
    def complete(self, validation_token=None, **kwargs):
        """
        Complete the quiz submission by marking it as complete and grading it. When the quiz
        submission has been marked as complete, no further modifications will be allowed.
        :calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/submissions/:id/complete \
        <https://canvas.instructure.com/doc/api/quiz_submissions.html#method.quizzes/quiz_submissions_api.complete>`_
        :param validation_token: (Optional) The unique validation token for this quiz submission.
            If one is not provided, canvasapi will attempt to use `self.validation_token`.
        :type validation_token: str
        :rtype: :class:`canvasapi.quiz.QuizSubmission`
        :raises RequiredFieldMissing: if no token is passed and none is set on self.
        """
        try:
            # Explicit argument wins; fall back to the attribute set at creation.
            kwargs["validation_token"] = validation_token or self.validation_token
        except AttributeError:
            raise RequiredFieldMissing(
                "`validation_token` not set on this QuizSubmission, must be passed"
                " as a function argument."
            )
        # Only the latest attempt for a quiz submission can be updated, and Canvas
        # automatically returns the latest attempt with every quiz submission response,
        # so we can just use that.
        kwargs["attempt"] = self.attempt
        response = self._requester.request(
            "POST",
            "courses/{}/quizzes/{}/submissions/{}/complete".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()["quiz_submissions"][0]
        return QuizSubmission(self._requester, response_json)
    def get_submission_events(self, **kwargs):
        """
        Retrieve the set of events captured during a specific submission attempt.
        :calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/submissions/:id/events \
        <https://canvas.instructure.com/doc/api/quiz_submission_events.html#method.quizzes/quiz_submission_events_api.index>`_
        :returns: PaginatedList of QuizSubmissionEvents.
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.quiz.QuizSubmissionEvent`
        """
        return PaginatedList(
            QuizSubmissionEvent,
            self._requester,
            "GET",
            "courses/{}/quizzes/{}/submissions/{}/events".format(
                self.course_id, self.quiz_id, self.id
            ),
            # _root: key of the response payload that holds the records.
            _root="quiz_submission_events",
            _kwargs=combine_kwargs(**kwargs),
        )
def get_submission_questions(self, **kwargs):
"""
Get a list of all the question records for this quiz submission.
:calls: `GET /api/v1/quiz_submissions/:quiz_submission_id/questions \
<https://canvas.instructure.com/doc/api/quiz_submission_questions.html#method.quizzes/quiz_submission_questions.index>`_
:returns: A list of quiz submission questions.
:rtype: list of :class:`canvasapi.quiz.QuizSubmissionQuestion`
"""
response = self._requester.request(
"GET",
"quiz_submissions/{}/questions".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
questions = list()
for question in response.json().get("quiz_submission_questions", []):
question.update({"quiz_submission_id": self.id, "attempt": self.attempt})
questions.append(QuizSubmissionQuestion(self._requester, question))
return questions
def get_times(self, **kwargs):
"""
Get the current timing data for the quiz attempt, both the end_at timestamp and the
time_left parameter.
:calls: `GET /api/v1/courses/:course_id/quizzes/:quiz_id/submissions/:id/time \
<https://canvas.instructure.com/doc/api/quiz_submissions.html#method.quizzes/quiz_submissions_api.time>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/quizzes/{}/submissions/{}/time".format(
self.course_id, self.quiz_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def submit_events(self, quiz_submission_events, **kwargs):
"""
Store a set of events which were captured during a quiz taking session.
:calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/submissions/:id/events \
<https://canvas.instructure.com/doc/api/quiz_submission_events.html#method.quizzes/quiz_submission_events_api.create>`_
:param quiz_submission_events: The submission events to be recorded.
:type quiz_submission_events: list
:returns: True if the submission was successful, false otherwise.
:rtype: bool
"""
if isinstance(quiz_submission_events, list) and isinstance(
quiz_submission_events[0], QuizSubmissionEvent
):
kwargs["quiz_submission_events"] = quiz_submission_events
else:
raise RequiredFieldMissing(
"Required parameter quiz_submission_events missing."
)
response = self._requester.request(
"POST",
"courses/{}/quizzes/{}/submissions/{}/events".format(
self.course_id, self.quiz_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
return response.status_code == 204
def update_score_and_comments(self, **kwargs):
"""
Update the amount of points a student has scored for questions they've answered, provide
comments for the student about their answer(s), or simply fudge the total score by a
specific amount of points.
:calls: `PUT /api/v1/courses/:course_id/quizzes/:quiz_id/submissions/:id \
<https://canvas.instructure.com/doc/api/quiz_submissions.html#method.quizzes/quiz_submissions_api.update>`_
:returns: The updated quiz.
:rtype: :class:`canvasapi.quiz.QuizSubmission`
"""
response = self._requester.request(
"PUT",
"courses/{}/quizzes/{}/submissions/{}".format(
self.course_id, self.quiz_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()["quiz_submissions"][0]
response_json.update({"course_id": self.course_id})
return QuizSubmission(self._requester, response_json)
class QuizExtension(CanvasObject):
    """An extension (extra time/attempts) granted to one user on a quiz."""
    def __str__(self):
        # Render as "<quiz_id>-<user_id>".
        return "{}-{}".format(self.quiz_id, self.user_id)
class QuizQuestion(CanvasObject):
    """A single question attached to a quiz."""
    def __str__(self):
        return "{} ({})".format(self.question_name, self.id)
    def delete(self, **kwargs):
        """
        Delete an existing quiz question.
        :calls: `DELETE /api/v1/courses/:course_id/quizzes/:quiz_id/questions/:id \
        <https://canvas.instructure.com/doc/api/quiz_questions.html#method.quizzes/quiz_questions.destroy>`_
        :returns: True if question was successfully deleted; False otherwise.
        :rtype: bool
        """
        response = self._requester.request(
            "DELETE",
            "courses/{}/quizzes/{}/questions/{}".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        # 204 No Content signals a successful deletion.
        return response.status_code == 204
    def edit(self, **kwargs):
        """
        Update an existing quiz question.
        :calls: `PUT /api/v1/courses/:course_id/quizzes/:quiz_id/questions/:id \
        <https://canvas.instructure.com/doc/api/quiz_questions.html#method.quizzes/quiz_questions.update>`_
        :rtype: :class:`canvasapi.quiz.QuizQuestion`
        """
        response = self._requester.request(
            "PUT",
            "courses/{}/quizzes/{}/questions/{}".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()
        response_json.update({"course_id": self.course_id})
        # Refresh this object's attributes in place; return self for chaining.
        super(QuizQuestion, self).set_attributes(response_json)
        return self
class QuizReport(CanvasObject):
    """A generated (or requested) report for a quiz."""
    def __str__(self):
        return "{} ({})".format(self.report_type, self.id)
    def abort_or_delete(self, **kwargs):
        """
        This API allows you to cancel a previous request you issued for a report to be generated.
        Or in the case of an already generated report, you'd like to remove it, perhaps to generate
        it another time with an updated version that provides new features.
        :calls: `DELETE /api/v1/courses/:course_id/quizzes/:quiz_id/reports/:id \
        <https://canvas.instructure.com/doc/api/quiz_reports.html#method.quizzes/quiz_reports.abort>`_
        :returns: True if attempt was successful; False otherwise
        :rtype: bool
        """
        response = self._requester.request(
            "DELETE",
            "courses/{}/quizzes/{}/reports/{}".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        # 204 No Content signals the abort/delete succeeded.
        return response.status_code == 204
class QuizSubmissionEvent(CanvasObject):
    """A single event captured during a quiz-taking session."""
    def __str__(self):
        return "{}".format(self.event_type)
class QuizSubmissionQuestion(CanvasObject):
    def __str__(self):
        # Render as "QuizSubmissionQuestion #<id>".
        return "QuizSubmissionQuestion #{}".format(self.id)
    def flag(self, validation_token=None, **kwargs):
        """
        Set a flag on a quiz question to indicate that it should be returned to later.
        :calls: `PUT /api/v1/quiz_submissions/:quiz_submission_id/questions/:id/flag \
        <https://canvas.instructure.com/doc/api/quiz_submission_questions.html#method.quizzes/quiz_submission_questions.flag>`_
        :param validation_token: (Optional) The unique validation token for the quiz submission.
            If one is not provided, canvasapi will attempt to use `self.validation_token`.
        :type validation_token: str
        :returns: True if the question was successfully flagged, False otherwise.
        :rtype: bool
        :raises RequiredFieldMissing: if no token is passed and none is set on self.
        """
        try:
            # Explicit argument wins; fall back to the attribute set at creation.
            kwargs["validation_token"] = validation_token or self.validation_token
        except AttributeError:
            raise RequiredFieldMissing(
                "`validation_token` not set on this QuizSubmissionQuestion, must be passed"
                " as a function argument."
            )
        # Only the latest attempt for a quiz submission can be updated, and Canvas
        # automatically returns the latest attempt with every quiz submission response,
        # so we can just use that.
        kwargs["attempt"] = self.attempt
        response = self._requester.request(
            "PUT",
            "quiz_submissions/{}/questions/{}/flag".format(
                self.quiz_submission_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        question = response.json()["quiz_submission_questions"][0]
        question.update(
            {
                "validation_token": kwargs["validation_token"],
                "quiz_submission_id": self.quiz_submission_id,
            }
        )
        # Sync this object's attributes with the server's copy of the question.
        super(QuizSubmissionQuestion, self).set_attributes(question)
        return True
    def unflag(self, validation_token=None, **kwargs):
        """
        Remove a previously set flag on a quiz question.
        :calls: `PUT /api/v1/quiz_submissions/:quiz_submission_id/questions/:id/unflag \
        <https://canvas.instructure.com/doc/api/quiz_submission_questions.html#method.quizzes/quiz_submission_questions.unflag>`_
        :param validation_token: (Optional) The unique validation token for the quiz submission.
            If one is not provided, canvasapi will attempt to use `self.validation_token`.
        :type validation_token: str
        :returns: True if the question was successfully unflagged, False otherwise.
        :rtype: bool
        :raises RequiredFieldMissing: if no token is passed and none is set on self.
        """
        try:
            # Explicit argument wins; fall back to the attribute set at creation.
            kwargs["validation_token"] = validation_token or self.validation_token
        except AttributeError:
            raise RequiredFieldMissing(
                "`validation_token` not set on this QuizSubmissionQuestion, must be passed"
                " as a function argument."
            )
        # Only the latest attempt for a quiz submission can be updated, and Canvas
        # automatically returns the latest attempt with every quiz submission response,
        # so we can just use that.
        kwargs["attempt"] = self.attempt
        response = self._requester.request(
            "PUT",
            "quiz_submissions/{}/questions/{}/unflag".format(
                self.quiz_submission_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        question = response.json()["quiz_submission_questions"][0]
        question.update(
            {
                "validation_token": kwargs["validation_token"],
                "quiz_submission_id": self.quiz_submission_id,
            }
        )
        # Sync this object's attributes with the server's copy of the question.
        super(QuizSubmissionQuestion, self).set_attributes(question)
        return True
class QuizAssignmentOverrideSet(CanvasObject):
    """The set of assignment overrides that apply to a single quiz."""
    def __str__(self):
        return "Overrides for quiz_id {}".format(self.quiz_id)
| mit | efaf37b83a15ea9e33c0c29e57300f15 | 36.961583 | 129 | 0.598638 | 3.972831 | false | false | false | false |
ucfopen/canvasapi | tests/test_outcome.py | 1 | 19528 | import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.outcome import Outcome, OutcomeGroup, OutcomeLink
from tests import settings
from tests.util import register_uris
@requests_mock.Mocker()
class TestOutcome(unittest.TestCase):
    """Unit tests for Outcome; all HTTP traffic is mocked."""
    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
        with requests_mock.Mocker() as m:
            register_uris(
                {
                    "course": ["get_by_id"],
                    "outcome": [
                        "account_root_outcome_group",
                        "canvas_root_outcome_group",
                        "course_root_outcome_group",
                        "course_outcome_links_in_context",
                        "outcome_example",
                    ],
                },
                m,
            )
            self.course = self.canvas.get_course(1)
            self.course_outcome_links = self.course.get_all_outcome_links_in_context()
            self.example_outcome = self.course_outcome_links[0].get_outcome()
    # __str__()
    def test__str__(self, m):
        string = str(self.example_outcome)
        self.assertIsInstance(string, str)
    # update()
    def test_update(self, m):
        register_uris({"outcome": ["outcome_update"]}, m)
        # Confirm the fixture's starting title before mutating it.
        self.assertEqual(self.example_outcome.title, "Outcome Show Example")
        result = self.example_outcome.update(title="new_title")
        self.assertTrue(result)
        # update() should mutate the object in place.
        self.assertIsInstance(self.example_outcome, Outcome)
        self.assertEqual(self.example_outcome.title, "new_title")
@requests_mock.Mocker()
class TestOutcomeLink(unittest.TestCase):
    """Unit tests for OutcomeLink in both account and course contexts."""
    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
        with requests_mock.Mocker() as m:
            register_uris(
                {
                    "account": ["get_by_id"],
                    "course": ["get_by_id"],
                    "outcome": [
                        "account_outcome_links_in_context",
                        "course_outcome_links_in_context",
                    ],
                },
                m,
            )
            self.account = self.canvas.get_account(1)
            self.account_outcome_links = self.account.get_all_outcome_links_in_context()
            self.course = self.canvas.get_course(1)
            self.course_outcome_links = self.course.get_all_outcome_links_in_context()
    # __str__()
    def test__str__(self, m):
        register_uris({"outcome": ["course_outcome_links_in_context"]}, m)
        string = str(self.course_outcome_links[0])
        self.assertIsInstance(string, str)
    # get_outcome()
    def test_get_outcome(self, m):
        register_uris(
            {"outcome": ["outcome_example", "course_outcome_links_in_context"]}, m
        )
        result = self.course_outcome_links[0].get_outcome()
        self.assertIsInstance(result, Outcome)
    # get_outcome_group()
    def test_get_outcome_group(self, m):
        register_uris(
            {
                "outcome": [
                    "outcome_group_example_account",
                    "account_outcome_links_in_context",
                    "outcome_group_example_course",
                    "course_outcome_links_in_context",
                ]
            },
            m,
        )
        # Exercise both course- and account-context links.
        result = self.course_outcome_links[0].get_outcome_group()
        self.assertIsInstance(result, OutcomeGroup)
        result = self.account_outcome_links[0].get_outcome_group()
        self.assertIsInstance(result, OutcomeGroup)
@requests_mock.Mocker()
class TestOutcomeGroup(unittest.TestCase):
    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
        with requests_mock.Mocker() as m:
            register_uris(
                {
                    "account": ["get_by_id"],
                    "course": ["get_by_id"],
                    "outcome": [
                        "account_root_outcome_group",
                        "canvas_root_outcome_group",
                        "course_root_outcome_group",
                        "course_outcome_links_in_context",
                        "outcome_example",
                    ],
                },
                m,
            )
            # Root outcome groups in all three contexts: global, account, course.
            self.canvas_outcome_group = self.canvas.get_root_outcome_group()
            self.account = self.canvas.get_account(1)
            self.account_outcome_group = self.account.get_root_outcome_group()
            self.account_outcome_groups = self.account.get_outcome_groups_in_context()
            self.account_outcome_links = self.account.get_all_outcome_links_in_context()
            self.course = self.canvas.get_course(1)
            self.course_outcome_group = self.course.get_root_outcome_group()
            self.course_outcome_groups = self.course.get_outcome_groups_in_context()
            self.course_outcome_links = self.course.get_all_outcome_links_in_context()
            self.example_outcome = self.course_outcome_links[0].get_outcome()
# __str__()
    def test__str__(self, m):
        # No fixtures registered: __str__ must not hit the network.
        string = str(self.canvas_outcome_group)
        self.assertIsInstance(string, str)
# update()
    def test_update(self, m):
        register_uris(
            {
                "outcome": [
                    "outcome_group_update_global",
                    "outcome_group_update_account",
                    "outcome_group_update_course",
                ]
            },
            m,
        )
        new_title = "New Outcome Group Title"
        # Account context.
        self.assertEqual(self.account_outcome_group.title, "ROOT")
        result = self.account_outcome_group.update(title=new_title)
        self.assertTrue(result)
        self.assertIsInstance(self.account_outcome_group, OutcomeGroup)
        self.assertEqual(self.account_outcome_group.title, new_title)
        # Global (canvas) context.
        self.assertEqual(self.canvas_outcome_group.title, "ROOT")
        result = self.canvas_outcome_group.update(title=new_title)
        self.assertTrue(result)
        self.assertIsInstance(self.canvas_outcome_group, OutcomeGroup)
        self.assertEqual(self.canvas_outcome_group.title, new_title)
        # Course context.
        self.assertEqual(self.course_outcome_group.title, "ROOT")
        result = self.course_outcome_group.update(title=new_title)
        self.assertTrue(result)
        self.assertIsInstance(self.course_outcome_group, OutcomeGroup)
        self.assertEqual(self.course_outcome_group.title, new_title)
# delete()
def test_delete(self, m):
    """delete() succeeds for account, global, and course outcome groups."""
    register_uris(
        {
            "outcome": [
                "outcome_group_delete_global",
                "outcome_group_delete_account",
                "outcome_group_delete_course",
            ]
        },
        m,
    )

    for group in (
        self.account_outcome_group,
        self.canvas_outcome_group,
        self.course_outcome_group,
    ):
        self.assertEqual(group.title, "ROOT")
        self.assertTrue(group.delete())
# get_linked_outcomes()
def test_get_linked_outcomes(self, m):
    """get_linked_outcomes() returns OutcomeLinks with the fixture's group data."""
    register_uris(
        {
            "outcome": [
                "outcome_group_list_linked_outcomes_account",
                "outcome_group_list_linked_outcomes_global",
                "outcome_group_list_linked_outcomes_courses",
            ]
        },
        m,
    )

    expectations = (
        (self.account_outcome_group, "Account Test Outcome Group"),
        (self.canvas_outcome_group, "Global Test Outcome Group"),
        (self.course_outcome_group, "Course Test Outcome Group"),
    )
    for group, expected_title in expectations:
        first_link = group.get_linked_outcomes()[0]
        self.assertIsInstance(first_link, OutcomeLink)
        self.assertEqual(first_link.outcome_group["id"], 2)
        self.assertEqual(first_link.outcome_group["title"], expected_title)
# link_existing()
def test_link_existing(self, m):
    """link_existing() accepts both an Outcome object and a bare outcome id."""
    register_uris(
        {
            "outcome": [
                "outcome_example",
                "outcome_group_link_existing_global",
                "outcome_group_link_existing_account",
                "outcome_group_link_existing_course",
            ]
        },
        m,
    )

    # Outer loop mirrors the original call order: first by object, then by id.
    for outcome in (self.example_outcome, 3):
        for group in (
            self.canvas_outcome_group,
            self.account_outcome_group,
            self.course_outcome_group,
        ):
            link = group.link_existing(outcome)
            self.assertIsInstance(link, OutcomeLink)
            self.assertEqual(link.outcome_group["id"], 2)
# link_new()
def test_link_new(self, m):
    """link_new() creates an outcome whose context matches its parent group."""
    register_uris(
        {
            "outcome": [
                "outcome_group_link_new_global",
                "outcome_group_link_new_account",
                "outcome_group_link_new_course",
            ]
        },
        m,
    )

    new_title = "New Outcome"

    for group, expected_context in (
        (self.canvas_outcome_group, None),
        (self.account_outcome_group, "Account"),
        (self.course_outcome_group, "Course"),
    ):
        link = group.link_new(title=new_title)
        self.assertIsInstance(link, OutcomeLink)
        self.assertEqual(link.outcome_group["id"], 1)
        self.assertEqual(link.outcome["id"], 2)
        self.assertEqual(link.outcome["context_type"], expected_context)
# unlink_outcome()
def test_unlink_outcome(self, m):
    """unlink_outcome() accepts both an Outcome object and a bare outcome id."""
    register_uris(
        {
            "outcome": [
                "outcome_example",
                "outcome_group_unlink_outcome_global",
                "outcome_group_unlink_outcome_account",
                "outcome_group_unlink_outcome_course",
            ]
        },
        m,
    )

    # Outer loop mirrors the original call order: first by object, then by id.
    for outcome in (self.example_outcome, 3):
        for group in (
            self.canvas_outcome_group,
            self.account_outcome_group,
            self.course_outcome_group,
        ):
            self.assertTrue(group.unlink_outcome(outcome))
# get_subgroups()
def test_get_subgroups(self, m):
    """get_subgroups() returns OutcomeGroups carrying the parent's context.

    Fix: the original re-asserted ``result[0]``'s context_type/context_id a
    second time where it clearly intended to check ``result[1]`` (copy-paste
    slip), so the second subgroup's context was never actually verified.
    """
    register_uris(
        {
            "outcome": [
                "outcome_group_list_subgroups_global",
                "outcome_group_list_subgroups_account",
                "outcome_group_list_subgroups_course",
            ]
        },
        m,
    )

    cases = (
        (self.canvas_outcome_group, "Global", None, None),
        (self.account_outcome_group, "Account", "Account", self.account.id),
        (self.course_outcome_group, "Course", "Course", self.course.id),
    )
    for group, prefix, context_type, context_id in cases:
        result = group.get_subgroups()
        # Fixtures return two subgroups: ids 2 and 3, titles "... 1" / "... 2".
        for index, (expected_id, title_number) in enumerate(((2, 1), (3, 2))):
            subgroup = result[index]
            self.assertIsInstance(subgroup, OutcomeGroup)
            self.assertEqual(subgroup.id, expected_id)
            self.assertEqual(
                subgroup.title,
                "{} Listed Subgroup Title {}".format(prefix, title_number),
            )
            self.assertTrue(hasattr(subgroup, "context_type"))
            self.assertEqual(subgroup.context_type, context_type)
            self.assertTrue(hasattr(subgroup, "context_id"))
            self.assertEqual(subgroup.context_id, context_id)
# create_subgroup()
def test_create_subgroup(self, m):
    """create_subgroup() parents the new subgroup under the receiving group.

    Fix: the original compared every result's ``parent_outcome_group["id"]``
    against ``self.canvas_outcome_group.id``, even for subgroups created on
    the account and course groups (copy-paste slip); compare against the
    group the subgroup was actually created under.
    """
    register_uris(
        {
            "outcome": [
                "outcome_group_create_subgroup_global",
                "outcome_group_create_subgroup_account",
                "outcome_group_create_subgroup_course",
            ]
        },
        m,
    )

    new_title = "New Subgroup Title"

    for group in (
        self.canvas_outcome_group,
        self.account_outcome_group,
        self.course_outcome_group,
    ):
        subgroup = group.create_subgroup(new_title)
        self.assertEqual(group.id, subgroup.parent_outcome_group["id"])
        self.assertEqual(subgroup.parent_outcome_group["title"], "Parent of Subgroup")
        self.assertEqual(subgroup.title, new_title)
# import_outcome_group()
def test_import_outcome_group(self, m):
    """import_outcome_group() accepts both a group id and a group object."""
    register_uris(
        {
            "outcome": [
                "outcome_group_import_outcome_group_global",
                "outcome_group_import_outcome_group_account",
                "outcome_group_import_outcome_group_course",
            ]
        },
        m,
    )

    def check(imported, parent_group, prefix):
        # Shared assertions: id, prefixed title, and the parent group linkage.
        self.assertEqual(imported.id, 4)
        self.assertEqual(imported.title, "{} Imported Subgroup Title".format(prefix))
        self.assertEqual(imported.parent_outcome_group["id"], parent_group.id)
        self.assertEqual(imported.parent_outcome_group["title"], parent_group.title)

    imported = None
    for group, prefix in (
        (self.canvas_outcome_group, "Global"),
        (self.account_outcome_group, "Account"),
        (self.course_outcome_group, "Course"),
    ):
        imported = group.import_outcome_group(3)
        check(imported, group, prefix)

    # The last (course) result can itself be passed in place of an id.
    result_by_obj = self.course_outcome_group.import_outcome_group(imported)
    check(result_by_obj, self.course_outcome_group, "Course")
@requests_mock.Mocker()
class TestOutcomeResult(unittest.TestCase):
    """Tests for canvasapi.outcome.OutcomeResult."""

    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        with requests_mock.Mocker() as m:
            register_uris(
                {
                    "course": ["get_by_id"],
                    "outcome": ["outcome_example", "outcome_result_example"],
                },
                m,
            )

            self.course = self.canvas.get_course(1)
            self.course_outcome_results = self.course.get_outcome_results()
            self.outcome_result_example = self.course_outcome_results[0]

    # __str__()
    def test__str__(self, m):
        """str() of an outcome result yields a plain string."""
        self.assertIsInstance(str(self.outcome_result_example), str)
| mit | 36f83dbe20aa4c4913dfc0b08cfed609 | 37.440945 | 88 | 0.586747 | 4.053134 | false | true | false | false |
ucfopen/canvasapi | tests/test_assignment.py | 1 | 17832 | import unittest
import uuid
from pathlib import Path
import requests_mock
from canvasapi import Canvas
from canvasapi.assignment import (
Assignment,
AssignmentExtension,
AssignmentGroup,
AssignmentOverride,
)
from canvasapi.exceptions import CanvasException, RequiredFieldMissing
from canvasapi.grade_change_log import GradeChangeEvent
from canvasapi.paginated_list import PaginatedList
from canvasapi.peer_review import PeerReview
from canvasapi.progress import Progress
from canvasapi.submission import Submission
from canvasapi.user import User, UserDisplay
from tests import settings
from tests.util import cleanup_file, register_uris
@requests_mock.Mocker()
class TestAssignment(unittest.TestCase):
    """Unit tests for canvasapi.assignment.Assignment.

    Every outbound HTTP request is intercepted by requests_mock; fixture
    responses are registered per-test via register_uris().
    """

    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        with requests_mock.Mocker() as m:
            register_uris({"course": ["get_by_id", "get_assignment_by_id"]}, m)

            self.course = self.canvas.get_course(1)
            self.assignment = self.course.get_assignment(1)

    def test__init__overrides(self, m):
        """Overrides present in the payload become AssignmentOverride objects."""
        register_uris({"assignment": ["get_assignment_with_overrides"]}, m)
        assignment = self.course.get_assignment(1)
        self.assertTrue(hasattr(assignment, "overrides"))
        self.assertIsInstance(assignment.overrides, list)
        self.assertEqual(len(assignment.overrides), 1)
        self.assertIsInstance(assignment.overrides[0], AssignmentOverride)

    # create_override()
    def test_create_override(self, m):
        register_uris({"assignment": ["create_override"]}, m)

        override = self.assignment.create_override(
            assignment_override={
                "student_ids": [1, 2, 3],
                "title": "New Assignment Override",
            }
        )

        self.assertIsInstance(override, AssignmentOverride)
        self.assertEqual(override.title, "New Assignment Override")

    # delete()
    def test_delete_assignments(self, m):
        register_uris({"assignment": ["delete_assignment"]}, m)

        deleted_assignment = self.assignment.delete()

        self.assertIsInstance(deleted_assignment, Assignment)

    # edit()
    def test_edit_assignment(self, m):
        register_uris({"assignment": ["edit_assignment"]}, m)

        name = "New Name"
        edited_assignment = self.assignment.edit(assignment={"name": name})

        self.assertIsInstance(edited_assignment, Assignment)
        self.assertTrue(hasattr(edited_assignment, "name"))
        self.assertEqual(edited_assignment.name, name)

    # get_gradeable_students()
    def test_get_gradeable_students(self, m):
        register_uris({"course": ["list_gradeable_students"]}, m)

        students = self.assignment.get_gradeable_students()
        student_list = [student for student in students]

        self.assertEqual(len(student_list), 2)
        self.assertIsInstance(student_list[0], UserDisplay)

    # get_grade_change_events()
    def test_get_grade_change_events(self, m):
        register_uris({"assignment": ["get_grade_change_events"]}, m)

        response = self.assignment.get_grade_change_events()
        self.assertIsInstance(response, PaginatedList)
        self.assertEqual(len([event for event in response]), 2)
        for event in response:
            self.assertEqual(event.links["course"], self.assignment.id)
            self.assertIsInstance(event, GradeChangeEvent)
            self.assertEqual(event.event_type, "grade_change")

    # get_override()
    def test_get_override(self, m):
        register_uris({"assignment": ["get_assignment_override"]}, m)

        override = self.assignment.get_override(1)
        self.assertIsInstance(override, AssignmentOverride)

    # get_overrides()
    def test_get_overrides(self, m):
        # Two fixture pages exercise pagination of the overrides list.
        register_uris(
            {
                "assignment": [
                    "list_assignment_overrides",
                    "list_assignment_overrides_p2",
                ]
            },
            m,
        )

        overrides = self.assignment.get_overrides()
        override_list = [override for override in overrides]

        self.assertEqual(len(override_list), 4)
        self.assertIsInstance(override_list[0], AssignmentOverride)
        self.assertIsInstance(override_list[3], AssignmentOverride)

    # get_peer_reviews()
    def test_get_peer_reviews(self, m):
        register_uris({"assignment": ["list_peer_reviews"]}, m)

        peer_reviews = self.assignment.get_peer_reviews()
        peer_review_list = [peer_review for peer_review in peer_reviews]

        self.assertEqual(len(peer_review_list), 2)
        self.assertIsInstance(peer_review_list[0], PeerReview)

    # get_students_selected_for_moderation()
    def test_get_students_selected_for_moderation(self, m):
        register_uris({"assignment": ["get_students_selected_moderation"]}, m)

        selected_students = self.assignment.get_students_selected_for_moderation()
        selected_student_list = list(selected_students)
        self.assertEqual(len(selected_student_list), 2)
        self.assertIsInstance(selected_student_list[0], User)

    # get_submission()
    def test_get_submission(self, m):
        """get_submission() accepts either a user id or a User object."""
        register_uris({"submission": ["get_by_id_course"], "user": ["get_by_id"]}, m)

        user_id = 1
        submission_by_id = self.assignment.get_submission(user_id)

        self.assertIsInstance(submission_by_id, Submission)
        self.assertTrue(hasattr(submission_by_id, "submission_type"))

        user = self.canvas.get_user(user_id)
        submission_by_obj = self.assignment.get_submission(user)

        self.assertIsInstance(submission_by_obj, Submission)
        self.assertTrue(hasattr(submission_by_obj, "submission_type"))

    # get_submissions()
    def test_get_submissions(self, m):
        register_uris({"submission": ["list_submissions"]}, m)

        submissions = self.assignment.get_submissions()
        submission_list_by_id = [submission for submission in submissions]

        self.assertEqual(len(submission_list_by_id), 2)
        self.assertIsInstance(submission_list_by_id[0], Submission)

    # set_extensions()
    def test_set_extensions(self, m):
        register_uris({"assignment": ["set_extensions"]}, m)

        extension = self.assignment.set_extensions(
            [{"user_id": 3, "extra_attempts": 2}, {"user_id": 2, "extra_attempts": 2}]
        )

        self.assertIsInstance(extension, list)
        self.assertEqual(len(extension), 2)

        self.assertIsInstance(extension[0], AssignmentExtension)
        self.assertEqual(extension[0].user_id, 3)
        self.assertTrue(hasattr(extension[0], "extra_attempts"))
        self.assertEqual(extension[0].extra_attempts, 2)

        self.assertIsInstance(extension[1], AssignmentExtension)
        self.assertEqual(extension[1].user_id, 2)
        self.assertTrue(hasattr(extension[1], "extra_attempts"))
        self.assertEqual(extension[1].extra_attempts, 2)

    def test_set_extensions_not_list(self, m):
        """A non-list argument is rejected with ValueError."""
        with self.assertRaises(ValueError):
            self.assignment.set_extensions({"user_id": 3, "exrra_attempts": 2})

    def test_set_extensions_empty_list(self, m):
        """An empty list is rejected with ValueError."""
        with self.assertRaises(ValueError):
            self.assignment.set_extensions([])

    def test_set_extensions_non_dicts(self, m):
        """List entries must be dicts, not tuples."""
        with self.assertRaises(ValueError):
            self.assignment.set_extensions([("user_id", 1), ("extra_attempts", 2)])

    def test_set_extensions_missing_key(self, m):
        """Each extension entry must include user_id."""
        with self.assertRaises(RequiredFieldMissing):
            self.assignment.set_extensions([{"extra_attempts": 3}])

    # submit()
    def test_submit(self, m):
        register_uris({"assignment": ["submit"]}, m)

        sub_type = "online_upload"
        sub_dict = {"submission_type": sub_type}
        submission = self.assignment.submit(sub_dict)

        self.assertIsInstance(submission, Submission)
        self.assertTrue(hasattr(submission, "submission_type"))
        self.assertEqual(submission.submission_type, sub_type)

    def test_submit_fail(self, m):
        """submission_type is required; an empty dict raises."""
        with self.assertRaises(RequiredFieldMissing):
            self.assignment.submit({})

    def test_submit_file(self, m):
        register_uris({"assignment": ["submit", "upload", "upload_final"]}, m)

        # Random suffix avoids collisions between concurrent test runs.
        filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
        try:
            with open(filename, "w+") as file:
                sub_type = "online_upload"
                sub_dict = {"submission_type": sub_type}
                submission = self.assignment.submit(sub_dict, file)

                self.assertIsInstance(submission, Submission)
                self.assertTrue(hasattr(submission, "submission_type"))
                self.assertEqual(submission.submission_type, sub_type)
        finally:
            # Always remove the temp file, even when assertions fail.
            cleanup_file(filename)

    def test_submit_file_pathlib(self, m):
        """submit() also accepts a pathlib.Path in place of a file object."""
        register_uris({"assignment": ["submit", "upload", "upload_final"]}, m)

        filename = Path("testfile_assignment_{}".format(uuid.uuid4().hex))
        filename.write_bytes(b"test data")
        try:
            sub_type = "online_upload"
            sub_dict = {"submission_type": sub_type}
            submission = self.assignment.submit(sub_dict, filename)

            self.assertIsInstance(submission, Submission)
            self.assertTrue(hasattr(submission, "submission_type"))
            self.assertEqual(submission.submission_type, sub_type)
        finally:
            cleanup_file(filename)

    def test_submit_file_wrong_type(self, m):
        """A file may only accompany an online_upload submission."""
        filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
        sub_type = "online_text_entry"
        sub_dict = {"submission_type": sub_type}

        with self.assertRaises(ValueError):
            self.assignment.submit(sub_dict, filename)

    def test_submit_file_upload_failure(self, m):
        """A failed upload step surfaces as CanvasException."""
        register_uris({"assignment": ["submit", "upload", "upload_fail"]}, m)

        filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
        try:
            with open(filename, "w+") as file:
                sub_type = "online_upload"
                sub_dict = {"submission_type": sub_type}
                with self.assertRaises(CanvasException):
                    self.assignment.submit(sub_dict, file)
        finally:
            cleanup_file(filename)

    # __str__()
    def test__str__(self, m):
        string = str(self.assignment)
        self.assertIsInstance(string, str)

    # submissions_bulk_update()
    def test_submissions_bulk_update(self, m):
        register_uris({"assignment": ["update_submissions"]}, m)
        register_uris({"progress": ["course_progress"]}, m)
        progress = self.assignment.submissions_bulk_update(
            grade_data={"1": {"posted_grade": 97}, "2": {"posted_grade": 98}}
        )
        self.assertIsInstance(progress, Progress)
        self.assertTrue(progress.context_type == "Course")
        # query() refreshes the Progress object from the (mocked) API.
        progress = progress.query()
        self.assertTrue(progress.context_type == "Course")

    # upload_to_submission()
    def test_upload_to_submission_self(self, m):
        register_uris({"assignment": ["upload", "upload_final"]}, m)

        filename = "testfile_assignment_{}".format(uuid.uuid4().hex)

        try:
            with open(filename, "w+") as file:
                response = self.assignment.upload_to_submission(file)

                self.assertTrue(response[0])
                self.assertIsInstance(response[1], dict)
                self.assertIn("url", response[1])
        finally:
            cleanup_file(filename)

    def test_upload_to_submission_user(self, m):
        """upload_to_submission() can target a specific user's submission."""
        register_uris({"assignment": ["upload_by_id", "upload_final"]}, m)

        filename = "testfile_assignment_{}".format(uuid.uuid4().hex)
        user_id = 1

        try:
            with open(filename, "w+") as file:
                response = self.assignment.upload_to_submission(file, user_id)

                self.assertTrue(response[0])
                self.assertIsInstance(response[1], dict)
                self.assertIn("url", response[1])
        finally:
            cleanup_file(filename)

    # get_provisional_grades_status
    def test_get_provisional_grades_status(self, m):
        register_uris(
            {"assignment": ["get_provisional_grades_status"], "user": ["get_by_id"]}, m
        )

        student_id = 1
        user = self.canvas.get_user(student_id)
        status = self.assignment.get_provisional_grades_status(user)
        self.assertIsInstance(status, bool)
        self.assertFalse(status)

    # select_students_for_moderation()
    def test_select_students_for_moderation(self, m):
        register_uris({"assignment": ["select_students_for_moderation"]}, m)

        selected_students = self.assignment.select_students_for_moderation(
            student_ids=[11, 12]
        )
        selected_student_list = list(selected_students)
        self.assertEqual(len(selected_student_list), 2)
        self.assertIsInstance(selected_student_list[0], User)

    # selected_provisional_grade
    def test_selected_provisional_grade(self, m):
        register_uris({"assignment": ["selected_provisional_grade"]}, m)

        provisional_grade_id = 1
        selected_provisional_grade = self.assignment.selected_provisional_grade(
            provisional_grade_id
        )
        self.assertIsInstance(selected_provisional_grade, dict)
        self.assertIn("assignment_id", selected_provisional_grade)

    # publish_provisional_grades
    def test_publish_provisional_grades(self, m):
        register_uris({"assignment": ["publish_provisional_grades"]}, m)

        publish = self.assignment.publish_provisional_grades()
        self.assertIsInstance(publish, dict)
        self.assertIn("message", publish)

    # show_provisional_grades_for_student
    def test_show_provisonal_grades_for_student(self, m):
        # NOTE(review): "provisonal" spelling mirrors the method under test.
        register_uris(
            {
                "assignment": ["show_provisonal_grades_for_student"],
                "user": ["get_by_id"],
            },
            m,
        )

        anonymous_id = 1
        user = self.canvas.get_user(anonymous_id)
        show_status = self.assignment.show_provisonal_grades_for_student(user)
        self.assertIsInstance(show_status, bool)
        self.assertFalse(show_status)
@requests_mock.Mocker()
class TestAssignmentExtension(unittest.TestCase):
    """Tests for canvasapi.assignment.AssignmentExtension."""

    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        requester = self.canvas._Canvas__requester
        attributes = {"assignment_id": 2, "user_id": 3, "extra_attempts": 2}
        self.extension = AssignmentExtension(requester, attributes)

    # __str__()
    def test__str__(self, m):
        """str() of an extension yields a plain string."""
        self.assertIsInstance(str(self.extension), str)
@requests_mock.Mocker()
class TestAssignmentGroup(unittest.TestCase):
    """Tests for canvasapi.assignment.AssignmentGroup."""

    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        with requests_mock.Mocker() as m:
            register_uris(
                {"course": ["get_by_id"], "assignment": ["get_assignment_group"]}, m
            )

            self.course = self.canvas.get_course(1)
            self.assignment_group = self.course.get_assignment_group(5)

    # edit()
    def test_edit_assignment_group(self, m):
        """edit() applies the new name and returns an AssignmentGroup."""
        register_uris({"assignment": ["edit_assignment_group"]}, m)

        new_name = "New Name"
        edited = self.assignment_group.edit(assignment_group={"name": new_name})

        self.assertIsInstance(edited, AssignmentGroup)
        self.assertTrue(hasattr(edited, "name"))
        self.assertEqual(edited.name, new_name)

    # delete()
    def test_delete_assignment_group(self, m):
        """delete() returns the removed AssignmentGroup."""
        register_uris({"assignment": ["delete_assignment_group"]}, m)

        deleted = self.assignment_group.delete()

        self.assertIsInstance(deleted, AssignmentGroup)
        self.assertTrue(hasattr(deleted, "name"))
        self.assertEqual(deleted.name, "Assignment Group 5")

    # __str__()
    def test__str__(self, m):
        """str() of an assignment group yields a plain string."""
        self.assertIsInstance(str(self.assignment_group), str)
@requests_mock.Mocker()
class TestAssignmentOverride(unittest.TestCase):
    """Tests for canvasapi.assignment.AssignmentOverride."""

    def setUp(self):
        self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)

        with requests_mock.Mocker() as m:
            register_uris(
                {
                    "course": ["get_by_id", "get_assignment_by_id"],
                    "assignment": ["get_assignment_override"],
                },
                m,
            )

            self.course = self.canvas.get_course(1)
            self.assignment = self.course.get_assignment(1)
            self.assignment_override = self.assignment.get_override(1)

    # __str__()
    def test__str__(self, m):
        """str() combines the override title and id."""
        text = str(self.assignment_override)
        self.assertIsInstance(text, str)
        self.assertEqual(text, "Assignment Override 1 (1)")

    # delete()
    def test_delete(self, m):
        """delete() returns the removed override with its id intact."""
        register_uris({"assignment": ["delete_override"]}, m)

        removed = self.assignment_override.delete()
        self.assertIsInstance(removed, AssignmentOverride)
        self.assertEqual(removed.id, self.assignment_override.id)

    # edit()
    def test_edit(self, m):
        """edit() updates the override in place and returns it."""
        register_uris({"assignment": ["edit_override"]}, m)

        edited = self.assignment_override.edit(
            assignment_override={
                "title": "New Title",
                "student_ids": self.assignment_override.student_ids,
            }
        )

        self.assertEqual(edited, self.assignment_override)
        self.assertIsInstance(self.assignment_override, AssignmentOverride)
        self.assertEqual(edited.title, "New Title")
| mit | de6dc07af8b670ae2ab5b1898f35a39d | 35.024242 | 87 | 0.629318 | 4.081483 | false | true | false | false |
ucfopen/canvasapi | canvasapi/grading_period.py | 1 | 2156 | from canvasapi.canvas_object import CanvasObject
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.util import combine_kwargs
class GradingPeriod(CanvasObject):
    """A single grading period within a Canvas course."""

    def __str__(self):
        # Human-readable form: "<title> (<id>)".
        return "{} ({})".format(self.title, self.id)

    def delete(self, **kwargs):
        """
        Delete a grading period for a course.

        :calls: `DELETE /api/v1/courses/:course_id/grading_periods/:id \
        <https://canvas.instructure.com/doc/api/grading_periods.html#method.grading_periods.destroy>`_

        :returns: Status code 204 if delete was successful
        :rtype: int
        """
        response = self._requester.request(
            "DELETE",
            "courses/{}/grading_periods/{}".format(self.course_id, self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.status_code

    def update(self, grading_period, **kwargs):
        """
        Update a grading period for a course.

        :calls: `PUT /api/v1/courses/:course_id/grading_periods/:id \
        <https://canvas.instructure.com/doc/api/grading_periods.html#method.grading_periods.update>`_

        :param grading_period: List of nested parameters.
        :type grading_period: list[dict]

        :raises RequiredFieldMissing: if ``grading_period`` is not a list, or
            its first entry lacks ``start_date`` or ``end_date``.

        :rtype: :class:`canvasapi.grading_period.GradingPeriod`
        """
        # The endpoint expects the periods under a "grading_periods" array key.
        if isinstance(grading_period, list):
            kwargs["grading_periods"] = grading_period
        else:
            raise RequiredFieldMissing("List is required")

        # Only the first entry is validated here; the API requires both dates.
        if "start_date" not in kwargs["grading_periods"][0]:
            raise RequiredFieldMissing("start_date is missing")

        if "end_date" not in kwargs["grading_periods"][0]:
            raise RequiredFieldMissing("end_date is missing")

        response = self._requester.request(
            "PUT",
            "courses/{}/grading_periods/{}".format(self.course_id, self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        response_json = response.json()
        grading_period = response_json["grading_periods"][0]
        # The response payload carries no course_id; attach it so the returned
        # object can issue further course-scoped requests.
        grading_period.update({"course_id": self.course_id})

        return GradingPeriod(self._requester, grading_period)
| mit | d120ee487732440fe51adfcefe5652d8 | 34.344262 | 101 | 0.624304 | 4.138196 | false | false | false | false |
dj-stripe/dj-stripe | djstripe/migrations/0001_initial.py | 1 | 209454 | # Generated by Django 3.2.11 on 2022-01-17 03:13
import uuid
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import djstripe.enums
import djstripe.fields
import djstripe.models.api
import djstripe.models.webhooks
# Resolve the subscriber model at migration-load time; defaults to the
# project's auth user model when no override is configured.
DJSTRIPE_SUBSCRIBER_MODEL: str = getattr(
    settings, "DJSTRIPE_SUBSCRIBER_MODEL", settings.AUTH_USER_MODEL
)  # type: ignore

# Needed here for external apps that have added the DJSTRIPE_SUBSCRIBER_MODEL
# *not* in the '__first__' migration of the app, which results in:
# ValueError: Related model 'DJSTRIPE_SUBSCRIBER_MODEL' cannot be resolved
# Context: https://github.com/dj-stripe/dj-stripe/issues/707
DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY = getattr(
    settings, "DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY", "__first__"
)

DJSTRIPE_SUBSCRIBER_MODEL_DEPENDENCY = migrations.swappable_dependency(
    DJSTRIPE_SUBSCRIBER_MODEL
)

# When the subscriber model is something other than the auth user model,
# depend on the configured migration of the subscriber model's app directly
# instead of the generic swappable dependency built above.
if DJSTRIPE_SUBSCRIBER_MODEL != settings.AUTH_USER_MODEL:
    DJSTRIPE_SUBSCRIBER_MODEL_DEPENDENCY = migrations.migration.SwappableTuple(
        (
            DJSTRIPE_SUBSCRIBER_MODEL.split(".", 1)[0],
            DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY,
        ),
        DJSTRIPE_SUBSCRIBER_MODEL,
    )
class Migration(migrations.Migration):
initial = True
dependencies = [DJSTRIPE_SUBSCRIBER_MODEL_DEPENDENCY]
operations = [
migrations.CreateModel(
name="Account",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("business_profile", djstripe.fields.JSONField(blank=True, null=True)),
(
"business_type",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.BusinessType,
max_length=10,
),
),
(
"charges_enabled",
models.BooleanField(
help_text="Whether the account can create live charges"
),
),
(
"country",
models.CharField(
help_text="The country of the account", max_length=2
),
),
("company", djstripe.fields.JSONField(blank=True, null=True)),
(
"default_currency",
djstripe.fields.StripeCurrencyCodeField(max_length=3),
),
(
"details_submitted",
models.BooleanField(
help_text="Whether account details have been submitted. Standard accounts cannot receive payouts before this is true."
),
),
(
"email",
models.CharField(
help_text="The primary user's email address.", max_length=255
),
),
("individual", djstripe.fields.JSONField(blank=True, null=True)),
(
"payouts_enabled",
models.BooleanField(
help_text="Whether Stripe can send payouts to this account",
null=True,
),
),
(
"product_description",
models.CharField(
blank=True,
default="",
help_text="Internal-only description of the product sold or service provided by the business. It's used by Stripe for risk and underwriting purposes.",
max_length=255,
),
),
("requirements", djstripe.fields.JSONField(blank=True, null=True)),
("settings", djstripe.fields.JSONField(blank=True, null=True)),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.AccountType, max_length=8
),
),
("tos_acceptance", djstripe.fields.JSONField(blank=True, null=True)),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Charge",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_refunded",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"captured",
models.BooleanField(
default=False,
help_text="If the charge was created without capturing, this boolean represents whether or not it is still uncaptured or has since been captured.",
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"failure_code",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.ApiErrorCode,
max_length=42,
),
),
(
"failure_message",
models.TextField(
blank=True,
default="",
help_text="Message to user further explaining reason for charge failure if available.",
max_length=5000,
),
),
("fraud_details", djstripe.fields.JSONField(blank=True, null=True)),
("outcome", djstripe.fields.JSONField(blank=True, null=True)),
(
"paid",
models.BooleanField(
default=False,
help_text="True if the charge succeeded, or was successfully authorized for later capture, False otherwise.",
),
),
(
"payment_method_details",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"receipt_email",
models.TextField(
blank=True,
default="",
help_text="The email address that the receipt for this charge was sent to.",
max_length=800,
),
),
(
"receipt_number",
models.CharField(
blank=True,
default="",
help_text="The transaction number that appears on email receipts sent for this charge.",
max_length=14,
),
),
(
"receipt_url",
models.TextField(
blank=True,
default="",
help_text="This is the URL to view the receipt for this charge. The receipt is kept up-to-date to the latest state of the charge, including any refunds. If the charge is for an Invoice, the receipt will be stylized as an Invoice receipt.",
max_length=5000,
),
),
(
"refunded",
models.BooleanField(
default=False,
help_text="Whether or not the charge has been fully refunded. If the charge is only partially refunded, this attribute will still be false.",
),
),
("shipping", djstripe.fields.JSONField(blank=True, null=True)),
(
"statement_descriptor",
models.CharField(
blank=True,
help_text="For card charges, use statement_descriptor_suffix instead. Otherwise, you can use this value as the complete description of a charge on your customers' statements. Must contain at least one letter, maximum 22 characters.",
max_length=22,
null=True,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ChargeStatus, max_length=9
),
),
(
"transfer_group",
models.CharField(
blank=True,
help_text="A string that identifies this transaction as part of a group.",
max_length=255,
null=True,
),
),
(
"on_behalf_of",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="charges",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The account (if any) the charge was made on behalf of without triggering an automatic transfer.",
),
),
(
"amount_captured",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"application",
models.CharField(
blank=True,
help_text="ID of the Connect application that created the charge.",
max_length=255,
),
),
(
"application_fee_amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"billing_details",
djstripe.fields.JSONField(null=True),
),
(
"calculated_statement_descriptor",
models.CharField(
default="",
help_text="The full statement descriptor that is passed to card networks, and that is displayed on your customers' credit card and bank statements. Allows you to see what the statement descriptor looks like after the static and dynamic portions are combined.",
max_length=22,
),
),
(
"disputed",
models.BooleanField(
default=False, help_text="Whether the charge has been disputed."
),
),
(
"statement_descriptor_suffix",
models.CharField(
blank=True,
help_text="Provides information about the charge that customers see on their statements. Concatenated with the prefix (shortened descriptor) or statement descriptor that's set on the account to form the complete statement descriptor. Maximum 22 characters for the concatenated descriptor.",
max_length=22,
null=True,
),
),
(
"transfer_data",
djstripe.fields.JSONField(blank=True, null=True),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Coupon",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("id", djstripe.fields.StripeIdField(max_length=500)),
(
"amount_off",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"currency",
djstripe.fields.StripeCurrencyCodeField(
blank=True, max_length=3, null=True
),
),
(
"duration",
djstripe.fields.StripeEnumField(
default="once", enum=djstripe.enums.CouponDuration, max_length=9
),
),
(
"duration_in_months",
models.PositiveIntegerField(
blank=True,
help_text="If `duration` is `repeating`, the number of months the coupon applies.",
null=True,
),
),
(
"max_redemptions",
models.PositiveIntegerField(
blank=True,
help_text="Maximum number of times this coupon can be redeemed, in total, before it is no longer valid.",
null=True,
),
),
(
"percent_off",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
(
"redeem_by",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"times_redeemed",
models.PositiveIntegerField(
default=0,
editable=False,
help_text="Number of times this coupon has been applied to a customer.",
),
),
(
"name",
models.TextField(
blank=True,
default="",
help_text="Name of the coupon displayed to customers on for instance invoices or receipts.",
max_length=5000,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={
"get_latest_by": "created",
"unique_together": {("id", "livemode")},
},
),
migrations.CreateModel(
name="PaymentMethod",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("billing_details", djstripe.fields.JSONField()),
("card", djstripe.fields.JSONField(blank=True, null=True)),
("card_present", djstripe.fields.JSONField(blank=True, null=True)),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PaymentMethodType, max_length=15
),
),
("alipay", djstripe.fields.JSONField(blank=True, null=True)),
("au_becs_debit", djstripe.fields.JSONField(blank=True, null=True)),
("bacs_debit", djstripe.fields.JSONField(blank=True, null=True)),
("bancontact", djstripe.fields.JSONField(blank=True, null=True)),
("eps", djstripe.fields.JSONField(blank=True, null=True)),
("fpx", djstripe.fields.JSONField(blank=True, null=True)),
("giropay", djstripe.fields.JSONField(blank=True, null=True)),
("ideal", djstripe.fields.JSONField(blank=True, null=True)),
("interac_present", djstripe.fields.JSONField(blank=True, null=True)),
("oxxo", djstripe.fields.JSONField(blank=True, null=True)),
("p24", djstripe.fields.JSONField(blank=True, null=True)),
("sepa_debit", djstripe.fields.JSONField(blank=True, null=True)),
("sofort", djstripe.fields.JSONField(blank=True, null=True)),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Customer",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"balance",
djstripe.fields.StripeQuantumCurrencyAmountField(default=0),
),
(
"currency",
djstripe.fields.StripeCurrencyCodeField(
blank=True, default="", max_length=3
),
),
(
"delinquent",
models.BooleanField(
default=False,
help_text="Whether or not the latest charge for the customer's latest invoice has failed.",
),
),
(
"coupon_start",
djstripe.fields.StripeDateTimeField(
blank=True, editable=False, null=True
),
),
(
"coupon_end",
djstripe.fields.StripeDateTimeField(
blank=True, editable=False, null=True
),
),
("email", models.TextField(blank=True, default="", max_length=5000)),
("shipping", djstripe.fields.JSONField(blank=True, null=True)),
("date_purged", models.DateTimeField(editable=False, null=True)),
(
"coupon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.coupon",
),
),
(
"default_source",
djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="customers",
to="djstripe.paymentmethod",
),
),
(
"subscriber",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="djstripe_customers",
to=DJSTRIPE_SUBSCRIBER_MODEL,
),
),
("address", djstripe.fields.JSONField(blank=True, null=True)),
(
"invoice_prefix",
models.CharField(
blank=True,
default="",
help_text="The prefix for the customer used to generate unique invoice numbers.",
max_length=255,
),
),
("invoice_settings", djstripe.fields.JSONField(blank=True, null=True)),
(
"name",
models.TextField(
blank=True,
default="",
help_text="The customer's full name or business name.",
max_length=5000,
),
),
(
"phone",
models.TextField(
blank=True,
default="",
help_text="The customer's phone number.",
max_length=5000,
),
),
("preferred_locales", djstripe.fields.JSONField(blank=True, null=True)),
(
"tax_exempt",
djstripe.fields.StripeEnumField(
default="", enum=djstripe.enums.CustomerTaxExempt, max_length=7
),
),
(
"default_payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="default payment method used for subscriptions and invoices for the customer.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={
"get_latest_by": "created",
"unique_together": {
("subscriber", "livemode", "djstripe_owner_account")
},
},
),
migrations.CreateModel(
name="Dispute",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("evidence", djstripe.fields.JSONField()),
("evidence_details", djstripe.fields.JSONField()),
(
"is_charge_refundable",
models.BooleanField(
help_text="If true, it is still possible to refund the disputed payment. Once the payment has been fully refunded, no further funds will be withdrawn from your Stripe account as a result of this dispute."
),
),
(
"reason",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.DisputeReason, max_length=25
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.DisputeStatus, max_length=22
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Event",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"api_version",
models.CharField(
blank=True,
help_text="the API version at which the event data was rendered. Blank for old entries only, all new entries will have this value",
max_length=15,
),
),
("data", djstripe.fields.JSONField()),
(
"request_id",
models.CharField(
blank=True,
default="",
help_text="Information about the request that triggered this event, for traceability purposes. If empty string then this is an old entry without that data. If Null then this is not an old entry, but a Stripe 'automated' event with no associated request.",
max_length=50,
),
),
("idempotency_key", models.TextField(blank=True, default="")),
(
"type",
models.CharField(
help_text="Stripe's event description code", max_length=250
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="FileUpload",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"filename",
models.CharField(
help_text="A filename for the file, suitable for saving to a filesystem.",
max_length=255,
),
),
(
"purpose",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.FilePurpose, max_length=35
),
),
(
"size",
models.IntegerField(
help_text="The size in bytes of the file upload object."
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.FileType, max_length=4
),
),
(
"url",
models.CharField(
help_text="A read-only URL where the uploaded file can be accessed.",
max_length=200,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="DjstripePaymentMethod",
fields=[
(
"id",
models.CharField(max_length=255, primary_key=True, serialize=False),
),
("type", models.CharField(db_index=True, max_length=50)),
],
),
migrations.CreateModel(
name="Plan",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"active",
models.BooleanField(
help_text="Whether the plan can be used for new purchases."
),
),
(
"aggregate_usage",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.PlanAggregateUsage,
max_length=18,
),
),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"amount_decimal",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=12, max_digits=19, null=True
),
),
(
"billing_scheme",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.BillingScheme,
max_length=8,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"interval",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PlanInterval, max_length=5
),
),
(
"interval_count",
models.PositiveIntegerField(
blank=True,
help_text="The number of intervals (specified in the interval property) between each subscription billing.",
null=True,
),
),
(
"nickname",
models.TextField(
blank=True,
default="",
help_text="A brief description of the plan, hidden from customers.",
max_length=5000,
),
),
("tiers", djstripe.fields.JSONField(blank=True, null=True)),
(
"tiers_mode",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.PriceTiersMode,
max_length=9,
null=True,
),
),
("transform_usage", djstripe.fields.JSONField(blank=True, null=True)),
(
"trial_period_days",
models.IntegerField(
blank=True,
help_text="Number of trial period days granted when subscribing a customer to this plan. Null if the plan has no trial period.",
null=True,
),
),
(
"usage_type",
djstripe.fields.StripeEnumField(
default="licensed",
enum=djstripe.enums.PriceUsageType,
max_length=8,
),
),
],
options={"ordering": ["amount"]},
),
migrations.CreateModel(
name="Product",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"name",
models.TextField(
help_text="The product's name, meant to be displayable to the customer. Applicable to both `service` and `good` types.",
max_length=5000,
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ProductType, max_length=7
),
),
(
"active",
models.BooleanField(
help_text="Whether the product is currently available for purchase. Only applicable to products of `type=good`.",
null=True,
),
),
("attributes", djstripe.fields.JSONField(blank=True, null=True)),
(
"caption",
models.TextField(
blank=True,
default="",
help_text="A short one-line description of the product, meant to be displayableto the customer. Only applicable to products of `type=good`.",
max_length=5000,
),
),
("deactivate_on", djstripe.fields.JSONField(blank=True, null=True)),
("images", djstripe.fields.JSONField(blank=True, null=True)),
(
"package_dimensions",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"shippable",
models.BooleanField(
blank=True,
help_text="Whether this product is a shipped good. Only applicable to products of `type=good`.",
null=True,
),
),
(
"url",
models.CharField(
blank=True,
help_text="A URL of a publicly-accessible webpage for this product. Only applicable to products of `type=good`.",
max_length=799,
null=True,
),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="Extra information about a product which will appear on your customer's credit card statement. In the case that multiple products are billed at once, the first statement descriptor will be used. Only available on products of type=`service`.",
max_length=22,
),
),
("unit_label", models.CharField(blank=True, default="", max_length=12)),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"application_fee_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0),
],
),
),
(
"collection_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.InvoiceCollectionMethod, max_length=20
),
),
(
"billing_cycle_anchor",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"cancel_at_period_end",
models.BooleanField(
default=False,
help_text="If the subscription has been canceled with the ``at_period_end`` flag set to true, ``cancel_at_period_end`` on the subscription will be true. You can use this attribute to determine whether a subscription that has a status of active is scheduled to be canceled at the end of the current period.",
),
),
(
"canceled_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
("current_period_end", djstripe.fields.StripeDateTimeField()),
("current_period_start", djstripe.fields.StripeDateTimeField()),
(
"days_until_due",
models.IntegerField(
blank=True,
help_text="Number of days a customer has to pay invoices generated by this subscription. This value will be `null` for subscriptions where `billing=charge_automatically`.",
null=True,
),
),
("discount", djstripe.fields.JSONField(blank=True, null=True)),
(
"ended_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"next_pending_invoice_item_invoice",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"pending_invoice_item_interval",
djstripe.fields.JSONField(blank=True, null=True),
),
("pending_update", djstripe.fields.JSONField(blank=True, null=True)),
(
"quantity",
models.IntegerField(
blank=True,
help_text="The quantity applied to this subscription. This value will be `null` for multi-plan subscriptions",
null=True,
),
),
(
"start_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SubscriptionStatus, max_length=18
),
),
(
"tax_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0),
],
),
),
(
"trial_end",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"trial_start",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The customer associated with this subscription.",
),
),
(
"plan",
models.ForeignKey(
blank=True,
help_text="The plan associated with this subscription. This value will be `null` for multi-plan subscriptions",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="djstripe.plan",
),
),
(
"billing_thresholds",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"cancel_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Transfer",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_reversed",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("destination", djstripe.fields.StripeIdField(max_length=255)),
(
"destination_payment",
djstripe.fields.StripeIdField(
blank=True, max_length=255, null=True
),
),
(
"reversed",
models.BooleanField(
default=False,
help_text="Whether or not the transfer has been fully reversed. If the transfer is only partially reversed, this attribute will still be false.",
),
),
(
"source_transaction",
djstripe.fields.StripeIdField(max_length=255, null=True),
),
(
"source_type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.LegacySourceType, max_length=16
),
),
(
"transfer_group",
models.CharField(
blank=True,
default="",
help_text="A string that identifies this transaction as part of a group.",
max_length=255,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="WebhookEventTrigger",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
(
"remote_ip",
models.GenericIPAddressField(
help_text="IP address of the request client."
),
),
("headers", djstripe.fields.JSONField()),
("body", models.TextField(blank=True)),
(
"valid",
models.BooleanField(
default=False,
help_text="Whether or not the webhook event has passed validation",
),
),
(
"processed",
models.BooleanField(
default=False,
help_text="Whether or not the webhook event has been successfully processed",
),
),
("exception", models.CharField(blank=True, max_length=128)),
(
"traceback",
models.TextField(
blank=True,
help_text="Traceback if an exception was thrown during processing",
),
),
(
"djstripe_version",
models.CharField(
default=djstripe.models.webhooks._get_version,
help_text="The version of dj-stripe when the webhook was received",
max_length=32,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("updated", models.DateTimeField(auto_now=True)),
(
"event",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.event",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Event object contained in the (valid) Webhook",
),
),
],
),
migrations.AddField(
model_name="paymentmethod",
name="customer",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="payment_methods",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Customer to which this PaymentMethod is saved. This will not be set when the PaymentMethod has not been saved to a Customer.",
),
),
migrations.AddField(
model_name="plan",
name="product",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.product",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The product whose pricing this plan determines.",
),
),
migrations.CreateModel(
name="Invoice",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount_due",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_paid",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"amount_remaining",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"application_fee_amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"attempt_count",
models.IntegerField(
help_text="Number of payment attempts made for this invoice, from the perspective of the payment retry schedule. Any payment attempt counts as the first attempt, and subsequently only automatic retries increment the attempt count. In other words, manual payment attempts after the first attempt do not affect the retry schedule."
),
),
(
"attempted",
models.BooleanField(
default=False,
help_text="Whether or not an attempt has been made to pay the invoice. An invoice is not attempted until 1 hour after the ``invoice.created`` webhook, for example, so you might not want to display that invoice as unpaid to your users.",
),
),
(
"collection_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.InvoiceCollectionMethod,
max_length=20,
null=True,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"due_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"ending_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(null=True),
),
(
"hosted_invoice_url",
models.TextField(
blank=True,
default="",
help_text="The URL for the hosted invoice page, which allows customers to view and pay an invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"invoice_pdf",
models.TextField(
blank=True,
default="",
help_text="The link to download the PDF for the invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"next_payment_attempt",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"number",
models.CharField(
blank=True,
default="",
help_text="A unique, identifying string that appears on emails sent to the customer for this invoice. This starts with the customer's unique invoice_prefix if it is specified.",
max_length=64,
),
),
(
"paid",
models.BooleanField(
default=False,
help_text="Whether payment was successfully collected for this invoice. An invoice can be paid (most commonly) with a charge or with credit from the customer's account balance.",
),
),
("period_end", djstripe.fields.StripeDateTimeField()),
("period_start", djstripe.fields.StripeDateTimeField()),
(
"receipt_number",
models.CharField(
blank=True,
help_text="This is the transaction number that appears on email receipts sent for this invoice.",
max_length=64,
null=True,
),
),
(
"starting_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="An arbitrary string to be displayed on your customer's credit card statement. The statement description may not include <>\"' characters, and will appear on your customer's statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.",
max_length=22,
),
),
(
"subscription_proration_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"subtotal",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"tax",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"tax_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0),
],
),
),
(
"total",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2,
max_digits=11,
verbose_name="Total (as decimal) after discount.",
),
),
(
"webhooks_delivered_at",
djstripe.fields.StripeDateTimeField(null=True),
),
(
"charge",
models.OneToOneField(
help_text="The latest charge generated for this invoice, if any.",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="latest_%(class)s",
to="djstripe.charge",
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)ss",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The customer associated with this invoice.",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)ss",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The subscription that this invoice was prepared for, if any.",
),
),
(
"auto_advance",
models.BooleanField(
help_text="Controls whether Stripe will perform automatic collection of the invoice. When false, the invoice's state will not automatically advance without an explicit action.",
null=True,
),
),
(
"status_transitions",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"account_country",
models.CharField(
blank=True,
default="",
help_text="The country of the business associated with this invoice, most often the business creating the invoice.",
max_length=2,
),
),
(
"account_name",
models.TextField(
blank=True,
help_text="The public name of the business associated with this invoice, most often the business creating the invoice.",
max_length=5000,
),
),
(
"billing_reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceBillingReason,
max_length=22,
),
),
(
"customer_address",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"customer_email",
models.TextField(
blank=True,
help_text="The customer's email. Until the invoice is finalized, this field will equal customer.email. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_name",
models.TextField(
blank=True,
help_text="The customer's name. Until the invoice is finalized, this field will equal customer.name. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_phone",
models.TextField(
blank=True,
help_text="The customer's phone number. Until the invoice is finalized, this field will equal customer.phone. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_shipping",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"customer_tax_exempt",
djstripe.fields.StripeEnumField(
default="", enum=djstripe.enums.CustomerTaxExempt, max_length=7
),
),
(
"footer",
models.TextField(
blank=True,
help_text="Footer displayed on the invoice.",
max_length=5000,
),
),
(
"post_payment_credit_notes_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"pre_payment_credit_notes_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"threshold_reason",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"status",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceStatus,
max_length=13,
),
),
(
"discount",
djstripe.fields.JSONField(blank=True, null=True),
),
],
options={"get_latest_by": "created", "ordering": ["-created"]},
),
migrations.CreateModel(
name="IdempotencyKey",
fields=[
(
"uuid",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("action", models.CharField(max_length=100)),
(
"livemode",
models.BooleanField(
help_text="Whether the key was used in live or test mode."
),
),
("created", models.DateTimeField(auto_now_add=True)),
],
options={"unique_together": {("action", "livemode")}},
),
migrations.AddField(
model_name="charge",
name="customer",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The customer associated with this charge.",
),
),
migrations.AddField(
model_name="charge",
name="dispute",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.dispute",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Details about the dispute if the charge has been disputed.",
),
),
migrations.AddField(
model_name="charge",
name="invoice",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="charges",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The invoice this charge is for if one exists.",
),
),
migrations.AddField(
model_name="charge",
name="source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.paymentmethod",
),
),
migrations.AddField(
model_name="charge",
name="transfer",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.transfer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The transfer to the `destination` account (only applicable if the charge was created using the `destination` parameter).",
),
),
migrations.CreateModel(
name="BankAccount",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"account_holder_name",
models.TextField(
blank=True,
help_text="The name of the person or business that owns the bank account.",
max_length=5000,
),
),
(
"account_holder_type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BankAccountHolderType, max_length=10
),
),
(
"bank_name",
models.CharField(
help_text="Name of the bank associated with the routing number (e.g., `WELLS FARGO`).",
max_length=255,
),
),
(
"country",
models.CharField(
help_text="Two-letter ISO code representing the country the bank account is located in.",
max_length=2,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"default_for_currency",
models.BooleanField(
help_text="Whether this external account is the default account for its currency.",
null=True,
),
),
(
"fingerprint",
models.CharField(
help_text="Uniquely identifies this particular bank account. You can use this attribute to check whether two bank accounts are the same.",
max_length=16,
),
),
("last4", models.CharField(max_length=4)),
(
"routing_number",
models.CharField(
help_text="The routing transit number for the bank account.",
max_length=255,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BankAccountStatus, max_length=19
),
),
(
"account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="bank_account",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="bank_account",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="BalanceTransaction",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("available_on", djstripe.fields.StripeDateTimeField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"exchange_rate",
models.DecimalField(decimal_places=6, max_digits=8, null=True),
),
("fee", djstripe.fields.StripeQuantumCurrencyAmountField()),
("fee_details", djstripe.fields.JSONField()),
("net", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BalanceTransactionStatus, max_length=9
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BalanceTransactionType, max_length=29
),
),
(
"reporting_category",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BalanceTransactionReportingCategory,
max_length=29,
),
),
("source", djstripe.fields.StripeIdField(max_length=255)),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="ApplicationFee",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("amount_refunded", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"refunded",
models.BooleanField(
help_text="Whether the fee has been fully refunded. If the fee is only partially refunded, this attribute will still be false."
),
),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Balance transaction that describes the impact on your account balance.",
),
),
(
"charge",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.charge",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The charge that the application fee was taken from.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="balance_transaction",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The balance transaction that describes the impact of this charge on your account balance (not including refunds or disputes).",
),
),
migrations.AddField(
model_name="transfer",
name="balance_transaction",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Balance transaction that describes the impact on your account balance.",
),
),
migrations.CreateModel(
name="SetupIntent",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"application",
models.CharField(
blank=True,
help_text="ID of the Connect application that created the SetupIntent.",
max_length=255,
),
),
(
"cancellation_reason",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.SetupIntentCancellationReason,
max_length=21,
),
),
(
"client_secret",
models.TextField(
blank=True,
help_text="The client secret of this SetupIntent. Used for client-side retrieval using a publishable key.",
max_length=5000,
),
),
("last_setup_error", djstripe.fields.JSONField(blank=True, null=True)),
("next_action", djstripe.fields.JSONField(blank=True, null=True)),
("payment_method_types", djstripe.fields.JSONField()),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SetupIntentStatus, max_length=23
),
),
(
"usage",
djstripe.fields.StripeEnumField(
default="off_session",
enum=djstripe.enums.IntentUsage,
max_length=11,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Customer this SetupIntent belongs to, if one exists.",
),
),
(
"on_behalf_of",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="setup_intents",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The account (if any) for which the setup is intended.",
),
),
(
"payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Payment method used in this PaymentIntent.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="PaymentIntent",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"amount_capturable",
djstripe.fields.StripeQuantumCurrencyAmountField(),
),
("amount_received", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"canceled_at",
djstripe.fields.StripeDateTimeField(
blank=True, default=None, null=True
),
),
(
"cancellation_reason",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.PaymentIntentCancellationReason,
max_length=21,
),
),
(
"capture_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CaptureMethod, max_length=9
),
),
(
"client_secret",
models.TextField(
help_text="The client secret of this PaymentIntent. Used for client-side retrieval using a publishable key.",
max_length=5000,
),
),
(
"confirmation_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ConfirmationMethod, max_length=9
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"description",
models.TextField(
blank=True,
default="",
help_text="An arbitrary string attached to the object. Often useful for displaying to users.",
max_length=1000,
),
),
(
"last_payment_error",
djstripe.fields.JSONField(blank=True, null=True),
),
("next_action", djstripe.fields.JSONField(blank=True, null=True)),
("payment_method_types", djstripe.fields.JSONField()),
(
"receipt_email",
models.CharField(
blank=True,
help_text="Email address that the receipt for the resulting payment will be sent to.",
max_length=255,
),
),
(
"setup_future_usage",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.IntentUsage,
max_length=11,
null=True,
),
),
("shipping", djstripe.fields.JSONField(blank=True, null=True)),
(
"statement_descriptor",
models.CharField(
blank=True,
help_text="For non-card charges, you can use this value as the complete description that appears on your customers' statements. Must contain at least one letter, maximum 22 characters.",
max_length=22,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PaymentIntentStatus, max_length=23
),
),
("transfer_data", djstripe.fields.JSONField(blank=True, null=True)),
(
"transfer_group",
models.CharField(
blank=True,
help_text="A string that identifies the resulting payment as part of a group. See the PaymentIntents Connect usage guide for details.",
max_length=255,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Customer this PaymentIntent is for if one exists.",
),
),
(
"on_behalf_of",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="payment_intents",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The account (if any) for which the funds of the PaymentIntent are intended.",
),
),
(
"payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Payment method used in this PaymentIntent.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="payment_intent",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.paymentintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="PaymentIntent associated with this charge, if one exists.",
),
),
migrations.AddField(
model_name="invoice",
name="payment_intent",
field=models.OneToOneField(
help_text="The PaymentIntent associated with this invoice. The PaymentIntent is generated when the invoice is finalized, and can then be used to pay the invoice.Note that voiding an invoice will cancel the PaymentIntent",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.paymentintent",
),
),
migrations.AddField(
model_name="subscription",
name="pending_setup_intent",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="setup_intents",
to="djstripe.setupintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="We can use this SetupIntent to collect user authentication when creating a subscription without immediate payment or updating a subscription's payment method, allowing you to optimize for off-session payments.",
),
),
migrations.AddField(
model_name="charge",
name="payment_method",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="PaymentMethod used in this charge.",
),
),
migrations.AddField(
model_name="invoice",
name="default_payment_method",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Default payment method for the invoice. It must belong to the customer associated with the invoice. If not set, defaults to the subscription's default payment method, if any, or to the default payment method in the customer's invoice settings.",
),
),
migrations.CreateModel(
name="UpcomingInvoice",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"account_country",
models.CharField(
blank=True,
default="",
help_text="The country of the business associated with this invoice, most often the business creating the invoice.",
max_length=2,
),
),
(
"account_name",
models.TextField(
blank=True,
help_text="The public name of the business associated with this invoice, most often the business creating the invoice.",
max_length=5000,
),
),
(
"amount_due",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_paid",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"amount_remaining",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"application_fee_amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"attempt_count",
models.IntegerField(
help_text="Number of payment attempts made for this invoice, from the perspective of the payment retry schedule. Any payment attempt counts as the first attempt, and subsequently only automatic retries increment the attempt count. In other words, manual payment attempts after the first attempt do not affect the retry schedule."
),
),
(
"attempted",
models.BooleanField(
default=False,
help_text="Whether or not an attempt has been made to pay the invoice. An invoice is not attempted until 1 hour after the ``invoice.created`` webhook, for example, so you might not want to display that invoice as unpaid to your users.",
),
),
(
"auto_advance",
models.BooleanField(
help_text="Controls whether Stripe will perform automatic collection of the invoice. When false, the invoice's state will not automatically advance without an explicit action.",
null=True,
),
),
(
"billing_reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceBillingReason,
max_length=22,
),
),
(
"collection_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.InvoiceCollectionMethod,
max_length=20,
null=True,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("customer_address", djstripe.fields.JSONField(blank=True, null=True)),
(
"customer_email",
models.TextField(
blank=True,
help_text="The customer's email. Until the invoice is finalized, this field will equal customer.email. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_name",
models.TextField(
blank=True,
help_text="The customer's name. Until the invoice is finalized, this field will equal customer.name. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_phone",
models.TextField(
blank=True,
help_text="The customer's phone number. Until the invoice is finalized, this field will equal customer.phone. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
("customer_shipping", djstripe.fields.JSONField(blank=True, null=True)),
(
"customer_tax_exempt",
djstripe.fields.StripeEnumField(
default="", enum=djstripe.enums.CustomerTaxExempt, max_length=7
),
),
(
"due_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"ending_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(null=True),
),
(
"footer",
models.TextField(
blank=True,
help_text="Footer displayed on the invoice.",
max_length=5000,
),
),
(
"hosted_invoice_url",
models.TextField(
blank=True,
default="",
help_text="The URL for the hosted invoice page, which allows customers to view and pay an invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"invoice_pdf",
models.TextField(
blank=True,
default="",
help_text="The link to download the PDF for the invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"next_payment_attempt",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"number",
models.CharField(
blank=True,
default="",
help_text="A unique, identifying string that appears on emails sent to the customer for this invoice. This starts with the customer's unique invoice_prefix if it is specified.",
max_length=64,
),
),
(
"paid",
models.BooleanField(
default=False,
help_text="Whether payment was successfully collected for this invoice. An invoice can be paid (most commonly) with a charge or with credit from the customer's account balance.",
),
),
("period_end", djstripe.fields.StripeDateTimeField()),
("period_start", djstripe.fields.StripeDateTimeField()),
(
"post_payment_credit_notes_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"pre_payment_credit_notes_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"receipt_number",
models.CharField(
blank=True,
help_text="This is the transaction number that appears on email receipts sent for this invoice.",
max_length=64,
null=True,
),
),
(
"starting_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="An arbitrary string to be displayed on your customer's credit card statement. The statement description may not include <>\"' characters, and will appear on your customer's statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.",
max_length=22,
),
),
(
"status_transitions",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"subscription_proration_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"subtotal",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"tax",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"tax_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
("threshold_reason", djstripe.fields.JSONField(blank=True, null=True)),
(
"total",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2,
max_digits=11,
verbose_name="Total (as decimal) after discount.",
),
),
(
"webhooks_delivered_at",
djstripe.fields.StripeDateTimeField(null=True),
),
(
"charge",
models.OneToOneField(
help_text="The latest charge generated for this invoice, if any.",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="latest_%(class)s",
to="djstripe.charge",
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="%(class)ss",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The customer associated with this invoice.",
),
),
(
"default_payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Default payment method for the invoice. It must belong to the customer associated with the invoice. If not set, defaults to the subscription's default payment method, if any, or to the default payment method in the customer's invoice settings.",
),
),
(
"payment_intent",
models.OneToOneField(
help_text="The PaymentIntent associated with this invoice. The PaymentIntent is generated when the invoice is finalized, and can then be used to pay the invoice.Note that voiding an invoice will cancel the PaymentIntent",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.paymentintent",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="%(class)ss",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The subscription that this invoice was prepared for, if any.",
),
),
(
"status",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceStatus,
max_length=13,
),
),
(
"default_source",
djstripe.fields.PaymentMethodForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="upcoming_invoices",
to="djstripe.djstripepaymentmethod",
),
),
("discount", djstripe.fields.JSONField(blank=True, null=True)),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"get_latest_by": "created", "ordering": ["-created"]},
),
migrations.CreateModel(
name="TaxRate",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"active",
models.BooleanField(
default=True,
help_text="Defaults to true. When set to false, this tax rate cannot be applied to objects in the API, but will still be applied to subscriptions and invoices that already have it set.",
),
),
(
"display_name",
models.CharField(
blank=True,
default="",
help_text="The display name of the tax rates as it will appear to your customer on their receipt email, PDF, and the hosted invoice page.",
max_length=50,
),
),
(
"inclusive",
models.BooleanField(
help_text="This specifies if the tax rate is inclusive or exclusive."
),
),
(
"jurisdiction",
models.CharField(
blank=True,
default="",
help_text="The jurisdiction for the tax rate.",
max_length=50,
),
),
(
"percentage",
djstripe.fields.StripePercentField(
decimal_places=2,
max_digits=5,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={
"get_latest_by": "created",
"verbose_name": "Tax Rate",
},
),
migrations.AddField(
model_name="invoice",
name="default_tax_rates",
field=models.ManyToManyField(
blank=True,
db_table="djstripe_djstripeinvoicedefaulttaxrate",
help_text="The tax rates applied to this invoice, if any.",
related_name="+",
to="djstripe.TaxRate",
),
),
migrations.AddField(
model_name="subscription",
name="default_tax_rates",
field=models.ManyToManyField(
blank=True,
db_table="djstripe_djstripesubscriptiondefaulttaxrate",
help_text="The tax rates that will apply to any subscription item that does not have tax_rates set. Invoices created will have their default_tax_rates populated from the subscription.",
related_name="+",
to="djstripe.TaxRate",
),
),
migrations.CreateModel(
name="DjstripeUpcomingInvoiceTotalTaxAmount",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"inclusive",
models.BooleanField(
help_text="Whether this tax amount is inclusive or exclusive."
),
),
(
"invoice",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="djstripe.upcominginvoice",
),
),
(
"tax_rate",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.taxrate",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The tax rate that was applied to get this tax amount.",
),
),
],
options={"unique_together": {("invoice", "tax_rate")}},
),
migrations.CreateModel(
name="DjstripeInvoiceTotalTaxAmount",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"inclusive",
models.BooleanField(
help_text="Whether this tax amount is inclusive or exclusive."
),
),
(
"invoice",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="total_tax_amounts",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"tax_rate",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.taxrate",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The tax rate that was applied to get this tax amount.",
),
),
],
options={"unique_together": {("invoice", "tax_rate")}},
),
migrations.AddField(
model_name="subscription",
name="default_payment_method",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The default payment method for the subscription. It must belong to the customer associated with the subscription. If not set, invoices will use the default payment method in the customer's invoice settings.",
),
),
migrations.AddField(
model_name="invoice",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoices",
to="djstripe.djstripepaymentmethod",
),
),
migrations.AddField(
model_name="subscription",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="subscriptions",
to="djstripe.djstripepaymentmethod",
),
),
migrations.CreateModel(
name="ApplicationFeeRefund",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Balance transaction that describes the impact on your account balance.",
),
),
(
"fee",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="refunds",
to="djstripe.applicationfee",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The application fee that was refunded",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Card",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"address_city",
models.TextField(
blank=True,
default="",
help_text="City/District/Suburb/Town/Village.",
max_length=5000,
),
),
(
"address_country",
models.TextField(
blank=True,
default="",
help_text="Billing address country.",
max_length=5000,
),
),
(
"address_line1",
models.TextField(
blank=True,
default="",
help_text="Street address/PO Box/Company name.",
max_length=5000,
),
),
(
"address_line1_check",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardCheckResult,
max_length=11,
),
),
(
"address_line2",
models.TextField(
blank=True,
default="",
help_text="Apartment/Suite/Unit/Building.",
max_length=5000,
),
),
(
"address_state",
models.TextField(
blank=True,
default="",
help_text="State/County/Province/Region.",
max_length=5000,
),
),
(
"address_zip",
models.TextField(
blank=True,
default="",
help_text="ZIP or postal code.",
max_length=5000,
),
),
(
"address_zip_check",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardCheckResult,
max_length=11,
),
),
(
"brand",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CardBrand, max_length=16
),
),
(
"country",
models.CharField(
blank=True,
default="",
help_text="Two-letter ISO code representing the country of the card.",
max_length=2,
),
),
(
"cvc_check",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardCheckResult,
max_length=11,
),
),
(
"dynamic_last4",
models.CharField(
blank=True,
default="",
help_text="(For tokenized numbers only.) The last four digits of the device account number.",
max_length=4,
),
),
("exp_month", models.IntegerField(help_text="Card expiration month.")),
("exp_year", models.IntegerField(help_text="Card expiration year.")),
(
"fingerprint",
models.CharField(
blank=True,
default="",
help_text="Uniquely identifies this particular card number.",
max_length=16,
),
),
(
"funding",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CardFundingType, max_length=7
),
),
(
"last4",
models.CharField(
help_text="Last four digits of Card number.", max_length=4
),
),
(
"name",
models.TextField(
blank=True,
default="",
help_text="Cardholder name.",
max_length=5000,
),
),
(
"tokenization_method",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardTokenizationMethod,
max_length=11,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="legacy_cards",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="djstripe_owner_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
migrations.CreateModel(
name="CountrySpec",
fields=[
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"id",
models.CharField(max_length=2, primary_key=True, serialize=False),
),
(
"default_currency",
djstripe.fields.StripeCurrencyCodeField(max_length=3),
),
("supported_bank_account_currencies", djstripe.fields.JSONField()),
("supported_payment_currencies", djstripe.fields.JSONField()),
("supported_payment_methods", djstripe.fields.JSONField()),
("supported_transfer_countries", djstripe.fields.JSONField()),
("verification_fields", djstripe.fields.JSONField()),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="invoice",
name="djstripe_owner_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
migrations.AddField(
model_name="paymentmethod",
name="djstripe_owner_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
migrations.AddField(
model_name="plan",
name="djstripe_owner_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
migrations.CreateModel(
name="Refund",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"failure_reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.RefundFailureReason,
max_length=24,
),
),
(
"reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.RefundReason,
max_length=25,
),
),
(
"receipt_number",
models.CharField(
blank=True,
default="",
help_text="The transaction number that appears on email receipts sent for this charge.",
max_length=9,
),
),
(
"status",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.RefundStatus, max_length=9
),
),
(
"charge",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="refunds",
to="djstripe.charge",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The charge that was refunded",
),
),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Balance transaction that describes the impact on your account balance.",
),
),
(
"failure_balance_transaction",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="failure_refunds",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="If the refund failed, this balance transaction describes the adjustment made on your account balance that reverses the initial balance transaction.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="ScheduledQueryRun",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("data_load_time", djstripe.fields.StripeDateTimeField()),
("error", djstripe.fields.JSONField(blank=True, null=True)),
("result_available_until", djstripe.fields.StripeDateTimeField()),
(
"sql",
models.TextField(help_text="SQL for the query.", max_length=5000),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ScheduledQueryRunStatus, max_length=9
),
),
(
"title",
models.TextField(help_text="Title of the query.", max_length=5000),
),
(
"file",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.fileupload",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The file object representing the results of the query.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Session",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"billing_address_collection",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.SessionBillingAddressCollection,
max_length=8,
),
),
(
"cancel_url",
models.TextField(
blank=True,
help_text="The URL the customer will be directed to if theydecide to cancel payment and return to your website.",
max_length=5000,
),
),
(
"client_reference_id",
models.TextField(
blank=True,
help_text="A unique string to reference the Checkout Session.This can be a customer ID, a cart ID, or similar, andcan be used to reconcile the session with your internal systems.",
max_length=5000,
),
),
(
"customer_email",
models.CharField(
blank=True,
help_text="If provided, this value will be used when the Customer object is created.",
max_length=255,
),
),
("display_items", djstripe.fields.JSONField(blank=True, null=True)),
(
"locale",
models.CharField(
blank=True,
help_text="The IETF language tag of the locale Checkout is displayed in.If blank or auto, the browser's locale is used.",
max_length=255,
),
),
("payment_method_types", djstripe.fields.JSONField()),
(
"submit_type",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.SubmitTypeStatus, max_length=6
),
),
(
"success_url",
models.TextField(
blank=True,
help_text="The URL the customer will be directed to after the payment or subscriptioncreation is successful.",
max_length=5000,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Customer this Checkout is for if one exists.",
),
),
(
"payment_intent",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.paymentintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="PaymentIntent created if SKUs or line items were provided.",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Subscription created if one or more plans were provided.",
),
),
(
"mode",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.SessionMode, max_length=12
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Source",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"client_secret",
models.CharField(
help_text="The client secret of the source. Used for client-side retrieval using a publishable key.",
max_length=255,
),
),
(
"currency",
djstripe.fields.StripeCurrencyCodeField(
blank=True, default="", max_length=3
),
),
(
"flow",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceFlow, max_length=17
),
),
("owner", djstripe.fields.JSONField()),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="Extra information about a source. This will appear on your customer's statement every time you charge the source.",
max_length=255,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceStatus, max_length=10
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceType, max_length=20
),
),
(
"usage",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceUsage, max_length=10
),
),
("code_verification", djstripe.fields.JSONField(blank=True, null=True)),
("receiver", djstripe.fields.JSONField(blank=True, null=True)),
("redirect", djstripe.fields.JSONField(blank=True, null=True)),
("source_data", djstripe.fields.JSONField()),
(
"customer",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="sources",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="subscription",
name="djstripe_owner_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
migrations.CreateModel(
name="SubscriptionItem",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"quantity",
models.PositiveIntegerField(
blank=True,
help_text="The quantity of the plan to which the customer should be subscribed.",
null=True,
),
),
(
"plan",
models.ForeignKey(
help_text="The plan the customer is subscribed to.",
on_delete=django.db.models.deletion.CASCADE,
related_name="subscription_items",
to="djstripe.plan",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="items",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The subscription this subscription item belongs to.",
),
),
(
"tax_rates",
models.ManyToManyField(
blank=True,
db_table="djstripe_djstripesubscriptionitemtaxrate",
help_text="The tax rates which apply to this subscription_item. When set, the default_tax_rates on the subscription do not apply to this subscription_item.",
related_name="+",
to="djstripe.TaxRate",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
(
"billing_thresholds",
djstripe.fields.JSONField(blank=True, null=True),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="transfer",
name="djstripe_owner_account",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
migrations.CreateModel(
name="TransferReversal",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="transfer_reversals",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Balance transaction that describes the impact on your account balance.",
),
),
(
"transfer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="reversals",
to="djstripe.transfer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The transfer that was reversed.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="UsageRecord",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"quantity",
models.PositiveIntegerField(
help_text="The quantity of the plan to which the customer should be subscribed."
),
),
(
"subscription_item",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="usage_records",
to="djstripe.subscriptionitem",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The subscription item this usage record contains data for.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Price",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"active",
models.BooleanField(
help_text="Whether the price can be used for new purchases."
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"nickname",
models.CharField(
blank=True,
help_text="A brief description of the plan, hidden from customers.",
max_length=250,
),
),
(
"recurring",
djstripe.fields.JSONField(blank=True, default=None, null=True),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PriceType, max_length=9
),
),
(
"unit_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"unit_amount_decimal",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=12, max_digits=19, null=True
),
),
(
"billing_scheme",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.BillingScheme, max_length=8
),
),
("tiers", djstripe.fields.JSONField(blank=True, null=True)),
(
"tiers_mode",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.PriceTiersMode,
max_length=9,
null=True,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
(
"product",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="prices",
to="djstripe.product",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The product this price is associated with.",
),
),
(
"lookup_key",
models.CharField(
blank=True,
help_text="A lookup key used to retrieve prices dynamically from a static string.",
max_length=250,
null=True,
),
),
(
"transform_quantity",
djstripe.fields.JSONField(blank=True, null=True),
),
],
options={"abstract": False, "ordering": ["unit_amount"]},
),
migrations.AddField(
model_name="subscriptionitem",
name="price",
field=models.ForeignKey(
blank=True,
help_text="The price the customer is subscribed to.",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscription_items",
to="djstripe.price",
),
),
migrations.CreateModel(
name="TaxId",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"country",
models.CharField(
help_text="Two-letter ISO code representing the country of the tax ID.",
max_length=2,
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.TaxIdType, max_length=7
),
),
(
"value",
models.CharField(help_text="Value of the tax ID.", max_length=50),
),
("verification", djstripe.fields.JSONField()),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="tax_ids",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"get_latest_by": "created", "verbose_name": "Tax ID"},
),
migrations.CreateModel(
name="InvoiceItem",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("date", djstripe.fields.StripeDateTimeField()),
(
"discountable",
models.BooleanField(
default=False,
help_text="If True, discounts will apply to this invoice item. Always False for prorations.",
),
),
("period", djstripe.fields.JSONField()),
("period_end", djstripe.fields.StripeDateTimeField()),
("period_start", djstripe.fields.StripeDateTimeField()),
(
"proration",
models.BooleanField(
default=False,
help_text="Whether or not the invoice item was created automatically as a proration adjustment when the customer switched plans.",
),
),
(
"quantity",
models.IntegerField(
blank=True,
help_text="If the invoice item is a proration, the quantity of the subscription for which the proration was computed.",
null=True,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="invoiceitems",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The customer associated with this invoiceitem.",
),
),
(
"invoice",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="invoiceitems",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The invoice to which this invoiceitem is attached.",
),
),
(
"plan",
models.ForeignKey(
help_text="If the invoice item is a proration, the plan of the subscription for which the proration was computed.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.plan",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoiceitems",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The subscription that this invoice item has been created for, if any.",
),
),
(
"tax_rates",
models.ManyToManyField(
blank=True,
db_table="djstripe_djstripeinvoiceitemtaxrate",
help_text="The tax rates which apply to this invoice item. When set, the default_tax_rates on the invoice do not apply to this invoice item.",
related_name="+",
to="djstripe.TaxRate",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
(
"unit_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"unit_amount_decimal",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=12, max_digits=19, null=True
),
),
(
"price",
models.ForeignKey(
help_text="If the invoice item is a proration, the price of the subscription for which the proration was computed.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoiceitems",
to="djstripe.price",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="SubscriptionSchedule",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"canceled_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"completed_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
("current_phase", djstripe.fields.JSONField(blank=True, null=True)),
("default_settings", djstripe.fields.JSONField(blank=True, null=True)),
(
"end_behavior",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SubscriptionScheduleEndBehavior,
max_length=7,
),
),
("phases", djstripe.fields.JSONField(blank=True, null=True)),
(
"released_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SubscriptionScheduleStatus, max_length=11
),
),
(
"customer",
models.ForeignKey(
help_text="The customer who owns the subscription schedule.",
on_delete=django.db.models.deletion.CASCADE,
related_name="schedules",
to="djstripe.customer",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
(
"released_subscription",
models.ForeignKey(
blank=True,
help_text="The subscription once managed by this subscription schedule (if it is released).",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="released_schedules",
to="djstripe.subscription",
),
),
(
"billing_thresholds",
djstripe.fields.JSONField(blank=True, null=True),
),
],
options={"get_latest_by": "created", "abstract": False},
),
migrations.AddField(
model_name="subscription",
name="schedule",
field=models.ForeignKey(
blank=True,
help_text="The schedule associated with this subscription.",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="djstripe.subscriptionschedule",
),
),
migrations.CreateModel(
name="Payout",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
("arrival_date", djstripe.fields.StripeDateTimeField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"failure_code",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.PayoutFailureCode,
max_length=23,
),
),
(
"failure_message",
models.TextField(
blank=True,
default="",
help_text="Message to user further explaining reason for payout failure if available.",
),
),
(
"method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutMethod, max_length=8
),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="Extra information about a payout to be displayed on the user's bank statement.",
max_length=255,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutStatus, max_length=10
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutType, max_length=12
),
),
(
"destination",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="djstripe.bankaccount",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Bank account or card the payout was sent to.",
),
),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="Balance transaction that describes the impact on your account balance.",
),
),
(
"failure_balance_transaction",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="failure_payouts",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="If the payout failed or was canceled, this will be the balance transaction that reversed the initial balance transaction, and puts the funds from the failed payout back in your balance.",
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
(
"automatic",
models.BooleanField(
help_text="`true` if the payout was created by an automated payout schedule, and `false` if it was requested manually.",
),
),
(
"source_type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutSourceType,
max_length=12,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="source_transfer",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="djstripe.transfer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The transfer which created this charge. Only present if the charge came from another Stripe account.",
),
),
migrations.AddField(
model_name="charge",
name="application_fee",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="fee_for_charge",
to="djstripe.applicationfee",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The application fee (if any) for the charge.",
),
),
migrations.CreateModel(
name="APIKey",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"livemode",
models.BooleanField(
help_text="Whether the key is valid for live or test mode."
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"id",
models.CharField(
default=djstripe.models.api.generate_api_key_id,
editable=False,
max_length=255,
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.APIKeyType, max_length=11
),
),
(
"name",
models.CharField(
blank=True,
help_text="An optional name to identify the key.",
max_length=100,
verbose_name="Key name",
),
),
(
"secret",
models.CharField(
help_text="The value of the key.",
max_length=128,
unique=True,
validators=[
django.core.validators.RegexValidator(
regex="^(pk|sk|rk)_(test|live)_([a-zA-Z0-9]{24,99})"
)
],
),
),
(
"djstripe_owner_account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
help_text="The Stripe Account this object belongs to.",
),
),
],
options={"get_latest_by": "created", "abstract": False},
),
]
| mit | fa82105ae79761c3029570ddd9acb976 | 43.16997 | 391 | 0.433909 | 5.484668 | false | false | false | false |
alphagov/notifications-api | app/dao/inbound_numbers_dao.py | 1 | 1411 | from app import db
from app.dao.dao_utils import autocommit
from app.models import InboundNumber
def dao_get_inbound_numbers():
    """Return every inbound number row, ordered by ``updated_at`` ascending."""
    ordered = InboundNumber.query.order_by(InboundNumber.updated_at)
    return ordered.all()
def dao_get_available_inbound_numbers():
    """Return active inbound numbers that are not assigned to any service."""
    active_numbers = InboundNumber.query.filter(InboundNumber.active)
    return active_numbers.filter(InboundNumber.service_id.is_(None)).all()
def dao_get_inbound_number_for_service(service_id):
    """Return the inbound number assigned to *service_id*, or None if there is none."""
    for_service = InboundNumber.query.filter(InboundNumber.service_id == service_id)
    return for_service.first()
def dao_get_inbound_number(inbound_number_id):
    """Return the inbound number with the given id, or None if it does not exist."""
    by_id = InboundNumber.query.filter(InboundNumber.id == inbound_number_id)
    return by_id.first()
@autocommit
def dao_set_inbound_number_to_service(service_id, inbound_number):
    """Assign *inbound_number* to the given service and stage it on the session.

    NOTE(review): the commit itself presumably happens in the ``autocommit``
    decorator (from app.dao.dao_utils) -- confirm there.
    """
    inbound_number.service_id = service_id
    db.session.add(inbound_number)
@autocommit
def dao_set_inbound_number_active_flag(service_id, active):
    """Set the ``active`` flag on the inbound number belonging to *service_id*.

    NOTE(review): if the service has no inbound number, ``first()`` returns
    None and the attribute assignment below raises AttributeError -- confirm
    callers guarantee the number exists.
    """
    inbound_number = InboundNumber.query.filter(InboundNumber.service_id == service_id).first()
    inbound_number.active = active
    db.session.add(inbound_number)
@autocommit
def dao_allocate_number_for_service(service_id, inbound_number_id):
    """Claim an inbound number for a service.

    The UPDATE only matches a row that is both active and not yet assigned,
    so a number that is inactive or already taken is left untouched and the
    allocation is reported as unavailable.
    """
    rows_matched = InboundNumber.query.filter_by(
        id=inbound_number_id, active=True, service_id=None
    ).update({"service_id": service_id})
    if rows_matched:
        return InboundNumber.query.get(inbound_number_id)
    raise Exception("Inbound number: {} is not available".format(inbound_number_id))
| mit | 15996397cab5605226ad81eb0d36221e | 31.813953 | 103 | 0.749114 | 3.383693 | false | false | false | false |
alphagov/notifications-api | app/service/callback_rest.py | 1 | 5366 | from flask import Blueprint, jsonify, request
from sqlalchemy.exc import SQLAlchemyError
from app.dao.service_callback_api_dao import (
delete_service_callback_api,
get_service_callback_api,
reset_service_callback_api,
save_service_callback_api,
)
from app.dao.service_inbound_api_dao import (
delete_service_inbound_api,
get_service_inbound_api,
reset_service_inbound_api,
save_service_inbound_api,
)
from app.errors import InvalidRequest, register_errors
from app.models import (
DELIVERY_STATUS_CALLBACK_TYPE,
ServiceCallbackApi,
ServiceInboundApi,
)
from app.schema_validation import validate
from app.service.service_callback_api_schema import (
create_service_callback_api_schema,
update_service_callback_api_schema,
)
# Blueprint for per-service callback configuration endpoints. Every route is
# nested under /service/<uuid:service_id>, so each view function receives
# service_id from the URL. register_errors (defined in app.errors) hooks the
# blueprint's error handling up -- presumably the API's shared handlers.
service_callback_blueprint = Blueprint("service_callback", __name__, url_prefix="/service/<uuid:service_id>")
register_errors(service_callback_blueprint)
@service_callback_blueprint.route("/inbound-api", methods=["POST"])
def create_service_inbound_api(service_id):
    """Create the inbound API configuration for a service; 201 on success."""
    payload = request.get_json()
    validate(payload, create_service_callback_api_schema)
    payload["service_id"] = service_id
    inbound_api = ServiceInboundApi(**payload)
    try:
        save_service_inbound_api(inbound_api)
    except SQLAlchemyError as e:
        # Known integrity errors become JSON responses; anything else re-raises.
        return handle_sql_error(e, "service_inbound_api")
    return jsonify(data=inbound_api.serialize()), 201
@service_callback_blueprint.route("/inbound-api/<uuid:inbound_api_id>", methods=["POST"])
def update_service_inbound_api(service_id, inbound_api_id):
    """Update the URL and/or bearer token of an existing inbound API."""
    payload = request.get_json()
    validate(payload, update_service_callback_api_schema)
    inbound_api = get_service_inbound_api(inbound_api_id, service_id)
    reset_service_inbound_api(
        service_inbound_api=inbound_api,
        updated_by_id=payload["updated_by_id"],
        url=payload.get("url"),
        bearer_token=payload.get("bearer_token"),
    )
    return jsonify(data=inbound_api.serialize()), 200
@service_callback_blueprint.route("/inbound-api/<uuid:inbound_api_id>", methods=["GET"])
def fetch_service_inbound_api(service_id, inbound_api_id):
    """Return the serialized inbound API configuration for a service."""
    return jsonify(data=get_service_inbound_api(inbound_api_id, service_id).serialize()), 200
@service_callback_blueprint.route("/inbound-api/<uuid:inbound_api_id>", methods=["DELETE"])
def remove_service_inbound_api(service_id, inbound_api_id):
    """Delete a service's inbound API configuration; 404 if it does not exist."""
    inbound_api = get_service_inbound_api(inbound_api_id, service_id)
    if not inbound_api:
        raise InvalidRequest("Service inbound API not found", status_code=404)
    delete_service_inbound_api(inbound_api)
    return "", 204
@service_callback_blueprint.route("/delivery-receipt-api", methods=["POST"])
def create_service_callback_api(service_id):
    """Create a delivery-status callback configuration for a service; 201 on success."""
    payload = request.get_json()
    validate(payload, create_service_callback_api_schema)
    payload["service_id"] = service_id
    payload["callback_type"] = DELIVERY_STATUS_CALLBACK_TYPE
    callback_api = ServiceCallbackApi(**payload)
    try:
        save_service_callback_api(callback_api)
    except SQLAlchemyError as e:
        # Known integrity errors become JSON responses; anything else re-raises.
        return handle_sql_error(e, "service_callback_api")
    return jsonify(data=callback_api.serialize()), 201
@service_callback_blueprint.route("/delivery-receipt-api/<uuid:callback_api_id>", methods=["POST"])
def update_service_callback_api(service_id, callback_api_id):
    """Update the URL and/or bearer token of an existing delivery-status callback."""
    payload = request.get_json()
    validate(payload, update_service_callback_api_schema)
    callback_api = get_service_callback_api(callback_api_id, service_id)
    reset_service_callback_api(
        service_callback_api=callback_api,
        updated_by_id=payload["updated_by_id"],
        url=payload.get("url"),
        bearer_token=payload.get("bearer_token"),
    )
    return jsonify(data=callback_api.serialize()), 200
@service_callback_blueprint.route("/delivery-receipt-api/<uuid:callback_api_id>", methods=["GET"])
def fetch_service_callback_api(service_id, callback_api_id):
    """Return the serialized delivery-status callback configuration for a service."""
    return jsonify(data=get_service_callback_api(callback_api_id, service_id).serialize()), 200
@service_callback_blueprint.route("/delivery-receipt-api/<uuid:callback_api_id>", methods=["DELETE"])
def remove_service_callback_api(service_id, callback_api_id):
    """Delete a service's delivery-status callback; 404 if it does not exist."""
    callback_api = get_service_callback_api(callback_api_id, service_id)
    if not callback_api:
        raise InvalidRequest("Service delivery receipt callback API not found", status_code=404)
    delete_service_callback_api(callback_api)
    return "", 204
def handle_sql_error(e, table_name):
    """Translate known database integrity errors into JSON API responses.

    :param e: the SQLAlchemyError raised while saving a callback/inbound row
    :param table_name: the table the insert targeted; used to build the
        constraint names to look for in the driver's error text
    :return: a ``(response, status)`` tuple for the two recognised failures
    :raises: re-raises ``e`` unchanged for anything unrecognised

    The duplicated ``hasattr(e, "orig") and hasattr(e.orig, "pgerror") ...``
    guard from both original branches is factored into a single getattr
    chain, which is behaviourally equivalent (falsy when any link is missing).
    """
    # DBAPI-level error text (pgerror, as exposed by the postgres driver),
    # or None when the exception carries no driver error.
    pgerror = getattr(getattr(e, "orig", None), "pgerror", None)

    if pgerror:
        unique_violation = 'duplicate key value violates unique constraint "ix_{}_service_id"'.format(table_name)
        if unique_violation in pgerror:
            # The unique index on service_id means one row per service.
            return (
                jsonify(result="error", message={"name": ["You can only have one URL and bearer token for your service."]}),
                400,
            )
        foreign_key_violation = (
            'insert or update on table "{0}" violates '
            'foreign key constraint "{0}_service_id_fkey"'.format(table_name)
        )
        if foreign_key_violation in pgerror:
            # The service_id foreign key failed: no such service.
            return jsonify(result="error", message="No result found"), 404
    raise e
| mit | 05d8b8f3abc2ea5ceae24fdfa09d9a71 | 33.844156 | 120 | 0.689154 | 3.539578 | false | false | false | false |
alphagov/notifications-api | migrations/versions/0330_broadcast_invite_email.py | 1 | 2200 | """
Revision ID: 0330_broadcast_invite_email
Revises: 0329_purge_broadcast_data
Create Date: 2020-09-15 14:17:01.963181
"""
# revision identifiers, used by Alembic.
from datetime import datetime
from alembic import op
revision = "0330_broadcast_invite_email"
down_revision = "0329_purge_broadcast_data"
# Fixed identifiers for the rows this migration inserts: the creating user,
# the owning service, and the new template's id. NOTE(review): these look
# like well-known Notify user/service UUIDs -- confirm against sibling
# migrations before reuse.
user_id = "6af522d0-2915-4e52-83a3-3690455a5fe6"
service_id = "d6aa2c68-a2d9-4437-ab19-3ae8eb202553"
template_id = "46152f7c-6901-41d5-8590-a5624d0d4359"
broadcast_invitation_template_name = "Notify broadcast invitation email"
broadcast_invitation_subject = "((user_name)) has invited you to join ((service_name)) on GOV.UK Notify"
# Email body; the ((...)) tokens are personalisation placeholders filled in
# at send time (presumably by Notify's templating -- verify).
broadcast_invitation_content = """((user_name)) has invited you to join ((service_name)) on GOV.UK Notify.
In an emergency, use Notify to broadcast an alert, warning the public about an imminent risk to life.
Use this link to join the team:
((url))
This invitation will stop working at midnight tomorrow. This is to keep ((service_name)) secure.
Thanks
GOV.UK Notify team
https://www.gov.uk/notify
"""
def upgrade():
    """Insert the broadcast invitation email template.

    The same row (version 1 of the template) is written to both the
    ``templates_history`` table and the live ``templates`` table.
    """
    insert_query = """
        INSERT INTO {}
        (id, name, template_type, created_at, content, archived, service_id,
        subject, created_by_id, version, process_type, hidden)
        VALUES
        ('{}', '{}', 'email', '{}', '{}', False, '{}', '{}', '{}', 1, 'normal', False)
    """
    # One INSERT per table; the arguments differ only in the table name.
    for table_name in ("templates_history", "templates"):
        op.execute(
            insert_query.format(
                table_name,
                template_id,
                broadcast_invitation_template_name,
                datetime.utcnow(),
                broadcast_invitation_content,
                service_id,
                broadcast_invitation_subject,
                user_id,
            )
        )
def downgrade():
    """Remove the broadcast invitation template rows inserted by upgrade()."""
    op.get_bind()
    for table_name in ("templates", "templates_history"):
        op.execute("delete from {} where id = '{}'".format(table_name, template_id))
| mit | fd659d9b6427085af80b6c6295bee999 | 27.179487 | 106 | 0.636488 | 3.407752 | false | false | false | false |
alphagov/notifications-api | migrations/versions/0128_noti_to_sms_sender.py | 1 | 2236 | """
Revision ID: 0128_noti_to_sms_sender
Revises: 0127_remove_unique_constraint
Create Date: 2017-10-26 15:17:00.752706
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "0128_noti_to_sms_sender"
down_revision = "0127_remove_unique_constraint"
def upgrade():
    """Apply the migration.

    - Renames the ``service_letter_contacts`` service_id index by creating
      the correctly-pluralised index first, then dropping the old one.
    - Adds a ``service_id`` index to ``service_sms_senders``.
    - Converts ``templates_history.template_type`` to the ``template_type``
      enum via an explicit cast.
    - Creates the ``notification_to_sms_sender`` join table.
    """
    op.create_index(
        op.f("ix_service_letter_contacts_service_id"), "service_letter_contacts", ["service_id"], unique=False
    )
    op.drop_index("ix_service_letter_contact_service_id", table_name="service_letter_contacts")
    op.create_index(op.f("ix_service_sms_senders_service_id"), "service_sms_senders", ["service_id"], unique=False)
    # NOTE(review): this ALTER looks unrelated to the sms-sender change;
    # presumably it casts a varchar column to the template_type enum — confirm.
    op.execute(
        "ALTER TABLE templates_history ALTER COLUMN template_type TYPE template_type USING template_type::template_type"
    )
    # Join table: links a notification to the SMS sender used for it.
    op.create_table(
        "notification_to_sms_sender",
        sa.Column("notification_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("service_sms_sender_id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.ForeignKeyConstraint(
            ["notification_id"],
            ["notifications.id"],
        ),
        sa.ForeignKeyConstraint(
            ["service_sms_sender_id"],
            ["service_sms_senders.id"],
        ),
        sa.PrimaryKeyConstraint("notification_id", "service_sms_sender_id"),
    )
    # unique=True: each notification may be linked to at most one sms sender.
    op.create_index(
        op.f("ix_notification_to_sms_sender_notification_id"),
        "notification_to_sms_sender",
        ["notification_id"],
        unique=True,
    )
    op.create_index(
        op.f("ix_notification_to_sms_sender_service_sms_sender_id"),
        "notification_to_sms_sender",
        ["service_sms_sender_id"],
        unique=False,
    )
def downgrade():
    """Revert the migration: restore the old index names, revert the
    template_type column to VARCHAR and drop the join table."""
    op.drop_index(op.f("ix_service_sms_senders_service_id"), table_name="service_sms_senders")
    op.create_index("ix_service_letter_contact_service_id", "service_letter_contacts", ["service_id"], unique=False)
    op.drop_index(op.f("ix_service_letter_contacts_service_id"), table_name="service_letter_contacts")
    op.alter_column("templates_history", "template_type", type_=sa.VARCHAR(), existing_nullable=False)
    op.drop_table("notification_to_sms_sender")
| mit | 0383fcc998db92613e7f54e75d4d341e | 35.655738 | 120 | 0.662791 | 3.44 | false | false | false | false |
denisenkom/pytds | src/pytds/lcid.py | 17 | 18871 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Constants for Windows Locale IDs (LCIDs).

Each ``LANGID_*`` constant below is the decimal value of a Windows
language identifier; ``lang_id_names`` maps those values to
human-readable language names.
"""
__docformat__ = "restructuredtext en"
# Public API: every LANGID_* constant plus the lang_id_names lookup table.
__all__ = [
    "LANGID_AFRIKAANS", "LANGID_ALBANIAN", "LANGID_AMHARIC", "LANGID_ARABIC",
    "LANGID_ARABIC_ALGERIA", "LANGID_ARABIC_BAHRAIN", "LANGID_ARABIC_EGYPT",
    "LANGID_ARABIC_IRAQ", "LANGID_ARABIC_JORDAN", "LANGID_ARABIC_KUWAIT",
    "LANGID_ARABIC_LEBANON", "LANGID_ARABIC_LIBYA", "LANGID_ARABIC_MOROCCO",
    "LANGID_ARABIC_OMAN", "LANGID_ARABIC_QATAR", "LANGID_ARABIC_SYRIA",
    "LANGID_ARABIC_TUNISIA", "LANGID_ARABIC_UAE", "LANGID_ARABIC_YEMEN",
    "LANGID_ARMENIAN", "LANGID_ASSAMESE", "LANGID_AZERI_CYRILLIC",
    "LANGID_AZERI_LATIN", "LANGID_BASQUE", "LANGID_BELGIAN_DUTCH",
    "LANGID_BELGIAN_FRENCH", "LANGID_BENGALI", "LANGID_BULGARIAN",
    "LANGID_BURMESE", "LANGID_BYELORUSSIAN", "LANGID_CATALAN",
    "LANGID_CHEROKEE", "LANGID_CHINESE_HONG_KONG_SAR",
    "LANGID_CHINESE_MACAO_SAR", "LANGID_CHINESE_SINGAPORE", "LANGID_CROATIAN",
    "LANGID_CZECH", "LANGID_DANISH", "LANGID_DIVEHI", "LANGID_DUTCH",
    "LANGID_EDO", "LANGID_ENGLISH_AUS", "LANGID_ENGLISH_BELIZE",
    "LANGID_ENGLISH_CANADIAN", "LANGID_ENGLISH_CARIBBEAN",
    "LANGID_ENGLISH_INDONESIA", "LANGID_ENGLISH_IRELAND",
    "LANGID_ENGLISH_JAMAICA", "LANGID_ENGLISH_NEW_ZEALAND",
    "LANGID_ENGLISH_PHILIPPINES", "LANGID_ENGLISH_SOUTH_AFRICA",
    "LANGID_ENGLISH_TRINIDAD_TOBAGO", "LANGID_ENGLISH_UK", "LANGID_ENGLISH_US",
    "LANGID_ENGLISH_ZIMBABWE", "LANGID_ESTONIAN", "LANGID_FAEROESE",
    "LANGID_FILIPINO", "LANGID_FINNISH", "LANGID_FRENCH",
    "LANGID_FRENCH_CAMEROON", "LANGID_FRENCH_CANADIAN",
    "LANGID_FRENCH_CONGO_D_R_C", "LANGID_FRENCH_COTED_IVOIRE",
    "LANGID_FRENCH_HAITI", "LANGID_FRENCH_LUXEMBOURG", "LANGID_FRENCH_MALI",
    "LANGID_FRENCH_MONACO", "LANGID_FRENCH_MOROCCO", "LANGID_FRENCH_REUNION",
    "LANGID_FRENCH_SENEGAL", "LANGID_FRENCH_WEST_INDIES",
    "LANGID_FRISIAN_NETHERLANDS", "LANGID_FULFULDE", "LANGID_GAELIC_IRELAND",
    "LANGID_GAELIC_SCOTLAND", "LANGID_GALICIAN", "LANGID_GEORGIAN",
    "LANGID_GERMAN", "LANGID_GERMAN_AUSTRIA", "LANGID_GERMAN_LIECHTENSTEIN",
    "LANGID_GERMAN_LUXEMBOURG", "LANGID_GREEK", "LANGID_GUARANI",
    "LANGID_GUJARATI", "LANGID_HAUSA", "LANGID_HAWAIIAN", "LANGID_HEBREW",
    "LANGID_HINDI", "LANGID_HUNGARIAN", "LANGID_IBIBIO", "LANGID_ICELANDIC",
    "LANGID_IGBO", "LANGID_INDONESIAN", "LANGID_INUKTITUT", "LANGID_ITALIAN",
    "LANGID_JAPANESE", "LANGID_KANNADA", "LANGID_KANURI", "LANGID_KASHMIRI",
    "LANGID_KAZAKH", "LANGID_KHMER", "LANGID_KIRGHIZ", "LANGID_KONKANI",
    "LANGID_KOREAN", "LANGID_KYRGYZ", "LANGID_LANGUAGE_NONE", "LANGID_LAO",
    "LANGID_LATIN", "LANGID_LATVIAN", "LANGID_LITHUANIAN",
    "LANGID_MACEDONIAN_FYROM", "LANGID_MALAYALAM", "LANGID_MALAYSIAN",
    "LANGID_MALAY_BRUNEI_DARUSSALAM", "LANGID_MALTESE", "LANGID_MANIPURI",
    "LANGID_MARATHI", "LANGID_MEXICAN_SPANISH", "LANGID_MONGOLIAN",
    "LANGID_NEPALI", "LANGID_NORWEGIAN_BOKMOL", "LANGID_NORWEGIAN_NYNORSK",
    "LANGID_NO_PROOFING", "LANGID_ORIYA", "LANGID_OROMO", "LANGID_PASHTO",
    "LANGID_PERSIAN", "LANGID_POLISH", "LANGID_PORTUGUESE",
    "LANGID_PORTUGUESE_BRAZIL", "LANGID_PUNJABI", "LANGID_RHAETO_ROMANIC",
    "LANGID_ROMANIAN", "LANGID_ROMANIAN_MOLDOVA", "LANGID_RUSSIAN",
    "LANGID_RUSSIAN_MOLDOVA", "LANGID_SAMI_LAPPISH", "LANGID_SANSKRIT",
    "LANGID_SERBIAN_CYRILLIC", "LANGID_SERBIAN_LATIN", "LANGID_SESOTHO",
    "LANGID_SIMPLIFIED_CHINESE", "LANGID_SINDHI", "LANGID_SINDHI_PAKISTAN",
    "LANGID_SINHALESE", "LANGID_SLOVAK", "LANGID_SLOVENIAN", "LANGID_SOMALI",
    "LANGID_SORBIAN", "LANGID_SPANISH", "LANGID_SPANISH_ARGENTINA",
    "LANGID_SPANISH_BOLIVIA", "LANGID_SPANISH_CHILE",
    "LANGID_SPANISH_COLOMBIA", "LANGID_SPANISH_COSTA_RICA",
    "LANGID_SPANISH_DOMINICAN_REPUBLIC", "LANGID_SPANISH_ECUADOR",
    "LANGID_SPANISH_EL_SALVADOR", "LANGID_SPANISH_GUATEMALA",
    "LANGID_SPANISH_HONDURAS", "LANGID_SPANISH_MODERN_SORT",
    "LANGID_SPANISH_NICARAGUA", "LANGID_SPANISH_PANAMA",
    "LANGID_SPANISH_PARAGUAY", "LANGID_SPANISH_PERU",
    "LANGID_SPANISH_PUERTO_RICO", "LANGID_SPANISH_URUGUAY",
    "LANGID_SPANISH_VENEZUELA", "LANGID_SUTU", "LANGID_SWAHILI",
    "LANGID_SWEDISH", "LANGID_SWEDISH_FINLAND", "LANGID_SWISS_FRENCH",
    "LANGID_SWISS_GERMAN", "LANGID_SWISS_ITALIAN", "LANGID_SYRIAC",
    "LANGID_TAJIK", "LANGID_TAMAZIGHT", "LANGID_TAMAZIGHT_LATIN",
    "LANGID_TAMIL", "LANGID_TATAR", "LANGID_TELUGU", "LANGID_THAI",
    "LANGID_TIBETAN", "LANGID_TIGRIGNA_ERITREA", "LANGID_TIGRIGNA_ETHIOPIC",
    "LANGID_TRADITIONAL_CHINESE", "LANGID_TSONGA", "LANGID_TSWANA",
    "LANGID_TURKISH", "LANGID_TURKMEN", "LANGID_UKRAINIAN", "LANGID_URDU",
    "LANGID_UZBEK_CYRILLIC", "LANGID_UZBEK_LATIN", "LANGID_VENDA",
    "LANGID_VIETNAMESE", "LANGID_WELSH", "LANGID_XHOSA", "LANGID_YI",
    "LANGID_YIDDISH", "LANGID_YORUBA", "LANGID_ZULU",
    "lang_id_names"
]
# Decimal Windows language identifiers (LCIDs).
# NOTE(review): two pairs of constants share a value — LANGID_KIRGHIZ ==
# LANGID_KYRGYZ == 1088 and LANGID_SESOTHO == LANGID_SUTU == 1072 — which
# makes the corresponding lang_id_names entries below collide.
LANGID_AFRIKAANS = 1078
LANGID_ALBANIAN = 1052
LANGID_AMHARIC = 1118
LANGID_ARABIC = 1025
LANGID_ARABIC_ALGERIA = 5121
LANGID_ARABIC_BAHRAIN = 15361
LANGID_ARABIC_EGYPT = 3073
LANGID_ARABIC_IRAQ = 2049
LANGID_ARABIC_JORDAN = 11265
LANGID_ARABIC_KUWAIT = 13313
LANGID_ARABIC_LEBANON = 12289
LANGID_ARABIC_LIBYA = 4097
LANGID_ARABIC_MOROCCO = 6145
LANGID_ARABIC_OMAN = 8193
LANGID_ARABIC_QATAR = 16385
LANGID_ARABIC_SYRIA = 10241
LANGID_ARABIC_TUNISIA = 7169
LANGID_ARABIC_UAE = 14337
LANGID_ARABIC_YEMEN = 9217
LANGID_ARMENIAN = 1067
LANGID_ASSAMESE = 1101
LANGID_AZERI_CYRILLIC = 2092
LANGID_AZERI_LATIN = 1068
LANGID_BASQUE = 1069
LANGID_BELGIAN_DUTCH = 2067
LANGID_BELGIAN_FRENCH = 2060
LANGID_BENGALI = 1093
LANGID_BULGARIAN = 1026
LANGID_BURMESE = 1109
LANGID_BYELORUSSIAN = 1059
LANGID_CATALAN = 1027
LANGID_CHEROKEE = 1116
LANGID_CHINESE_HONG_KONG_SAR = 3076
LANGID_CHINESE_MACAO_SAR = 5124
LANGID_CHINESE_SINGAPORE = 4100
LANGID_CROATIAN = 1050
LANGID_CZECH = 1029
LANGID_DANISH = 1030
LANGID_DIVEHI = 1125
LANGID_DUTCH = 1043
LANGID_EDO = 1126
LANGID_ENGLISH_AUS = 3081
LANGID_ENGLISH_BELIZE = 10249
LANGID_ENGLISH_CANADIAN = 4105
LANGID_ENGLISH_CARIBBEAN = 9225
LANGID_ENGLISH_INDONESIA = 14345
LANGID_ENGLISH_IRELAND = 6153
LANGID_ENGLISH_JAMAICA = 8201
LANGID_ENGLISH_NEW_ZEALAND = 5129
LANGID_ENGLISH_PHILIPPINES = 13321
LANGID_ENGLISH_SOUTH_AFRICA = 7177
LANGID_ENGLISH_TRINIDAD_TOBAGO = 11273
LANGID_ENGLISH_UK = 2057
LANGID_ENGLISH_US = 1033
LANGID_ENGLISH_ZIMBABWE = 12297
LANGID_ESTONIAN = 1061
LANGID_FAEROESE = 1080
LANGID_FILIPINO = 1124
LANGID_FINNISH = 1035
LANGID_FRENCH = 1036
LANGID_FRENCH_CAMEROON = 11276
LANGID_FRENCH_CANADIAN = 3084
LANGID_FRENCH_CONGO_D_R_C = 9228
LANGID_FRENCH_COTED_IVOIRE = 12300
LANGID_FRENCH_HAITI = 15372
LANGID_FRENCH_LUXEMBOURG = 5132
LANGID_FRENCH_MALI = 13324
LANGID_FRENCH_MONACO = 6156
LANGID_FRENCH_MOROCCO = 14348
LANGID_FRENCH_REUNION = 8204
LANGID_FRENCH_SENEGAL = 10252
LANGID_FRENCH_WEST_INDIES = 7180
LANGID_FRISIAN_NETHERLANDS = 1122
LANGID_FULFULDE = 1127
LANGID_GAELIC_IRELAND = 2108
LANGID_GAELIC_SCOTLAND = 1084
LANGID_GALICIAN = 1110
LANGID_GEORGIAN = 1079
LANGID_GERMAN = 1031
LANGID_GERMAN_AUSTRIA = 3079
LANGID_GERMAN_LIECHTENSTEIN = 5127
LANGID_GERMAN_LUXEMBOURG = 4103
LANGID_GREEK = 1032
LANGID_GUARANI = 1140
LANGID_GUJARATI = 1095
LANGID_HAUSA = 1128
LANGID_HAWAIIAN = 1141
LANGID_HEBREW = 1037
LANGID_HINDI = 1081
LANGID_HUNGARIAN = 1038
LANGID_IBIBIO = 1129
LANGID_ICELANDIC = 1039
LANGID_IGBO = 1136
LANGID_INDONESIAN = 1057
LANGID_INUKTITUT = 1117
LANGID_ITALIAN = 1040
LANGID_JAPANESE = 1041
LANGID_KANNADA = 1099
LANGID_KANURI = 1137
LANGID_KASHMIRI = 1120
LANGID_KAZAKH = 1087
LANGID_KHMER = 1107
LANGID_KIRGHIZ = 1088
LANGID_KONKANI = 1111
LANGID_KOREAN = 1042
LANGID_KYRGYZ = 1088  # same value as LANGID_KIRGHIZ above
LANGID_LANGUAGE_NONE = 0
LANGID_LAO = 1108
LANGID_LATIN = 1142
LANGID_LATVIAN = 1062
LANGID_LITHUANIAN = 1063
LANGID_MACEDONIAN_FYROM = 1071
LANGID_MALAYALAM = 1100
LANGID_MALAY_BRUNEI_DARUSSALAM = 2110
LANGID_MALAYSIAN = 1086
LANGID_MALTESE = 1082
LANGID_MANIPURI = 1112
LANGID_MARATHI = 1102
LANGID_MEXICAN_SPANISH = 2058
LANGID_MONGOLIAN = 1104
LANGID_NEPALI = 1121
LANGID_NO_PROOFING = 1024
LANGID_NORWEGIAN_BOKMOL = 1044
LANGID_NORWEGIAN_NYNORSK = 2068
LANGID_ORIYA = 1096
LANGID_OROMO = 1138
LANGID_PASHTO = 1123
LANGID_PERSIAN = 1065
LANGID_POLISH = 1045
LANGID_PORTUGUESE = 2070
LANGID_PORTUGUESE_BRAZIL = 1046
LANGID_PUNJABI = 1094
LANGID_RHAETO_ROMANIC = 1047
LANGID_ROMANIAN = 1048
LANGID_ROMANIAN_MOLDOVA = 2072
LANGID_RUSSIAN = 1049
LANGID_RUSSIAN_MOLDOVA = 2073
LANGID_SAMI_LAPPISH = 1083
LANGID_SANSKRIT = 1103
LANGID_SERBIAN_CYRILLIC = 3098
LANGID_SERBIAN_LATIN = 2074
LANGID_SESOTHO = 1072
LANGID_SIMPLIFIED_CHINESE = 2052
LANGID_SINDHI = 1113
LANGID_SINDHI_PAKISTAN = 2137
LANGID_SINHALESE = 1115
LANGID_SLOVAK = 1051
LANGID_SLOVENIAN = 1060
LANGID_SOMALI = 1143
LANGID_SORBIAN = 1070
LANGID_SPANISH = 1034
LANGID_SPANISH_ARGENTINA = 11274
LANGID_SPANISH_BOLIVIA = 16394
LANGID_SPANISH_CHILE = 13322
LANGID_SPANISH_COLOMBIA = 9226
LANGID_SPANISH_COSTA_RICA = 5130
LANGID_SPANISH_DOMINICAN_REPUBLIC = 7178
LANGID_SPANISH_ECUADOR = 12298
LANGID_SPANISH_EL_SALVADOR = 17418
LANGID_SPANISH_GUATEMALA = 4106
LANGID_SPANISH_HONDURAS = 18442
LANGID_SPANISH_MODERN_SORT = 3082
LANGID_SPANISH_NICARAGUA = 19466
LANGID_SPANISH_PANAMA = 6154
LANGID_SPANISH_PARAGUAY = 15370
LANGID_SPANISH_PERU = 10250
LANGID_SPANISH_PUERTO_RICO = 20490
LANGID_SPANISH_URUGUAY = 14346
LANGID_SPANISH_VENEZUELA = 8202
LANGID_SUTU = 1072  # same value as LANGID_SESOTHO above
LANGID_SWAHILI = 1089
LANGID_SWEDISH = 1053
LANGID_SWEDISH_FINLAND = 2077
LANGID_SWISS_FRENCH = 4108
LANGID_SWISS_GERMAN = 2055
LANGID_SWISS_ITALIAN = 2064
LANGID_SYRIAC = 1114
LANGID_TAJIK = 1064
LANGID_TAMAZIGHT = 1119
LANGID_TAMAZIGHT_LATIN = 2143
LANGID_TAMIL = 1097
LANGID_TATAR = 1092
LANGID_TELUGU = 1098
LANGID_THAI = 1054
LANGID_TIBETAN = 1105
LANGID_TIGRIGNA_ERITREA = 2163
LANGID_TIGRIGNA_ETHIOPIC = 1139
LANGID_TRADITIONAL_CHINESE = 1028
LANGID_TSONGA = 1073
LANGID_TSWANA = 1074
LANGID_TURKISH = 1055
LANGID_TURKMEN = 1090
LANGID_UKRAINIAN = 1058
LANGID_URDU = 1056
LANGID_UZBEK_CYRILLIC = 2115
LANGID_UZBEK_LATIN = 1091
LANGID_VENDA = 1075
LANGID_VIETNAMESE = 1066
LANGID_WELSH = 1106
LANGID_XHOSA = 1076
LANGID_YI = 1144
LANGID_YIDDISH = 1085
LANGID_YORUBA = 1130
LANGID_ZULU = 1077
# Human-readable name for each LANGID value.
# NOTE(review): dict literals keep the *last* entry for a duplicated key, so
# 1072 resolves to "Sutu" (not "Sesotho") and 1088 resolves to "Kyrgyz"
# (not "Kirghiz") — confirm this shadowing is intended.
lang_id_names = {
    LANGID_AFRIKAANS: "African",
    LANGID_ALBANIAN: "Albanian",
    LANGID_AMHARIC: "Amharic",
    LANGID_ARABIC: "Arabic",
    LANGID_ARABIC_ALGERIA: "Arabic Algerian",
    LANGID_ARABIC_BAHRAIN: "Arabic Bahraini",
    LANGID_ARABIC_EGYPT: "Arabic Egyptian",
    LANGID_ARABIC_IRAQ: "Arabic Iraqi",
    LANGID_ARABIC_JORDAN: "Arabic Jordanian",
    LANGID_ARABIC_KUWAIT: "Arabic Kuwaiti",
    LANGID_ARABIC_LEBANON: "Arabic Lebanese",
    LANGID_ARABIC_LIBYA: "Arabic Libyan",
    LANGID_ARABIC_MOROCCO: "Arabic Moroccan",
    LANGID_ARABIC_OMAN: "Arabic Omani",
    LANGID_ARABIC_QATAR: "Arabic Qatari",
    LANGID_ARABIC_SYRIA: "Arabic Syrian",
    LANGID_ARABIC_TUNISIA: "Arabic Tunisian",
    LANGID_ARABIC_UAE: "Arabic United Arab Emirates",
    LANGID_ARABIC_YEMEN: "Arabic Yemeni",
    LANGID_ARMENIAN: "Armenian",
    LANGID_ASSAMESE: "Assamese",
    LANGID_AZERI_CYRILLIC: "Azeri Cyrillic",
    LANGID_AZERI_LATIN: "Azeri Latin",
    LANGID_BASQUE: "Basque",
    LANGID_BELGIAN_DUTCH: "Belgian Dutch",
    LANGID_BELGIAN_FRENCH: "Belgian French",
    LANGID_BENGALI: "Bengali",
    LANGID_BULGARIAN: "Bulgarian",
    LANGID_BURMESE: "Burmese",
    LANGID_BYELORUSSIAN: "Byelorussian",
    LANGID_CATALAN: "Catalan",
    LANGID_CHEROKEE: "Cherokee",
    LANGID_CHINESE_HONG_KONG_SAR: "Chinese Hong Kong SAR",
    LANGID_CHINESE_MACAO_SAR: "Chinese Macao SAR",
    LANGID_CHINESE_SINGAPORE: "Chinese Singapore",
    LANGID_CROATIAN: "Croatian",
    LANGID_CZECH: "Czech",
    LANGID_DANISH: "Danish",
    LANGID_DIVEHI: "Divehi",
    LANGID_DUTCH: "Dutch",
    LANGID_EDO: "Edo",
    LANGID_ENGLISH_AUS: "Australian English",
    LANGID_ENGLISH_BELIZE: "Belize English",
    LANGID_ENGLISH_CANADIAN: "Canadian English",
    LANGID_ENGLISH_CARIBBEAN: "Caribbean English",
    LANGID_ENGLISH_INDONESIA: "Indonesian English",
    LANGID_ENGLISH_IRELAND: "Irish English",
    LANGID_ENGLISH_JAMAICA: "Jamaican English",
    LANGID_ENGLISH_NEW_ZEALAND: "New Zealand English",
    LANGID_ENGLISH_PHILIPPINES: "Filipino English",
    LANGID_ENGLISH_SOUTH_AFRICA: "South African English",
    LANGID_ENGLISH_TRINIDAD_TOBAGO: "Tobago Trinidad English",
    LANGID_ENGLISH_UK: "United Kingdom English",
    LANGID_ENGLISH_US: "United States English",
    LANGID_ENGLISH_ZIMBABWE: "Zimbabwe English",
    LANGID_ESTONIAN: "Estonian",
    LANGID_FAEROESE: "Faeroese",
    LANGID_FILIPINO: "Filipino",
    LANGID_FINNISH: "Finnish",
    LANGID_FRENCH: "French",
    LANGID_FRENCH_CAMEROON: "French Cameroon",
    LANGID_FRENCH_CANADIAN: "French Canadian",
    LANGID_FRENCH_CONGO_D_R_C: "French (Congo (DRC))",
    LANGID_FRENCH_COTED_IVOIRE: "French Cote d'Ivoire",
    LANGID_FRENCH_HAITI: "French Haiti",
    LANGID_FRENCH_LUXEMBOURG: "French Luxembourg",
    LANGID_FRENCH_MALI: "French Mali",
    LANGID_FRENCH_MONACO: "French Monaco",
    LANGID_FRENCH_MOROCCO: "French Morocco",
    LANGID_FRENCH_REUNION: "French Reunion",
    LANGID_FRENCH_SENEGAL: "French Senegal",
    LANGID_FRENCH_WEST_INDIES: "French West Indies",
    LANGID_FRISIAN_NETHERLANDS: "Frisian Netherlands",
    LANGID_FULFULDE: "Fulfulde",
    LANGID_GAELIC_IRELAND: "Gaelic Irish",
    LANGID_GAELIC_SCOTLAND: "Gaelic Scottish",
    LANGID_GALICIAN: "Galician",
    LANGID_GEORGIAN: "Georgian",
    LANGID_GERMAN: "German",
    LANGID_GERMAN_AUSTRIA: "German Austrian",
    LANGID_GERMAN_LIECHTENSTEIN: "German Liechtenstein",
    LANGID_GERMAN_LUXEMBOURG: "German Luxembourg",
    LANGID_GREEK: "Greek",
    LANGID_GUARANI: "Guarani",
    LANGID_GUJARATI: "Gujarati",
    LANGID_HAUSA: "Hausa",
    LANGID_HAWAIIAN: "Hawaiian",
    LANGID_HEBREW: "Hebrew",
    LANGID_HINDI: "Hindi",
    LANGID_HUNGARIAN: "Hungarian",
    LANGID_IBIBIO: "Ibibio",
    LANGID_ICELANDIC: "Icelandic",
    LANGID_IGBO: "Igbo",
    LANGID_INDONESIAN: "Indonesian",
    LANGID_INUKTITUT: "Inuktitut",
    LANGID_ITALIAN: "Italian",
    LANGID_JAPANESE: "Japanese",
    LANGID_KANNADA: "Kannada",
    LANGID_KANURI: "Kanuri",
    LANGID_KASHMIRI: "Kashmiri",
    LANGID_KAZAKH: "Kazakh",
    LANGID_KHMER: "Khmer",
    LANGID_KIRGHIZ: "Kirghiz",
    LANGID_KONKANI: "Konkani",
    LANGID_KOREAN: "Korean",
    LANGID_KYRGYZ: "Kyrgyz",  # duplicate key 1088 — overrides "Kirghiz"
    LANGID_LANGUAGE_NONE: "No specified",
    LANGID_LAO: "Lao",
    LANGID_LATIN: "Latin",
    LANGID_LATVIAN: "Latvian",
    LANGID_LITHUANIAN: "Lithuanian",
    LANGID_MACEDONIAN_FYROM: "Macedonian (FYROM)",
    LANGID_MALAYALAM: "Malayalam",
    LANGID_MALAY_BRUNEI_DARUSSALAM: "Malay Brunei Darussalam",
    LANGID_MALAYSIAN: "Malaysian",
    LANGID_MALTESE: "Maltese",
    LANGID_MANIPURI: "Manipuri",
    LANGID_MARATHI: "Marathi",
    LANGID_MEXICAN_SPANISH: "Mexican Spanish",
    LANGID_MONGOLIAN: "Mongolian",
    LANGID_NEPALI: "Nepali",
    LANGID_NO_PROOFING: "Disables proofing",
    LANGID_NORWEGIAN_BOKMOL: "Norwegian Bokmol",
    LANGID_NORWEGIAN_NYNORSK: "Norwegian Nynorsk",
    LANGID_ORIYA: "Oriya",
    LANGID_OROMO: "Oromo",
    LANGID_PASHTO: "Pashto",
    LANGID_PERSIAN: "Persian",
    LANGID_POLISH: "Polish",
    LANGID_PORTUGUESE: "Portuguese",
    LANGID_PORTUGUESE_BRAZIL: "Portuguese (Brazil)",
    LANGID_PUNJABI: "Punjabi",
    LANGID_RHAETO_ROMANIC: "Rhaeto Romanic",
    LANGID_ROMANIAN: "Romanian",
    LANGID_ROMANIAN_MOLDOVA: "Romanian Moldova",
    LANGID_RUSSIAN: "Russian",
    LANGID_RUSSIAN_MOLDOVA: "Russian Moldova",
    LANGID_SAMI_LAPPISH: "Sami Lappish",
    LANGID_SANSKRIT: "Sanskrit",
    LANGID_SERBIAN_CYRILLIC: "Serbian Cyrillic",
    LANGID_SERBIAN_LATIN: "Serbian Latin",
    LANGID_SESOTHO: "Sesotho",
    LANGID_SIMPLIFIED_CHINESE: "Simplified Chinese",
    LANGID_SINDHI: "Sindhi",
    LANGID_SINDHI_PAKISTAN: "Sindhi (Pakistan)",
    LANGID_SINHALESE: "Sinhalese",
    LANGID_SLOVAK: "Slovakian",
    LANGID_SLOVENIAN: "Slovenian",
    LANGID_SOMALI: "Somali",
    LANGID_SORBIAN: "Sorbian",
    LANGID_SPANISH: "Spanish",
    LANGID_SPANISH_ARGENTINA: "Spanish Argentina",
    LANGID_SPANISH_BOLIVIA: "Spanish Bolivian",
    LANGID_SPANISH_CHILE: "Spanish Chilean",
    LANGID_SPANISH_COLOMBIA: "Spanish Colombian",
    LANGID_SPANISH_COSTA_RICA: "Spanish Costa Rican",
    LANGID_SPANISH_DOMINICAN_REPUBLIC: "Spanish Dominican Republic",
    LANGID_SPANISH_ECUADOR: "Spanish Ecuadorian",
    LANGID_SPANISH_EL_SALVADOR: "Spanish El Salvadorian",
    LANGID_SPANISH_GUATEMALA: "Spanish Guatemala",
    LANGID_SPANISH_HONDURAS: "Spanish Honduran",
    LANGID_SPANISH_MODERN_SORT: "Spanish Modern Sort",
    LANGID_SPANISH_NICARAGUA: "Spanish Nicaraguan",
    LANGID_SPANISH_PANAMA: "Spanish Panamanian",
    LANGID_SPANISH_PARAGUAY: "Spanish Paraguayan",
    LANGID_SPANISH_PERU: "Spanish Peruvian",
    LANGID_SPANISH_PUERTO_RICO: "Spanish Puerto Rican",
    LANGID_SPANISH_URUGUAY: "Spanish Uruguayan",
    LANGID_SPANISH_VENEZUELA: "Spanish Venezuelan",
    LANGID_SUTU: "Sutu",  # duplicate key 1072 — overrides "Sesotho"
    LANGID_SWAHILI: "Swahili",
    LANGID_SWEDISH: "Swedish",
    LANGID_SWEDISH_FINLAND: "Swedish Finnish",
    LANGID_SWISS_FRENCH: "Swiss French",
    LANGID_SWISS_GERMAN: "Swiss German",
    LANGID_SWISS_ITALIAN: "Swiss Italian",
    LANGID_SYRIAC: "Syriac",
    LANGID_TAJIK: "Tajik",
    LANGID_TAMAZIGHT: "Tamazight",
    LANGID_TAMAZIGHT_LATIN: "Tamazight Latin",
    LANGID_TAMIL: "Tamil",
    LANGID_TATAR: "Tatar",
    LANGID_TELUGU: "Telugu",
    LANGID_THAI: "Thai",
    LANGID_TIBETAN: "Tibetan",
    LANGID_TIGRIGNA_ERITREA: "Tigrigna Eritrea",
    LANGID_TIGRIGNA_ETHIOPIC: "Tigrigna Ethiopic",
    LANGID_TRADITIONAL_CHINESE: "Traditional Chinese",
    LANGID_TSONGA: "Tsonga",
    LANGID_TSWANA: "Tswana",
    LANGID_TURKISH: "Turkish",
    LANGID_TURKMEN: "Turkmen",
    LANGID_UKRAINIAN: "Ukrainian",
    LANGID_URDU: "Urdu",
    LANGID_UZBEK_CYRILLIC: "Uzbek Cyrillic",
    LANGID_UZBEK_LATIN: "Uzbek Latin",
    LANGID_VENDA: "Venda",
    LANGID_VIETNAMESE: "Vietnamese",
    LANGID_WELSH: "Welsh",
    LANGID_XHOSA: "Xhosa",
    LANGID_YI: "Yi",
    LANGID_YIDDISH: "Yiddish",
    LANGID_YORUBA: "Yoruba",
    LANGID_ZULU: "Zulu"
}
| mit | 01851bfb53ddca3615ef9abe86173c65 | 36.368317 | 79 | 0.721371 | 2.224567 | false | false | false | false |
alphagov/notifications-api | migrations/env.py | 1 | 2554 | from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
from pathlib import Path
# Alembic Config object: exposes the values of the .ini file in use.
config = context.config
# Configure Python logging from the same .ini file.
fileConfig(config.config_file_name)
# Target metadata for 'autogenerate' support comes from Flask-Migrate's
# db object rather than a direct model import, so the app must already be
# set up (current_app context available) when this module runs.
from flask import current_app
config.set_main_option("sqlalchemy.url", current_app.config.get("SQLALCHEMY_DATABASE_URI"))
target_metadata = current_app.extensions["migrate"].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the migration context with just a database URL — no Engine
    is created, so no DBAPI needs to be available.  Calls to
    context.execute() emit the generated SQL to the script output.
    """
    context.configure(url=config.get_main_option("sqlalchemy.url"))
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine, binds a connection to the migration context and
    runs the migrations.  After a successful run against the main database
    (as opposed to the test database) the new head revision is written to
    `.current-alembic-head`, which stops conflicting migration branches
    from being merged at the same time and breaking the build.
    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool
    )
    connection = engine.connect()
    context.configure(connection=connection, target_metadata=target_metadata, compare_type=True)
    try:
        with context.begin_transaction():
            context.run_migrations()
        if engine.url.database == 'notification_api':
            head_file = Path(__file__).parent / ".current-alembic-head"
            head_file.write_text(context.get_head_revision())
    finally:
        connection.close()
# Module entry point: Alembic executes env.py directly and the mode is
# decided by how alembic was invoked (e.g. --sql selects offline mode).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| mit | b97cc045053c06c23097725a453da406 | 30.146341 | 109 | 0.696163 | 4.041139 | false | true | false | false |
alphagov/notifications-api | migrations/versions/0260_remove_dvla_organisation.py | 1 | 4361 | """
Revision ID: 0260_remove_dvla_organisation
Revises: 0259_remove_service_postage
Create Date: 2019-02-12 17:39:02.517571
"""
import sqlalchemy as sa
from alembic import op
# Alembic revision identifiers: this migration and the one it follows.
revision = "0260_remove_dvla_organisation"
down_revision = "0259_remove_service_postage"
def upgrade():
    """Drop the dvla_organisation lookup table and the service columns
    referencing it (on both services and services_history)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("services", "dvla_organisation_id")
    op.drop_column("services_history", "dvla_organisation_id")
    op.drop_table("dvla_organisation")
    # ### end Alembic commands ###
def downgrade():
    """Recreate dvla_organisation with its static rows and restore the
    referencing columns.

    Lossy downgrade: the per-service dvla_organisation_id values dropped by
    upgrade() cannot be recovered, so the restored columns fall back to the
    '001' server default for every row.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "dvla_organisation",
        sa.Column("id", sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column("name", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
        sa.Column("filename", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint("id", name="dvla_organisation_pkey"),
    )
    # can't repopulate the services, but we can put the static data back in dvla_organisation
    op.execute(
        """
        INSERT INTO dvla_organisation VALUES
        ('001', 'HM Government', 'hm-government'),
        ('002', 'Office of the Public Guardian', 'opg'),
        ('003', 'Department for Work and Pensions', 'dwp'),
        ('004', 'Government Equalities Office', 'geo'),
        ('005', 'Companies House', 'ch'),
        ('006', 'DWP (Welsh)', 'dwp-welsh'),
        ('007', 'Department for Communities', 'dept-for-communities'),
        ('008', 'Marine Management Organisation', 'mmo'),
        ('009', 'HM Passport Office', 'hmpo'),
        ('010', 'Disclosure and Barring Service', 'dbs'),
        ('500', 'Land Registry', 'hm-land-registry'),
        ('501', 'Environment Agency (PDF letters ONLY)', 'ea'),
        ('502', 'Welsh Revenue Authority', 'wra'),
        ('503', 'East Riding of Yorkshire Council', 'eryc'),
        ('504', 'Rother District Council', 'rother'),
        ('505', 'CADW', 'cadw'),
        ('506', 'Tyne and Wear Fire and Rescue Service', 'twfrs'),
        ('507', 'Thames Valley Police', 'thames-valley-police'),
        ('508', 'Ofgem', 'ofgem'),
        ('509', 'Hackney Council', 'hackney'),
        ('510', 'Pension Wise', 'pension-wise'),
        ('511', 'NHS', 'nhs'),
        ('512', 'Vale of Glamorgan', 'vale-of-glamorgan'),
        ('513', 'Rother and Wealden', 'wdc'),
        ('514', 'Brighton and Hove city council', 'brighton-hove'),
        ('515', 'ACAS', 'acas'),
        ('516', 'Worcestershire County Council', 'worcestershire'),
        ('517', 'Buckinghamshire County Council', 'buckinghamshire'),
        ('518', 'Bournemouth Borough Council', 'bournemouth'),
        ('519', 'Hampshire County Council', 'hants'),
        ('520', 'Neath Port Talbot Council', 'npt'),
        ('521', 'North Somerset Council', 'north-somerset'),
        ('522', 'Anglesey Council', 'anglesey'),
        ('523', 'Angus Council', 'angus'),
        ('524', 'Cheshire East Council', 'cheshire-east'),
        ('525', 'Newham Council', 'newham'),
        ('526', 'Warwickshire Council', 'warwickshire'),
        ('527', 'Natural Resources Wales', 'natural-resources-wales'),
        ('528', 'North Yorkshire Council', 'north-yorkshire'),
        ('529', 'Redbridge Council', 'redbridge'),
        ('530', 'Wigan Council', 'wigan')
    """
    )
    # Restore the columns with the '001' default (original values are lost).
    op.add_column(
        "services_history",
        sa.Column(
            "dvla_organisation_id",
            sa.VARCHAR(),
            server_default=sa.text("'001'::character varying"),
            autoincrement=False,
            nullable=False,
        ),
    )
    op.add_column(
        "services",
        sa.Column(
            "dvla_organisation_id",
            sa.VARCHAR(),
            server_default=sa.text("'001'::character varying"),
            autoincrement=False,
            nullable=False,
        ),
    )
    op.create_index(
        "ix_services_history_dvla_organisation_id", "services_history", ["dvla_organisation_id"], unique=False
    )
    op.create_foreign_key(
        "services_dvla_organisation_id_fkey", "services", "dvla_organisation", ["dvla_organisation_id"], ["id"]
    )
    op.create_index("ix_services_dvla_organisation_id", "services", ["dvla_organisation_id"], unique=False)
    # ### end Alembic commands ###
| mit | aa93e6c7d85b19f21bf12634fa5bbc88 | 39.37963 | 111 | 0.584499 | 3.318874 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.