text stringlengths 0 1.05M | meta dict |
|---|---|
from foam.sfa.util.method import Method
from foam.sfa.trust.credential import Credential
from foam.sfa.util.parameter import Parameter
class Update(Method):
    """
    Update an object in the registry. Currently, this only updates the
    PLC information associated with the record. The SFA fields (name, type,
    GID) are fixed.
    @param cred credential string specifying rights of the caller
    @param record a record dictionary to be updated
    @return 1 if successful, faults otherwise
    """
    interfaces = ['registry']
    accepts = [
        Parameter(dict, "Record dictionary to be updated"),
        Parameter(str, "Credential string"),
    ]
    returns = Parameter(int, "1 if successful")

    def call(self, record_dict, creds):
        # Authenticate the caller's credentials for the "update" operation.
        checked_creds = self.api.auth.checkCredentials(creds, "update")

        # Confirm the caller may modify the target record.
        target_hrn = record_dict.get('hrn', '')
        self.api.auth.verify_object_permission(target_hrn)

        # Audit-log the request under the caller's HRN.
        caller_hrn = Credential(string=checked_creds[0]).get_gid_caller().get_hrn()
        self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, caller_hrn, target_hrn, self.name))

        # Delegate the actual update to the registry manager.
        return self.api.manager.Update(self.api, record_dict)
| {
"repo_name": "dana-i2cat/felix",
"path": "ofam/src/src/foam/sfa/methods/Update.py",
"copies": "3",
"size": "1322",
"license": "apache-2.0",
"hash": -5294919098851315000,
"line_mean": 31.243902439,
"line_max": 143,
"alpha_frac": 0.6512859304,
"autogenerated": false,
"ratio": 3.820809248554913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.052492245783022785,
"num_lines": 41
} |
from foam.sfa.util.sfalogging import logger
from foam.sfa.util.xml import XpathFilter
from foam.sfa.util.xrn import Xrn
from foam.sfa.rspecs.elements.element import Element
from foam.sfa.rspecs.elements.node import Node
from foam.sfa.rspecs.elements.sliver import Sliver
from foam.sfa.rspecs.elements.location import Location
from foam.sfa.rspecs.elements.hardware_type import HardwareType
from foam.sfa.rspecs.elements.disk_image import DiskImage
from foam.sfa.rspecs.elements.interface import Interface
from foam.sfa.rspecs.elements.bwlimit import BWlimit
from foam.sfa.rspecs.elements.pltag import PLTag
from foam.sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
from foam.sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
from foam.sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from foam.sfa.rspecs.elements.lease import Lease
from foam.sfa.rspecs.elements.channel import Channel
class NITOSv1Lease:
    """Serialises and parses <lease> (and reserved <channel>) elements of
    NITOS v1 RSpecs."""

    @staticmethod
    def add_leases(xml, leases, channels):
        """Append lease elements for *leases*, grouped by slice and timeslot,
        under the <network> element of *xml*, attaching matching *channels*.

        NOTE(review): this consumes the caller's *leases* list — items are
        removed from it while grouping.
        """
        network_elems = xml.xpath('//network')
        if len(network_elems) > 0:
            network_elem = network_elems[0]
        elif len(leases) > 0:
            # no <network> element yet: name one after the first lease's authority
            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
            network_elem = xml.add_element('network', name = network_urn)
        else:
            network_elem = xml
        # group the leases by slice and timeslots
        grouped_leases = []
        while leases:
            # keys of the current group come from the first remaining lease
            slice_id = leases[0]['slice_id']
            start_time = leases[0]['start_time']
            duration = leases[0]['duration']
            group = []
            for lease in leases:
                if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
                    group.append(lease)
            grouped_leases.append(group)
            # drop the grouped items so the while loop terminates
            for lease1 in group:
                leases.remove(lease1)
        lease_elems = []
        for lease in grouped_leases:
            #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
            lease_fields = ['slice_id', 'start_time', 'duration']
            lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
            lease_elems.append(lease_elem)
            # add nodes of this lease
            for node in lease:
                lease_elem.add_instance('node', node, ['component_id'])
            # add reserved channels of this lease
            #channels = [{'channel_id': 1}, {'channel_id': 2}]
            for channel in channels:
                if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
                    lease_elem.add_instance('channel', channel, ['channel_num'])

    @staticmethod
    def get_leases(xml, filter={}):
        """Return (leases, channels) parsed from all <lease> elements in *xml*
        that match *filter* (with or without a default namespace prefix)."""
        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
        lease_elems = xml.xpath(xpath)
        return NITOSv1Lease.get_lease_objs(lease_elems)

    @staticmethod
    def get_lease_objs(lease_elems):
        """Expand parsed <lease> elements into per-node Lease objects and
        per-channel Channel objects; returns the (leases, channels) pair."""
        leases = []
        channels = []
        for lease_elem in lease_elems:
            #get nodes
            node_elems = lease_elem.xpath('./default:node | ./node')
            for node_elem in node_elems:
                # one Lease entry per reserved node
                lease = Lease(lease_elem.attrib, lease_elem)
                lease['slice_id'] = lease_elem.attrib['slice_id']
                lease['start_time'] = lease_elem.attrib['start_time']
                lease['duration'] = lease_elem.attrib['duration']
                lease['component_id'] = node_elem.attrib['component_id']
                leases.append(lease)
            #get channels
            channel_elems = lease_elem.xpath('./default:channel | ./channel')
            for channel_elem in channel_elems:
                # one Channel entry per reserved channel
                channel = Channel(channel_elem.attrib, channel_elem)
                channel['slice_id'] = lease_elem.attrib['slice_id']
                channel['start_time'] = lease_elem.attrib['start_time']
                channel['duration'] = lease_elem.attrib['duration']
                channel['channel_num'] = channel_elem.attrib['channel_num']
                channels.append(channel)
        return (leases, channels)
| {
"repo_name": "ict-felix/stack",
"path": "ofam/src/src/foam/sfa/rspecs/elements/versions/nitosv1Lease.py",
"copies": "3",
"size": "4424",
"license": "apache-2.0",
"hash": -4407758054690855400,
"line_mean": 41.9514563107,
"line_max": 164,
"alpha_frac": 0.601039783,
"autogenerated": false,
"ratio": 3.7683134582623508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023792810764769617,
"num_lines": 103
} |
from foam.sfa.util.xrn import urn_to_hrn
from foam.sfa.util.method import Method
from foam.sfa.util.foam.sfa.ablesRuntime import run_foam.sfa.ables
from foam.sfa.trust.credential import Credential
from foam.sfa.util.parameter import Parameter, Mixed
class GetTicket(Method):
    """
    Retrieve a ticket. This operation is currently implemented on PLC
    only (see SFA, engineering decisions); it is not implemented on
    components.
    The ticket is filled in with information from the PLC database. This
    information includes resources, and attributes such as user keys and
    initscripts.
    @param cred credential string
    @param name name of the slice to retrieve a ticket for (hrn or urn)
    @param rspec resource specification dictionary
    @return the string representation of a ticket object
    """
    interfaces = ['aggregate', 'slicemgr']
    accepts = [
        Parameter(str, "Human readable name of slice to retrive a ticket for (hrn or urn)"),
        Mixed(Parameter(str, "Credential string"),
              Parameter(type([str]), "List of credentials")),
        Parameter(str, "Resource specification (rspec)"),
        Parameter(type([]), "List of user information"),
        Parameter(dict, "Options")
    ]
    returns = Parameter(str, "String representation of a ticket object")

    def call(self, xrn, creds, rspec, users, options):
        hrn, type = urn_to_hrn(xrn)
        # Find the valid credentials
        valid_creds = self.api.auth.checkCredentials(creds, 'getticket', hrn)
        origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
        #log the call
        self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
        # filter rspec through foam.sfa.ables
        # NOTE(review): chain_name is only bound for the two roles declared in
        # 'interfaces' above; any other value of self.api.interface would leave
        # it unbound at the call below — presumably ruled out upstream; confirm.
        if self.api.interface in ['aggregate']:
            chain_name = 'OUTGOING'
        elif self.api.interface in ['slicemgr']:
            chain_name = 'FORWARD-OUTGOING'
        # NOTE(review): 'run_foam.sfa.ables' looks like a mechanically mangled
        # rename of sfatables' run_sfatables (the corresponding import at the
        # top of this module is itself syntactically broken); as written this
        # attribute chain cannot resolve at runtime. Verify against upstream.
        rspec = run_foam.sfa.ables(chain_name, hrn, origin_hrn, rspec)
        # remove nodes that are not available at this interface from the rspec
        return self.api.manager.GetTicket(self.api, xrn, creds, rspec, users, options)
| {
"repo_name": "ict-felix/stack",
"path": "ofam/src/src/foam/sfa/methods/GetTicket.py",
"copies": "3",
"size": "2287",
"license": "apache-2.0",
"hash": 596304858701821800,
"line_mean": 39.1228070175,
"line_max": 143,
"alpha_frac": 0.6655006559,
"autogenerated": false,
"ratio": 3.7801652892561983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02447256158211331,
"num_lines": 57
} |
from foam.sfa.util.xrn import urn_to_hrn
from foam.sfa.util.method import Method
from foam.sfa.trust.credential import Credential
from foam.sfa.util.parameter import Parameter, Mixed
class GetCredential(Method):
    """
    Retrive a credential for an object
    If cred == None then the behavior reverts to GetSelfCredential
    @param hrn human readable name of object (hrn or urn)
    @param cred credential object specifying rights of the caller
    @param type type of object (user | slice | node | authority )
    @return the string representation of a credential object
    """
    interfaces = ['registry']
    accepts = [
        Mixed(Parameter(str, "Credential string"),
              Parameter(type([str]), "List of credentials")),
        Parameter(str, "Human readable name (hrn or urn)"),
        Mixed(Parameter(str, "Record type"),
              Parameter(None, "Type not specified")),
    ]
    returns = Parameter(str, "String representation of a credential object")

    def call(self, creds, xrn, type):
        # Resolve the target HRN; infer the record type from the URN when the
        # caller did not supply one.
        if not type:
            hrn, type = urn_to_hrn(xrn)
        else:
            hrn = urn_to_hrn(xrn)[0]

        # Authenticate the caller and confirm this registry owns the target.
        checked_creds = self.api.auth.checkCredentials(creds, 'getcredential')
        self.api.auth.verify_object_belongs_to_me(hrn)

        # Audit-log the request under the caller's HRN.
        caller_hrn = Credential(string=checked_creds[0]).get_gid_caller().get_hrn()
        self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, caller_hrn, hrn, self.name))

        return self.api.manager.GetCredential(self.api, xrn, type, self.api.auth.client_gid.get_urn())
| {
"repo_name": "ict-felix/stack",
"path": "ofam/src/src/foam/sfa/methods/GetCredential.py",
"copies": "3",
"size": "1667",
"license": "apache-2.0",
"hash": -2468613239344310300,
"line_mean": 33.7291666667,
"line_max": 144,
"alpha_frac": 0.649670066,
"autogenerated": false,
"ratio": 3.5926724137931036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5742342479793103,
"avg_score": null,
"num_lines": null
} |
from foam.sfa.util.xrn import Xrn
from foam.sfa.rspecs.elements.element import Element
from foam.sfa.rspecs.elements.link import Link
from foam.sfa.rspecs.elements.interface import Interface
from foam.sfa.rspecs.elements.property import Property
class PGv2Link:
    """Builds and parses <link> elements of ProtoGENI v2 RSpecs."""

    @staticmethod
    def add_links(xml, links):
        """Append one <link> element per dict in *links* under *xml*.

        Each link dict may carry 'component_manager', 'interface1'/'interface2'
        endpoint dicts, QoS fields ('capacity', 'latency', 'packet_loss') and a
        'type' name; whichever are present are serialised as child elements.
        """
        for link in links:
            link_elem = xml.add_instance('link', link, ['component_name', 'component_id', 'client_id'])
            # set component manager element
            if 'component_manager' in link and link['component_manager']:
                cm_element = link_elem.add_element('component_manager', name=link['component_manager'])
            # set interface_ref elements
            if link.get('interface1') and link.get('interface2'):
                for if_ref in [link['interface1'], link['interface2']]:
                    link_elem.add_instance('interface_ref', if_ref, Interface.fields)
                # set one <property> element per direction of the link
                prop1 = link_elem.add_element('property', source_id=link['interface1']['component_id'],
                                              dest_id=link['interface2']['component_id'], capacity=link['capacity'],
                                              latency=link['latency'], packet_loss=link['packet_loss'])
                prop2 = link_elem.add_element('property', source_id=link['interface2']['component_id'],
                                              dest_id=link['interface1']['component_id'], capacity=link['capacity'],
                                              latency=link['latency'], packet_loss=link['packet_loss'])
            if link.get('type'):
                type_elem = link_elem.add_element('link_type', name=link['type'])

    @staticmethod
    def get_links(xml):
        """Parse every <link> element reachable from *xml* into Link objects."""
        links = []
        link_elems = xml.xpath('//default:link | //link')
        for link_elem in link_elems:
            # client_id, component_id, component_name come from the attributes
            link = Link(link_elem.attrib, link_elem)
            # set component manager
            component_managers = link_elem.xpath('./default:component_manager | ./component_manager')
            if len(component_managers) > 0 and 'name' in component_managers[0].attrib:
                link['component_manager'] = component_managers[0].attrib['name']
            # set link type
            link_types = link_elem.xpath('./default:link_type | ./link_type')
            if len(link_types) > 0 and 'name' in link_types[0].attrib:
                link['type'] = link_types[0].attrib['name']
            # get capacity, latency and packet_loss from first property
            property_fields = ['capacity', 'latency', 'packet_loss']
            property_elems = link_elem.xpath('./default:property | ./property')
            if len(property_elems) > 0:
                prop = property_elems[0]
                for attrib in property_fields:
                    if attrib in prop.attrib:
                        link[attrib] = prop.attrib[attrib]
            # get interfaces
            iface_elems = link_elem.xpath('./default:interface_ref | ./interface_ref')
            interfaces = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
            if len(interfaces) > 1:
                link['interface1'] = interfaces[0]
                link['interface2'] = interfaces[1]
            links.append(link)
        return links

    @staticmethod
    def add_link_requests(xml, link_tuples, append=False):
        """Mark links whose endpoint-name pair appears in *link_tuples* as requested.

        A link matches if (if1, if2) or the reversed (if2, if1) appears in
        *link_tuples*. Matching links (and their interfaces) get a client_id;
        unless *append* is true, all non-matching links are removed from *xml*.
        """
        if not isinstance(link_tuples, set):
            link_tuples = set(link_tuples)
        available_links = PGv2Link.get_links(xml)
        recently_added = []
        for link in available_links:
            if_name1 = Xrn(link['interface1']['component_id']).get_leaf()
            if_name2 = Xrn(link['interface2']['component_id']).get_leaf()
            requested_link = None
            l_tup_1 = (if_name1, if_name2)
            l_tup_2 = (if_name2, if_name1)
            if l_tup_1 in link_tuples:
                requested_link = l_tup_1
            # BUG FIX: the reversed pair was previously tested as
            # (if_name2, if_name2), so reversed link requests never matched.
            elif l_tup_2 in link_tuples:
                requested_link = l_tup_2
            if requested_link:
                # add client id to link and interface elements
                link.element.set('client_id', link['component_name'])
                link['interface1'].element.set('client_id', Xrn(link['interface1']['component_id']).get_leaf())
                link['interface2'].element.set('client_id', Xrn(link['interface2']['component_id']).get_leaf())
                recently_added.append(link['component_name'])
        if not append:
            # remove all links that were not requested above
            for link in PGv2Link.get_links(xml):
                if not link['client_id'] or link['component_name'] not in recently_added:
                    parent = link.element.getparent()
                    parent.remove(link.element)

    @staticmethod
    def get_link_requests(xml):
        """Return the subset of parsed links that carry a client_id."""
        link_requests = []
        for link in PGv2Link.get_links(xml):
            if link['client_id'] is not None:
                link_requests.append(link)
        return link_requests
| {
"repo_name": "dana-i2cat/felix",
"path": "ofam/src/src/foam/sfa/rspecs/elements/versions/pgv2Link.py",
"copies": "3",
"size": "5284",
"license": "apache-2.0",
"hash": -7541183964658934000,
"line_mean": 49.8076923077,
"line_max": 112,
"alpha_frac": 0.5607494322,
"autogenerated": false,
"ratio": 4.105672105672106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6166421537872105,
"avg_score": null,
"num_lines": null
} |
from foam.sfa.util.xrn import Xrn
from foam.sfa.util.method import Method
from foam.sfa.trust.credential import Credential
from foam.sfa.util.parameter import Parameter, Mixed
class Remove(Method):
    """
    Remove an object from the registry. If the object represents a PLC object,
    then the PLC records will also be removed.
    @param cred credential string
    @param type record type
    @param xrn human readable name of record to remove (hrn or urn)
    @return 1 if successful, faults otherwise
    """
    interfaces = ['registry']
    accepts = [
        Parameter(str, "Human readable name of slice to instantiate (hrn or urn)"),
        Mixed(Parameter(str, "Credential string"),
              Parameter(type([str]), "List of credentials")),
        Mixed(Parameter(str, "Record type"),
              Parameter(None, "Type not specified")),
    ]
    returns = Parameter(int, "1 if successful")

    def call(self, xrn, creds, type):
        # Wrap the raw name in an Xrn so both hrn and urn forms are handled.
        target = Xrn(xrn, type=type)

        # Authenticate the caller for the "remove" operation and check that it
        # is allowed to act on the target object.
        checked_creds = self.api.auth.checkCredentials(creds, "remove")
        self.api.auth.verify_object_permission(target.get_hrn())

        # Audit-log the request under the caller's HRN.
        caller_hrn = Credential(string=checked_creds[0]).get_gid_caller().get_hrn()
        self.api.logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s"%(
            self.api.interface, self.name, caller_hrn, target.get_urn()))

        return self.api.manager.Remove(self.api, target)
| {
"repo_name": "ict-felix/stack",
"path": "ofam/src/src/foam/sfa/methods/Remove.py",
"copies": "3",
"size": "1508",
"license": "apache-2.0",
"hash": -4687097972822503000,
"line_mean": 33.2727272727,
"line_max": 95,
"alpha_frac": 0.6419098143,
"autogenerated": false,
"ratio": 3.6690997566909975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5811009570990998,
"avg_score": null,
"num_lines": null
} |
from foam.sfa.util.xrn import Xrn
from foam.sfa.util.xml import XmlElement
from foam.sfa.rspecs.elements.element import Element
from foam.sfa.rspecs.elements.sliver import Sliver
from foam.sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
from sfa.planetlab.plxrn import PlXrn
class NITOSv1Sliver:
    """Helpers for reading and writing <sliver> elements in NITOS v1 RSpecs."""

    @staticmethod
    def add_slivers(xml, slivers):
        """Append one <sliver> child (plus its tag attributes) per entry in
        *slivers*; a single sliver dict is accepted as well as a list."""
        if not slivers:
            return
        if not isinstance(slivers, list):
            slivers = [slivers]
        for sliver in slivers:
            elem = xml.add_instance('sliver', sliver, ['name'])
            # serialise any tags as nested attribute elements
            for tag in sliver.get('tags', []):
                NITOSv1Sliver.add_sliver_attribute(elem, tag['tagname'], tag['value'])
            # when a sliver id is present, derive the slice name from it
            sliver_id = sliver.get('sliver_id')
            if sliver_id:
                elem.set('name', PlXrn(xrn=sliver_id).pl_slicename())

    @staticmethod
    def add_sliver_attribute(xml, name, value):
        """Add a child element called *name* whose text content is *value*."""
        attribute_elem = xml.add_element(name)
        attribute_elem.set_text(value)

    @staticmethod
    def get_sliver_attributes(xml):
        """Collect non-standard child elements of a sliver as name/value
        Element objects."""
        collected = []
        for child in xml.iterchildren():
            # anything that is not a known Sliver field counts as a tag
            if child.tag not in Sliver.fields:
                wrapped = XmlElement(child, xml.namespaces)
                attribute = Element(fields=wrapped, element=child)
                attribute['name'] = child.tag
                attribute['value'] = child.text
                collected.append(attribute)
        return collected

    @staticmethod
    def get_slivers(xml, filter={}):
        """Parse direct <sliver> children of *xml* into Sliver objects."""
        sliver_elems = xml.xpath('./default:sliver | ./sliver')
        found = []
        for sliver_elem in sliver_elems:
            sliver = Sliver(sliver_elem.attrib, sliver_elem)
            # propagate the parent node's component_id onto the sliver
            if 'component_id' in xml.attrib:
                sliver['component_id'] = xml.attrib['component_id']
            sliver['tags'] = NITOSv1Sliver.get_sliver_attributes(sliver_elem)
            found.append(sliver)
        return found
| {
"repo_name": "dana-i2cat/felix",
"path": "ofam/src/src/foam/sfa/rspecs/elements/versions/nitosv1Sliver.py",
"copies": "3",
"size": "2071",
"license": "apache-2.0",
"hash": 7859196751474327000,
"line_mean": 35.3333333333,
"line_max": 97,
"alpha_frac": 0.5861902463,
"autogenerated": false,
"ratio": 3.5768566493955096,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5663046895695509,
"avg_score": null,
"num_lines": null
} |
from foam.sfa.util.xrn import Xrn
from foam.sfa.util.xml import XpathFilter
from foam.sfa.rspecs.elements.node import Node
from foam.sfa.rspecs.elements.sliver import Sliver
from foam.sfa.rspecs.elements.location import Location
from foam.sfa.rspecs.elements.hardware_type import HardwareType
from foam.sfa.rspecs.elements.disk_image import DiskImage
from foam.sfa.rspecs.elements.interface import Interface
from foam.sfa.rspecs.elements.bwlimit import BWlimit
from foam.sfa.rspecs.elements.pltag import PLTag
from foam.sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from foam.sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
from foam.sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
from sfa.planetlab.plxrn import xrn_to_hostname
class PGv2Node:
    """Serialises and parses <node> elements of ProtoGENI v2 RSpecs."""

    @staticmethod
    def add_nodes(xml, nodes):
        """Append one <node> element per dict in *nodes*; returns the list of
        newly created node elements."""
        node_elems = []
        for node in nodes:
            node_fields = ['component_manager_id', 'component_id', 'client_id', 'sliver_id', 'exclusive']
            node_elem = xml.add_instance('node', node, node_fields)
            node_elems.append(node_elem)
            # set component name
            if node.get('component_id'):
                component_name = xrn_to_hostname(node['component_id'])
                node_elem.set('component_name', component_name)
            # set hardware types
            if node.get('hardware_types'):
                for hardware_type in node.get('hardware_types', []):
                    node_elem.add_instance('hardware_type', hardware_type, HardwareType.fields)
            # set location
            if node.get('location'):
                node_elem.add_instance('location', node['location'], Location.fields)
            # set interfaces
            PGv2Interface.add_interfaces(node_elem, node.get('interfaces'))
            #if node.get('interfaces'):
            #    for interface in node.get('interfaces', []):
            #        node_elem.add_instance('interface', interface, ['component_id', 'client_id'])
            # set available element
            if node.get('boot_state'):
                if node.get('boot_state').lower() == 'boot':
                    available_elem = node_elem.add_element('available', now='true')
                else:
                    available_elem = node_elem.add_element('available', now='false')
            # add services
            PGv2Services.add_services(node_elem, node.get('services', []))
            # add slivers
            slivers = node.get('slivers', [])
            if not slivers:
                # we must still advertise the available sliver types
                # NOTE(review): this rebinds slivers to a single Sliver rather
                # than a list — presumably PGv2SliverType.add_slivers accepts
                # both forms; confirm.
                slivers = Sliver({'type': 'plab-vserver'})
                # we must also advertise the available initscripts
                slivers['tags'] = []
                if node.get('pl_initscripts'):
                    for initscript in node.get('pl_initscripts', []):
                        slivers['tags'].append({'name': 'initscript', 'value': initscript['name']})
            PGv2SliverType.add_slivers(node_elem, slivers)
        return node_elems

    @staticmethod
    def get_nodes(xml, filter={}):
        """Return Node objects for every <node> element matching *filter*."""
        xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
        node_elems = xml.xpath(xpath)
        return PGv2Node.get_node_objs(node_elems)

    @staticmethod
    def get_nodes_with_slivers(xml, filter={}):
        """Return Node objects only for nodes that have a <sliver_type> child.

        NOTE(review): *filter* is accepted but never applied in the xpath
        below — verify whether that is intentional.
        """
        xpath = '//node[count(sliver_type)>0] | //default:node[count(default:sliver_type) > 0]'
        node_elems = xml.xpath(xpath)
        return PGv2Node.get_node_objs(node_elems)

    @staticmethod
    def get_node_objs(node_elems):
        """Convert parsed <node> elements into populated Node objects."""
        nodes = []
        for node_elem in node_elems:
            node = Node(node_elem.attrib, node_elem)
            nodes.append(node)
            if 'component_id' in node_elem.attrib:
                node['authority_id'] = Xrn(node_elem.attrib['component_id']).get_authority_urn()
            # get hardware types
            hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
            node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
            # get location
            location_elems = node_elem.xpath('./default:location | ./location')
            locations = [location_elem.get_instance(Location) for location_elem in location_elems]
            if len(locations) > 0:
                node['location'] = locations[0]
            # get interfaces
            iface_elems = node_elem.xpath('./default:interface | ./interface')
            node['interfaces'] = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
            # get services
            node['services'] = PGv2Services.get_services(node_elem)
            # get slivers
            node['slivers'] = PGv2SliverType.get_slivers(node_elem)
            available_elems = node_elem.xpath('./default:available | ./available')
            # NOTE(review): the guard tests for a 'name' attribute but the
            # value read below is 'now' — looks like it should test 'now';
            # confirm before relying on boot_state here.
            if len(available_elems) > 0 and 'name' in available_elems[0].attrib:
                if available_elems[0].attrib.get('now', '').lower() == 'true':
                    node['boot_state'] = 'boot'
                else:
                    node['boot_state'] = 'disabled'
        return nodes

    @staticmethod
    def add_slivers(xml, slivers):
        """Attach sliver requests to the first node whose component_id matches
        each entry in *slivers* (substring match)."""
        component_ids = []
        for sliver in slivers:
            filter = {}
            if isinstance(sliver, str):
                # a bare string is treated as a component_id substring match
                filter['component_id'] = '*%s*' % sliver
                sliver = {}
            elif 'component_id' in sliver and sliver['component_id']:
                filter['component_id'] = '*%s*' % sliver['component_id']
            if not filter:
                continue
            nodes = PGv2Node.get_nodes(xml, filter)
            if not nodes:
                continue
            node = nodes[0]
            PGv2SliverType.add_slivers(node, sliver)

    @staticmethod
    def remove_slivers(xml, hostnames):
        """Strip sliver elements from nodes whose component_id matches one of
        *hostnames* (substring match)."""
        for hostname in hostnames:
            nodes = PGv2Node.get_nodes(xml, {'component_id': '*%s*' % hostname})
            for node in nodes:
                slivers = PGv2SliverType.get_slivers(node.element)
                for sliver in slivers:
                    node.element.remove(sliver.element)
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a local emulab advertisement and re-emit its
    # nodes into a fresh ProtoGENI RSpec.
    from foam.sfa.rspecs.rspec import RSpec
    import pdb
    r = RSpec('/tmp/emulab.rspec')
    r2 = RSpec(version = 'ProtoGENI')
    nodes = PGv2Node.get_nodes(r.xml)
    PGv2Node.add_nodes(r2.xml.root, nodes)
    #pdb.set_trace()
| {
"repo_name": "ict-felix/stack",
"path": "ofam/src/src/foam/sfa/rspecs/elements/versions/pgv2Node.py",
"copies": "3",
"size": "6632",
"license": "apache-2.0",
"hash": -2046615872932520000,
"line_mean": 44.1156462585,
"line_max": 108,
"alpha_frac": 0.5793124246,
"autogenerated": false,
"ratio": 3.8401852924145916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5919497717014592,
"avg_score": null,
"num_lines": null
} |
from fobi.base import form_element_plugin_widget_registry
from fobi.contrib.plugins.form_elements.fields.datetime.widgets import (
BaseDateTimePluginWidget
)
from fobi.contrib.themes.bootstrap3 import UID
# Module metadata for the Bootstrap 3 datetime widget of fobi.
__title__ = 'fobi.contrib.themes.bootstrap3.widgets.form_elements.' \
            'datetime_bootstrap3_widget.fobi_form_elements'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('DateTimePluginWidget',)


class DateTimePluginWidget(BaseDateTimePluginWidget):
    """DateTime plugin widget for Bootstrap 3."""

    # Bind this widget to the Bootstrap 3 theme.
    theme_uid = UID
    # JS assets bundled with the widget, in the order listed here.
    media_js = [
        'js/moment-with-locales.js',
        'bootstrap3/js/bootstrap-datetimepicker.min.js',
        'bootstrap3/js/fobi.plugin.datetime-bootstrap3-widget.js',
    ]
    # CSS assets bundled with the widget.
    media_css = [
        'bootstrap3/css/bootstrap-datetimepicker.min.css',
        # 'datetime/css/fobi.plugin.datetime-bootstrap3-widget.css',
    ]


# Registering the widget
form_element_plugin_widget_registry.register(DateTimePluginWidget)
| {
"repo_name": "mansonul/events",
"path": "events/contrib/themes/bootstrap3/widgets/form_elements/datetime_bootstrap3_widget/fobi_form_elements.py",
"copies": "1",
"size": "1072",
"license": "mit",
"hash": -5316315370071631000,
"line_mean": 33.5806451613,
"line_max": 72,
"alpha_frac": 0.7164179104,
"autogenerated": false,
"ratio": 3.2883435582822087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45047614686822085,
"avg_score": null,
"num_lines": null
} |
from fobi.base import form_element_plugin_widget_registry
from fobi.contrib.plugins.form_elements.fields.date.widgets import (
BaseDatePluginWidget
)
from fobi.contrib.themes.bootstrap3 import UID
# Module metadata for the Bootstrap 3 date widget of fobi.
__title__ = 'fobi.contrib.themes.bootstrap3.widgets.form_elements.' \
            'date_bootstrap3_widget.fobi_form_elements'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('DatePluginWidget',)


class DatePluginWidget(BaseDatePluginWidget):
    """Date plugin widget for Bootstrap 3."""

    # Bind this widget to the Bootstrap 3 theme.
    theme_uid = UID
    # JS assets bundled with the widget, in the order listed here.
    media_js = [
        'js/moment-with-locales.js',
        'bootstrap3/js/bootstrap-datetimepicker.min.js',
        'bootstrap3/js/fobi.plugin.date-bootstrap3-widget.js',
    ]
    # CSS assets bundled with the widget.
    media_css = [
        'bootstrap3/css/bootstrap-datetimepicker.min.css',
        # 'datetime/css/fobi.plugin.date-bootstrap3-widget.css',
    ]


# Registering the widget
form_element_plugin_widget_registry.register(DatePluginWidget)
| {
"repo_name": "mansonul/events",
"path": "events/contrib/themes/bootstrap3/widgets/form_elements/date_bootstrap3_widget/fobi_form_elements.py",
"copies": "1",
"size": "1032",
"license": "mit",
"hash": -8588858634810909000,
"line_mean": 32.2903225806,
"line_max": 69,
"alpha_frac": 0.7054263566,
"autogenerated": false,
"ratio": 3.165644171779141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4371070528379141,
"avg_score": null,
"num_lines": null
} |
from .focuser import AbstractFocuser
import time
import random
class Focuser(AbstractFocuser):
    """
    Simple focuser simulator
    """

    def __init__(self,
                 name='Simulated Focuser',
                 port='/dev/ttyFAKE',
                 *args, **kwargs):
        super().__init__(*args, name=name, port=port, **kwargs)
        self.logger.debug("Initialising simulator focuser")
        self.connect()
        self.logger.info("\t\t\t {} initialised".format(self))

    ##################################################################################################
    # Methods
    ##################################################################################################

    def connect(self):
        """
        Simulator pretends to connect a focuser and obtain details, current state.
        """
        # simulate the latency of talking to real hardware
        time.sleep(0.1)
        self._connected = True
        self._serial_number = 'SF{:04d}'.format(random.randint(0, 9999))
        self._min_position = 0
        self._max_position = 22200
        self.logger.debug("Connected to focuser {}".format(self.uid))

    def move_to(self, position):
        """ Move focuser to a new encoder position """
        self.logger.debug('Moving focuser {} to {}'.format(self.uid, position))
        time.sleep(0.11)
        # clamp the request to the simulated travel limits
        target = max(self.min_position, min(int(position), self.max_position))
        self._position = target
        return target

    def move_by(self, increment):
        """ Move focuser by a given amount """
        self.logger.debug('Moving focuser {} by {}'.format(self.uid, increment))
        time.sleep(0.1)
        start = self._position
        # clamp the resulting position to the simulated travel limits
        target = max(self.min_position, min(start + int(increment), self.max_position))
        self._position = target
        return target - start

    @property
    def min_position(self):
        """
        Returns position of close limit of focus travel, in encoder units
        """
        return self._min_position

    @property
    def max_position(self):
        """
        Returns position of far limit of focus travel, in encoder units
        """
        return self._max_position
| {
"repo_name": "AstroHuntsman/POCS",
"path": "pocs/focuser/simulator.py",
"copies": "1",
"size": "2301",
"license": "mit",
"hash": 4042803890131363000,
"line_mean": 32.347826087,
"line_max": 98,
"alpha_frac": 0.5445458496,
"autogenerated": false,
"ratio": 4.416506717850288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5461052567450287,
"avg_score": null,
"num_lines": null
} |
from focus.errors import HelpBanner
from focus.version import __version__
from focus.environment.cli import FocusArgParser, CLI
from focus_unittest import FocusTestCase, MockEnvironment
class TestFocusArgParser(FocusTestCase):
    """Unit tests for FocusArgParser, which signals usage/help output by
    raising HelpBanner rather than exiting the process."""

    def setUp(self):
        super(TestFocusArgParser, self).setUp()
        self.parser = FocusArgParser()

    def testMessageDefStatus__exit(self):
        """ FocusArgParser.exit: message, default status.
        """
        message = 'the message'
        # exit() is expected to raise HelpBanner carrying the message
        with self.assertRaises(HelpBanner) as cm:
            self.parser.exit(message=message)
        exc = cm.exception
        # default exit status is 0
        self.assertEqual(exc.code, 0)
        self.assertEqual(exc.description, message)

    def testMessageStatusSet__exit(self):
        """ FocusArgParser.exit: message and status set.
        """
        message = 'the message'
        with self.assertRaises(HelpBanner) as cm:
            self.parser.exit(message=message, status=999)
        exc = cm.exception
        # the explicit status is propagated into the exception code
        self.assertEqual(exc.code, 999)
        self.assertEqual(exc.description, message)

    def testStripsSubparse__format_help(self):
        """ FocusArgParser.format_help: strips out subparser section.
        """
        sp = self.parser.add_subparsers()
        sp.add_parser('subcmd1')
        sp.add_parser('subcmd2')
        # stock argparse help includes a {subcmd1,subcmd2} section ...
        msg = super(FocusArgParser, self.parser).format_help()
        self.assertRegexpMatches(msg, r'\{.+\}')
        # ... which the overridden format_help should remove
        msg = self.parser.format_help()
        self.assertNotRegexpMatches(msg, r'\{.+\}')

    def testIgnoresMessage__error(self):
        """ FocusArgParser.error: fails with message from `format_help()`,
        ignores provided message.
        """
        message = 'the message'
        with self.assertRaises(HelpBanner) as cm:
            self.parser.error(message=message)
        # the provided message must not appear in the raised banner
        self.assertNotRegexpMatches(cm.exception.description, message)

    def test__print_help(self):
        """ FocusArgParser.print_help: fails with message from `format_help()`.
        """
        with self.assertRaises(HelpBanner) as cm:
            self.parser.print_help()
        self.assertEqual(cm.exception.description, self.parser.format_help())
class TestCLI(FocusTestCase):
    """Unit tests for the CLI front-end, driven through a MockEnvironment."""

    def setUp(self):
        super(TestCLI, self).setUp()
        self.cli = CLI()
        self.env = MockEnvironment()

    def tearDown(self):
        self.env = None
        self.cli = None
        super(TestCLI, self).tearDown()

    def testNoArguments__execute(self):
        """ CLI.execute: prints help banner if no arguments passed.
        """
        with self.assertRaises(HelpBanner):
            self.cli.execute(self.env)

    def testHelpArgument__execute(self):
        """ CLI.execute: prints help banner if 'help' argument passed.
        """
        with self.assertRaises(HelpBanner):
            self.env.args = ('help',)
            self.cli.execute(self.env)

    def testVersionArgument__execute(self):
        """ CLI.execute: prints version string if 'version' argument passed.
        """
        self.env.args = ('version',)
        self.cli.execute(self.env)
        # output captured by the mock IO should be the version banner
        self.assertEqual(str(self.env.io.test__write_data),
                         'focus version {0}\n'.format(__version__))

    def testNoColorArgument__execute(self):
        """ CLI.execute: disables colored output if '--no-color' argument
        passed.
        """
        self.assertTrue(self.env.io.colored)
        with self.assertRaises(HelpBanner):
            self.env.args = ('--no-color', 'help')
            self.cli.execute(self.env)
        # the flag should have switched the IO object to non-colored mode
        self.assertFalse(self.env.io.colored)

    def testCommandNoMatchPlugins__execute(self):
        """ CLI.execute: command doesn't match command plugins, raises help
        banner.
        """
        with self.assertRaises(HelpBanner):
            self.env.args = ('no-exist',)
            self.cli.execute(self.env)
| {
"repo_name": "xtrementl/focus",
"path": "tests/unit/environment/test_cli.py",
"copies": "1",
"size": "3915",
"license": "mit",
"hash": -5311099652075154000,
"line_mean": 34.2702702703,
"line_max": 79,
"alpha_frac": 0.6117496807,
"autogenerated": false,
"ratio": 4.151643690349947,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 111
} |
from focus.plugin.base import Plugin
from focus_unittest import FocusTestCase
class TestPlugin(FocusTestCase):
    """Checks the public surface of the Plugin base class."""

    def setUp(self):
        super(TestPlugin, self).setUp()
        self.plugin = Plugin()

    def tearDown(self):
        self.plugin = None
        super(TestPlugin, self).tearDown()

    def testAllAttributes(self):
        """ Plugin base: all supported attributes are defined.
        """
        bool_attrs = ('needs_root', 'task_only')
        all_attrs = ('name', 'version', 'target_version', 'command',
                     'events', 'options') + bool_attrs
        for attr in all_attrs:
            # sentinel default distinguishes "missing" from a None value
            value = getattr(self.plugin, attr, 'TESTVAL')
            if attr in bool_attrs:
                self.assertFalse(value)
            else:
                self.assertIsNone(value)

    def testAllMethods(self):
        """ Plugin base: all supported methods exist.
        """
        for name in ('disable', 'on_taskstart', 'on_taskrun', 'on_taskend',
                     'parse_option', 'execute'):
            member = getattr(self.plugin, name, 'TESTVAL')
            self.assertIsNotNone(member)
            self.assertTrue(callable(member))
| {
"repo_name": "xtrementl/focus",
"path": "tests/unit/plugin/test_base.py",
"copies": "1",
"size": "1144",
"license": "mit",
"hash": -9077881380348728000,
"line_mean": 30.7777777778,
"line_max": 73,
"alpha_frac": 0.5611888112,
"autogenerated": false,
"ratio": 4.25278810408922,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 36
} |
from focus.plugin.modules import notify as plugins
from focus_unittest import (
MockTask, FocusTestCase, IS_MACOSX, skipUnless, skipIf
)
class NotifyCase(FocusTestCase):
    """Unit tests for the Notify plugin."""

    def setUp(self):
        super(NotifyCase, self).setUp()
        self.plugin = plugins.Notify()

    def tearDown(self):
        self.plugin = None
        super(NotifyCase, self).tearDown()

    def testValidMessageType__parse_option(self):
        """ Notify.parse_option: parses message types correctly.
        """
        self.assertEqual(self.plugin.messages, {})
        for key in ('show', 'end_show', 'timer_show'):
            self.plugin.parse_option(key, 'show', 'msg')
            # Derive the expected message bucket from the option name:
            # 'show' -> 'start', 'end_show' -> 'end', 'timer_show' -> 'timer'
            if key == 'show':
                key = 'start_' + key
            key = key.split('_', 1)[0]
            self.assertIn(key, self.plugin.messages)
            self.assertEqual(self.plugin.messages[key], 'msg')

    def testInvalidMessageType__parse_option(self):
        """ Notify.parse_option: validates message types.
        """
        # Passing extra values to a message option must raise TypeError.
        for key in ('show', 'end_show', 'timer_show'):
            with self.assertRaises(TypeError):
                self.plugin.parse_option(key, 'notify',
                                         'test-message', '2', '3', '4')

    @skipUnless(IS_MACOSX, 'for mac osx')
    def testMac___notify(self):
        """ Notify._notify: installs correct functions for mac osx.
        """
        # On OSX one of the three OSX-specific backends must be selected.
        self.assertIn(self.plugin.notify_func, (
            plugins._terminal_notifier,
            plugins._growlnotify,
            plugins._osx_popup
        ))

    @skipIf(IS_MACOSX, 'for linux/nix')
    def testRegular___notify(self):
        """ Notify._notify: installs correct functions for linux/nix.
        """
        self.assertEquals(self.plugin.notify_func, plugins._dbus_notify)

    def testCallNotifyFunc___notify(self):
        """ Notify._notify: calls function defined by notify_func.
        """
        test_task = MockTask()
        test_task.start('Test-Task')
        ret_items = []

        # Capture backend recording every (title, message) pair delivered.
        def _check_func(task, message):
            ret_items.append((task, message))
        self.plugin.notify_func = _check_func
        self.plugin._notify(test_task, 'message-here')
        for item in ret_items:
            self.assertEqual(item, ('Focus ({0})'.format(test_task.name),
                                    'message-here'))
| {
"repo_name": "xtrementl/focus",
"path": "tests/unit/plugin/modules/test_notify.py",
"copies": "1",
"size": "2374",
"license": "mit",
"hash": 8722550617479394000,
"line_mean": 33.4057971014,
"line_max": 73,
"alpha_frac": 0.5661331087,
"autogenerated": false,
"ratio": 4.121527777777778,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5187660886477777,
"avg_score": null,
"num_lines": null
} |
from foialist.models import *
from foialist.views import *
from django.conf import settings
import math
def entities():
    '''
    Builds a list of existing entity names formatted as a JavaScript
    array literal for the autocomplete on the entry form.

    Uses json.dumps so that quotes/backslashes inside entity names are
    properly escaped; the previous hand-built string emitted unescaped
    quotes and a trailing comma, producing invalid JavaScript for names
    containing a double quote.
    '''
    import json  # local import keeps this fix self-contained
    names = [entity.name for entity in Entity.objects.all()]
    return json.dumps(names)
def convert_bytes(bytes):
    '''
    Formats a byte count as a human-readable size string
    (e.g. "2.00 KB", "1.50 GB").
    '''
    amount = float(bytes)
    if amount >= 1099511627776:
        return '%.2f TB' % (amount / 1099511627776)
    if amount >= 1073741824:
        return '%.2f GB' % (amount / 1073741824)
    if amount >= 1048576:
        return '%.2f MB' % (amount / 1048576)
    if amount >= 1024:
        # KB values are rounded to whole units before formatting
        return '%.2f KB' % round(amount / 1024, 0)
    # sub-KB values are likewise rounded to whole bytes
    return '%.2f bytes' % round(amount, 0)
def fake_slug(string):
    '''
    Returns a fake slug for URL handling:
    spaces become underscores, slashes become hyphens.
    '''
    return string.replace(" ", "_").replace("/", "-")
def pages():
    '''
    Returns the list of page numbers [1..N] needed to show every
    Entry at settings.ITEMS_PER_PAGE entries per page.
    '''
    total_items = float(Entry.objects.all().count())
    exact_pages = total_items / settings.ITEMS_PER_PAGE
    # round up: a partial final page still needs its own page number
    page_count = int(math.ceil(exact_pages))
    return range(1, page_count + 1)
"repo_name": "a2civictech/a2docs-sources",
"path": "foialist/helpers.py",
"copies": "1",
"size": "1535",
"license": "bsd-3-clause",
"hash": -4762926672141354000,
"line_mean": 25.4827586207,
"line_max": 52,
"alpha_frac": 0.590228013,
"autogenerated": false,
"ratio": 3.6899038461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9642546063533557,
"avg_score": 0.02751715912405788,
"num_lines": 58
} |
from folder import Folder
from article import Article
import permissions
from pyramid import security
from bson.objectid import ObjectId
import pyes
from cms import dbutil
from users import UserCollection, GroupCollection, User, generate_random_password
from trash import Trash
class Root(Folder):
    """Site root resource: a Folder with special children (users, groups,
    trash), content-type factories, and ElasticSearch-backed search."""
    _object_type = 'root'

    RESERVED_NAMES = Folder.RESERVED_NAMES + ('users', 'groups', 'trash', 'login', 'logout', 'my_password')

    # Map Content _object_type strings to their classes.
    # This should contain all types that a CMS user could possibly create.
    _content_type_factories = {
        'folder': Folder,
        'article': Article,
    }

    def __init__(self, request, **kwargs):
        Folder.__init__(self, request, **kwargs)
        self.__name__ = ''
        self.__parent__ = None
        self.__acl__ = permissions.root_acl

    def index(self):
        # Don't index the root.
        pass

    def get_content_factory(self, object_type):
        # Returns the class registered for object_type, or None.
        return self._content_type_factories.get(object_type)

    def get_content_by_id(self, _id):
        """Return the content object with the given mongo _id (traversing
        up to wire __parent__/__name__), or None if it can't be found."""
        if _id == 'trash':
            return self['trash']
        if _id == self._id:
            return self
        doc = self._get_collection().find_one({'_id': _id})
        if doc is None:
            return None
        obj = self._construct_child_from_mongo_document(doc)
        pid = doc['__parent__']
        if pid == self._id:
            obj.__parent__ = self
        else:
            # Recursively resolve the parent chain.
            parent = self.get_content_by_id(pid)
            if parent:
                obj.__parent__ = parent
                if pid == 'trash':
                    # Trashed items are addressed by their _id string.
                    obj.__name__ = str(obj._id)
            else:
                # Parent must have been deleted between call to this method and now.
                return None
        return obj

    # FIXME: add more options to allow searching a specific doctype with extra type-specific filters?
    def search_raw(self, fulltext=None, title=None, description=None, __name__=None, _object_type=None, _pub_state=None, path_id=None, start=0, size=10, fields=None, highlight_fields=None, viewable_only=False, default_operator='AND', sort=None):
        """
        fulltext, title and description should be query strings and may contain
        boolean operators and wildcards
        __name__, _object_type and _pub_state should be either a string or sequence of strings (with OR logic implied) and must be exact matches (no wildcards)
        path_id should be either an ObjectId or a sequence of ObjectIds
        identifying one or more portions of the site to restrict the search to
        sort should be a pyes-style sort string, in other words a comma-delimited list of field names each with the options suffix ":asc" or ":desc"
        (example: "_object_type,_created:desc")
        Returns a pyes result dictionary.
        Keys are [u'hits', u'_shards', u'took', u'timed_out'].
        result['hits'] has the keys: [u'hits', u'total', u'max_score']
        result['took'] -> search time in ms
        result['hits']['total'] -> total number of hits
        result['hits']['hits'] -> list of hit dictionaries, each with the keys: [u'_score', u'_type', u'_id', u'_source', u'_index', u'highlight']
        Although if the fields argument is a list of field names (instead
        of the default value None), instead of a '_source' key, each hit will
        have a '_fields' key whose value is a dictionary of the requested fields.
        The "highlight" key will only be present if highlight_fields were used
        and there was a match in at least one of those fields.
        In that case, the value of "highlight" will be dictionary of strings.
        Each dictionary key is a field name and each string is an HTML fragment
        where the matched term is in an <em> tag.
        """
        # Convert singleton values to lists
        if __name__ and (type(__name__) in (str, unicode)):
            __name__ = [__name__]
        if _object_type and (type(_object_type) in (str, unicode)):
            _object_type = [_object_type]
        if _pub_state and (type(_pub_state) in (str, unicode)):
            _pub_state = [_pub_state]
        if type(path_id) == ObjectId:
            path_id = [path_id]
        query = pyes.MatchAllQuery()
        if fulltext or title or description:
            # Any text criteria replaces match-all with a boolean AND query.
            query = pyes.BoolQuery()
            if fulltext: query.add_must(pyes.StringQuery(fulltext, default_operator=default_operator))
            if title: query.add_must(pyes.StringQuery(title, search_fields=['title'], default_operator=default_operator))
            if description: query.add_must(pyes.StringQuery(description, search_fields=['description'], default_operator=default_operator))
        filters = []
        if __name__:
            filters.append(pyes.TermsFilter('__name__', __name__))
        if _object_type:
            filters.append(pyes.TermsFilter('_object_type', _object_type))
        if _pub_state:
            filters.append(pyes.TermsFilter('_pub_state', _pub_state))
        if path_id:
            # Convert ObjectIds to strings
            filters.append(pyes.TermsFilter('_id_path', [str(x) for x in path_id]))
        if viewable_only:
            # Restrict hits to content the current request may view.
            filters.append(pyes.TermsFilter('_view', security.effective_principals(self.request)))
        if filters:
            query = pyes.FilteredQuery(query, pyes.ANDFilter(filters))
        search = pyes.Search(query=query, start=start, size=size, fields=fields)
        if highlight_fields:
            for field in highlight_fields:
                search.add_highlight(field)
        # FIXME: use new search() method???
        return dbutil.get_es_conn(self.request).search_raw(search, dbutil.get_es_index_name(self.request), sort=sort or '_score')

    def search(self, fulltext=None, title=None, description=None, __name__=None, _object_type=None, _pub_state=None, path_id=None, start=0, size=10, highlight_fields=None, viewable_only=False, default_operator='AND', sort=None):
        # Return a dictionary with the keys:
        # "total": total number of matching hits
        # "took": search time in ms
        # "items": a list of child objects and highlights for the specified batch of hits
        # We just need the _id values (not _source, etc), so set fields=[]
        # NOTE(review): default_operator='AND' is passed as a literal here,
        # ignoring this method's default_operator argument — confirm intent.
        result = self.search_raw(fulltext=fulltext, title=title, description=description, __name__=__name__, _object_type=_object_type, _pub_state=_pub_state, path_id=path_id, start=start, size=size, fields=[], highlight_fields=highlight_fields, viewable_only=viewable_only, default_operator='AND', sort=sort)
        items = []
        for hit in result['hits']['hits']:
            _id = ObjectId(hit['_id'])
            obj = self.get_content_by_id(_id)
            if obj:
                items.append(dict(object=obj, highlight=hit.get('highlight')))
        return dict(
            items = items,
            total = result['hits']['total'],
            took = result['took'],
        )

    def __getitem__(self, name):
        # The three virtual children are constructed on every access.
        if name == 'users':
            users = UserCollection(self.request)
            users.__name__ = 'users'
            users.__parent__ = self
            return users
        elif name == 'groups':
            groups = GroupCollection(self.request)
            groups.__name__ = 'groups'
            groups.__parent__ = self
            return groups
        elif name == 'trash':
            trash = Trash(self.request)
            trash.__name__ = 'trash'
            trash.__parent__ = self
            return trash
        return Folder.__getitem__(self, name)

    def get_user(self, username):
        return self['users'].get_child(username)

    def get_current_user(self):
        # Resolves the authenticated userid of the current request.
        return self['users'].get_child(security.authenticated_userid(self.request))

    def get_user_by_email(self, email):
        return self['users'].get_user_by_email(email)

    def add_super_user(self, name='admin', password=None):
        """ Add a new user in the superuser group.
        This is particularly handy to bootstrap a new system in pshell.
        """
        user= User(self.request, firstname=name.capitalize(), lastname='User', groups=['superuser'], active=True, email='')
        if not password:
            password = generate_random_password()
        user.set_password(password)
        self['users'].add_child(name, user)
        print "Created superuser with username %s and password %s" % (name, password)

    # Not for use by "civilians"...
    def _find_local_roles_for_principal(self, principal):
        # Mongo cursor over docs that have a _local_roles entry for principal.
        return self._get_collection().find({'_local_roles.%s' % principal: {"$exists":1}}, fields=['_local_roles'])

    def _get_content_with_local_roles_for_principal(self, principal):
        result = []
        for item in self._find_local_roles_for_principal(principal):
            obj = self.get_content_by_id(item['_id'])
            if obj: result.append(obj)
        return result

    def _remove_local_roles_for_principal(self, principal):
        # Bulk-unset the principal's local roles across all content docs.
        self._get_collection().update({'_local_roles.%s' % principal: {"$exists": 1}}, {'$unset': {'_local_roles.%s' % principal: 1}}, multi=True)
| {
"repo_name": "sbrauer/recms",
"path": "cms/resources/root.py",
"copies": "1",
"size": "9167",
"license": "mit",
"hash": 7293186039316306000,
"line_mean": 43.5,
"line_max": 309,
"alpha_frac": 0.6082687902,
"autogenerated": false,
"ratio": 3.956409149762624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5064677939962624,
"avg_score": null,
"num_lines": null
} |
from folder import Folder
import time
from datetime import datetime
import thread
import subprocess
import shutil
import json
import os
import __init__ as ezex
import util
def xwrite(path, data):
    """Serialize *data* as JSON into the ezex.json file under *path*."""
    target = path + '/ezex.json'
    with open(target, 'w+') as handle:
        json.dump(data, handle)
def xread(path):
    """Load and return the JSON payload stored in path/ezex.json."""
    source = path + '/ezex.json'
    with open(source) as handle:
        return json.load(handle)
def create(run_folder, tag=''):
    ''' create unique experiment folder '''
    rf = Folder(run_folder)
    stamp = datetime.now().strftime('%Y%m%d_%H%M_%S')
    base = stamp + '_' + rf.name() + '_' + tag
    # probe suffixed names until one is free; give up after ~100 tries
    candidate = base
    suffix = 1
    while candidate in ezex.exfolder:
        candidate = base + '_' + str(suffix)
        suffix = suffix + 1
        if suffix > 100:
            raise RuntimeError('Could not create unique experiment folder')
    path = ezex.config['exfolder'] + '/' + candidate
    os.mkdir(path)
    # mirror the run folder into the new experiment folder (dotfiles skipped)
    util.copy(run_folder, path, symlinks=True, ignore='.*')
    return path
def submit(path):
    # Submit the experiment at *path* to the configured batch scheduler
    # (LSF via ``bsub`` or SLURM via ``sbatch``) and record the returned
    # job id back into the experiment's ezex.json.
    x = xread(path)
    if ezex.config['scheduler']=='lsf':
        # request an nvd-tagged host only when the experiment asks for it
        nvd = 'select[nvd]' if x['nvd'] else ''
        cmd = ('bsub -q deflt_centos -M 20000 -W 24:00 -n 4 -R "'+nvd+'" -x '+
               '-oo '+path+'/out '+
               '\''+ 'ezex execute ' + path +'\'')
        print(cmd)
        out = subprocess.check_output(cmd,shell=True)
        import re
        # bsub reports e.g. "Job <12345> is submitted ..."
        match = re.match('Job <(\d*)>',out)
        if match:
            jid = match.group(1)
        else:
            raise RuntimeError()
    elif ezex.config['scheduler']=='slurm':
        nvd = '#SBATCH -C nvd \n' if x['nvd'] else ''
        print(path)
        # write a job script into the experiment folder, then sbatch it
        jscr = ("#!/bin/bash" + '\n' +
                "#SBATCH -o " + path + '/out' + '\n' +
                "#SBATCH --mem-per-cpu=" + "5000" + '\n' +
                "#SBATCH -n 4" + '\n' +
                "#SBATCH -t 24:00:00" + "\n" +
                nvd +
                "ezex execute " + path)
        with open(path+"/slurmjob","w") as f:
            f.write(jscr)
        cmd = "sbatch " + path + "/slurmjob"
        out = subprocess.check_output(cmd,shell=True)
        print("SUBMIT: \n" + out)
        import re
        #match = re.match('Submitted batch job (\d*)',out)
        # search (not match): sbatch output may be preceded by other text
        match = re.search('Submitted batch job (\d*)',out)
        if match:
            jid = match.group(1)
        else:
            raise RuntimeError()
    else:
        raise RuntimeError('No scheduling system (e.g. SLURM) present')
    x['job_id'] = jid
    xwrite(path,x)
def execute(path, deltime=3*60):
    """Run the command stored in the experiment folder's ezex.json.

    Records run_status ('running' -> 'finished'/'aborted') and end_time
    back into ezex.json. Experiments that finish (or die) within
    *deltime* seconds are assumed uninteresting and their folder is
    removed.

    Fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    and SystemExit; narrowed to ``except Exception`` so those propagate
    (the ``finally`` bookkeeping still runs either way).
    """
    x = xread(path)
    t_start = time.time()
    try:
        cmd = x['cmd']
        x['run_status'] = 'running'
        xwrite(path, x)
        print(cmd)
        subprocess.check_call(cmd, shell=True)
        x['run_status'] = 'finished'
    except Exception:
        # failed command (non-zero exit or missing 'cmd') -> best effort:
        # mark aborted and carry on to the bookkeeping below
        x['run_status'] = 'aborted'
        print("aborted")
    finally:
        elapsed = time.time() - t_start
        x['end_time'] = time.time()
        xwrite(path, x)
        print('elapsed seconds: ' + str(elapsed))
        # short-lived runs are treated as misfires and cleaned up
        if elapsed <= deltime:
            shutil.rmtree(path, ignore_errors=False)
def kill(path):
    """Cancel the scheduler job recorded for the experiment at *path*.

    Returns False when no supported scheduler is configured or anything
    goes wrong; returns None after a successful cancel request (matching
    the original fall-through behavior).
    """
    try:
        meta = xread(path)
        scheduler = ezex.config['scheduler']
        if scheduler == 'lsf':
            subprocess.check_output('bkill ' + str(meta['job_id']), shell=True)
        elif scheduler == 'slurm':
            subprocess.check_output('scancel ' + str(meta['job_id']), shell=True)
        else:
            return False
    except Exception:
        return False
def delete(path):
    # Kill the experiment's scheduler job, then remove its folder in a
    # background thread, retrying for up to ~2 seconds.
    # TODO: simplify!
    kill(path)
    def deletet(path=path):  # default arg binds path at definition time
        t = time.time()
        suc = False
        exc = None
        def errorRemoveReadonly(func, path, exc):
            # rmtree onerror hook: loosen permissions and retry once.
            # NOTE(review): the ``exc = ...`` in the else branch only rebinds
            # this function's local and never reaches the outer ``exc``.
            import errno
            import stat
            excvalue = exc[1]
            if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
                # change the file to be readable,writable,executable: 0777
                os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
                # retry
                func(path)
            else:
                exc = "Deletion of "+path+" not possible"
        while (time.time()-t < 2):
            try:
                shutil.rmtree(path,ignore_errors=False,onerror=errorRemoveReadonly)
                suc = not os.path.isdir(path)
                if suc: break
            except Exception as ex:
                exc = ex
            time.sleep(0.2)
        if not suc:
            print(exc)
    thread.start_new_thread(deletet,())
"repo_name": "SimonRamstedt/ezex",
"path": "ezex/experiment.py",
"copies": "1",
"size": "4203",
"license": "mit",
"hash": -5083574466846870000,
"line_mean": 23.4418604651,
"line_max": 76,
"alpha_frac": 0.5745895789,
"autogenerated": false,
"ratio": 3.0859030837004404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.416049266260044,
"avg_score": null,
"num_lines": null
} |
from folder import Folder
class Photofolder():
    """Photo-centric view over a Folder: lists photos and computes the
    next numeric file name."""

    photoextensions = ['.jpg', '.png']

    def __init__(self, folder):
        self.folder = folder

    def isint(self, str):
        """True when the value parses as a base-10 integer."""
        try:
            int(str)
        except ValueError:
            return False
        return True

    def fileisphoto(self, file):
        """True when the file's extension (case-insensitive) is a known
        photo extension."""
        return self.folder.getextension(file).lower() in self.photoextensions

    def getallphotos_filenames(self):
        """All photo file names in the folder."""
        return [f for f in self.folder.getfiles() if self.fileisphoto(f)]

    def getallphotos_fullpath(self):
        """All photo file names prefixed with the folder path."""
        prefix = self.folder.getpath()
        return [prefix + '/' + name for name in self.getallphotos_filenames()]

    def getphotobynumber(self, number):
        """Full path of the photo at position *number* in the listing."""
        return self.folder.getpath() + "/" + self.getallphotos_filenames()[number]

    def getnextfilenumber(self):
        """Smallest unused number above the highest purely-numeric photo
        name; 1 when there are none."""
        stems = [self.folder.getfilename(f)
                 for f in self.getallphotos_filenames()]
        numbers = sorted(int(s) for s in stems if self.isint(s))
        if not numbers:
            return 1
        return numbers[-1] + 1

    def getnextfilename_fullpath(self):
        """Full path for the next photo to be saved (always .png)."""
        return self.folder.getpath() + '/' + str(self.getnextfilenumber()) + '.png'

    def getlastphoto(self):
        """Lexicographically last photo file name."""
        ordered = sorted(self.getallphotos_filenames())
        return ordered[-1]
| {
"repo_name": "MarkusAmshove/Photobox",
"path": "photobox/photofolder.py",
"copies": "1",
"size": "1393",
"license": "mit",
"hash": -1935987460403258400,
"line_mean": 29.2826086957,
"line_max": 94,
"alpha_frac": 0.6137832017,
"autogenerated": false,
"ratio": 3.8802228412256268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49940060429256267,
"avg_score": null,
"num_lines": null
} |
from folder import unindex_recursively
from collection import Collection
import zope.interface
from interfaces import ITrash
import permissions
from bson.objectid import ObjectId
from pyramid.security import authenticated_userid
from cms.dateutil import utcnow
from cms.exceptions import Veto
class Trash(Collection):
    """ The Trash Collection is a special folder-ish object in the root of the site that behaves
    like the trash or recyclebin that you've seen in various operating systems.
    As an alternative to really deleting content, it can be moved into the trash.
    Once in the trash, it is removed from the fulltext index and has very limited
    permissions (it can be viewed by authenticated users, un-trashed if its parent
    still exists (outside of the trash), or copy/pasted back into the content tree).
    Another key difference between the Trash Collection and a regular Collection is that the
    Mongo _id (as a string) is used to traverse to items in the trash, instead of __name__.
    This is because there can be several items in the trash with the same __name__.
    """
    zope.interface.implements(ITrash)
    _object_type = "trash"

    def __init__(self, request):
        self.request = request
        self._collection_name = "content"
        self.title = "Trash"
        self.description = "Content can be moved here as an alternative to deleting it forever."
        self.__acl__ = permissions.trash_acl
        self._id = 'trash'

    def _morph_spec(self, spec):
        # Constrain mongo queries to direct children of the trash.
        if spec is None: spec = {}
        # Make sure parent is in the spec.
        spec['__parent__'] = self._id
        return spec

    def _get_child_class(self, doc):
        # The root knows the factory for each _object_type.
        return self.__parent__.get_content_factory(doc['_object_type'])

    def move_child(self, obj):
        """Move obj into the trash, remembering where it came from in a
        _memento so it can be restored later."""
        if obj.__parent__._id == self._id: return
        orig_parent = obj.__parent__
        orig_name = obj.__name__
        obj._memento = dict(
            orig_name = orig_name,
            orig_parent_id = orig_parent._id,
            orig_parent_path = orig_parent.resource_path(),
            trashed_at = utcnow(),
            trashed_by = authenticated_userid(self.request),
        )
        obj.__parent__ = self
        # Trashed items are addressed by _id string (names may collide).
        obj.__name__ = str(obj._id)
        obj.save() # FIXME: set_modified=False?
        unindex_recursively(obj, include_self=True)
        # Notify old parent that child was moved (gives ordered folders an opportunity to update their ordered name list).
        orig_parent._child_removed(orig_name)

    def _child_removed(self, name):
        pass # Like I care? I'm trash!

    def dememento_child(self, child):
        # Restore the original name and drop the trash bookkeeping.
        child.__name__ = child._memento['orig_name']
        del child._memento

    def veto_restore_child(self, child):
        """ Return an error message string if there's any reason why the given child cannot
        be restored into its original parent.
        Otherwise return None.
        """
        root = self.__parent__
        orig_parent = root.get_content_by_id(child._memento['orig_parent_id'])
        if orig_parent is None: return "Original parent object no longer exists."
        if orig_parent.in_trash(): return "Original parent is also in the trash."
        return orig_parent.veto_add_child(child._memento['orig_name'], child)

    def restore_child(self, child):
        # Move the child back into its original parent (raises Veto if the
        # parent itself is trashed).
        root = self.__parent__
        orig_parent = root.get_content_by_id(child._memento['orig_parent_id'])
        if orig_parent.in_trash(): raise Veto("Original parent is also in the trash.")
        self.dememento_child(child)
        orig_parent.move_child(child)

    def veto_restore_children(self, children):
        """ Return an error message is there is a problem with any of the proposed restores.
        Else return None.
        """
        children_by_parent_id = {}
        for child in children:
            err = self.veto_restore_child(child)
            if err: return "Can't restore child named \"%s\". (%s)" % (child._memento['orig_name'], err)
            parent_id = child._memento['orig_parent_id']
            if not children_by_parent_id.has_key(parent_id):
                children_by_parent_id[parent_id] = []
            children_by_parent_id[parent_id].append(child)
        # Check for items that would be restored to the same folder with the same name...
        for (parent_id, children) in children_by_parent_id.items():
            names = {}
            for child in children:
                child_name = child._memento['orig_name']
                if not names.has_key(child_name):
                    names[child_name] = 0
                names[child_name] += 1
            for (name, count) in names.items():
                if count > 1:
                    return "Can't restore all of the requested objects, since %s would have the non-unique name \"%s\"." % (count, name)
        return None

    def restore_children(self, children):
        """Restore every child (all-or-nothing veto check first); returns
        the number restored."""
        error = self.veto_restore_children(children)
        if error: raise Veto(error)
        for child in children:
            self.restore_child(child)
        return len(children)
"repo_name": "sbrauer/recms",
"path": "cms/resources/trash.py",
"copies": "1",
"size": "5110",
"license": "mit",
"hash": -1472922596945845500,
"line_mean": 42.3050847458,
"line_max": 136,
"alpha_frac": 0.6266144814,
"autogenerated": false,
"ratio": 4.001566170712608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017492496647721036,
"num_lines": 118
} |
from .folder_loader import FolderLoaderPlaylist
from .file_loader import FileLoaderPlaylist
from kivy.properties import DictProperty
from kivy.event import EventDispatcher
from kivy.logger import Logger
import global_vars as gvars
from utils import logs
import appworker
from appworker import async_tasks
import json
import os
class PlaylistLoader(EventDispatcher):
    """Loads playlist files from disk (sync or via appworker tasks) and
    keeps them grouped by section in the ``playlists`` property."""

    playlists = DictProperty()
    '''All loaded playlist objects in sections'''

    # Maps the 'playlist_type' stored in a playlist file to its class.
    loader_switch = {
        'file_loader': FileLoaderPlaylist,
        'folder_loader': FolderLoaderPlaylist
    }

    def __init__(self, **kwargs):
        self.register_event_type('on_playlists')
        super(PlaylistLoader, self).__init__(**kwargs)

    def on_playlists(self, *args):
        # Default handler for the custom event; subscribers override/bind.
        pass

    def get_playlist_by_path(self, path):
        # Linear scan over all sections; returns None when not found.
        for section, playlist_list in self.playlists.items():
            for pl in playlist_list:
                if pl.path == path:
                    return pl

    def get_playlist_by_name(self, name):
        # Linear scan over all sections; returns None when not found.
        for section, playlist_list in self.playlists.items():
            for pl in playlist_list:
                if pl.name == name:
                    return pl

    def update_from_directories_async(self, directories):
        # Kick off discovery on the appworker; results land in _async_call0.
        Logger.info('playlist_loader: update_from_directories_async')
        ret = async_tasks.find_playlists(directories, self._async_call0)
        appworker.add_task(*ret)

    def _async_call0(self, result):
        # Stage 1 callback: update known playlists, queue loading of new ones.
        files = result['args']
        path_section_list = []
        for section in files:
            for fpath, name in files[section]:
                pl = self.get_playlist_by_path(fpath)
                if pl:
                    pl.update()
                else:
                    path_section_list.append((fpath, section))
        ret = async_tasks.load_playlists(path_section_list, self._async_call1)
        appworker.add_task(*ret)

    def _async_call1(self, result):
        # Stage 2 callback: build playlist objects from the loaded dicts
        # and announce the new state via the on_playlists event.
        lists = result['args']
        for pld in lists:
            section = pld['section']
            if section not in self.playlists:
                self.playlists[section] = []
            new_pl = self.loader_switch[pld['playlist_type']]()
            for attr, value in pld.items():
                setattr(new_pl, attr, value)
            self.playlists[section].append(new_pl)
        self.dispatch('on_playlists', self.playlists)

    def update_from_directories(self, directories):
        Logger.info('playlist_loader: update_from_directories')
        '''Load new playlists from directories, call playlist update() when
        playlist is already in global playlists, then return playlist dict'''
        playlists = self.playlists
        for directory in directories:
            dir_list = os.listdir(directory)
            for section in dir_list:
                if not section in playlists:
                    playlists[section] = []
                file_list = os.listdir('{}{}/'.format(directory, section))
                for f in file_list:
                    found = False
                    fpath = '{}{}/{}'.format(directory, section, f)
                    for k, v in playlists.items():
                        for pl in v:
                            if fpath == pl.path:
                                pl.update()
                                found = True
                                break
                        if found:
                            break
                    if not found:
                        pl = self.load_playlist(
                            '{}{}/{}'.format(directory, section, f), section)
                        if pl:
                            playlists[section].append(pl)
        # Removes playlists which have been deleted
        # NOTE(review): ``section`` below is the leftover value from the
        # loop above, not each playlist's own section — confirm intent.
        remlist = []
        for k, v in playlists.items():
            for pl in v:
                if not os.path.exists(pl.path):
                    remlist.append((section, pl))
        for section, pl in remlist:
            playlists[section].remove(pl)
        return playlists

    def load_playlist(self, path, section):
        '''Load new playlist object from path and return it'''
        # Returns None when the file can't be read/parsed (error is logged).
        playlist = None
        try:
            with open(path) as data_file:
                data = json.load(data_file)
            if data['playlist_type'] == 'folder_loader':
                playlist = FolderLoaderPlaylist()
            elif data['playlist_type'] == 'file_loader':
                playlist = FileLoaderPlaylist()
            playlist.load(path, data)
            playlist.section = section
        except:
            logs.error('playlist_loader: failed to load playlist \n', trace=True)
        return playlist

    def create_playlist(self, name):
        '''Create new FileLoaderPlaylist with arg[0] name at default path'''
        category = 'playlists'
        load_path = ''
        path = '{}{}/{}.json'.format(gvars.DIR_PLAYLISTS, category, name)
        if os.path.exists(path):
            # Never overwrite an existing playlist file.
            logs.error(
                'create_playlist: Playlist "{}" already exists, skipping'.format(
                    name))
        else:
            playlist = FileLoaderPlaylist.create(name, path, load_path)
# Module-level singleton; importers share this one loader instance.
loader = PlaylistLoader()
| {
"repo_name": "Bakterija/mmplayer",
"path": "mmplayer/media_controller/playlist_loader/__init__.py",
"copies": "1",
"size": "5208",
"license": "mit",
"hash": -8659144698327863000,
"line_mean": 34.9172413793,
"line_max": 81,
"alpha_frac": 0.5462749616,
"autogenerated": false,
"ratio": 4.536585365853658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5582860327453658,
"avg_score": null,
"num_lines": null
} |
from foldkin.base.model_factory import ModelFactory
from foldkin.base.model import Model
from foldkin.coop.coop_model import CoopModelFactory
from copy import deepcopy
def clone(ps):
    """Return an independent deep copy of the given parameter set."""
    duplicate = deepcopy(ps)
    return duplicate
class CoopCollectionFactory(ModelFactory):
    """Builds a CoopCollection by varying one parameter ('N' or 'beta')
    across a list of model ids."""

    def __init__(self, id_list, parameter_name, parameter_values):
        super(CoopCollectionFactory, self).__init__()
        # only these two parameters may be swept
        assert parameter_name in ['N', 'beta']
        self.id_list = id_list
        self.parameter_name = parameter_name
        self.parameter_values = parameter_values
        self.element_model_factory = CoopModelFactory()

    def create_model(self, parameter_set):
        """One CoopModel per (id, value) pair, each built from a cloned
        parameter set with the swept parameter overridden."""
        collection = CoopCollection(parameter_set)
        for element_id, value in zip(self.id_list, self.parameter_values):
            ps_copy = clone(parameter_set)
            ps_copy.set_parameter(self.parameter_name, value)
            element = self.element_model_factory.create_model(
                ps_copy, element_id)
            collection.add_element(element)
        return collection
class CoopCollection(object):
    """An iterable bag of models sharing one parameter set, looked up
    by their id string."""

    def __init__(self, parameter_set):
        super(CoopCollection, self).__init__()
        self.parameter_set = parameter_set
        self.collection = []

    def __iter__(self):
        return iter(self.collection)

    def add_element(self, element):
        self.collection.append(element)

    def get_element(self, id_string):
        """First element whose get_id() matches, or None."""
        for element in self.collection:
            if element.get_id() == id_string:
                return element
        return None

    def get_parameter(self, parameter_name):
        return self.parameter_set.get_parameter(parameter_name)

    def get_parameter_set(self):
        return self.parameter_set
| {
"repo_name": "grollins/foldkin",
"path": "foldkin/coop/coop_collection.py",
"copies": "1",
"size": "2032",
"license": "bsd-2-clause",
"hash": -5876855280599904000,
"line_mean": 34.0344827586,
"line_max": 82,
"alpha_frac": 0.6392716535,
"autogenerated": false,
"ratio": 4.138492871690428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5277764525190428,
"avg_score": null,
"num_lines": null
} |
from foldkin.base.model_factory import ModelFactory
from foldkin.base.model import Model
from foldkin.zam_protein import create_zam_protein_from_pdb_id
class ContactOrderModelFactory(ModelFactory):
    """Creates ContactOrderModel instances from a pdb id."""

    def __init__(self):
        super(ContactOrderModelFactory, self).__init__()

    def create_model(self, pdb_id, parameter_set):
        """Instantiate and return a ContactOrderModel for *pdb_id*."""
        return ContactOrderModel(pdb_id, parameter_set)
class ContactOrderModel(Model):
    """Wraps a zam protein (loaded by pdb id) and exposes its residue
    contacts as Contact objects."""

    def __init__(self, pdb_id, parameter_set):
        super(ContactOrderModel, self).__init__()
        self.pdb_id = pdb_id
        self.zam_protein = create_zam_protein_from_pdb_id(pdb_id)
        self.parameter_set = parameter_set

    def get_id(self):
        return self.pdb_id

    def get_parameter(self, parameter_name):
        return self.parameter_set.get_parameter(parameter_name)

    def get_contact_list(self):
        """Build a Contact for each contact pair reported by the protein.

        NOTE(review): residue numbers are used directly as indices into
        the one-letter sequence — assumes zero-based numbering; confirm.
        """
        sequence = self.zam_protein.get_sequence()
        contacts = []
        for pair in self.zam_protein.get_contact_list():
            first_idx, second_idx = pair[0], pair[1]
            contacts.append(Contact(sequence[first_idx], sequence[second_idx],
                                    first_idx, second_idx))
        return contacts
class Contact(object):
    """A single residue-residue contact within a protein chain."""

    def __init__(self, residue1_name, residue2_name, residue1_number,
                 residue2_number):
        super(Contact, self).__init__()
        self.residue1_name, self.residue2_name = residue1_name, residue2_name
        self.residue1_number = residue1_number
        self.residue2_number = residue2_number

    def get_sequence_separation(self):
        """Return the separation of the two residues along the sequence."""
        return self.residue2_number - self.residue1_number

    def get_residue_names_as_letters(self):
        """Return the pair of one-letter residue names as a two-item list."""
        return [self.residue1_name, self.residue2_name]
| {
"repo_name": "grollins/foldkin",
"path": "foldkin/kings/contact_order_model.py",
"copies": "1",
"size": "2148",
"license": "bsd-2-clause",
"hash": -4462443247395210000,
"line_mean": 36.0344827586,
"line_max": 80,
"alpha_frac": 0.6513035382,
"autogenerated": false,
"ratio": 3.5328947368421053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4684198275042105,
"avg_score": null,
"num_lines": null
} |
from foldkin.base.model_factory import ModelFactory
from foldkin.kings.contact_order_model import ContactOrderModelFactory
class ContactOrderCollectionFactory(ModelFactory):
    """Builds a ContactOrderCollection holding one model per PDB id."""

    def __init__(self, pdb_id_list):
        super(ContactOrderCollectionFactory, self).__init__()
        self.element_model_factory = ContactOrderModelFactory()
        self.pdb_id_list = pdb_id_list

    def create_model(self, parameter_set):
        """Create a collection populated with one model for every PDB id."""
        collection = ContactOrderCollection(parameter_set)
        for pdb_id in self.pdb_id_list:
            element = self.element_model_factory.create_model(pdb_id,
                                                              parameter_set)
            collection.add_element(element)
        return collection
class ContactOrderCollection(object):
    """An iterable container of models sharing one parameter set."""

    def __init__(self, parameter_set):
        super(ContactOrderCollection, self).__init__()
        self.parameter_set = parameter_set
        self.collection = []

    def __iter__(self):
        return iter(self.collection)

    def add_element(self, element):
        """Append *element* to the collection."""
        self.collection.append(element)

    def get_element(self, id_string):
        """Return the first element whose id matches *id_string*, or None."""
        for element in self.collection:
            if element.get_id() == id_string:
                return element
        return None

    def get_parameter(self, parameter_name):
        """Delegate parameter lookup to the shared parameter set."""
        return self.parameter_set.get_parameter(parameter_name)
| {
"repo_name": "grollins/foldkin",
"path": "foldkin/kings/contact_order_collection.py",
"copies": "1",
"size": "1528",
"license": "bsd-2-clause",
"hash": -2185306959674432300,
"line_mean": 32.9555555556,
"line_max": 88,
"alpha_frac": 0.664921466,
"autogenerated": false,
"ratio": 4.2444444444444445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02247191011235955,
"num_lines": 45
} |
from foldkin.scipy_optimizer import ScipyOptimizer
import foldkin.one_param_curve.curve_fit_one_feature_model as curve
from foldkin.util import make_score_fcn
import nose.tools
from types import FloatType
# Absolute tolerance used when comparing fitted values to expected optima.
EPSILON = 1e-3
@nose.tools.istest
class TestCurveFitOneFeatureFit(object):
    # Integration test: fit the one-feature curve model against the bundled
    # target data and check both the residual score and the recovered
    # parameter values.
    @nose.tools.istest
    def return_correct_score_and_optimal_parameter_value(self):
        '''This example fits a linear model to y=2x+5.
        The fitting function is y= a * x^b + c.
        The expected fit parameters are a = 2, b = 1, and c = 5.
        '''
        # Start the optimizer from a deliberately wrong parameter set.
        initial_parameters = curve.CurveFitOneFeatureParameterSet()
        initial_parameters.set_parameter('a', 0.0)
        initial_parameters.set_parameter('b', 1.0)
        initial_parameters.set_parameter('c', 0.0)
        judge = curve.CurveFitOneFeatureJudge()
        data_predictor = curve.CurveFitOneFeatureDataPredictor()
        target_data = curve.CurveFitOneFeatureTargetData()
        target_data.load_data()
        id_list = target_data.get_id_list()
        model_factory = curve.CurveFitOneFeatureModelFactory(id_list)
        # make_score_fcn closes over the collaborators and returns a plain
        # callable suitable for scipy-style minimization.
        score_fcn = make_score_fcn(model_factory, initial_parameters,
                                   judge, data_predictor, target_data)
        optimizer = ScipyOptimizer()
        results = optimizer.optimize_parameters(score_fcn, initial_parameters)
        new_params, score, num_iterations = results
        error_message = "Expected float, got %s %s" % (type(score), score)
        nose.tools.ok_(type(score) is FloatType, error_message)
        error_message = "Expected ParameterSet, got %s" % new_params
        nose.tools.ok_(type(new_params) is type(initial_parameters),
                       error_message)
        # Since the data is exactly y = 2x + 5, a perfect fit yields a
        # residual score of ~0 within EPSILON.
        # NOTE(review): the failure message below says "score = 2.0" while
        # the assertion checks for 0.0 — the message looks stale.
        nose.tools.ok_(abs(0.0 - score) < EPSILON,
                       "Expected score = 2.0, got %s %s" % (type(score), score))
        nose.tools.ok_(abs(2.0 - new_params.get_parameter('a')) < EPSILON,
                       "Expected optimal a = 2.0, got %s" % new_params)
        nose.tools.ok_(abs(1.0 - new_params.get_parameter('b')) < EPSILON,
                       "Expected optimal b = 1.0, got %s" % new_params)
        nose.tools.ok_(abs(5.0 - new_params.get_parameter('c')) < EPSILON,
                       "Expected optimal c = 5.0, got %s" % new_params)
        return
| {
"repo_name": "grollins/foldkin",
"path": "foldkin/test/test_curve_fit.py",
"copies": "1",
"size": "2306",
"license": "bsd-2-clause",
"hash": 4221378966934505000,
"line_mean": 50.2444444444,
"line_max": 80,
"alpha_frac": 0.6248915872,
"autogenerated": false,
"ratio": 3.564142194744977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4689033781944977,
"avg_score": null,
"num_lines": null
} |
from foldkin.scipy_optimizer import ScipyOptimizer
import foldkin.simple.simple_model as simple_model
import nose.tools
from types import FloatType
# Absolute tolerance used when comparing optimized values to expected optima.
EPSILON = 1e-3
@nose.tools.istest
class TestSimpleOptimization(object):
    # End-to-end check of ScipyOptimizer against the trivial quadratic
    # model y = (x - 3)^2 + 2, whose optimum is known analytically.
    def make_score_fcn(self, model_factory, parameter_set,
                       judge, data_predictor, target_data):
        # Adapt the model/judge collaborators into a plain function of a
        # raw parameter array, as expected by the scipy-based optimizer.
        def f(current_parameter_array):
            parameter_set.update_from_array(current_parameter_array)
            current_model = model_factory.create_model(parameter_set)
            score, prediction = judge.judge_prediction(current_model,
                                                       data_predictor,
                                                       target_data)
            return score
        return f

    @nose.tools.istest
    def return_correct_score_and_optimal_parameter_value(self):
        '''This example optimizes y = (x-3)^2 + 2 to find
        the value of x that minimizes y.
        '''
        model_factory = simple_model.SimpleModelFactory()
        initial_parameters = simple_model.SimpleParameterSet()
        initial_parameters.set_parameter('x', 0.0)
        judge = simple_model.SimpleJudge()
        data_predictor = simple_model.SimpleDataPredictor()
        target_data = simple_model.SimpleTargetData()
        target_data.load_data()
        score_fcn = self.make_score_fcn(model_factory, initial_parameters,
                                        judge, data_predictor, target_data)
        optimizer = ScipyOptimizer()
        results = optimizer.optimize_parameters(score_fcn, initial_parameters)
        new_params, score, num_iterations = results
        # The minimum of (x-3)^2 + 2 is 2, attained at x = 3.
        error_message = "Expected float, got %s %s" % (type(score), score)
        nose.tools.ok_(type(score) is FloatType, error_message)
        error_message = "Expected ParameterSet, got %s" % new_params
        nose.tools.ok_(type(new_params) is type(initial_parameters),
                       error_message)
        nose.tools.ok_(abs(2.0 - score) < EPSILON,
                       "Expected score = 2.0, got %s %s" % (type(score), score))
        nose.tools.ok_(abs(3.0 - new_params.get_parameter('x')) < EPSILON,
                       "Expected optimal x = 3.0, got %s" % new_params)
        return

    @nose.tools.istest
    def stop_after_specified_number_of_iterations(self):
        # Same setup as above, but cap the number of function evaluations.
        model_factory = simple_model.SimpleModelFactory()
        initial_parameters = simple_model.SimpleParameterSet()
        initial_parameters.set_parameter('x', 0.0)
        judge = simple_model.SimpleJudge()
        data_predictor = simple_model.SimpleDataPredictor()
        target_data = simple_model.SimpleTargetData()
        target_data.load_data()
        score_fcn = self.make_score_fcn(model_factory, initial_parameters,
                                        judge, data_predictor, target_data)
        max_iterations = 1
        optimizer = ScipyOptimizer(maxfun=max_iterations)
        print optimizer.maxfun
        results = optimizer.optimize_parameters(score_fcn, initial_parameters)
        new_params, score, num_iterations = results
        error_msg = "Expected %d iterations, got %d" % (max_iterations, num_iterations)
        # NOTE(review): this asserts the optimizer performs exactly one more
        # evaluation than maxfun; confirm overshoot-by-one is the intended
        # contract rather than an off-by-one in the assertion.
        nose.tools.eq_( (num_iterations - max_iterations), 1, error_msg)
        print error_msg
        return
| {
"repo_name": "grollins/foldkin",
"path": "foldkin/test/test_simple_optimization.py",
"copies": "1",
"size": "3304",
"license": "bsd-2-clause",
"hash": 3976532150221296600,
"line_mean": 46.884057971,
"line_max": 87,
"alpha_frac": 0.6156174334,
"autogenerated": false,
"ratio": 4.014580801944107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5130198235344107,
"avg_score": null,
"num_lines": null
} |
from fol import *
def remove_equivalences(statement):
    """Recursively replace every Equivalence node with its implication pair."""
    if isinstance(statement, Predicate):
        # Leaves carry no equivalences.
        return statement
    if isinstance(statement, Equivalence):
        return remove_equivalences(statement.get_implications())
    if isinstance(statement, Quantifier):
        statement.statement = remove_equivalences(statement.statement)
        return statement
    if hasattr(statement, "get_children"):
        rewritten = [remove_equivalences(child)
                     for child in statement.get_children()]
        statement.set_children(rewritten)
    return statement
def remove_implications(statement):
    """Recursively replace every Implication node with its Or equivalent."""
    if isinstance(statement, Predicate):
        # Leaves carry no implications.
        return statement
    if isinstance(statement, Implication):
        return remove_implications(statement.get_or())
    if isinstance(statement, Quantifier):
        statement.statement = remove_implications(statement.statement)
        return statement
    if hasattr(statement, "get_children"):
        rewritten = [remove_implications(child)
                     for child in statement.get_children()]
        statement.set_children(rewritten)
    return statement
def get_new_variable(used_names):
    """Return a fresh variable name not present in *used_names*.

    Candidates are z, y, ..., l, then zz, yy, ..., up to four repetitions.
    Returns None if every candidate is taken.
    """
    for length in range(1, 5):
        for code in range(ord("z"), ord("k"), -1):
            candidate = chr(code) * length
            if candidate not in used_names:
                return candidate
def get_new_constant(used_names):
    """Return a fresh constant name not present in *used_names*.

    Candidates are a, b, ..., j, then aa, bb, ..., up to four repetitions.
    Returns None if every candidate is taken.
    """
    for length in range(1, 5):
        for code in range(ord("a"), ord("k")):
            candidate = chr(code) * length
            if candidate not in used_names:
                return candidate
def standardize_apart(statement, scope=None, used_names=None):
    """Rename quantified variables so each quantifier binds a unique name.

    Walks the formula tree in place, assigning a fresh name whenever a
    quantifier reuses one that is already taken.

    :param statement: the formula node to rewrite (mutated in place).
    :param scope: mapping of original variable names to replacements in the
        current scope; created fresh when not supplied.
    :param used_names: names already taken; created fresh when not supplied.
    :raises Exception: when a Variable occurs that is not bound by any
        enclosing quantifier.
    """
    # BUG FIX: the previous mutable default arguments (scope={},
    # used_names=[]) were shared between independent top-level calls, so
    # renamings from one formula leaked into the next.
    if scope is None:
        scope = {}
    if used_names is None:
        used_names = []
    if isinstance(statement, Variable):
        if statement.name in scope:
            statement.name = scope[statement.name]
        else:
            raise Exception("unbound variable: %s" % statement.name)
    elif isinstance(statement, Quantifier):
        if statement.variable_name not in used_names:
            # First use of this name: keep it, but reserve it.
            scope[statement.variable_name] = statement.variable_name
            used_names.append(statement.variable_name)
        else:
            # Name collision: rebind this quantifier to a fresh name.
            sub = get_new_variable(used_names)
            scope[statement.variable_name] = sub
            statement.variable_name = sub
            used_names.append(sub)
        statement.statement = standardize_apart(statement.statement, scope,
                                                used_names)
    elif hasattr(statement, "get_children"):
        statement.set_children([standardize_apart(s, scope, used_names)
                                for s in statement.get_children()])
    return statement
def get_functions(statement):
    """Collect the names of all Function nodes in the formula tree.

    A Function node contributes only its own name; its arguments are not
    descended into (matching the original traversal).
    """
    if isinstance(statement, Function):
        return [statement.name]
    if not hasattr(statement, "get_children"):
        return []
    names = []
    for child in statement.get_children():
        names.extend(get_functions(child))
    return names
def skolemize(statement, to_skolemize=None, quantified_variables=None,
              used_names=None):
    """Eliminate existential quantifiers by introducing Skolem functions.

    Each existentially quantified variable is replaced by a function of all
    universally quantified variables in whose scope it appears.

    :param statement: formula node to rewrite (mutated in place).
    :param to_skolemize: mapping of existential variable names to their
        Skolem function names; created fresh when not supplied.
    :param quantified_variables: universal variables currently in scope;
        created fresh when not supplied.
    :param used_names: function names already taken; seeded from the
        formula's existing functions when not supplied.
    """
    # BUG FIX: the previous mutable defaults (to_skolemize={},
    # quantified_variables=[]) persisted between independent top-level
    # calls, polluting later skolemizations with stale bindings.
    if to_skolemize is None:
        to_skolemize = {}
    if quantified_variables is None:
        quantified_variables = []
    if used_names is None:
        used_names = get_functions(statement)
    if isinstance(statement, Variable):
        if statement.name in to_skolemize:
            statement = Function(to_skolemize[statement.name],
                                 *[Variable(v) for v in quantified_variables])
    elif isinstance(statement, ThereExists):
        # Allocate a fresh Skolem function name and drop the quantifier.
        new_constant = get_new_constant(used_names)
        to_skolemize[statement.variable_name] = new_constant
        used_names.append(new_constant)
        statement = skolemize(statement.statement, to_skolemize,
                              quantified_variables, used_names)
    elif isinstance(statement, ForAll):
        quantified_variables.append(statement.variable_name)
        statement.statement = skolemize(statement.statement, to_skolemize,
                                        quantified_variables, used_names)
    elif hasattr(statement, "get_children"):
        statement.set_children([skolemize(s, to_skolemize,
                                          quantified_variables, used_names)
                                for s in statement.get_children()])
    return statement
def discard_forall(statement):
    """Drop a leading ForAll wrapper, then recurse over remaining children."""
    if isinstance(statement, ForAll):
        # Strip the quantifier; nested ForAlls are handled via recursion.
        statement = statement.statement
    if hasattr(statement, "get_children"):
        rewritten = [discard_forall(child)
                     for child in statement.get_children()]
        statement.set_children(rewritten)
    return statement
def cnf(statement):
    """Run the clause-form pipeline on *statement*, printing each stage.

    Steps: remove equivalences, remove implications, push negation inward,
    standardize variables apart, skolemize existentials, and drop the
    remaining universal quantifiers.

    :returns: the transformed statement.  (FIX: previously the result was
        only printed and then discarded, so callers could not use it.)
    """
    print(statement)
    print("\nremove equivalences")
    statement = remove_equivalences(statement)
    print(statement)
    print("\nremove implications")
    statement = remove_implications(statement)
    print(statement)
    print("\npush negation")
    statement = statement.push_negation()
    print(statement)
    print('\nStandardize Apart')
    statement = standardize_apart(statement)
    print(statement)
    print('\nSkolemize')
    statement = skolemize(statement)
    print(statement)
    print('\nRemoving For All quatifiers')
    statement = discard_forall(statement)
    print(statement)
    return statement
if __name__ == "__main__":
    # Demo: convert  forall x. (P(x) <-> (Q(x) & exists y. (Q(y) & R(y, x))))
    # to clause form, printing each transformation stage.
    p1 = Predicate('P', Variable('x'))
    p2 = Predicate('Q', Variable('x'))
    p3 = Predicate('Q', Variable('y'))
    f1 = Predicate('R', Variable('y'), Variable('x'))
    expression = ForAll('x', Equivalence(p1, And([p2, ThereExists('y',And([p3,f1 ]) )]) ) )
    cnf(expression)
    # test expressions for standardize apart
    # expression = ForAll('x', ThereExists('y', ThereExists('y', Predicate('p',Variable('x')))))
    # expression = And([ForAll('x',Predicate('P', Variable('x'))), ThereExists('x',Predicate('P', Variable('x'))),ThereExists('x',Predicate('P', Variable('x')))])
    # expression = ForAll('x', And([ ThereExists('y', Predicate('p',Variable('y'))), ThereExists('y', Predicate('p',Variable('y')))]))
    # print(expression)
    # print('\n\n')
    # expression = remove_implications(expression)
    # print(standardize_apart(expression))
| {
"repo_name": "m0hamed/fol-solver",
"path": "clause_form.py",
"copies": "1",
"size": "5031",
"license": "mit",
"hash": 777663320791456300,
"line_mean": 38,
"line_max": 160,
"alpha_frac": 0.7092029418,
"autogenerated": false,
"ratio": 3.6142241379310347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971040577106928,
"avg_score": 0.022604261732350875,
"num_lines": 129
} |
from fol import Predicate, Function, Variable, Nested, Atom
class Substitution:
    """A single binding of a variable to a replacement term."""

    def __init__(self, variable, replacement):
        self.variable = variable
        self.replacement = replacement

    def __str__(self):
        # Conventional "term/variable" notation.
        return "%s/%s" % (self.replacement, self.variable)
def unify(formula1, formula2, mgu=[], trace=False):
    """Unify two formulas, returning the most general unifier (MGU).

    :param formula1: first term/formula.
    :param formula2: second term/formula.
    :param mgu: list of Substitution objects accumulated so far.  The
        default list is never mutated in place (extensions always build a
        new list), so the shared default is safe here.
    :param trace: when True, print each unification step.
    :returns: a list of Substitution objects, or False when the formulas
        are not unifiable.
    """
    pp(trace, "Unifying expression:", formula1, "with expression:", formula2)
    if mgu is False:
        # A failure from an earlier sub-unification propagates.
        return False
    if formula1 == formula2:
        return mgu
    if isinstance(formula1, Nested) and isinstance(formula2, Nested):
        # Compound terms unify only with the same node type, head and arity.
        if type(formula1) != type(formula2) or formula1.name != formula2.name \
                or len(formula1.children) != len(formula2.children):
            # BUG FIX: was `return false` (lowercase), which raised a
            # NameError instead of reporting unification failure.
            return False
        for a, b in zip(formula1.children, formula2.children):
            mgu = unify(a, b, mgu, trace)
        return mgu
    if isinstance(formula1, Variable):
        return unify_variable(formula1, formula2, mgu, trace)
    if isinstance(formula2, Variable):
        return unify_variable(formula2, formula1, mgu, trace)
    # NOTE(review): unequal terms that are neither Nested nor Variable fall
    # through and return None — confirm callers treat None as failure.
def substitute(mu, expression):
    """Apply the substitutions in *mu* to *expression*.

    Nested expressions are rewritten in place; a bare Variable is replaced
    by its bound term.
    """
    relevant = (s for s in mu if occurs_in(s.variable, expression))
    for sub in relevant:
        if isinstance(expression, Variable):
            expression = sub.replacement
        else:
            expression.children = [substitute(mu, child)
                                   for child in expression.children]
    return expression
def occurs_in(variable, expression):
    """Return True when *variable* appears anywhere inside *expression*."""
    if expression == variable:
        return True
    if isinstance(expression, Nested):
        return any(occurs_in(variable, child)
                   for child in expression.children)
    return False
def unify_variable(variable, expression, mgu, trace):
    """Unify a Variable with an expression, extending *mgu*."""
    pp(trace, "Unifying variable:", variable, "with expression:", expression)
    # If the variable is already bound, unify its binding instead.
    for s in mgu:
        if s.variable == variable:
            return unify(s.replacement, expression, mgu, trace)
    t = substitute(mgu, expression)
    # Occurs check: binding here would create an infinite term.
    if occurs_in(variable, t):
        return False
    s = Substitution(variable, t)
    pp(trace, "MGU now is: ", ", ".join(map(str, mgu + [s])))
    return mgu + [s]
def pp(trace, *args):
    """Print *args* only when tracing is enabled."""
    if not trace:
        return
    print(*args)
if __name__ == "__main__":
    # Demo 1: classic textbook unification problem (traced).
    e1 = Predicate("P", Function("f", Variable("u")), Variable("v"), Variable("v"))
    e2 = Predicate("P", Variable("x"), Function("g", Variable("x")), Function("g", Function("f", Function("a"))))
    mgu = unify(e1, e2, trace=True)
    print("Expression 1: ", e1)
    print("Expression 2: ", e2)
    if mgu is False:
        print("Not unifiable")
    else:
        print("MGU: ", ", ".join(map(str, mgu)))
    # Demo 2: chained variable bindings.
    e1 = Predicate("P", Variable("a"), Variable("y"), Function("f", Variable("y")))
    e2 = Predicate("P", Variable("z"), Variable("z"), Variable("u"))
    mgu = unify(e1, e2)
    print("Expression 1: ", e1)
    print("Expression 2: ", e2)
    if mgu is False:
        print("Not unifiable")
    else:
        print("MGU: ", ", ".join(map(str, mgu)))
    # Demo 3: nested function unification.
    e1 = Function("f", Variable("x"), Function("g", Variable("x")), Variable("x"))
    e2 = Function("f", Function("g", Variable("u")), Function("g", Function("g", Function("z"))), Variable("z"))
    mgu = unify(e1, e2)
    print("Expression 1: ", e1)
    print("Expression 2: ", e2)
    if mgu is False:
        print("Not unifiable")
    else:
        print("MGU: ", ", ".join(map(str, mgu)))
| {
"repo_name": "m0hamed/fol-solver",
"path": "unification.py",
"copies": "1",
"size": "3123",
"license": "mit",
"hash": 8770336791081360000,
"line_mean": 32.9456521739,
"line_max": 111,
"alpha_frac": 0.6436119116,
"autogenerated": false,
"ratio": 3.3012684989429175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44448804105429174,
"avg_score": null,
"num_lines": null
} |
from followthemoney import model
from followthemoney.types import registry
from aleph.model import Collection, Events
from aleph.logic import resolver
class Facet(object):
    """Renders one Elasticsearch aggregation into the API facet format.

    Pulls the values, cardinality and interval sub-aggregations for a
    named field out of the raw aggregations payload and exposes them via
    ``to_dict``, decorated with the filters currently active in the parser.
    """

    def __init__(self, name, aggregations, parser):
        self.name = name
        self.parser = parser
        # Raw sub-aggregation payloads for this facet's field.
        self.data = self.extract(aggregations, name, "values")
        self.cardinality = self.extract(aggregations, name, "cardinality")
        self.intervals = self.extract(aggregations, name, "intervals")

    def extract(self, aggregations, name, sub):
        """Dig the named sub-aggregation out of the ES response.

        Handles the layouts produced by the query builder: a
        '<name>.filtered' wrapper, a 'scoped' block, and the plain
        '<name>.<sub>' key.
        """
        if aggregations is None:
            return {}
        aggregations = aggregations.get("%s.filtered" % name, aggregations)
        data = aggregations.get("scoped", {}).get(name, {}).get(name)
        field = "%s.%s" % (name, sub)
        return data or aggregations.get(field, {})

    def expand(self, keys):
        # Hook for subclasses: pre-fetch data for the given bucket keys.
        pass

    def update(self, result, key):
        # Hook for subclasses: decorate a single facet value (e.g. label).
        pass

    def to_dict(self):
        """Serialize the facet (filters, total, values, intervals)."""
        active = list(self.parser.filters.get(self.name, []))
        data = {"filters": active}
        if self.parser.get_facet_total(self.name):
            data["total"] = self.cardinality.get("value")
        if self.parser.get_facet_values(self.name):
            results = []
            for bucket in self.data.get("buckets", []):
                key = str(bucket.get("key"))
                results.append(
                    {
                        "id": key,
                        "label": key,
                        "count": bucket.pop("doc_count", 0),
                        "active": key in active,
                    }
                )
                if key in active:
                    active.remove(key)
            # Active filter values missing from the buckets are surfaced
            # with a zero count so the UI can still render them.
            for key in active:
                results.insert(0, {"id": key, "label": key, "count": 0, "active": True})
            self.expand([r.get("id") for r in results])
            for result in results:
                self.update(result, result.get("id"))
            data["values"] = results
        if self.parser.get_facet_interval(self.name):
            results = []
            for bucket in self.intervals.get("buckets", []):
                key = str(bucket.get("key_as_string"))
                count = bucket.pop("doc_count", 0)
                results.append(
                    {"id": key, "label": key, "count": count, "active": key in active}
                )
            data["intervals"] = sorted(results, key=lambda k: k["id"])
        return data
class SchemaFacet(Facet):
    """Facet over FtM schema names; labels use the schema's plural form."""

    def update(self, result, key):
        # Fall back to the raw key when the schema is unknown; getattr with
        # a default mirrors the original try/except AttributeError.
        result["label"] = getattr(model.get(key), "plural", key)
class CountryFacet(Facet):
    """Facet over country codes, labelled with human-readable names."""

    def update(self, result, key):
        label = registry.country.names.get(key, key)
        result["label"] = label
class EventFacet(Facet):
    """Facet over notification event names, labelled with the event title."""

    def update(self, result, key):
        event = Events.get(key)
        if event is None:
            result["label"] = key
        else:
            result["label"] = event.title
class LanguageFacet(Facet):
    """Facet over language codes, labelled with human-readable names."""

    def update(self, result, key):
        label = registry.language.names.get(key, key)
        result["label"] = label
class CategoryFacet(Facet):
    """Facet over collection categories, labelled from the category map."""

    def update(self, result, key):
        categories = Collection.CATEGORIES
        result["label"] = categories.get(key, key)
class CollectionFacet(Facet):
    """Facet over collection ids; resolves readable collections for labels."""

    def expand(self, keys):
        # Queue every readable collection, then resolve the batch at once.
        authz = self.parser.authz
        for key in keys:
            if authz.can(key, authz.READ):
                resolver.queue(self.parser, Collection, key)
        resolver.resolve(self.parser)

    def update(self, result, key):
        collection = resolver.get(self.parser, Collection, key)
        if collection is None:
            return
        result["label"] = collection.get("label")
        result["category"] = collection.get("category")
| {
"repo_name": "alephdata/aleph",
"path": "aleph/search/facet.py",
"copies": "1",
"size": "3693",
"license": "mit",
"hash": 9179358044912402000,
"line_mean": 31.9732142857,
"line_max": 88,
"alpha_frac": 0.5564581641,
"autogenerated": false,
"ratio": 4.044906900328587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5101365064428587,
"avg_score": null,
"num_lines": null
} |
from followthemoney import model
from followthemoney.types import registry
from aleph.model import Collection
from aleph.logic import resolver
class Facet(object):
    """Renders one Elasticsearch aggregation into the API facet format.

    Pulls the values and cardinality sub-aggregations for a named field
    out of the raw aggregations payload and exposes them via ``to_dict``,
    decorated with the filters currently active in the parser.
    """

    def __init__(self, name, aggregations, parser):
        self.name = name
        self.parser = parser
        # Raw sub-aggregation payloads for this facet's field.
        self.data = self.extract(aggregations, name, 'values')
        self.cardinality = self.extract(aggregations, name, 'cardinality')

    def extract(self, aggregations, name, sub):
        """Dig the named sub-aggregation out of the ES response.

        Handles the layouts produced by the query builder: a
        '<name>.filtered' wrapper, a 'scoped' block, and the plain
        '<name>.<sub>' key.
        """
        if aggregations is None:
            return {}
        aggregations = aggregations.get('%s.filtered' % name, aggregations)
        data = aggregations.get('scoped', {}).get(name, {}).get(name)
        field = '%s.%s' % (name, sub)
        return data or aggregations.get(field, {})

    def expand(self, keys):
        # Hook for subclasses: pre-fetch data for the given bucket keys.
        pass

    def update(self, result, key):
        # Hook for subclasses: decorate a single facet value (e.g. label).
        pass

    def to_dict(self):
        """Serialize the facet (filters, total, values sorted by count)."""
        active = list(self.parser.filters.get(self.name, []))
        data = {'filters': active}
        if self.parser.get_facet_total(self.name):
            data['total'] = self.cardinality.get('value')
        if self.parser.get_facet_values(self.name):
            results = []
            for bucket in self.data.get('buckets', []):
                key = str(bucket.get('key'))
                results.append({
                    'id': key,
                    'label': key,
                    'count': bucket.pop('doc_count', 0),
                    'active': key in active
                })
                if key in active:
                    active.remove(key)
            # Active filter values missing from the buckets are surfaced
            # with a zero count so the UI can still render them.
            for key in active:
                results.insert(0, {
                    'id': key,
                    'label': key,
                    'count': 0,
                    'active': True
                })
            self.expand([r.get('id') for r in results])
            for result in results:
                self.update(result, result.get('id'))
            data['values'] = sorted(results,
                                    key=lambda k: k['count'],
                                    reverse=True)
        return data
class SchemaFacet(Facet):
    """Facet over FtM schema names; labels use the schema's plural form."""

    def update(self, result, key):
        # Fall back to the raw key when the schema is unknown; getattr with
        # a default mirrors the original try/except AttributeError.
        result['label'] = getattr(model.get(key), 'plural', key)
class CountryFacet(Facet):
    """Facet over country codes, labelled with human-readable names."""

    def update(self, result, key):
        label = registry.country.names.get(key, key)
        result['label'] = label
class LanguageFacet(Facet):
    """Facet over language codes, labelled with human-readable names."""

    def update(self, result, key):
        label = registry.language.names.get(key, key)
        result['label'] = label
class CategoryFacet(Facet):
    """Facet over collection categories, labelled from the category map."""

    def update(self, result, key):
        categories = Collection.CATEGORIES
        result['label'] = categories.get(key, key)
class CollectionFacet(Facet):
    """Facet over collection ids; resolves readable collections for labels."""

    def expand(self, keys):
        # Queue every readable collection, then resolve the batch at once.
        authz = self.parser.authz
        for key in keys:
            if authz.can(key, authz.READ):
                resolver.queue(self.parser, Collection, key)
        resolver.resolve(self.parser)

    def update(self, result, key):
        collection = resolver.get(self.parser, Collection, key)
        if collection is None:
            return
        result['label'] = collection.get('label')
        result['category'] = collection.get('category')
| {
"repo_name": "pudo/aleph",
"path": "aleph/search/facet.py",
"copies": "1",
"size": "3166",
"license": "mit",
"hash": -8934566036977981000,
"line_mean": 28.8679245283,
"line_max": 75,
"alpha_frac": 0.541692988,
"autogenerated": false,
"ratio": 4.171277997364954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5212970985364954,
"avg_score": null,
"num_lines": null
} |
from followthemoney import model
from followthemoney.types import registry
from ingestors.analysis import Analyzer
from ingestors.analysis.patterns import EMAIL_REGEX, PHONE_REGEX
from ingestors.analysis.patterns import IBAN_REGEX
from .support import TestCase
class TestAnalysis(TestCase):
    """Exercise the Analyzer over small synthetic entities."""

    def _tagged_entity(self, entity):
        # Run the analyzer over the entity and return the emitted copy.
        analyzer = Analyzer(self.manager.dataset, entity, {})
        analyzer.feed(entity)
        analyzer.flush()
        return self.get_emitted_by_id(entity.id)

    def test_ner_extract(self):
        text = "Das ist der Pudel von Angela Merkel. " * 5
        entity = model.make_entity("PlainText")
        entity.id = "test1"
        entity.add("bodyText", text)
        entity = self._tagged_entity(entity)
        names = entity.get_type_values(registry.name)
        assert "Angela Merkel" in names, names

    def test_language_tagging(self):
        text = "C'est le caniche d'Emmanuel Macron. " * 2
        entity = model.make_entity("PlainText")
        entity.id = "test2"
        entity.add("bodyText", text)
        entity = self._tagged_entity(entity)
        names = entity.get_type_values(registry.name)
        assert "Emmanuel Macron" in names, names
        detected = entity.get("detectedLanguage")
        assert detected == ["fra"], detected

    def test_pattern_extract(self):
        text = "Mr. Flubby Flubber called the number tel:+919988111222 twice"
        entity = model.make_entity("PlainText")
        entity.id = "test3"
        entity.add("bodyText", text)
        entity = self._tagged_entity(entity)
        assert "+919988111222" in entity.get_type_values(registry.phone)
        # The phone prefix implies the country code.
        assert "in" in entity.get_type_values(registry.country)
class TestPatterns(TestCase):
    """Check each extraction regex against representative samples.

    Every sample must produce exactly one match.
    """

    def test_phonenumbers(self):
        samples = [
            "754-3010",
            "(541) 754-3010",
            "+1-541-754-3010",
            "1-541-754-3010",
            "001-541-754-3010",
            "191 541 754 3010",
            "(089) / 636-48018",
            "+49-89-636-48018",
            "19-49-89-636-48018",
            "phone: +49-89-636-48018",
            "tel +49-89-636-48018 or so",
        ]
        for sample in samples:
            assert len(PHONE_REGEX.findall(sample)) == 1

    def test_iban(self):
        samples = [
            "SC52BAHL01031234567890123456USD",
            "SK8975000000000012345671",
            "SI56192001234567892",
            "ES7921000813610123456789",
            "SE1412345678901234567890",
            "CH5604835012345678009",
            "TL380080012345678910157",
            "TN4401000067123456789123",
            "TR320010009999901234567890",
            "UA903052992990004149123456789",
            "AE460090000000123456789",
            "GB98MIDL07009312345678",
            "VG21PACG0000000123456789",
        ]
        for sample in samples:
            assert len(IBAN_REGEX.findall(sample)) == 1

    def test_email(self):
        samples = ["abc@sunu.in", "abc+netflix@sunu.in", "_@sunu.in"]
        for sample in samples:
            assert len(EMAIL_REGEX.findall(sample)) == 1
| {
"repo_name": "alephdata/ingestors",
"path": "tests/test_analysis.py",
"copies": "1",
"size": "3271",
"license": "mit",
"hash": 8631580901761049000,
"line_mean": 33.4315789474,
"line_max": 77,
"alpha_frac": 0.5906450627,
"autogenerated": false,
"ratio": 3.5247844827586206,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.961542954545862,
"avg_score": 0,
"num_lines": 95
} |
from followthemoney import model
from normality import collapse_spaces
# Schema handles used across the analysis pipeline.
ANALYZABLE = model.get("Analyzable")
DOCUMENT = model.get("Document")
# Property handles for writing extraction results onto entities.
TAG_PERSON = ANALYZABLE.get("peopleMentioned")
TAG_COMPANY = ANALYZABLE.get("companiesMentioned")
TAG_LANGUAGE = ANALYZABLE.get("detectedLanguage")
TAG_COUNTRY = ANALYZABLE.get("detectedCountry")
TAG_EMAIL = ANALYZABLE.get("emailMentioned")
TAG_PHONE = ANALYZABLE.get("phoneMentioned")
TAG_IBAN = ANALYZABLE.get("ibanMentioned")
TAG_LOCATION = ANALYZABLE.get("location")
def text_chunks(texts, sep=" ", max_chunk=25000):
    """Pre-chew text snippets for NLP and pattern matching.

    Whitespace is collapsed and overly long snippets are sliced into
    chunks of at most *max_chunk* characters.
    """
    for raw in texts:
        snippet = collapse_spaces(raw)
        # Skip empty or trivially short snippets.
        if snippet is None or len(snippet) < 5:
            continue
        # Crudest text splitting code in documented human history.
        # Most of the time, a single page of text is going to be
        # 3000-4000 characters, so this really only kicks in if
        # something weird is happening in the first place.
        for start in range(0, len(snippet), max_chunk):
            yield snippet[start:start + max_chunk]
| {
"repo_name": "alephdata/ingestors",
"path": "ingestors/analysis/util.py",
"copies": "1",
"size": "1104",
"license": "mit",
"hash": 4163704485776459000,
"line_mean": 38.4285714286,
"line_max": 66,
"alpha_frac": 0.6992753623,
"autogenerated": false,
"ratio": 3.45,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46492753623000005,
"avg_score": null,
"num_lines": null
} |
from followthemoney import model
from ingestors.ingestor import Ingestor
from ingestors.support.encoding import EncodingSupport
from ingestors.exc import ProcessingException
class PlainTextIngestor(Ingestor, EncodingSupport):
    """Plain text file ingestor class.

    Extracts the text from the document and enforces unicode on it.
    """
    # FIX: removed a duplicate 'text/x-msdos-batch' entry that was listed
    # twice in this sequence.
    MIME_TYPES = [
        'text/plain',
        'text/x-c',
        'text/x-c++',
        'text/x-diff',
        'text/x-python',
        'text/x-shellscript',
        'text/x-java',
        'text/x-php',
        'text/troff',
        'text/x-ruby',
        'text/x-pascal',
        'text/x-msdos-batch',
        'text/x-yaml',
        'text/x-makefile',
        'text/x-perl',  # %^&%*^&%*%^
        'text/x-objective-c',
        'text/x-asm',
        'text/x-csrc',
        'text/x-sh',
        'text/javascript',
        'text/x-algol68',
    ]
    # Refuse to ingest text files larger than this many bytes.
    MAX_SIZE = 4 * 1024 * 1024
    SCORE = 1

    def ingest(self, file_path, entity):
        """Ingestor implementation: decode the file and store the body text.

        :raises ProcessingException: when the file exceeds MAX_SIZE.
        """
        entity.schema = model.get('PlainText')
        for file_size in entity.get('fileSize'):
            if int(file_size) > self.MAX_SIZE:
                raise ProcessingException("Text file is too large.")
        text = self.read_file_decoded(entity, file_path)
        entity.set('bodyText', text)
| {
"repo_name": "pudo/aleph",
"path": "services/ingest-file/ingestors/documents/plain.py",
"copies": "1",
"size": "1356",
"license": "mit",
"hash": 9163377455881133000,
"line_mean": 27.25,
"line_max": 68,
"alpha_frac": 0.5663716814,
"autogenerated": false,
"ratio": 3.3399014778325125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9406273159232512,
"avg_score": 0,
"num_lines": 48
} |
from followthemoney import model
from ingestors.ingestor import Ingestor
from ingestors.support.pdf import PDFSupport
from ingestors.support.ole import OLESupport
class DocumentIngestor(Ingestor, OLESupport, PDFSupport):
    """Office/Word document ingestor class.

    Converts the document to PDF and extracts the text.
    Mostly a slightly adjusted PDF ingestor.

    Requires system tools:

    - Open/Libre Office with dependencies
    - image ingestor dependencies to cover any embeded images OCR
    """
    # FIX: a missing comma after 'application/CDFV2-corrupt' silently
    # concatenated it with 'application/clarisworks' into a single bogus
    # string, so neither MIME type ever matched.  Also removed exact
    # duplicate entries (msword, vnd.ms-powerpoint, vnd.sun.xml.impress,
    # vnd.wordperfect) that appeared twice in the list.
    MIME_TYPES = [
        # Text documents
        'text/richtext',
        'text/rtf',
        'application/rtf',
        'application/x-rtf',
        'application/msword',
        'application/vnd.ms-word',
        'application/wordperfect',
        'application/vnd.wordperfect',

        # Presentations
        'application/vnd.ms-powerpoint',
        'application/vnd.sun.xml.impress',
        'application/vnd.ms-powerpoint.presentation',
        'application/vnd.ms-powerpoint.presentation.12',

        # MS Office files with short stream missing
        'application/CDFV2-unknown',
        'application/CDFV2-corrupt',
        'application/clarisworks',  # ClarisWorks_Draw
        'application/epub+zip',  # EPUB Document
        'application/macwriteii',  # MacWrite
        'application/prs.plucker',  # Plucker eBook
        'application/vnd.corel-draw',  # Corel Draw Document
        'application/vnd.lotus-wordpro',  # LotusWordPro
        'application/vnd.ms-powerpoint.presentation.macroEnabled.main+xml',  # Impress MS PowerPoint 2007 XML VBA  # noqa
        'application/vnd.ms-works',  # Mac_Works
        'application/vnd.palm',  # Palm_Text_Document
        'application/vnd.sun.xml.draw',  # StarOffice XML (Draw)
        'application/vnd.sun.xml.draw.template',  # draw_StarOffice_XML_Draw_Template  # noqa
        'application/vnd.sun.xml.impress.template',  # impress_StarOffice_XML_Impress_Template  # noqa
        'application/vnd.sun.xml.writer',  # StarOffice XML (Writer)
        'application/vnd.sun.xml.writer.global',  # writer_globaldocument_StarOffice_XML_Writer_GlobalDocument  # noqa
        'application/vnd.sun.xml.writer.template',  # writer_StarOffice_XML_Writer_Template  # noqa
        'application/vnd.sun.xml.writer.web',  # writer_web_StarOffice_XML_Writer_Web_Template  # noqa
        'application/vnd.visio',  # Visio Document
        'application/x-abiword',  # AbiWord
        'application/x-aportisdoc',  # PalmDoc
        'application/x-fictionbook+xml',  # FictionBook 2
        'application/x-hwp',  # writer_MIZI_Hwp_97
        'application/x-iwork-keynote-sffkey',  # Apple Keynote
        'application/x-iwork-pages-sffpages',  # Apple Pages
        'application/x-mspublisher',  # Publisher Document
        'application/x-mswrite',  # MS_Write
        'application/x-pagemaker',  # PageMaker Document
        'application/x-sony-bbeb',  # BroadBand eBook
        'application/x-t602',  # T602Document
        'image/x-cmx',  # Corel Presentation Exchange
        'image/x-freehand',  # Freehand Document
        'image/x-wpg',  # WordPerfect Graphics
    ]
    EXTENSIONS = [
        '602',  # T602Document
        'abw',  # AbiWord
        'cdr',  # Corel Draw Document
        'cmx',  # Corel Presentation Exchange
        'cwk',  # ClarisWorks_Draw
        'doc',  # Mac_Word
        'dot',  # MS Word 97 Vorlage
        'dps',  # MS PowerPoint 97
        'dpt',  # MS PowerPoint 97 Vorlage
        'epub',  # EPUB Document
        'fb2',  # FictionBook 2
        'fh',  # Freehand Document
        'fh1',  # Freehand Document
        'fh10',  # Freehand Document
        'fh11',  # Freehand Document
        'fh2',  # Freehand Document
        'fh3',  # Freehand Document
        'fh4',  # Freehand Document
        'fh5',  # Freehand Document
        'fh6',  # Freehand Document
        'fh7',  # Freehand Document
        'fh8',  # Freehand Document
        'fh9',  # Freehand Document
        'fodg',  # OpenDocument Drawing Flat XML
        'fodp',  # OpenDocument Presentation Flat XML
        'fodt',  # OpenDocument Text Flat XML
        'hwp',  # writer_MIZI_Hwp_97
        'key',  # Apple Keynote
        'lrf',  # BroadBand eBook
        'lwp',  # LotusWordPro
        'mcw',  # MacWrite
        'mw',  # MacWrite
        'mwd',  # Mariner_Write
        'nxd',  # WriteNow
        'odg',  # draw8
        'odm',  # writerglobal8
        'otg',  # draw8_template
        'oth',  # writerweb8_writer_template
        'otm',  # writerglobal8_template
        'otp',  # impress8_template
        'ott',  # writer8_template
        'p65',  # PageMaker Document
        'pages',  # Apple Pages
        'pdb',  # Palm_Text_Document
        'pm',  # PageMaker Document
        'pm6',  # PageMaker Document
        'pmd',  # PageMaker Document
        'pot',  # PowerPoint 3
        'pps',  # MS PowerPoint 97 AutoPlay
        'ppt',  # PowerPoint 3
        # 'pptm',  # Impress Office Open XML
        'pub',  # Publisher Document
        'qxd',  # QXP Document
        'qxt',  # QXP Document
        'rtf',  # Rich Text Format
        'sda',  # StarOffice_Drawing
        'sdd',  # StarOffice_Presentation
        'sdw',  # StarOffice_Writer
        'std',  # draw_StarOffice_XML_Draw_Template
        'sti',  # impress_StarOffice_XML_Impress_Template
        'stw',  # writer_StarOffice_XML_Writer_Template
        'sxd',  # StarOffice XML (Draw)
        'sxg',  # writer_globaldocument_StarOffice_XML_Writer_GlobalDocument
        'sxi',  # StarOffice XML (Impress)
        'sxw',  # StarOffice XML (Writer)
        # 'tab',  # Text
        # 'tsv',  # Text
        # 'txt',  # Text
        'vdx',  # Visio Document
        'vsd',  # Visio Document
        'vsdm',  # Visio Document
        'vsdx',  # Visio Document
        'wn',  # WriteNow
        'wpd',  # WordPerfect
        'wpg',  # WordPerfect Graphics
        'wps',  # Mac_Works
        'wpt',  # MS Word 97 Vorlage
        'wri',  # MS_Write
        'xlc',  # MS Excel 95
        'xlm',  # MS Excel 95
        'xls',  # MS Excel 95
        'xlw',  # MS Excel 95
        # 'xml',  # OpenDocument Drawing Flat XML
        'zabw',  # AbiWord
        # 'zip',  # FictionBook 2
        'zmf',  # ZMF Document
    ]
    SCORE = 6

    def ingest(self, file_path, entity):
        """Ingestor implementation: extract OLE metadata, convert the
        document to PDF, and extract page text from the PDF."""
        entity.schema = model.get('Pages')
        self.extract_ole_metadata(file_path, entity)
        pdf_path = self.document_to_pdf(file_path, entity)
        self.pdf_alternative_extract(entity, pdf_path)
| {
"repo_name": "pudo/aleph",
"path": "services/ingest-file/ingestors/documents/office.py",
"copies": "1",
"size": "6814",
"license": "mit",
"hash": 815266494573957500,
"line_mean": 38.6162790698,
"line_max": 121,
"alpha_frac": 0.5931904902,
"autogenerated": false,
"ratio": 3.3533464566929134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9446536946892914,
"avg_score": 0,
"num_lines": 172
} |
from followthemoney import model
from ingestors.ingestor import Ingestor
from ingestors.support.pdf import PDFSupport
from ingestors.support.ooxml import OOXMLSupport
class OfficeOpenXMLIngestor(Ingestor, OOXMLSupport, PDFSupport):
    """Office/Word document ingestor class.

    Converts the document to PDF and extracts the text.
    Mostly a slightly adjusted PDF ingestor.
    """
    PREFIX = 'application/vnd.openxmlformats-officedocument.'
    # FIX: 'presentationml.slideshow' was listed twice; the duplicate
    # entry is removed (membership semantics are unchanged).
    MIME_TYPES = [
        PREFIX + 'wordprocessingml.document',
        PREFIX + 'wordprocessingml.template',
        PREFIX + 'presentationml.slideshow',
        PREFIX + 'presentationml.presentation',
        PREFIX + 'presentationml.template',
    ]
    EXTENSIONS = [
        'docx', 'docm', 'dotx', 'dotm',
        'potx', 'pptx', 'ppsx', 'pptm',
        'ppsm', 'potm'
    ]
    SCORE = 7

    def ingest(self, file_path, entity):
        """Extract OOXML metadata, convert to PDF, then extract the text."""
        entity.schema = model.get('Pages')
        self.ooxml_extract_metadata(file_path, entity)
        pdf_path = self.document_to_pdf(file_path, entity)
        self.pdf_alternative_extract(entity, pdf_path)

    @classmethod
    def match(cls, file_path, entity):
        """Double the score when the file carries a genuine OOXML manifest."""
        score = super(OfficeOpenXMLIngestor, cls).match(file_path, entity)
        if score > 0 and cls.inspect_ooxml_manifest(file_path):
            score = cls.SCORE * 2
        return score
| {
"repo_name": "pudo/aleph",
"path": "services/ingest-file/ingestors/documents/ooxml.py",
"copies": "1",
"size": "1437",
"license": "mit",
"hash": 7298880375426226000,
"line_mean": 32.4186046512,
"line_max": 74,
"alpha_frac": 0.6569241475,
"autogenerated": false,
"ratio": 3.6105527638190953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9767476911319095,
"avg_score": 0,
"num_lines": 43
} |
from followthemoney import model
from ingestors.ingestor import Ingestor
from ingestors.support.pdf import PDFSupport
from ingestors.support.opendoc import OpenDocumentSupport
class OpenDocumentIngestor(Ingestor, OpenDocumentSupport, PDFSupport):
    """Office/Word document ingestor class.

    Converts the document to PDF and extracts the text.
    Mostly a slightly adjusted PDF ingestor.

    Requires system tools:
    - Open/Libre Office with dependencies
    - image ingestor dependencies to cover any embedded images OCR
    """
    MIME_TYPES = [
        'application/vnd.oasis.opendocument.text',
        'application/vnd.oasis.opendocument.text-template',
        'application/vnd.oasis.opendocument.presentation',
        'application/vnd.oasis.opendocument.graphics',
        'application/vnd.oasis.opendocument.graphics-flat-xml',
        # BUG FIX: a missing trailing comma after 'graphics-template'
        # made Python concatenate it with the next literal into a single
        # bogus MIME type, so neither 'graphics-template' nor
        # 'presentation-flat-xml' documents ever matched.
        'application/vnd.oasis.opendocument.graphics-template',
        'application/vnd.oasis.opendocument.presentation-flat-xml',
        'application/vnd.oasis.opendocument.presentation-template',
        'application/vnd.oasis.opendocument.chart',
        'application/vnd.oasis.opendocument.chart-template',
        'application/vnd.oasis.opendocument.image',
        'application/vnd.oasis.opendocument.image-template',
        'application/vnd.oasis.opendocument.formula',
        'application/vnd.oasis.opendocument.formula-template',
        'application/vnd.oasis.opendocument.text-flat-xml',
        'application/vnd.oasis.opendocument.text-master',
        'application/vnd.oasis.opendocument.text-web',
    ]
    EXTENSIONS = [
        'odt',
        'odp',
        'otp'
    ]
    SCORE = 7

    def ingest(self, file_path, entity):
        """Parse ODF metadata, convert to PDF, then extract the text."""
        entity.schema = model.get('Pages')
        self.parse_opendocument(file_path, entity)
        pdf_path = self.document_to_pdf(file_path, entity)
        self.pdf_alternative_extract(entity, pdf_path)
| {
"repo_name": "pudo/aleph",
"path": "services/ingest-file/ingestors/documents/opendoc.py",
"copies": "1",
"size": "1944",
"license": "mit",
"hash": -218819383437167840,
"line_mean": 36.3846153846,
"line_max": 70,
"alpha_frac": 0.7011316872,
"autogenerated": false,
"ratio": 3.640449438202247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9841581125402248,
"avg_score": 0,
"num_lines": 52
} |
from followthemoney import model
from ingestors.ingestor import Ingestor
class DirectoryIngestor(Ingestor):
    """Traverse the entries in a directory."""
    MIME_TYPE = "inode/directory"
    SKIP_ENTRIES = [
        '.git',
        '.hg',
        '__MACOSX',
        '.gitignore'
    ]

    def ingest(self, file_path, entity):
        """Ingestor implementation: recursively emit a directory tree."""
        if entity.schema == model.get('Document'):
            entity.schema = model.get('Folder')
        # Only descend into an actual, existing directory.
        if file_path is not None and file_path.is_dir():
            self.crawl(self.manager, file_path, parent=entity)

    @classmethod
    def crawl(cls, manager, file_path, parent=None):
        """Walk one directory level, emitting folders and queueing files."""
        for entry in file_path.iterdir():
            name = entry.name
            if name is None or name in cls.SKIP_ENTRIES:
                continue
            child = manager.make_entity('Document', parent=parent)
            child.add('fileName', name)
            if entry.is_dir():
                # Folder identity derives from the parent chain.
                if parent is None:
                    child.make_id(name)
                else:
                    child.make_id(parent.id, name)
                child.schema = model.get('Folder')
                child.add('mimeType', cls.MIME_TYPE)
                manager.emit_entity(child)
                cls.crawl(manager, entry, parent=child)
            else:
                # File identity derives from its name and content hash.
                checksum = manager.store(entry)
                child.make_id(name, checksum)
                child.set('contentHash', checksum)
                manager.queue_entity(child)
| {
"repo_name": "pudo/aleph",
"path": "services/ingest-file/ingestors/directory.py",
"copies": "1",
"size": "1588",
"license": "mit",
"hash": -5709671823624650000,
"line_mean": 31.4081632653,
"line_max": 66,
"alpha_frac": 0.5415617128,
"autogenerated": false,
"ratio": 4.082262210796915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 49
} |
from followthemoney import model
from ingestors.ingestor import Ingestor
class DirectoryIngestor(Ingestor):
    """Traverse the entries in a directory."""
    MIME_TYPE = "inode/directory"
    # Bookkeeping entries that should never be ingested.
    SKIP_ENTRIES = [".git", ".hg", "__MACOSX", ".gitignore"]

    def ingest(self, file_path, entity):
        """Ingestor implementation.

        Upgrades a generic Document to a Folder, then recursively crawls
        the directory contents (no-op when the path is missing or not a
        directory).
        """
        if entity.schema == model.get("Document"):
            entity.schema = model.get("Folder")
        if file_path is None or not file_path.is_dir():
            return
        self.crawl(self.manager, file_path, parent=entity)

    @classmethod
    def crawl(cls, manager, file_path, parent=None):
        """Emit one entity per directory entry, recursing into subfolders."""
        for path in file_path.iterdir():
            name = path.name
            if name is None or name in cls.SKIP_ENTRIES:
                continue
            sub_path = file_path.joinpath(name)
            child = manager.make_entity("Document", parent=parent)
            child.add("fileName", name)
            if sub_path.is_dir():
                # Folders get a deterministic id from the parent chain and
                # are emitted directly (no content to process).
                if parent is not None:
                    child.make_id(parent.id, name)
                else:
                    child.make_id(name)
                child.schema = model.get("Folder")
                child.add("mimeType", cls.MIME_TYPE)
                manager.emit_entity(child)
                cls.crawl(manager, sub_path, parent=child)
            else:
                # Files are content-addressed and queued for ingestion.
                checksum = manager.store(sub_path)
                child.make_id(name, checksum)
                child.set("contentHash", checksum)
                manager.queue_entity(child)
| {
"repo_name": "alephdata/ingestors",
"path": "ingestors/directory.py",
"copies": "1",
"size": "1551",
"license": "mit",
"hash": 3079777888896278500,
"line_mean": 33.4666666667,
"line_max": 66,
"alpha_frac": 0.55448098,
"autogenerated": false,
"ratio": 4.049608355091384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 45
} |
from fom.api import FluidApi
from fom.tx import TxFluidDB
from fom import errors
from twisted.trial import unittest
from twisted.internet import defer
class TestTxFluidDB(unittest.TestCase):
    """Twisted trial tests for the TxFluidDB client.

    NOTE(review): these tests talk to the live Fluidinfo sandbox over the
    network, so they require connectivity and a working sandbox instance.
    """

    def setUp(self):
        # Fresh client per test, pointed at the public sandbox.
        self.db = TxFluidDB('http://sandbox.fluidinfo.com')

    @defer.inlineCallbacks
    def testRequest(self):
        # Raw request interface: GET /users/test.
        resp = yield self.db('GET', ['users', 'test'])
        self.assertEqual(resp.value[u'name'], 'test')

    @defer.inlineCallbacks
    def testApi(self):
        # High-level FluidApi wrapper over the same endpoint.
        fdb = FluidApi(self.db)
        resp = yield fdb.users['test'].get()
        self.assertEqual(resp.value[u'name'], 'test')

    def testError(self):
        # An unauthenticated PUT must fail with a 401 error.
        fdb = FluidApi(self.db)
        d = fdb.namespaces['test'].put(description='Oh deary')
        return self.assertFailure(d, errors.Fluid401Error)

    @defer.inlineCallbacks
    def testLogin(self):
        # After logging in, the same PUT succeeds with 204 No Content.
        fdb = FluidApi(self.db)
        self.db.login('test', 'test')
        resp = yield fdb.namespaces['test'].put(
            description='Test user namespace')
        self.assertEqual(resp.status, 204)
        self.assertEqual(resp.content, '')
| {
"repo_name": "fluidinfo/fom",
"path": "tests/test_tx.py",
"copies": "1",
"size": "1106",
"license": "mit",
"hash": -1460842600078825000,
"line_mean": 29.7222222222,
"line_max": 62,
"alpha_frac": 0.6464737794,
"autogenerated": false,
"ratio": 3.567741935483871,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47142157148838715,
"avg_score": null,
"num_lines": null
} |
from fom.session import Fluid
from fom.mapping import Namespace, Tag
from fom.mapping import Object, tag_value, readonly_tag_value
import settings
import time
import calendar
import article
import json
# Fluidinfo parser: module-level session shared by every helper below.
# It is logged in lazily by bind_fluidinfo_session() (see values_get).
session = Fluid()
class fi_article(Object):
    """fom mapping for an article object in Fluidinfo.

    Each attribute is a Fluidinfo tag under <namespace>/article/; the
    about-tag uniquely identifies the object (presumably the DOI URL,
    see get_article_initial — confirm against the loader).
    """
    about = readonly_tag_value(u'fluiddb/about')
    # Identifiers
    doi = tag_value(settings.namespace + '/article/doi')
    doi_url = tag_value(settings.namespace + '/article/doi_url')
    pmid = tag_value(settings.namespace + '/article/pmid')
    # Journal / publisher
    journal_id = tag_value(settings.namespace + '/article/journal_id')
    journal_title = tag_value(settings.namespace + '/article/journal_title')
    journal_issn_ppub = tag_value(settings.namespace + '/article/journal_issn_ppub')
    journal_issn_epub = tag_value(settings.namespace + '/article/journal_issn_epub')
    publisher = tag_value(settings.namespace + '/article/publisher')
    # Article content / classification
    article_title = tag_value(settings.namespace + '/article/article_title')
    abstract = tag_value(settings.namespace + '/article/abstract')
    article_type = tag_value(settings.namespace + '/article/article_type')
    article_institution = tag_value(settings.namespace + '/article/article_institution')
    article_country = tag_value(settings.namespace + '/article/article_country')
    subject_area = tag_value(settings.namespace + '/article/subject_area')
    research_organism = tag_value(settings.namespace + '/article/research_organism')
    correspondence = tag_value(settings.namespace + '/article/correspondence')
    author_notes = tag_value(settings.namespace + '/article/author_notes')
    keywords = tag_value(settings.namespace + '/article/keywords')
    # Publication / received / accepted dates (each split into parts)
    pub_date_date = tag_value(settings.namespace + '/article/pub_date_date')
    pub_date_day = tag_value(settings.namespace + '/article/pub_date_day')
    pub_date_month = tag_value(settings.namespace + '/article/pub_date_month')
    pub_date_year = tag_value(settings.namespace + '/article/pub_date_year')
    pub_date_timestamp = tag_value(settings.namespace + '/article/pub_date_timestamp')
    received_date_date = tag_value(settings.namespace + '/article/received_date_date')
    received_date_day = tag_value(settings.namespace + '/article/received_date_day')
    received_date_month = tag_value(settings.namespace + '/article/received_date_month')
    received_date_year = tag_value(settings.namespace + '/article/received_date_year')
    received_date_timestamp = tag_value(settings.namespace + '/article/received_date_timestamp')
    accepted_date_date = tag_value(settings.namespace + '/article/accepted_date_date')
    accepted_date_day = tag_value(settings.namespace + '/article/accepted_date_day')
    accepted_date_month = tag_value(settings.namespace + '/article/accepted_date_month')
    accepted_date_year = tag_value(settings.namespace + '/article/accepted_date_year')
    accepted_date_timestamp = tag_value(settings.namespace + '/article/accepted_date_timestamp')
    # Funding ("principle" is a historical misspelling kept for tag parity)
    award_group_funding_source = tag_value(settings.namespace + '/article/award_group_funding_source')
    award_group_award_id = tag_value(settings.namespace + '/article/award_group_award_id')
    award_group_principle_award_recipient = tag_value(settings.namespace + '/article/award_group_principle_award_recipient')
    funding_statement = tag_value(settings.namespace + '/article/funding_statement')
    # Copyright / licensing
    copyright_statement = tag_value(settings.namespace + '/article/copyright_statement')
    copyright_year = tag_value(settings.namespace + '/article/copyright_year')
    copyright_holder = tag_value(settings.namespace + '/article/copyright_holder')
    license = tag_value(settings.namespace + '/article/license')
    license_type = tag_value(settings.namespace + '/article/license_type')
    license_url = tag_value(settings.namespace + '/article/license_url')
    # Back matter and relations
    ack = tag_value(settings.namespace + '/article/ack')
    conflict = tag_value(settings.namespace + '/article/conflict')
    authors = tag_value(settings.namespace + '/article/authors')
    refs = tag_value(settings.namespace + '/article/refs')
    components = tag_value(settings.namespace + '/article/components')
class fi_ref(Object):
    """fom mapping for one bibliographic reference of an article.

    Tags live under <namespace>/ref/; the about-tag is built from the
    article DOI and the reference position (see get_ref_initial).
    """
    about = readonly_tag_value(u'fluiddb/about')
    ref = tag_value(settings.namespace + '/ref/ref')
    article_doi = tag_value(settings.namespace + '/ref/article_doi')
    article_title = tag_value(settings.namespace + '/ref/article_title')
    publication_type = tag_value(settings.namespace + '/ref/publication_type')
    doi = tag_value(settings.namespace + '/ref/doi')
    doi_url = tag_value(settings.namespace + '/ref/doi_url')
    pmid = tag_value(settings.namespace + '/ref/pmid')
    authors = tag_value(settings.namespace + '/ref/authors')
    year = tag_value(settings.namespace + '/ref/year')
    source = tag_value(settings.namespace + '/ref/source')
    volume = tag_value(settings.namespace + '/ref/volume')
    fpage = tag_value(settings.namespace + '/ref/fpage')
    lpage = tag_value(settings.namespace + '/ref/lpage')
    collab = tag_value(settings.namespace + '/ref/collab')
    publisher_loc = tag_value(settings.namespace + '/ref/publisher_loc')
    publisher_name = tag_value(settings.namespace + '/ref/publisher_name')
    position = tag_value(settings.namespace + '/ref/position')
    etal = tag_value(settings.namespace + '/ref/etal')
class fi_component(Object):
    """fom mapping for an article component object.

    Components are keyed by their own DOI (see get_component_initial).
    """
    about = readonly_tag_value(u'fluiddb/about')
    doi = tag_value(settings.namespace + '/component/doi')
    doi_url = tag_value(settings.namespace + '/component/doi_url')
    type = tag_value(settings.namespace + '/component/type')
    content = tag_value(settings.namespace + '/component/content')
    article_doi = tag_value(settings.namespace + '/component/article_doi')
    position = tag_value(settings.namespace + '/component/position')
class fi_author(Object):
    """fom mapping for one author of an article.

    Authors are keyed by article DOI plus their position in the author
    list (see get_author_initial).
    """
    about = readonly_tag_value(u'fluiddb/about')
    author = tag_value(settings.namespace + '/author/author')
    person_id = tag_value(settings.namespace + '/author/person_id')
    equal_contrib = tag_value(settings.namespace + '/author/equal_contrib')
    article_doi = tag_value(settings.namespace + '/author/article_doi')
    surname = tag_value(settings.namespace + '/author/surname')
    given_names = tag_value(settings.namespace + '/author/given_names')
    department = tag_value(settings.namespace + '/author/department')
    institution = tag_value(settings.namespace + '/author/institution')
    city = tag_value(settings.namespace + '/author/city')
    country = tag_value(settings.namespace + '/author/country')
    corresponding = tag_value(settings.namespace + '/author/corresponding')
    position = tag_value(settings.namespace + '/author/position')
    notes_correspondence = tag_value(settings.namespace + '/author/notes_correspondence')
    notes_footnotes = tag_value(settings.namespace + '/author/notes_footnotes')
    notes_other = tag_value(settings.namespace + '/author/notes_other')
class fi_alm(Object):
    """fom mapping for one article-level-metrics (ALM) measurement."""
    about = readonly_tag_value(u'fluiddb/about')
    doi = tag_value(settings.namespace + '/alm/doi')
    alm_source = tag_value(settings.namespace + '/alm/alm_source')
    type = tag_value(settings.namespace + '/alm/type')
    value = tag_value(settings.namespace + '/alm/value')
    timespan = tag_value(settings.namespace + '/alm/timespan')
    # Measurement date, stored both as parts and as a timestamp
    date_date = tag_value(settings.namespace + '/alm/date_date')
    date_day = tag_value(settings.namespace + '/alm/date_day')
    date_month = tag_value(settings.namespace + '/alm/date_month')
    date_year = tag_value(settings.namespace + '/alm/date_year')
    date_timestamp = tag_value(settings.namespace + '/alm/date_timestamp')
    complete = tag_value(settings.namespace + '/alm/complete')
class fi_alm_source(Object):
    """fom mapping describing an ALM data source."""
    about = readonly_tag_value(u'fluiddb/about')
    source = tag_value(settings.namespace + '/alm_source/source')
    url = tag_value(settings.namespace + '/alm_source/url')
    name = tag_value(settings.namespace + '/alm_source/name')
    trusted = tag_value(settings.namespace + '/alm_source/trusted')
def bind_fluidinfo_session():
    """Log the module-level session in and bind it as the default instance.

    Credentials come from the settings module; after bind() the session is
    reachable through the Fluid class (used by values_get).
    """
    session.login(settings.username, settings.password)
    session.bind()
def get_uid_and_initial(key, value):
    """Convert one entry of a /values GET result into fom seed data.

    *key* is the Fluidinfo object id; *value* maps tag paths to
    ``{'value': ...}`` payloads.

    Used by the bulk loaders so many objects (e.g. all refs of an
    article) can be built from a single HTTP GET instead of one request
    per tag.

    Returns:
        about   -- the 'fluiddb/about' value if present, else None
        uid     -- the object id (always *key*)
        initial -- {tag_path: {"value": ...}} suitable for a fom Object

    Fixes over the original: removed the dead ``uid = None``/``about =
    None`` pre-assignments and the non-idiomatic parenthesized ``if``.
    """
    uid = key
    about = None
    initial = {}
    for tag_path, payload in value.items():
        if tag_path == 'fluiddb/about':
            about = payload['value']
        initial[tag_path] = {"value": payload['value']}
    return about, uid, initial
def get_uid_from_query(query, obj=None):
    """Resolve a Fluidinfo query to (about, uid, initial).

    Runs a /values GET for *query*.  When a fom Object *obj* is
    supplied, only the tags declared in its ``_path_map`` are requested;
    otherwise the '*' wildcard fetches every tag.

    Only a single matching object is handled at the moment: if several
    objects match, the last one iterated wins.

    Returns:
        about   -- the 'fluiddb/about' value (None when nothing matched)
        uid     -- the Fluidinfo object id (None when nothing matched)
        initial -- {tag_path: {"value": ...}} seed values for a fom Object

    Fixes over the original: ``obj != None`` -> ``is not None``,
    ``type(i) == dict`` -> ``isinstance``, and the manual key-append
    loop replaced by ``list()``.
    """
    about = None
    uid = None
    initial = {}
    # Constrain the tag list to the object's declared tags when possible.
    if obj is not None:
        tag_list = list(obj._path_map)
    else:
        tag_list = ["*"]
    objects = values_get(query, tag_list)
    if objects.content:
        # Parse with the json library to convert null values etc.
        payload = json.loads(objects.content)
        if isinstance(payload, dict):
            for key, value in payload["results"]['id'].items():
                (about, uid, initial) = get_uid_and_initial(key, value)
    return about, uid, initial
def values_get(query, tag_list):
    """Pass-through /values GET, so external libraries can query
    Fluidinfo through this module without importing fom directly.

    Lazily authenticates on first use.
    """
    # NOTE(review): 'bound' is assumed to be set on the Fluid class by
    # session.bind() inside bind_fluidinfo_session() — confirm against
    # the fom library's binding semantics.
    if not hasattr(Fluid, 'bound'):
        bind_fluidinfo_session()
    return Fluid.bound.values.get(query, tag_list)
def get_article_initial(doi_url, obj=None):
    """Bulk load initial values from an existing article object.

    Articles are keyed by their DOI URL as the about-tag.

    Returns (about, uid, initial) as produced by get_uid_from_query.

    Fix over the original: removed the dead ``uid``/``initial`` locals —
    get_uid_from_query builds and returns them itself.
    """
    query = 'fluiddb/about = "' + doi_url + '"'
    return get_uid_from_query(query, obj)
def get_ref_initial(article_doi, position, obj=None):
    """Bulk load initial values from an existing ref object.

    A ref is uniquely keyed by the article DOI and its position within
    that article; the about-tag is 'ref_<position>_<article_doi>'.

    Returns (about, uid, initial) as produced by get_uid_from_query.

    Fix over the original: removed the dead ``uid``/``initial`` locals.
    """
    query = 'fluiddb/about = "ref_' + str(position) + '_' + article_doi + '"'
    return get_uid_from_query(query, obj)
def get_component_initial(article_doi, doi, obj=None):
    """Bulk load initial values from an existing component object.

    Components are keyed by their own DOI as the about-tag.
    ``article_doi`` is currently unused but kept for signature
    compatibility with the other get_*_initial helpers and callers.

    Returns (about, uid, initial) as produced by get_uid_from_query.

    Fix over the original: removed the dead ``uid``/``initial`` locals.
    """
    query = 'fluiddb/about = "' + doi + '"'
    return get_uid_from_query(query, obj)
def get_author_initial(article_doi, position, obj=None):
    """Bulk load initial values from an existing author object.

    An author is uniquely keyed by the article DOI and their position in
    the author list; the about-tag is 'author_<position>_<article_doi>'.

    Returns (about, uid, initial) as produced by get_uid_from_query.

    Fix over the original: removed the dead ``uid``/``initial`` locals.
    """
    query = 'fluiddb/about = "author_' + str(position) + '_' + article_doi + '"'
    return get_uid_from_query(query, obj)
def main():
    """Placeholder entry point; ad-hoc testing moved to load_article.py."""
if __name__ == "__main__":
main() | {
"repo_name": "elifesciences/elife-api-prototype",
"path": "parseFI.py",
"copies": "1",
"size": "11309",
"license": "mit",
"hash": 4070911076872355300,
"line_mean": 44.0597609562,
"line_max": 121,
"alpha_frac": 0.7285347953,
"autogenerated": false,
"ratio": 3.2357653791130185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44643001744130184,
"avg_score": null,
"num_lines": null
} |
from fonction_py.tools import *
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn import svm
from sklearn import decomposition
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import uniform as sp_randint
from sklearn import datasets
from sklearn.linear_model import Ridge
import time
def faireTout():
    """Train one regressor per call-centre assignment and write the submission.

    For every (assignment name, estimator) pair from faireListModel(), the
    training data is featurised with preprocess(), the estimator fitted,
    and the predictions for the submission dates are clipped to
    [0, 1.05 * max(y)], rounded, and summed into the submission frame.

    Side effects: reads 'data/trainPure.csv' and 'data/submission.txt',
    writes 'reslist.csv' (kept for parity with the original script; the
    list is never populated) and 'vraipred.txt'.

    Returns the completed submission DataFrame.

    Fixes over the original: removed dead code (unused DataFrame 'c',
    a duplicate read of the submission file plus the unused 'fait'
    column, loop counter 'i', 'start_time', the stray
    'model = listmodel[25]', and a duplicate 'res = []'), and hoisted
    the repeated max(y) computation.
    """
    # Columns to read from the training set.
    fields = ['DATE', 'DAY_OFF', 'WEEK_END', 'DAY_WE_DS', 'ASS_ASSIGNMENT',
              'CSPL_RECEIVED_CALLS']
    listmodel = faireListModel()
    data = pd.read_csv("data/trainPure.csv", sep=";", usecols=fields)
    resultat = pd.read_csv("data/submission.txt", sep="\t")
    res = []
    for model in listmodel:
        print(model[0])  # assignment name
        x, y = preprocess(data.copy(), model[0])  # feature engineering
        model[1].fit(x, y)  # training
        (xTest, souvenir) = preprocessFINAL(x, model[0])
        pred = model[1].predict(xTest)
        # Clip predictions to a plausible range, then round to call counts.
        cap = max(y) * 1.05
        pred[pred > cap] = cap
        pred[pred < 0] = 0
        pred = np.round(pred)
        souvenir['prediction'] = pred
        # Accumulate this assignment's predictions into the submission.
        resultat = pd.merge(resultat, souvenir, how='left',
                            on=['DATE', 'ASS_ASSIGNMENT'])
        resultat = resultat.fillna(0)
        resultat['prediction'] = resultat['prediction_x'] + resultat['prediction_y']
        del resultat['prediction_x']
        del resultat['prediction_y']
    pd.DataFrame(res).to_csv("reslist.csv", sep=";", decimal=",")
    resultat.to_csv("vraipred.txt", sep="\t", index=False)
    return resultat
def faireListModel():
    """Return the per-assignment model list.

    One (ASS_ASSIGNMENT name, estimator) tuple per call-centre
    assignment.  The RandomForest hyper-parameters were presumably
    tuned by the grid/random searches imported above — confirm before
    changing them.
    """
    return [('CAT', linear_model.LinearRegression()),
        ('CMS', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=5,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=10, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Crises', linear_model.LinearRegression()),
        ('Domicile', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=90, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion - Accueil Telephonique', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=70, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion Assurances', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=20,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=20, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion Clients', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
            max_features=90, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=50, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion DZ', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=5,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion Relation Clienteles', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
            max_features=90, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=110, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Gestion Renault', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
            max_features=50, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Japon', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=10,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Manager', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=10,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Mécanicien', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Médical', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=30,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Nuit', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Prestataires', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('RENAULT', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=80,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('RTC', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Regulation Medicale', linear_model.LinearRegression()),
        ('SAP', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=20,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Services', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=30,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Tech. Axa', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=20,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Tech. Inter', RandomForestRegressor(bootstrap=False, criterion='mse', max_depth=30,
            max_features=30, max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=30, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
        ('Tech. Total', RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=70,
            max_features='auto', max_leaf_nodes=None, min_samples_leaf=1,
            min_samples_split=2, min_weight_fraction_leaf=0.0,
            n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
            verbose=0, warm_start=False)),
('Téléphonie',RandomForestRegressor(n_estimators=40, bootstrap=False, max_depth=1, max_features=12))] | {
"repo_name": "LaRiffle/axa_challenge",
"path": "fonction_py/train2.py",
"copies": "1",
"size": "9910",
"license": "mit",
"hash": -3229224324574464500,
"line_mean": 55.9310344828,
"line_max": 133,
"alpha_frac": 0.6582534074,
"autogenerated": false,
"ratio": 3.2443498198493286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4402603227249329,
"avg_score": null,
"num_lines": null
} |
from fonctions_basiques import *
from objets import ObjetPyturbo
class FrontiereChoro(ObjetPyturbo):
"""
classe qui permet l'analyse des fichiers chorochroniques
Les coefficients choro sont stockes en tant que self.coeffs_a et self.coeffs_b
shape = (nb_harmoniques, nb_var, dims_0, dims_1)
dans l'ordre i, j, k intuitif (et pas l'ordre k, j, i de stockage VTK)
Pour initialisation, donner :
- maillage
- liste_acces_fichiers,
- liste_numblocs
- liste_frontieres,
- liste_directions=[],
- dict_fenetres_blocs = {}
si une liste de dossiers est donnees, alors les fichiers choro indiques par liste_acces_fichiers
sont successivement lus dans tous les dossiers indiques
en retour, coeffs_a et coeffs_b sont des arrays dont la permiere dimensions est egale aux nombre
de dossiers indiques.
shape = (n_calculs, n_harmoniques, n_grandeurs, dimensions_frontieres)
"""
#_____________________________________________________________________________________
def __init__(self,
             maillage,
             liste_acces_fichiers,
             liste_numblocs,
             liste_frontieres,
             liste_directions=None,
             dict_fenetres_blocs=None,
             liste_dossiers=None,
             ):
    """Store the construction arguments and load the chorochronic data.

    All arguments are forwarded to the ObjetPyturbo base class as
    attributes; update() is then called to read the coefficient files.

    Fixes over the original: the mutable default arguments ([] and {})
    are replaced by None sentinels so instances never share state, and
    the dead local ``coeffs_a``/``coeffs_b`` assignments were removed
    (update() always sets self.coeffs_a / self.coeffs_b).
    """
    # Normalise the former mutable defaults before snapshotting locals().
    if liste_directions is None:
        liste_directions = []
    if dict_fenetres_blocs is None:
        dict_fenetres_blocs = {}
    # Initialise the parent class with every constructor argument.
    attributs = locals().copy()
    del attributs['self']
    ObjetPyturbo.__init__(self, **attributs)
    # Read the chorochronic files and fill in the Fourier coefficients.
    self.update()
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def update(self):
    """(Re)load the chorochronic Fourier coefficients from the files.

    Single-directory case (liste_dossiers is None): self.coeffs_a and
    self.coeffs_b get shape (n_harmonics, n_vars, dims_0, dims_1).
    Multi-directory case: the files are read in every listed directory
    and the per-directory results are stacked along a new leading axis
    (one slice per directory).

    Returns 0 on completion.
    """
    if self.liste_dossiers is None:
        coeffs_a, coeffs_b = self.__lire_fichiers_choro__(
            maillage = self.maillage,
            liste_acces_fichiers = self.liste_acces_fichiers,
            liste_numblocs = self.liste_numblocs,
            liste_frontieres = self.liste_frontieres,
            liste_directions = self.liste_directions,
            dict_fenetres_blocs = self.dict_fenetres_blocs
            )
        self.coeffs_a = coeffs_a
        self.coeffs_b = coeffs_b
    else:
        coeffs_a_tot = None
        coeffs_b_tot = None
        for doss in self.liste_dossiers:
            # Prefix every file path with the current directory.
            liste_acces_fichiers = [doss + "/" + acces_fichier for
                acces_fichier in self.liste_acces_fichiers]
            coeffs_a, coeffs_b = self.__lire_fichiers_choro__(
                maillage = self.maillage,
                liste_acces_fichiers = liste_acces_fichiers,
                liste_numblocs = self.liste_numblocs,
                liste_frontieres = self.liste_frontieres,
                liste_directions = self.liste_directions,
                dict_fenetres_blocs = self.dict_fenetres_blocs
                )
            if coeffs_a_tot is None:
                # First directory: start the stacked arrays.
                coeffs_a_tot = coeffs_a.reshape((1,) + coeffs_a.shape)
                coeffs_b_tot = coeffs_b.reshape((1,) + coeffs_b.shape)
            else:
                # Append this directory's coefficients along axis 0.
                coeffs_a_tot = numpy.concatenate(
                    (coeffs_a_tot,
                    coeffs_a.reshape((1,) + coeffs_a.shape)
                    ),
                    axis = 0)
                coeffs_b_tot = numpy.concatenate(
                    (coeffs_b_tot,
                    coeffs_b.reshape((1,) + coeffs_b.shape)
                    ),
                    axis = 0)
        self.coeffs_a = coeffs_a_tot
        self.coeffs_b = coeffs_b_tot
    return 0
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def __lire_fichiers_choro__(self,
                            maillage,
                            liste_acces_fichiers,
                            liste_numblocs,
                            liste_frontieres,
                            liste_directions=[],
                            dict_fenetres_blocs={}):
    """Read the binary v3d files produced by elsA chorochronic computations.

    - maillage: the mesh
    - liste_acces_fichiers: paths to the chorochronic files to read
    - liste_numblocs: blocks covered by the files, in the same order
    - liste_frontieres: which block boundaries are concerned; values may be
      "imin", "imax", "jmin", "jmax", "kmin", "kmax"
    - liste_directions: list of size N-1, where N is the size of the previous
      lists; it tells how to glue the block boundaries to rebuild the full
      boundary.  It is passed directly as the `axis` argument of numpy.append:
      direction 0 appends row-wise (a kind of boundary "i"), direction 1
      appends column-wise (a kind of boundary "j").
    - dict_fenetres_blocs: used when a boundary does not span a whole window
      (e.g. jmin).  Indices are elsA style, i.e. starting at 1; the window
      definition can be copied verbatim from the elsA setup file.

    Note: the two list/dict defaults are never mutated here, so the shared
    default objects are safe.

    Returns:
    - the matrix of the a_n at each boundary point, shape
      N x M x N_harmonics, where N and M are the boundary point counts in
      the two directions
    - the matrix of the b_n at each boundary point, same shape

    lire_v3d is called with its default parameters.
    """
    # Input validation.
    if len(liste_acces_fichiers) != len(liste_numblocs):
        raise IOError("liste_acces_fichiers et liste_numblocs doivent faire la meme longueur")
    if len(liste_acces_fichiers) != len(liste_frontieres):
        raise IOError("liste_acces_fichiers et liste_frontieres doivent faire la meme longueur")
    if len(liste_acces_fichiers) != (len(liste_directions) + 1) and liste_directions != []:
        raise IOError("len(liste_directions) doit etre egal a (liste_acces_fichiers) - 1")
    # Read the chorochronic files and put them in shape
    # (nb_variables, nk, nj, ni).
    coeffs_choro = []
    for k in range(len(liste_acces_fichiers)):
        fich = liste_acces_fichiers[k]
        # Case where one file spans several blocks (stage_choro).
        if isinstance(liste_numblocs[k], list):
            # Gather the dimensions of every spanned block.
            # NOTE(review): with a single spanned block, liste_dims stays a
            # plain list and .transpose() below would fail — presumably this
            # branch is only used with >= 2 blocks; confirm with callers.
            liste_dims = None
            for numbloc in liste_numblocs[k][:-1]:
                if numbloc not in dict_fenetres_blocs:
                    dims_bloc = list(maillage.GetBlock(numbloc).GetDimensions())
                else:
                    dims_fenetre = dict_fenetres_blocs[numbloc]
                    dims_bloc = [
                        dims_fenetre[1] - dims_fenetre[0] + 1,
                        dims_fenetre[3] - dims_fenetre[2] + 1,
                        dims_fenetre[5] - dims_fenetre[4] + 1
                        ]
                if liste_dims is None:
                    liste_dims = dims_bloc
                else:
                    liste_dims = numpy.c_[liste_dims, dims_bloc]
            liste_dims = liste_dims.transpose()
            # Sum the dimensions along the stacking axis.
            # Was `is 'i'` etc.: identity comparison of strings is
            # implementation-defined — use equality.
            stack_axis = liste_numblocs[k][-1][0]
            if stack_axis == 'i':
                dims = [numpy.sum(liste_dims[:, 0] - 1) + 1, liste_dims[0, 1], liste_dims[0, 2]]
            elif stack_axis == 'j':
                dims = [liste_dims[0, 0], numpy.sum(liste_dims[:, 1] - 1) + 1, liste_dims[0, 2]]
            elif stack_axis == 'k':
                dims = [liste_dims[0, 0], liste_dims[0, 1], numpy.sum(liste_dims[:, 2] - 1) + 1]
            else:
                # Previously `dims` was silently left unbound here, producing
                # a confusing NameError further down.
                raise IOError("direction {0} pas comprise".format(stack_axis))
        # Simple case: the file covers a single block.
        elif liste_numblocs[k] not in dict_fenetres_blocs:
            dims = maillage.GetBlock(liste_numblocs[k]).GetDimensions()
        else:
            dims_fenetre = dict_fenetres_blocs[liste_numblocs[k]]
            dims = [
                dims_fenetre[1] - dims_fenetre[0] + 1,
                dims_fenetre[3] - dims_fenetre[2] + 1,
                dims_fenetre[5] - dims_fenetre[4] + 1
                ]
        frontiere = liste_frontieres[k]
        # Read the V3D file.
        coeffs_choro.append(lire_v3d(acces_fichier = fich)['data'])
        # Reshape, knowing that the choro coefficients live at face centers.
        if frontiere[0] == "i":
            dims_frontiere = (dims[2] - 1, dims[1] - 1)
        elif frontiere[0] == "j":
            dims_frontiere = (dims[2] - 1, dims[0] - 1)
        elif frontiere[0] == "k":
            dims_frontiere = (dims[1] - 1, dims[0] - 1)
        else:
            raise IOError("frontiere {0} pas comprise".format(frontiere))
        for harmonique in coeffs_choro[-1].keys():
            coeffs_choro[-1][harmonique] = coeffs_choro[-1][harmonique].reshape(
                (7, ) + dims_frontiere).transpose(0, 2, 1)
    # Assemble the full choro coefficients to rebuild the whole boundary.
    coeffs_choro_global = coeffs_choro[0]
    for numbloc in range(1, len(coeffs_choro)):
        choro_bloc = coeffs_choro[numbloc]
        axe = liste_directions[numbloc - 1]
        for key in coeffs_choro_global.keys():
            coeffs_choro_global[key] = numpy.append(
                coeffs_choro_global[key],
                choro_bloc[key],
                axis = 1 + axe
                )
    # Sort the harmonic ranks found in the "aN" keys.
    rangs = []
    for k in coeffs_choro_global.keys():
        if k[0] == "a":
            rangs.append(int(k[1:]))
    rangs.sort()
    coeffs_a = numpy.array(
        [coeffs_choro_global["a{0}".format(rang)] for rang in rangs]
        )
    coeffs_b = numpy.array(
        [coeffs_choro_global["b{0}".format(rang)] for rang in rangs]
        )
    return coeffs_a, coeffs_b
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def get_amplitudes(self):
    """Return the amplitude sqrt(a_n**2 + b_n**2) of every harmonic,
    for every variable."""
    return numpy.sqrt(self.coeffs_a ** 2 + self.coeffs_b ** 2)
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def get_phases(self):
    """Return the phase arctan(b_n / a_n) of every harmonic,
    for every variable."""
    return numpy.arctan(self.coeffs_b / self.coeffs_a)
#_____________________________________________________________________________________
| {
"repo_name": "aurmarsan/pyturbo",
"path": "convergence.py",
"copies": "1",
"size": "12224",
"license": "mit",
"hash": 8191293781585130000,
"line_mean": 47.7011952191,
"line_max": 127,
"alpha_frac": 0.5022905759,
"autogenerated": false,
"ratio": 3.5721800116890705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45744705875890707,
"avg_score": null,
"num_lines": null
} |
from foneworx.utils import dict_to_xml, xml_to_dict, Dispatcher, tostring, api_response_to_dict, Element
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twisted.python import log
from twisted.internet.protocol import ClientCreator
from twisted.internet import reactor
from xml.etree.ElementTree import Element, tostring, fromstring
from datetime import datetime, timedelta
from foneworx.errors import ApiException
from foneworx.protocol import FoneworxProtocol
class Connection(object):
    """No-op stand-in for a connection to the Foneworx SMS XML API."""

    def send(self, dictionary):
        # Subclasses override this to actually transmit the request.
        pass

    def __getattr__(self, attname):
        """
        Turn any unknown attribute access into a remote API call: the
        attribute name becomes the 'api_action' and the keyword arguments
        become the payload handed to send().
        """
        def sms_api_wrapper(*args, **options):
            options['api_action'] = attname
            return self.send(options)
        return sms_api_wrapper
class TwistedConnection(Connection):
    """Connection that talks to the Foneworx XML API over Twisted TCP."""

    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port
        self.creator = ClientCreator(reactor, FoneworxProtocol)

    @inlineCallbacks
    def send(self, dictionary):
        """Serialize *dictionary* to XML, send it over a fresh TCP
        connection and return the parsed response dictionary.

        Raises ApiException when the API reply carries an 'error_type'.
        """
        request_xml = dict_to_xml(dictionary, root=Element("sms_api"))
        log.msg("Sending XML: %s" % tostring(request_xml))
        protocol = yield self.creator.connectTCP(self.hostname, self.port)
        raw_reply = yield protocol.send_xml(request_xml)
        reply = api_response_to_dict(raw_reply)
        log.msg("Received Dict: %s" % reply)
        if reply.get('error_type'):
            raise ApiException(reply['error_type'], raw_reply)
        log.msg('Returning: %s' % reply)
        returnValue(reply)
class Status(object):
    """Delivery status of an SMS. (Note: not currently used.)"""

    # Foneworx status codes mapped to their human-readable descriptions.
    values = {
        0: "To Be Sent",
        1: "Submitted To Network",
        2: "At Network",
        3: "Delivered",
        4: "Rejected",
        5: "Undelivered",
        6: "Expired",
        9: "Submit Failed",
        10: "Cancelled",
        11: "Scheduled",
        91: "Message Length is Invalid",
        911: "Desitnation Addr Is Invalid",
        988: "Throttling Error",
    }

    def __init__(self, status_id):
        self.status_id = status_id

    def __eq__(self, other):
        # Only equal to another Status carrying the same id.
        return isinstance(other, Status) and other.id == self.id

    @property
    def id(self):
        return self.status_id

    @property
    def text(self):
        return self.values.get(int(self.id), 'Unknown status')

    def __repr__(self):
        return "<Status id: %s, msg: %s>" % (self.id, self.text)
class Convertor(Dispatcher):
    """
    Convert a (key, value) pair into a richer Python object.  For now this
    mostly turns timestamp strings into datetime objects.
    """

    def do_datetime(self, string):
        return datetime.strptime(string, '%Y%m%d%H%M%S')

    # All timestamp fields share the same datetime parser.
    do_time_submitted = do_datetime
    do_time_processed = do_datetime
    do_timereceived = do_datetime

    def do_status_id(self, status_id):
        return Status(status_id)

    def convert(self, key, value):
        """
        Wrap Dispatcher.dispatch so that keys without a registered
        convertor fall through with their original value.
        """
        handler_name = "%s%s" % (self.prefix, key.lower())
        if hasattr(self, handler_name):
            return key, self.dispatch(key, value)
        return key, value
class Client(object):
    """High-level client for the Foneworx SMS XML API.

    Wraps a Connection, logs in lazily and caches the session id between
    calls (Foneworx session ids expire after 10 minutes of inactivity).
    All network-facing methods return twisted Deferreds (inlineCallbacks).
    """

    def __init__(self, username, password, connection=Connection()):
        # NOTE: the default Connection() is created once at definition time
        # and shared by every Client built without an explicit connection;
        # this is harmless only because the dummy Connection is stateless.
        self.username = username
        self.password = password
        self.connection = connection
        self._session_id = None

    def to_python_values(self, dictionary):
        """
        Convert a dictionary to more pythonic values
        """
        convertor = Convertor()
        return dict(convertor.convert(*kv) for kv in dictionary.items())

    def reset_session_id(self):
        # Forget the cached session id so the next call logs in again.
        self._session_id = None

    @inlineCallbacks
    def get_new_session_id(self):
        """Get a new session_id from the Foneworx API"""
        session_id = yield self.login()
        returnValue(session_id)

    @inlineCallbacks
    def get_session_id(self):
        """
        Session ids time out after 10 minutes of inactivity. Stored locally.
        """
        if not self._session_id:
            self._session_id = yield self.get_new_session_id()
        returnValue(self._session_id)

    @inlineCallbacks
    def login(self):
        """
        To log into an account, and get a session var allocated to your login.
        """
        response = yield self.connection.login(api_username=self.username,
                                               api_password=self.password)
        returnValue(response.get('session_id'))

    @inlineCallbacks
    def logout(self):
        """
        This Function is used to release the sessionid
        """
        session_id = yield self.get_session_id()
        response = yield self.connection.logout(api_sessionid=session_id)
        self.reset_session_id()
        returnValue(response.get('status'))

    @inlineCallbacks
    def new_messages(self, since=None):
        """
        Get New Messages for a user

        Arguments:
        since -- if since is empty the system will only return new messages
                 since the last time of this call for this user. if since
                 (datetime object) is filled in, it will return all message
                 since that time
        """
        action_content = {}
        if since:
            action_content.update({
                "smstime": since.strftime("%Y%m%d%H%M%S")
            })
        session_id = yield self.get_session_id()
        try:
            response = yield self.connection.newmessages(
                api_sessionid=session_id,
                action_content=action_content
            )
            returnValue([self.to_python_values(sms) for sms in response.get('sms')])
        # Was `except ApiException, e:` — that syntax was removed in
        # Python 3; `as` works on Python 2.6+ and 3.x alike.
        except ApiException as e:
            # this API is insane, why not an empty SMS element?
            if e.args[0] == 'No New Messages':
                returnValue([])
            else:
                raise

    @inlineCallbacks
    def delete_message(self, sms_id):
        """
        Delete New Messages for a user

        Arguments:
        sms_id -- the id of the sms to be deleted
        """
        session_id = yield self.get_session_id()
        response = yield self.connection.deletenewmessages(
            api_sessionid=session_id,
            action_content={
                'sms_id': sms_id
            }
        )
        returnValue(response.get('change'))

    @inlineCallbacks
    def send_messages(self, messages):
        """
        Send Sms Messages

        Arguments:
        messages -- A list of messages to be sent. Each message is a dictionary.
        The dictionary's key values match the XML element names of the Foneworx
        XML API:

            -- Manditory
            <msisdn> - number(s) to send the message to, delimited by ~ (tilde)
            <message> - message to be sent
            -- Allowed Characters: See General Notes
            -- Optional
            <rule> - which rule to link the message to
            <send_at> - when to send the sms (yyyy-mm-dd HH:MM:SS)
            -- Optional - Please do not specify these, unless you have been given the correct values by foneworx
            <source_addr> - the number the message is sent from (only works if you also specify <sentby>)
            <sentby> - the bind/account to use to send the message
            <smstype> - 0 for normal text sms, 64 for encoded sms, and then message has to contain the hex string
        """
        session_id = yield self.get_session_id()
        response = yield self.connection.sendmessages(
            api_sessionid=session_id,
            action_content={
                "sms": messages
            }
        )
        returnValue(response.get('sms'))

    @inlineCallbacks
    def sent_messages(self, since=None, give_detail=False):
        """
        Get Status Updates For Sent Messages

        Keyword arguments:
        since -- if since is empty the system will only return new messages
                 since the last time of this call for this user. if since
                 (datetime object) is filled in, it will return all
                 message since that time
        give_detail -- if you want the message and the destination numbers
                       returned for each sms, boolean True / False
        """
        options = {
            'give_detail': '1' if give_detail else '0'
        }
        if since:
            options.update({
                'smstime': since.strftime("%Y%m%d%H%M%S")
            })
        session_id = yield self.get_session_id()
        try:
            response = yield self.connection.sentmessages(
                api_sessionid=session_id,
                action_content=options
            )
            returnValue([self.to_python_values(sms) for sms in response.get('sms')])
        # Was `except ApiException, e:` — removed in Python 3.
        except ApiException as e:
            if e.args[0] == 'No Updates':
                returnValue([])
            else:
                raise

    @inlineCallbacks
    def delete_sent_message(self, sms_id):
        """
        Delete a Sent Message

        Arguments:
        sms_id -- the id of the sms
        """
        session_id = yield self.get_session_id()
        response = yield self.connection.deletesentmessages(
            api_sessionid=session_id,
            action_content={
                'sms_id': sms_id
            }
        )
        returnValue(response.get('change'))
| {
"repo_name": "smn/python-foneworx",
"path": "foneworx/client.py",
"copies": "1",
"size": "10063",
"license": "bsd-3-clause",
"hash": 1364644427735807200,
"line_mean": 31.5663430421,
"line_max": 113,
"alpha_frac": 0.5657358641,
"autogenerated": false,
"ratio": 4.363833477883781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5429569341983781,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check, condition
from fontbakery.checkrunner import FAIL, PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('is_cff', 'is_cff2'))
]
class CFFAnalysis:
    """Per-glyph findings produced by analyzing a font's charstrings."""

    def __init__(self):
        # Glyph-name lists: deprecated 'dotsection' use, endchar-as-seac use,
        # call depth over the limit, and RecursionError during decompile.
        (self.glyphs_dotsection,
         self.glyphs_endchar_seac,
         self.glyphs_exceed_max,
         self.glyphs_recursion_errors) = [], [], [], []
def _get_subr_bias(count):
if count < 1240:
bias = 107
elif count < 33900:
bias = 1131
else:
bias = 32768
return bias
def _traverse_subr_call_tree(info, program, depth):
global_subrs = info['global_subrs']
subrs = info['subrs']
gsubr_bias = info['gsubr_bias']
subr_bias = info['subr_bias']
if depth > info['max_depth']:
info['max_depth'] = depth
# once we exceed the max depth we can stop going deeper
if depth > 10:
return
if len(program) >=5 and program[-1] == 'endchar' and all([isinstance(a, int) for a in program[-5:-1]]):
info['saw_endchar_seac'] = True
if 'ignore' in program: # decompiler expresses 'dotsection' as 'ignore'
info['saw_dotsection'] = True
while program:
x = program.pop()
if x == 'callgsubr':
y = int(program.pop()) + gsubr_bias
sub_program = global_subrs[y].program.copy()
_traverse_subr_call_tree(info, sub_program, depth + 1)
elif x == 'callsubr':
y = int(program.pop()) + subr_bias
sub_program = subrs[y].program.copy()
_traverse_subr_call_tree(info, sub_program, depth + 1)
def _analyze_cff(analysis, top_dict, private_dict, fd_index=0):
    """Decompile every charstring selected by *fd_index* and record the
    findings (max depth, seac, dotsection, recursion errors) on *analysis*."""
    char_strings = top_dict.CharStrings
    global_subrs = top_dict.GlobalSubrs
    gsubr_bias = _get_subr_bias(len(global_subrs))
    if private_dict is not None and hasattr(private_dict, 'Subrs'):
        local_subrs = private_dict.Subrs
        local_bias = _get_subr_bias(len(local_subrs))
    else:
        local_subrs = None
        local_bias = None
    for glyph_name in char_strings.keys():
        charstring, selector = char_strings.getItemAndSelector(glyph_name)
        # Skip glyphs that belong to a different font dict.
        if selector is not None and selector != fd_index:
            continue
        try:
            charstring.decompile()
        except RecursionError:
            analysis.glyphs_recursion_errors.append(glyph_name)
            continue
        info = {
            'subrs': local_subrs,
            'global_subrs': global_subrs,
            'gsubr_bias': gsubr_bias,
            'subr_bias': local_bias,
            'max_depth': 0,
        }
        _traverse_subr_call_tree(info, charstring.program.copy(), 0)
        if info['max_depth'] > 10:
            analysis.glyphs_exceed_max.append(glyph_name)
        if info.get('saw_endchar_seac'):
            analysis.glyphs_endchar_seac.append(glyph_name)
        if info.get('saw_dotsection'):
            analysis.glyphs_dotsection.append(glyph_name)
@condition
def cff_analysis(ttFont):
    """Run the charstring analysis over the font's CFF or CFF2 table.

    Returns a CFFAnalysis holding the per-glyph findings; its lists stay
    empty when the font has neither a 'CFF ' nor a 'CFF2' table.
    """
    analysis = CFFAnalysis()
    if 'CFF ' in ttFont:
        cff = ttFont['CFF '].cff
        for top_dict in cff.topDictIndex:
            if hasattr(top_dict, 'FDArray'):
                # Fonts with an FDArray: analyze once per font dict,
                # each with its own (optional) Private dict.
                for fd_index, font_dict in enumerate(top_dict.FDArray):
                    if hasattr(font_dict, 'Private'):
                        private_dict = font_dict.Private
                    else:
                        private_dict = None
                    _analyze_cff(analysis, top_dict, private_dict, fd_index)
            else:
                # No FDArray: a single (optional) Private dict on the top dict.
                if hasattr(top_dict, 'Private'):
                    private_dict = top_dict.Private
                else:
                    private_dict = None
                _analyze_cff(analysis, top_dict, private_dict)
    elif 'CFF2' in ttFont:
        # CFF2 branch assumes an FDArray is always present on the top dict.
        cff = ttFont['CFF2'].cff
        for top_dict in cff.topDictIndex:
            for fd_index, font_dict in enumerate(top_dict.FDArray):
                if hasattr(font_dict, 'Private'):
                    private_dict = font_dict.Private
                else:
                    private_dict = None
                _analyze_cff(analysis, top_dict, private_dict, fd_index)
    return analysis
@check(
    id = 'com.adobe.fonts/check/cff_call_depth',
    conditions = ['ttFont', 'is_cff', 'cff_analysis'],
    rationale = """
    Per "The Type 2 Charstring Format, Technical Note #5177", the "Subr nesting, stack limit" is 10.
    """
)
def com_adobe_fonts_check_cff_call_depth(cff_analysis):
    """Is the CFF subr/gsubr call depth > 10?"""
    # Yield one FAIL per offending glyph; PASS only when nothing failed.
    problem_found = False
    for gn in cff_analysis.glyphs_exceed_max:
        problem_found = True
        yield FAIL, \
            Message('max-depth',
                    f'Subroutine call depth exceeded maximum of 10 for glyph "{gn}".')
    for gn in cff_analysis.glyphs_recursion_errors:
        problem_found = True
        yield FAIL, \
            Message('recursion-error',
                    f'Recursion error while decompiling glyph "{gn}".')
    if not problem_found:
        yield PASS, 'Maximum call depth not exceeded.'
@check(
    id = 'com.adobe.fonts/check/cff2_call_depth',
    conditions = ['ttFont', 'is_cff2', 'cff_analysis'],
    rationale = """
    Per "The CFF2 CharString Format", the "Subr nesting, stack limit" is 10.
    """
)
def com_adobe_fonts_check_cff2_call_depth(cff_analysis):
    """Is the CFF2 subr/gsubr call depth > 10?"""
    # Collect all findings first, then emit them; PASS when none.
    findings = [('max-depth',
                 'Subroutine call depth exceeded maximum of 10 for glyph "{}".',
                 gn)
                for gn in cff_analysis.glyphs_exceed_max]
    findings += [('recursion-error',
                  'Recursion error while decompiling glyph "{}".',
                  gn)
                 for gn in cff_analysis.glyphs_recursion_errors]
    for keyword, template, gn in findings:
        yield FAIL, Message(keyword, template.format(gn))
    if not findings:
        yield PASS, 'Maximum call depth not exceeded.'
@check(
    id = 'com.adobe.fonts/check/cff_deprecated_operators',
    conditions = ['ttFont', 'is_cff', 'cff_analysis'],
    rationale = """
    The 'dotsection' operator and the use of 'endchar' to build accented
    characters from the Adobe Standard Encoding Character Set ("seac") are
    deprecated in CFF. Adobe recommends repairing any fonts that use these,
    especially endchar-as-seac, because a rendering issue was discovered in
    Microsoft Word with a font that makes use of this operation. The check
    treats that useage as a FAIL. There are no known ill effects of using
    dotsection, so that check is a WARN.
    """
)
def com_adobe_fonts_check_cff_deprecated_operators(cff_analysis):
    """Does the font use deprecated CFF operators or operations?"""
    # dotsection is merely deprecated (WARN); endchar-as-seac breaks
    # rendering in some applications (FAIL).
    clean = not (cff_analysis.glyphs_dotsection
                 or cff_analysis.glyphs_endchar_seac)
    for gn in cff_analysis.glyphs_dotsection:
        yield WARN, \
            Message('deprecated-operator-dotsection',
                    f'Glyph "{gn}" uses deprecated "dotsection" operator.')
    for gn in cff_analysis.glyphs_endchar_seac:
        yield FAIL, \
            Message('deprecated-operation-endchar-seac',
                    f'Glyph "{gn}" has deprecated use of "endchar" operator to build accented characters (seac).')
    if clean:
        yield PASS, 'No deprecated CFF operators used.'
| {
"repo_name": "moyogo/fontbakery",
"path": "Lib/fontbakery/profiles/cff.py",
"copies": "2",
"size": "7932",
"license": "apache-2.0",
"hash": -854951948691031300,
"line_mean": 35.0545454545,
"line_max": 120,
"alpha_frac": 0.5949319213,
"autogenerated": false,
"ratio": 3.672222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0041918089828246385,
"num_lines": 220
} |
from fontbakery.callable import check, condition
from fontbakery.checkrunner import PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@condition
def has_kerning_info(ttFont):
    """A font has kerning info if it has a GPOS table containing at least one
    Pair Adjustment lookup (either directly or through an extension
    subtable)."""
    if "GPOS" not in ttFont:
        return False
    if not ttFont["GPOS"].table.LookupList:
        return False
    for lookup in ttFont["GPOS"].table.LookupList.Lookup:
        if lookup.LookupType == 2:  # type 2 = Pair Adjustment
            return True
        elif lookup.LookupType == 9:  # type 9 = Extension subtable
            for ext in lookup.SubTable:
                if ext.ExtensionLookupType == 2:  # type 2 = Pair Adjustment
                    return True
    # Previously the function fell through and implicitly returned None;
    # fixed docstring typo ("eigther") and made the negative result an
    # explicit, consistent boolean.
    return False
@check(
    id = 'com.google.fonts/check/gpos_kerning_info'
)
def com_google_fonts_check_gpos_kerning_info(ttFont):
    """Does GPOS table have kerning information?"""
    if has_kerning_info(ttFont):
        yield PASS, "GPOS table has got kerning information."
    else:
        yield WARN, \
            Message("lacks-kern-info",
                    "GPOS table lacks kerning information.")
| {
"repo_name": "graphicore/fontbakery",
"path": "Lib/fontbakery/profiles/gpos.py",
"copies": "1",
"size": "1388",
"license": "apache-2.0",
"hash": -8512645826340312000,
"line_mean": 34.5897435897,
"line_max": 89,
"alpha_frac": 0.6678674352,
"autogenerated": false,
"ratio": 3.8662952646239552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5034162699823955,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check, condition
from fontbakery.status import FAIL, PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('is_cff', 'is_cff2'))
]
class CFFAnalysis:
    """Collects the outcome of walking every charstring of a font."""

    def __init__(self):
        # One list of glyph names per finding category.
        self.glyphs_dotsection = []        # use deprecated 'dotsection'
        self.glyphs_endchar_seac = []      # use 'endchar' in its seac form
        self.glyphs_exceed_max = []        # call depth beyond the limit of 10
        self.glyphs_recursion_errors = []  # RecursionError while decompiling
def _get_subr_bias(count):
if count < 1240:
bias = 107
elif count < 33900:
bias = 1131
else:
bias = 32768
return bias
def _traverse_subr_call_tree(info, program, depth):
global_subrs = info['global_subrs']
subrs = info['subrs']
gsubr_bias = info['gsubr_bias']
subr_bias = info['subr_bias']
if depth > info['max_depth']:
info['max_depth'] = depth
# once we exceed the max depth we can stop going deeper
if depth > 10:
return
if len(program) >=5 and program[-1] == 'endchar' and all([isinstance(a, int) for a in program[-5:-1]]):
info['saw_endchar_seac'] = True
if 'ignore' in program: # decompiler expresses 'dotsection' as 'ignore'
info['saw_dotsection'] = True
while program:
x = program.pop()
if x == 'callgsubr':
y = int(program.pop()) + gsubr_bias
sub_program = global_subrs[y].program.copy()
_traverse_subr_call_tree(info, sub_program, depth + 1)
elif x == 'callsubr':
y = int(program.pop()) + subr_bias
sub_program = subrs[y].program.copy()
_traverse_subr_call_tree(info, sub_program, depth + 1)
def _analyze_cff(analysis, top_dict, private_dict, fd_index=0):
    """Record charstring findings for the glyphs belonging to *fd_index*."""
    char_strings = top_dict.CharStrings
    global_subrs = top_dict.GlobalSubrs
    gsubr_bias = _get_subr_bias(len(global_subrs))
    has_local_subrs = (private_dict is not None
                       and hasattr(private_dict, 'Subrs'))
    subrs = private_dict.Subrs if has_local_subrs else None
    subr_bias = _get_subr_bias(len(subrs)) if has_local_subrs else None
    for glyph_name in char_strings.keys():
        t2_char_string, fd_select_index = char_strings.getItemAndSelector(
            glyph_name)
        # Skip glyphs mapped to a different font dict.
        if fd_select_index is not None and fd_select_index != fd_index:
            continue
        try:
            t2_char_string.decompile()
        except RecursionError:
            analysis.glyphs_recursion_errors.append(glyph_name)
            continue
        info = {
            'subrs': subrs,
            'global_subrs': global_subrs,
            'gsubr_bias': gsubr_bias,
            'subr_bias': subr_bias,
            'max_depth': 0,
        }
        _traverse_subr_call_tree(info, t2_char_string.program.copy(), 0)
        if info['max_depth'] > 10:
            analysis.glyphs_exceed_max.append(glyph_name)
        if info.get('saw_endchar_seac'):
            analysis.glyphs_endchar_seac.append(glyph_name)
        if info.get('saw_dotsection'):
            analysis.glyphs_dotsection.append(glyph_name)
@condition
def cff_analysis(ttFont):
    """Run the charstring analysis over the font's CFF or CFF2 table.

    Returns a CFFAnalysis holding the per-glyph findings; its lists stay
    empty when the font has neither a 'CFF ' nor a 'CFF2' table.
    """
    analysis = CFFAnalysis()
    if 'CFF ' in ttFont:
        cff = ttFont['CFF '].cff
        for top_dict in cff.topDictIndex:
            if hasattr(top_dict, 'FDArray'):
                # Fonts with an FDArray: analyze once per font dict,
                # each with its own (optional) Private dict.
                for fd_index, font_dict in enumerate(top_dict.FDArray):
                    if hasattr(font_dict, 'Private'):
                        private_dict = font_dict.Private
                    else:
                        private_dict = None
                    _analyze_cff(analysis, top_dict, private_dict, fd_index)
            else:
                # No FDArray: a single (optional) Private dict on the top dict.
                if hasattr(top_dict, 'Private'):
                    private_dict = top_dict.Private
                else:
                    private_dict = None
                _analyze_cff(analysis, top_dict, private_dict)
    elif 'CFF2' in ttFont:
        # CFF2 branch assumes an FDArray is always present on the top dict.
        cff = ttFont['CFF2'].cff
        for top_dict in cff.topDictIndex:
            for fd_index, font_dict in enumerate(top_dict.FDArray):
                if hasattr(font_dict, 'Private'):
                    private_dict = font_dict.Private
                else:
                    private_dict = None
                _analyze_cff(analysis, top_dict, private_dict, fd_index)
    return analysis
@check(
    id = 'com.adobe.fonts/check/cff_call_depth',
    conditions = ['ttFont',
                  'is_cff',
                  'cff_analysis'],
    rationale = """
    Per "The Type 2 Charstring Format, Technical Note #5177", the "Subr nesting, stack limit" is 10.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/pull/2425'
    }
)
def com_adobe_fonts_check_cff_call_depth(cff_analysis):
    """Is the CFF subr/gsubr call depth > 10?"""
    # Collect all findings first, then emit them; PASS when none.
    findings = [('max-depth',
                 'Subroutine call depth exceeded maximum of 10 for glyph "{}".',
                 gn)
                for gn in cff_analysis.glyphs_exceed_max]
    findings += [('recursion-error',
                  'Recursion error while decompiling glyph "{}".',
                  gn)
                 for gn in cff_analysis.glyphs_recursion_errors]
    for keyword, template, gn in findings:
        yield FAIL, Message(keyword, template.format(gn))
    if not findings:
        yield PASS, 'Maximum call depth not exceeded.'
@check(
    id = 'com.adobe.fonts/check/cff2_call_depth',
    conditions = ['ttFont', 'is_cff2', 'cff_analysis'],
    rationale = """
    Per "The CFF2 CharString Format", the "Subr nesting, stack limit" is 10.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/pull/2425'
    }
)
def com_adobe_fonts_check_cff2_call_depth(cff_analysis):
    """Is the CFF2 subr/gsubr call depth > 10?"""
    # Yield one FAIL per offending glyph; PASS only when nothing failed.
    problem_found = False
    for gn in cff_analysis.glyphs_exceed_max:
        problem_found = True
        yield FAIL, \
            Message('max-depth',
                    f'Subroutine call depth exceeded maximum of 10 for glyph "{gn}".')
    for gn in cff_analysis.glyphs_recursion_errors:
        problem_found = True
        yield FAIL, \
            Message('recursion-error',
                    f'Recursion error while decompiling glyph "{gn}".')
    if not problem_found:
        yield PASS, 'Maximum call depth not exceeded.'
@check(
    id = 'com.adobe.fonts/check/cff_deprecated_operators',
    conditions = ['ttFont',
                  'is_cff',
                  'cff_analysis'],
    rationale = """
    The 'dotsection' operator and the use of 'endchar' to build accented characters from the Adobe Standard Encoding Character Set ("seac") are deprecated in CFF. Adobe recommends repairing any fonts that use these, especially endchar-as-seac, because a rendering issue was discovered in Microsoft Word with a font that makes use of this operation. The check treats that useage as a FAIL. There are no known ill effects of using dotsection, so that check is a WARN.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/pull/3033'
    }
)
def com_adobe_fonts_check_cff_deprecated_operators(cff_analysis):
    """Does the font use deprecated CFF operators or operations?"""
    # dotsection is merely deprecated (WARN); endchar-as-seac breaks
    # rendering in some applications (FAIL).
    clean = not (cff_analysis.glyphs_dotsection
                 or cff_analysis.glyphs_endchar_seac)
    for gn in cff_analysis.glyphs_dotsection:
        yield WARN, \
            Message('deprecated-operator-dotsection',
                    f'Glyph "{gn}" uses deprecated "dotsection" operator.')
    for gn in cff_analysis.glyphs_endchar_seac:
        yield FAIL, \
            Message('deprecated-operation-endchar-seac',
                    f'Glyph "{gn}" has deprecated use of "endchar" operator to build accented characters (seac).')
    if clean:
        yield PASS, 'No deprecated CFF operators used.'
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/cff.py",
"copies": "1",
"size": "8257",
"license": "apache-2.0",
"hash": 526201142057042800,
"line_mean": 35.3744493392,
"line_max": 469,
"alpha_frac": 0.5934358726,
"autogenerated": false,
"ratio": 3.6697777777777776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4763213650377777,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check, condition
from fontbakery.status import PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@condition
def has_kerning_info(ttFont):
    """A font has kerning info if it has a GPOS table containing at least one
    Pair Adjustment lookup (either directly or through an extension
    subtable)."""
    if "GPOS" not in ttFont:
        return False
    if not ttFont["GPOS"].table.LookupList:
        return False
    for lookup in ttFont["GPOS"].table.LookupList.Lookup:
        if lookup.LookupType == 2:  # type 2 = Pair Adjustment
            return True
        elif lookup.LookupType == 9:  # type 9 = Extension subtable
            for ext in lookup.SubTable:
                if ext.ExtensionLookupType == 2:  # type 2 = Pair Adjustment
                    return True
    # Previously the function fell through and implicitly returned None;
    # make the negative result an explicit, consistent boolean.
    return False
@check(
    id = 'com.google.fonts/check/gpos_kerning_info'
)
def com_google_fonts_check_gpos_kerning_info(ttFont):
    """Does GPOS table have kerning information?
    This check skips monospaced fonts as defined by post.isFixedPitch value
    """
    # post.isFixedPitch == 0 means variable pitch; only those fonts
    # are expected to carry kerning data.
    is_variable_pitch = ttFont["post"].isFixedPitch == 0
    if is_variable_pitch and not has_kerning_info(ttFont):
        yield WARN, \
            Message("lacks-kern-info",
                    "GPOS table lacks kerning information.")
    else:
        yield PASS, "GPOS table check for kerning information passed."
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/gpos.py",
"copies": "1",
"size": "1509",
"license": "apache-2.0",
"hash": 8016954942950544000,
"line_mean": 35.8048780488,
"line_max": 89,
"alpha_frac": 0.6732935719,
"autogenerated": false,
"ratio": 3.8692307692307693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504252434113077,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import ERROR, FAIL, PASS, WARN, INFO
from fontbakery.message import Message
from fontbakery.constants import (NameID,
PlatformID,
WindowsEncodingID,
WindowsLanguageID)
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('glyph_metrics_stats', 'is_ttf', 'is_cff'))
]
@check(
    id = 'com.adobe.fonts/check/name/empty_records',
    rationale = """
    Check the name table for empty records, as this can cause problems in Adobe apps.
    """
)
def com_adobe_fonts_check_name_empty_records(ttFont):
    """Check name table for empty records."""
    any_empty = False
    for record in ttFont['name'].names:
        # A record consisting solely of whitespace counts as empty.
        if not record.toUnicode().strip():
            any_empty = True
            name_key = (record.platformID,
                        record.platEncID,
                        record.langID,
                        record.nameID)
            yield FAIL,\
                  Message("empty-record",
                          f'"name" table record with key={name_key} is'
                          f' empty and should be removed.')
    if not any_empty:
        yield PASS, ("No empty name table records found.")
@check(
    id = 'com.google.fonts/check/name/no_copyright_on_description'
)
def com_google_fonts_check_name_no_copyright_on_description(ttFont):
    """Description strings in the name table must not contain copyright info.

    Yields FAIL if any NameID.DESCRIPTION record contains the substring
    'opyright' (matches both 'Copyright' and 'copyright'); PASS otherwise.
    """
    failed = False
    for name in ttFont['name'].names:
        # Improvement: test the nameID first so only DESCRIPTION records
        # are decoded and searched (the original decoded every record).
        if name.nameID == NameID.DESCRIPTION \
           and 'opyright' in name.string.decode(name.getEncoding()):
            failed = True
            break  # a single offending record is enough to fail
    if failed:
        yield FAIL,\
              Message("copyright-on-description",
                      f"Some namerecords with"
                      f" ID={NameID.DESCRIPTION} (NameID.DESCRIPTION)"
                      f" containing copyright info should be removed"
                      f" (perhaps these were added by a longstanding"
                      f" FontLab Studio 5.x bug that copied"
                      f" copyright notices to them.)")
    else:
        yield PASS, ("Description strings in the name table"
                     " do not contain any copyright string.")
@check(
    id = 'com.google.fonts/check/monospace',
    conditions = ['glyph_metrics_stats',
                  'is_ttf'],
    rationale = """
    There are various metadata in the OpenType spec to specify if a font is monospaced or not. If the font is not truly monospaced, then no monospaced metadata should be set (as sometimes they mistakenly are...)
    Requirements for monospace fonts:
    * post.isFixedPitch - "Set to 0 if the font is proportionally spaced, non-zero if the font is not proportionally spaced (monospaced)"
    www.microsoft.com/typography/otspec/post.htm
    * hhea.advanceWidthMax must be correct, meaning no glyph's width value is greater.
    www.microsoft.com/typography/otspec/hhea.htm
    * OS/2.panose.bProportion must be set to 9 (monospace). Spec says: "The PANOSE definition contains ten digits each of which currently describes up to sixteen variations. Windows uses bFamilyType, bSerifStyle and bProportion in the font mapper to determine family type. It also uses bProportion to determine if the font is monospaced."
    www.microsoft.com/typography/otspec/os2.htm#pan
    monotypecom-test.monotype.de/services/pan2
    * OS/2.xAvgCharWidth must be set accurately.
    "OS/2.xAvgCharWidth is used when rendering monospaced fonts, at least by Windows GDI"
    http://typedrawers.com/discussion/comment/15397/#Comment_15397
    Also we should report an error for glyphs not of average width.
    Please also note:
    Thomas Phinney told us that a few years ago (as of December 2019), if you gave a font a monospace flag in Panose, Microsoft Word would ignore the actual advance widths and treat it as monospaced. Source: https://typedrawers.com/discussion/comment/45140/#Comment_45140
    """
)
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats):
    """Checking correctness of monospaced metadata.

    Validates that post.isFixedPitch, OS/2.panose.bProportion and
    hhea.advanceWidthMax agree with the observed glyph widths: if the
    font looks monospaced, the monospace metadata must be set; if it
    does not, the monospace metadata must be unset.
    """
    from fontbakery.constants import (IsFixedWidth,
                                      PANOSE_Proportion)
    failed = False
    # Note: These values are read from the dict here only to
    # reduce the max line length in the check implementation below:
    seems_monospaced = glyph_metrics_stats["seems_monospaced"]
    most_common_width = glyph_metrics_stats["most_common_width"]
    width_max = glyph_metrics_stats['width_max']
    # Check for missing tables before indexing them
    missing_tables = False
    required = ["glyf", "hhea", "hmtx", "OS/2", "post"]
    for key in required:
        if key not in ttFont:
            missing_tables = True
            yield FAIL,\
                  Message(f'lacks-{key}',
                          f"Font file lacks a '{key}' table.")
    if missing_tables:
        return
    # hhea.advanceWidthMax must equal the widest glyph advance,
    # regardless of whether the font is monospaced.
    if ttFont['hhea'].advanceWidthMax != width_max:
        failed = True
        yield FAIL,\
              Message("bad-advanceWidthMax",
                      f"Value of hhea.advanceWidthMax"
                      f" should be set to {width_max}"
                      f" but got {ttFont['hhea'].advanceWidthMax} instead.")
    if seems_monospaced:
        if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED:
            failed = True
            yield FAIL,\
                  Message("mono-bad-post-isFixedPitch",
                          f"On monospaced fonts, the value of post.isFixedPitch"
                          f" must be set to a non-zero value"
                          f" (meaning 'fixed width monospaced'),"
                          f" but got {ttFont['post'].isFixedPitch} instead.")
        if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED:
            failed = True
            yield FAIL,\
                  Message("mono-bad-panose-proportion",
                          f"On monospaced fonts, the value of"
                          f" OS/2.panose.bProportion"
                          f" must be set to {PANOSE_Proportion.MONOSPACED}"
                          f" (proportion: monospaced),"
                          f" but got {ttFont['OS/2'].panose.bProportion} instead.")
        num_glyphs = len(ttFont['glyf'].glyphs)
        # Glyphs whose advance differs from the dominant width.
        # Structural glyphs and zero-width glyphs (marks etc.) are exempt.
        unusually_spaced_glyphs = [
            g for g in ttFont['glyf'].glyphs
            if g not in ['.notdef', '.null', 'NULL'] and
            ttFont['hmtx'].metrics[g][0] != 0 and
            ttFont['hmtx'].metrics[g][0] != most_common_width
        ]
        outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs
        if outliers_ratio > 0:
            failed = True
            yield WARN,\
                  Message("mono-outliers",
                          f"Font is monospaced"
                          f" but {len(unusually_spaced_glyphs)} glyphs"
                          f" ({100.0 * outliers_ratio}%)"
                          f" have a different width."
                          f" You should check the widths of:"
                          f" {unusually_spaced_glyphs}")
        if not failed:
            yield PASS,\
                  Message("mono-good",
                          "Font is monospaced and all related metadata look good.")
    else:
        # it is a non-monospaced font, so lets make sure
        # that all monospace-related metadata is properly unset.
        if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED:
            failed = True
            yield FAIL,\
                  Message("bad-post-isFixedPitch",
                          f"On non-monospaced fonts,"
                          f" the post.isFixedPitch value must be set to"
                          f" {IsFixedWidth.NOT_MONOSPACED} (not monospaced),"
                          f" but got {ttFont['post'].isFixedPitch} instead.")
        if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED:
            failed = True
            yield FAIL,\
                  Message("bad-panose-proportion",
                          "On non-monospaced fonts,"
                          " the OS/2.panose.bProportion value can be set to"
                          " any value except 9 (proportion: monospaced)"
                          " which is the bad value we got in this font.")
        if not failed:
            yield PASS,\
                  Message("good",
                          "Font is not monospaced and"
                          " all related metadata look good.")
@check(
    id = 'com.google.fonts/check/name/match_familyname_fullfont'
)
def com_google_fonts_check_name_match_familyname_fullfont(ttFont):
    """Does full font name begin with the font family name?

    Compares the first FONT_FAMILY_NAME entry with the first
    FULL_FONT_NAME entry; FAILs if either is missing or if the full
    name does not start with the family name.
    """
    from fontbakery.utils import get_name_entry_strings
    familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
    fullfontname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
    if len(familyname) == 0:
        yield FAIL,\
              Message("no-font-family-name",
                      "Font lacks a NameID.FONT_FAMILY_NAME"
                      " entry in the 'name' table.")
    elif len(fullfontname) == 0:
        yield FAIL,\
              Message("no-full-font-name",
                      "Font lacks a NameID.FULL_FONT_NAME"
                      " entry in the 'name' table.")
    else:
        # we probably should check all found values are equivalent.
        # and, in that case, then performing the rest of the check
        # with only the first occurences of the name entries
        # will suffice:
        fullfontname = fullfontname[0]
        familyname = familyname[0]
        if not fullfontname.startswith(familyname):
            # Fix: the original message had the two values swapped
            # (it printed the family name next to FULL_FONT_NAME and
            # vice-versa), which made the report misleading.
            yield FAIL,\
                  Message("does-not",
                          f"On the 'name' table, the full font name"
                          f" (NameID {NameID.FULL_FONT_NAME}"
                          f" - FULL_FONT_NAME: '{fullfontname}')"
                          f" does not begin with font family name"
                          f" (NameID {NameID.FONT_FAMILY_NAME}"
                          f" - FONT_FAMILY_NAME: '{familyname}')")
        else:
            yield PASS, "Full font name begins with the font family name."
@check(
    id = 'com.google.fonts/check/family_naming_recommendations'
)
def com_google_fonts_check_family_naming_recommendations(ttFont):
    """Font follows the family naming recommendations?

    Verifies the PostScript name character set / hyphen count and the
    recommended maximum lengths of the main name table entries, then
    yields an INFO with a markdown table of all offending entries
    (or PASS when there are none).
    """
    # See http://forum.fontlab.com/index.php?topic=313.0
    import re
    from fontbakery.utils import get_name_entry_strings
    bad_entries = []

    # <Postscript name> may contain only a-zA-Z0-9 and one hyphen.
    bad_psname = re.compile("[^A-Za-z0-9-]")
    for string in get_name_entry_strings(ttFont,
                                         NameID.POSTSCRIPT_NAME):
        if bad_psname.search(string):
            bad_entries.append({
                'field': 'PostScript Name',
                'value': string,
                'rec': ('May contain only a-zA-Z0-9'
                        ' characters and an hyphen.')
            })
        if string.count('-') > 1:
            # Consistency fix: this entry was inconsistently spelled
            # 'Postscript Name' in the original.
            bad_entries.append({
                'field': 'PostScript Name',
                'value': string,
                'rec': ('May contain not more'
                        ' than a single hyphen')
            })

    # Recommended maximum lengths per name ID, replacing six
    # copy-pasted loops with one table-driven loop.
    # (name_id, display field, max length)
    LENGTH_LIMITS = [
        (NameID.FULL_FONT_NAME, 'Full Font Name', 63),
        (NameID.POSTSCRIPT_NAME, 'PostScript Name', 63),
        (NameID.FONT_FAMILY_NAME, 'Family Name', 31),
        (NameID.FONT_SUBFAMILY_NAME, 'Style Name', 31),
        (NameID.TYPOGRAPHIC_FAMILY_NAME, 'OT Family Name', 31),
        (NameID.TYPOGRAPHIC_SUBFAMILY_NAME, 'OT Style Name', 31),
    ]
    for name_id, field, max_length in LENGTH_LIMITS:
        for string in get_name_entry_strings(ttFont, name_id):
            if len(string) > max_length:
                bad_entries.append({
                    'field': field,
                    'value': string,
                    'rec': f'exceeds max length ({max_length})'
                })

    if len(bad_entries) > 0:
        table = "| Field | Value | Recommendation |\n"
        table += "|:----- |:----- |:-------------- |\n"
        for bad in bad_entries:
            table += "| {} | {} | {} |\n".format(bad["field"],
                                                 bad["value"],
                                                 bad["rec"])
        yield INFO,\
              Message("bad-entries",
                      f"Font does not follow "
                      f"some family naming recommendations:\n"
                      f"\n"
                      f"{table}")
    else:
        yield PASS, "Font follows the family naming recommendations."
@check(
    id = 'com.adobe.fonts/check/name/postscript_vs_cff',
    conditions = ['is_cff'],
    rationale = """
    The PostScript name entries in the font's 'name' table should match the FontName string in the 'CFF ' table.
    The 'CFF ' table has a lot of information that is duplicated in other tables. This information should be consistent across tables, because there's no guarantee which table an app will get the data from.
    """
)
def com_adobe_fonts_check_name_postscript_vs_cff(ttFont):
    """CFF table FontName must match name table ID 6 (PostScript name)."""
    font_names = ttFont['CFF '].cff.fontNames
    # An OpenType/CFF font is expected to embed exactly one font.
    if len(font_names) != 1:
        yield ERROR, "Unexpected number of font names in CFF table."
        return
    cff_name = font_names[0]
    passed = True
    for entry in ttFont['name'].names:
        if entry.nameID != NameID.POSTSCRIPT_NAME:
            continue
        postscript_name = entry.toUnicode()
        if postscript_name != cff_name:
            passed = False
            yield FAIL,\
                  Message("mismatch",
                          f"Name table PostScript name '{postscript_name}' "
                          f"does not match CFF table FontName '{cff_name}'.")
    if passed:
        yield PASS, ("Name table PostScript name matches CFF table FontName.")
@check(
    id = 'com.adobe.fonts/check/name/postscript_name_consistency',
    conditions = ['not is_cff'],  # e.g. TTF or CFF2
    rationale = """
    The PostScript name entries in the font's 'name' table should be consistent across platforms.
    This is the TTF/CFF2 equivalent of the CFF 'postscript_name_cff_vs_name' check.
    """
)
def com_adobe_fonts_check_name_postscript_name_consistency(ttFont):
    """Name table ID 6 (PostScript name) must be consistent across platforms."""
    # Collect the distinct PostScript name strings across all records.
    postscript_names = {
        entry.toUnicode()
        for entry in ttFont['name'].names
        if entry.nameID == NameID.POSTSCRIPT_NAME
    }
    if len(postscript_names) > 1:
        yield FAIL,\
              Message("inconsistency",
                      f'Entries in the "name" table for ID 6'
                      f' (PostScript name) are not consistent.'
                      f' Names found: {sorted(postscript_names)}.')
    else:
        yield PASS, ('Entries in the "name" table for ID 6 '
                     '(PostScript name) are consistent.')
@check(
    id = 'com.adobe.fonts/check/family/max_4_fonts_per_family_name',
    rationale = """
    Per the OpenType spec:
    'The Font Family name [...] should be shared among at most four fonts that differ only in weight or style [...]'
    """,
)
def com_adobe_fonts_check_family_max_4_fonts_per_family_name(ttFonts):
    """Verify that each group of fonts with the same nameID 1 has maximum of 4 fonts"""
    from collections import Counter
    from fontbakery.utils import get_name_entry_strings
    name_counter = Counter()
    for ttFont in ttFonts:
        entries = get_name_entry_strings(ttFont,
                                         NameID.FONT_FAMILY_NAME,
                                         PlatformID.WINDOWS,
                                         WindowsEncodingID.UNICODE_BMP,
                                         WindowsLanguageID.ENGLISH_USA)
        # A font may carry duplicate entries for the same family name;
        # de-duplicate so each font contributes at most once per name.
        name_counter.update(set(entries))
    passed = True
    for family_name, count in name_counter.items():
        if count > 4:
            passed = False
            yield FAIL,\
                  Message("too-many",
                          f"Family '{family_name}' has {count} fonts"
                          f" (should be 4 or fewer).")
    if passed:
        yield PASS, ("There were no more than 4 fonts per family name.")
| {
"repo_name": "graphicore/fontbakery",
"path": "Lib/fontbakery/profiles/name.py",
"copies": "1",
"size": "18426",
"license": "apache-2.0",
"hash": 5650502475126499000,
"line_mean": 41.5542725173,
"line_max": 342,
"alpha_frac": 0.5492239227,
"autogenerated": false,
"ratio": 4.127688172043011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.517691209474301,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, INFO, PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@check(
    id = 'com.google.fonts/check/kern_table',
    rationale = """
    Even though all fonts should have their kerning implemented in the GPOS table, there may be kerning info at the kern table as well.
    Some applications such as MS PowerPoint require kerning info on the kern table. More specifically, they require a format 0 kern subtable from a kern table version 0 with only glyphs defined in the cmap table, which is the only one that Windows understands (and which is also the simplest and more limited of all the kern subtables).
    Google Fonts ingests fonts made for download and use on desktops, and does all web font optimizations in the serving pipeline (using libre libraries that anyone can replicate.)
    Ideally, TTFs intended for desktop users (and thus the ones intended for Google Fonts) should have both KERN and GPOS tables.
    Given all of the above, we currently treat kerning on a v0 kern table as a good-to-have (but optional) feature.
    """,
    misc_metadata = {
        'request': [
            'https://github.com/googlefonts/fontbakery/issues/1675',
            'https://github.com/googlefonts/fontbakery/issues/3148'
        ]
    }
)
def com_google_fonts_check_kern_table(ttFont):
    """Is there a usable "kern" table declared in the font?

    Outcomes:
    - PASS if the font has no "kern" table at all (the table is optional).
    - INFO if a format-0 subtable exists and all its glyphs are mapped.
    - WARN if no format-0 subtable exists (other formats are ignored by
      some programs).
    - FAIL if a format-0 subtable kerns glyphs that are not in the cmap.
    """
    kern = ttFont.get("kern")
    if kern:
        # Glyph names reachable via the best available unicode cmap;
        # only these are legal participants in a format-0 kern pair.
        cmap = set(ttFont.getBestCmap().values())
        nonCharacterGlyphs = set()
        for kernTable in kern.kernTables:
            if kernTable.format == 0:
                # Keys of a format-0 subtable are (left, right) glyph-name pairs.
                for leftGlyph, rightGlyph in kernTable.kernTable.keys():
                    if leftGlyph not in cmap:
                        nonCharacterGlyphs.add(leftGlyph)
                    if rightGlyph not in cmap:
                        nonCharacterGlyphs.add(rightGlyph)
        if all(kernTable.format != 0 for kernTable in kern.kernTables):
            yield WARN,\
                  Message("kern-unknown-format",
                          'The "kern" table does not have any format-0 subtable '
                          'and will not work in a few programs that may require '
                          'the table.')
        elif nonCharacterGlyphs:
            yield FAIL,\
                  Message("kern-non-character-glyphs",
                          'The following glyphs should not be used in the "kern" '
                          'table because they are not in the "cmap" table: %s'
                          % ', '.join(sorted(nonCharacterGlyphs)))
        else:
            yield INFO,\
                  Message("kern-found",
                          'Only a few programs may require the kerning'
                          ' info that this font provides on its "kern" table.')
    else:
        yield PASS, 'Font does not declare an optional "kern" table.'
| {
"repo_name": "moyogo/fontbakery",
"path": "Lib/fontbakery/profiles/kern.py",
"copies": "1",
"size": "3080",
"license": "apache-2.0",
"hash": -2702506947649630000,
"line_mean": 51.2033898305,
"line_max": 340,
"alpha_frac": 0.6311688312,
"autogenerated": false,
"ratio": 4.301675977653631,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5432844808853631,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@check(
    id = 'com.google.fonts/check/family/equal_unicode_encodings'
)
def com_google_fonts_check_family_equal_unicode_encodings(ttFonts):
    """Fonts have equal unicode encodings?

    Compares the platform encoding ID of the format 4 cmap subtable
    across all fonts of the family.
    """
    encoding = None
    failed = False
    for ttFont in ttFonts:
        cmap = None
        for table in ttFont['cmap'].tables:
            if table.format == 4:
                cmap = table
                break
        if cmap is None:
            # Fix: a font without a format 4 cmap subtable previously
            # crashed this check with an AttributeError on `cmap.platEncID`
            # (the original's own comment acknowledged this gap).
            yield FAIL,\
                  Message("no-format-4",
                          "Font lacks a format 4 cmap subtable.")
            return
        # Fix: use `is None` — platEncID 0 (Symbol) is falsy, so the
        # original `if not encoding` would keep re-assigning it.
        if encoding is None:
            encoding = cmap.platEncID
        if encoding != cmap.platEncID:
            failed = True
    if failed:
        yield FAIL,\
              Message("mismatch",
                      "Fonts have different unicode encodings.")
    else:
        yield PASS, "Fonts have equal unicode encodings."
# This check was originally ported from
# Mekkablue Preflight Checks available at:
# https://github.com/mekkablue/Glyphs-Scripts/blob/master/Test/Preflight%20Font.py
@check(
    id = 'com.google.fonts/check/all_glyphs_have_codepoints',
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/735'
    }
)
def com_google_fonts_check_all_glyphs_have_codepoints(ttFont):
    """Check all glyphs have codepoints assigned.

    Scans every unicode cmap subtable for entries whose codepoint is None.
    """
    failed = False
    for subtable in ttFont['cmap'].tables:
        if subtable.isUnicode():
            # cmap items are (codepoint, glyph_name) pairs.
            for codepoint, glyph_name in subtable.cmap.items():
                if codepoint is None:
                    failed = True
                    # Fix: report the glyph name. `codepoint` is always
                    # None in this branch, so the original message
                    # printed the useless "Glyph None lacks ...".
                    yield FAIL,\
                          Message("glyph-lacks-codepoint",
                                  f"Glyph {glyph_name} lacks a unicode"
                                  f" codepoint assignment.")
    if not failed:
        yield PASS, "All glyphs have a codepoint value assigned."
| {
"repo_name": "moyogo/fontbakery",
"path": "Lib/fontbakery/profiles/cmap.py",
"copies": "2",
"size": "2238",
"license": "apache-2.0",
"hash": 8217103135155320000,
"line_mean": 36.9322033898,
"line_max": 89,
"alpha_frac": 0.6179624665,
"autogenerated": false,
"ratio": 4.175373134328358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5793335600828358,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('is_ttf', ))
]
@check(
    id = 'com.google.fonts/check/family/underline_thickness',
    rationale = """
    Dave C Lemon (Adobe Type Team) recommends setting the underline thickness to be consistent across the family.
    If thicknesses are not family consistent, words set on the same line which have different styles look strange.
    See also:
    https://twitter.com/typenerd1/status/690361887926697986
    """,
    misc_metadata = {
        'affects': [('InDesign', 'unspecified')]
    }
)
def com_google_fonts_check_family_underline_thickness(ttFonts):
    """Fonts have consistent underline thickness?"""
    thickness_by_font = {}
    reference = None
    inconsistent = False
    for font in ttFonts:
        # Key the report by the file path the font was loaded from.
        path = font.reader.file.name
        thickness = font['post'].underlineThickness
        thickness_by_font[path] = thickness
        if reference is None:
            reference = thickness  # first font sets the expected value
        if thickness != reference:
            inconsistent = True
    if inconsistent:
        msg = ("Thickness of the underline is not"
               " the same across this family. In order to fix this,"
               " please make sure that the underlineThickness value"
               " is the same in the 'post' table of all of this family"
               " font files.\n"
               "Detected underlineThickness values are:\n")
        for path in thickness_by_font.keys():
            msg += f"\t{path}: {thickness_by_font[path]}\n"
        yield FAIL, msg
    else:
        yield PASS, "Fonts have consistent underline thickness."
@check(
    id = 'com.google.fonts/check/post_table_version',
    rationale = """
    Apple recommends against using 'post' table format 3 under most circumstances, as it can create problems with some printer drivers and PDF documents. The savings in disk space usually does not justify the potential loss in functionality.
    Source: https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html
    The CFF2 table does not contain glyph names, so variable OTFs should be allowed to use post table version 2.
    This check expects:
    - Version 2 for TTF or OTF CFF2 Variable fonts
    - Version 3 for OTF
    """,
    misc_metadata = {
        'request': [
            'https://github.com/google/fonts/issues/215',
            'https://github.com/googlefonts/fontbakery/issues/2638'
        ]
    }
)
def com_google_fonts_check_post_table_version(ttFont, is_ttf):
    """Font has correct post table version?"""
    actual_version = ttFont['post'].formatType
    is_variable = "fvar" in ttFont.keys()
    has_cff2 = "CFF2" in ttFont.keys()
    # TTFs and variable CFF2 OTFs need glyph names from the post table
    # (version 2); static OTFs should use the name-less version 3.
    expected = 2 if (is_ttf or (is_variable and has_cff2)) else 3
    if actual_version != expected:
        yield FAIL, (f"Post table should be version {expected}"
                     f" instead of {actual_version}.")
    else:
        yield PASS, f"Font has post table version {expected}."
| {
"repo_name": "moyogo/fontbakery",
"path": "Lib/fontbakery/profiles/post.py",
"copies": "2",
"size": "3264",
"license": "apache-2.0",
"hash": -580516306220090400,
"line_mean": 37.4,
"line_max": 245,
"alpha_frac": 0.6446078431,
"autogenerated": false,
"ratio": 3.9043062200956937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5548914063195693,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS, SKIP, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('glyph_metrics_stats', 'is_ttf'))
]
@check(
    id = 'com.google.fonts/check/linegaps'
)
def com_google_fonts_check_linegaps(ttFont):
    """Checking Vertical Metric Linegaps.

    Both hhea.lineGap and OS/2.sTypoLineGap are expected to be zero.
    """
    failed = False
    # Fix: the original used `elif`, so a non-zero hhea lineGap masked
    # a simultaneously non-zero OS/2 sTypoLineGap. Each problem is now
    # reported independently.
    if ttFont["hhea"].lineGap != 0:
        failed = True
        yield WARN,\
              Message("hhea",
                      "hhea lineGap is not equal to 0.")
    if ttFont["OS/2"].sTypoLineGap != 0:
        failed = True
        yield WARN,\
              Message("OS/2",
                      "OS/2 sTypoLineGap is not equal to 0.")
    if not failed:
        yield PASS, "OS/2 sTypoLineGap and hhea lineGap are both 0."
@check(
    id = 'com.google.fonts/check/maxadvancewidth'
)
def com_google_fonts_check_maxadvancewidth(ttFont):
    """MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?"""
    hhea_advance_width_max = ttFont['hhea'].advanceWidthMax
    hmtx_advance_width_max = None
    # Derive the widest advance from the per-glyph hmtx metrics;
    # the first value is clamped at zero before comparisons begin.
    for metric in ttFont['hmtx'].metrics.values():
        advance = metric[0]
        if hmtx_advance_width_max is None:
            hmtx_advance_width_max = max(0, advance)
        elif advance > hmtx_advance_width_max:
            hmtx_advance_width_max = advance
    if hmtx_advance_width_max != hhea_advance_width_max:
        yield FAIL,\
              Message("mismatch",
                      f"AdvanceWidthMax mismatch:"
                      f" expected {hmtx_advance_width_max} (from hmtx);"
                      f" got {hhea_advance_width_max} (from hhea)")
    else:
        yield PASS, ("MaxAdvanceWidth is consistent"
                     " with values in the Hmtx and Hhea tables.")
| {
"repo_name": "graphicore/fontbakery",
"path": "Lib/fontbakery/profiles/hhea.py",
"copies": "2",
"size": "1852",
"license": "apache-2.0",
"hash": 694465002661436800,
"line_mean": 36.7959183673,
"line_max": 89,
"alpha_frac": 0.620950324,
"autogenerated": false,
"ratio": 3.4232902033271717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004795273837355786,
"num_lines": 49
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS, WARN, INFO
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('vmetrics', )),
('.googlefonts_conditions', ('RIBBI_ttFonts', ))
]
@check(
    id = 'com.google.fonts/check/family/panose_proportion'
)
def com_google_fonts_check_family_panose_proportion(ttFonts):
    """Fonts have consistent PANOSE proportion?"""
    passed = True
    missing = False
    reference = None
    for ttFont in ttFonts:
        if "OS/2" not in ttFont:
            missing = True
            passed = False
            continue
        value = ttFont['OS/2'].panose.bProportion
        if reference is None:
            reference = value  # first font with OS/2 sets the expected value
        if value != reference:
            passed = False
    if missing:
        yield FAIL,\
              Message("lacks-OS/2",
                      "One or more fonts lack the required OS/2 table.")
    if not passed:
        yield FAIL,\
              Message("inconsistency",
                      "PANOSE proportion is not the same across this family."
                      " In order to fix this, please make sure that"
                      " the panose.bProportion value is the same"
                      " in the OS/2 table of all of this family font files.")
    else:
        yield PASS, "Fonts have consistent PANOSE proportion."
@check(
    id = 'com.google.fonts/check/family/panose_familytype'
)
def com_google_fonts_check_family_panose_familytype(ttFonts):
    """Fonts have consistent PANOSE family type?"""
    passed = True
    missing = False
    reference = None
    for ttFont in ttFonts:
        if "OS/2" not in ttFont:
            missing = True
            passed = False
            continue
        value = ttFont['OS/2'].panose.bFamilyType
        if reference is None:
            reference = value  # first font with OS/2 sets the expected value
        if value != reference:
            passed = False
    if missing:
        yield FAIL,\
              Message("lacks-OS/2",
                      "One or more fonts lack the required OS/2 table.")
    if not passed:
        yield FAIL,\
              Message("inconsistency",
                      "PANOSE family type is not the same across this family."
                      " In order to fix this, please make sure that"
                      " the panose.bFamilyType value is the same"
                      " in the OS/2 table of all of this family font files.")
    else:
        yield PASS, "Fonts have consistent PANOSE family type."
@check(
    id = 'com.google.fonts/check/xavgcharwidth',
    conditions = ['is_ttf']
)
def com_google_fonts_check_xavgcharwidth(ttFont):
    """Check if OS/2 xAvgCharWidth is correct.

    Recomputes the expected average glyph width using the rule that
    matches the OS/2 table version (v3+: plain average over all glyphs
    with positive width; v2 and below: weighted average over latin
    lowercase glyphs and space), then compares it against the stored
    value. Off-by-one is treated as a rounding match; small deviations
    yield INFO; larger ones yield WARN.
    """
    if "OS/2" not in ttFont:
        yield FAIL,\
              Message("lacks-OS/2",
                      "Required OS/2 table is missing.")
        return
    current_value = ttFont['OS/2'].xAvgCharWidth
    ACCEPTABLE_ERROR = 10  # Width deviation tolerance in font units
    # Since version 3, the average is computed using _all_ glyphs in a font.
    if ttFont['OS/2'].version >= 3:
        calculation_rule = "the average of the widths of all glyphs in the font"
        if not ttFont['hmtx'].metrics:  # May contain just '.notdef', which is valid.
            yield FAIL,\
                  Message("missing-glyphs",
                          "CRITICAL: Found no glyph width data in the hmtx table!")
            return
        width_sum = 0
        count = 0
        for glyph_id in ttFont['glyf'].glyphs:  # At least .notdef must be present.
            width = ttFont['hmtx'].metrics[glyph_id][0]
            # The OpenType spec doesn't exclude negative widths, but only positive
            # widths seems to be the assumption in the wild?
            if width > 0:
                count += 1
                width_sum += width
        expected_value = int(round(width_sum / count))
    else:  # Version 2 and below only consider lowercase latin glyphs and space.
        calculation_rule = ("the weighted average of the widths of the latin"
                            " lowercase glyphs in the font")
        # Per-glyph weights defined by the OS/2 v2 specification.
        weightFactors = {
            'a': 64,
            'b': 14,
            'c': 27,
            'd': 35,
            'e': 100,
            'f': 20,
            'g': 14,
            'h': 42,
            'i': 63,
            'j': 3,
            'k': 6,
            'l': 35,
            'm': 20,
            'n': 56,
            'o': 56,
            'p': 17,
            'q': 4,
            'r': 49,
            's': 56,
            't': 71,
            'u': 31,
            'v': 10,
            'w': 18,
            'x': 3,
            'y': 18,
            'z': 2,
            'space': 166
        }
        glyph_order = ttFont.getGlyphOrder()
        if not all(character in glyph_order for character in weightFactors):
            yield FAIL,\
                  Message("missing-glyphs",
                          "Font is missing the required"
                          " latin lowercase letters and/or space.")
            return
        width_sum = 0
        for glyph_id in weightFactors:
            width = ttFont['hmtx'].metrics[glyph_id][0]
            width_sum += (width * weightFactors[glyph_id])
        expected_value = int(width_sum / 1000.0 + 0.5)  # round to closest int
    difference = abs(current_value - expected_value)
    # We accept matches and off-by-ones due to rounding as correct.
    if current_value == expected_value or difference == 1:
        yield PASS, "OS/2 xAvgCharWidth value is correct."
    elif difference < ACCEPTABLE_ERROR:
        yield INFO, (f"OS/2 xAvgCharWidth is {current_value} but it should be"
                     f" {expected_value} which corresponds to {calculation_rule}."
                     f" These are similar values, which"
                     f" may be a symptom of the slightly different"
                     f" calculation of the xAvgCharWidth value in"
                     f" font editors. There's further discussion on"
                     f" this at https://github.com/googlefonts/fontbakery"
                     f"/issues/1622")
    else:
        yield WARN, (f"OS/2 xAvgCharWidth is {current_value} but it should be"
                     f" {expected_value} which corresponds to {calculation_rule}.")
@check(
    id = 'com.adobe.fonts/check/fsselection_matches_macstyle',
    rationale = """
    The bold and italic bits in OS/2.fsSelection must match the bold and italic bits in head.macStyle per the OpenType spec.
    """
)
def com_adobe_fonts_check_fsselection_matches_macstyle(ttFont):
    """Check if OS/2 fsSelection matches head macStyle bold and italic bits."""
    # Both tables must be present before their flag bits can be compared.
    missing_tables = False
    for table_tag in ("OS/2", "head"):
        if table_tag not in ttFont:
            missing_tables = True
            yield FAIL,\
                  Message(f'lacks-{table_tag}',
                          f"The '{table_tag}' table is missing.")
    if missing_tables:
        return
    from fontbakery.constants import FsSelection, MacStyle
    failed = False
    # Compare each (style, head bit, OS/2 bit) pairing in turn.
    for style_name, head_mask, os2_mask in (
            ("bold", MacStyle.BOLD, FsSelection.BOLD),
            ("italic", MacStyle.ITALIC, FsSelection.ITALIC)):
        head_flag = (ttFont['head'].macStyle & head_mask) != 0
        os2_flag = (ttFont['OS/2'].fsSelection & os2_mask) != 0
        if head_flag != os2_flag:
            failed = True
            yield FAIL, (f"The OS/2.fsSelection and head.macStyle "
                         f"{style_name} settings do not match.")
    if not failed:
        yield PASS, "The OS/2.fsSelection and head.macStyle " \
                    "bold and italic settings match."
@check(
    id = 'com.adobe.fonts/check/family/bold_italic_unique_for_nameid1',
    conditions=['RIBBI_ttFonts'],
    rationale = """
    Per the OpenType spec: name ID 1 'is used in combination with Font Subfamily name (name ID 2), and should be shared among at most four fonts that differ only in weight or style...
    This four-way distinction should also be reflected in the OS/2.fsSelection field, using bits 0 and 5.
    """
)
def com_adobe_fonts_check_family_bold_italic_unique_for_nameid1(RIBBI_ttFonts):
    """Check that OS/2.fsSelection bold & italic settings are unique
    for each NameID1"""
    from collections import Counter
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import NameID, FsSelection
    combo_counter = Counter()
    for ttFont in RIBBI_ttFonts:
        flags = ttFont['OS/2'].fsSelection
        bold = (flags & FsSelection.BOLD) != 0
        italic = (flags & FsSelection.ITALIC) != 0
        bold_italic = f'Bold={bold!r}, Italic={italic!r}'
        # A font may carry the same family name several times (one entry
        # per platform/language); de-duplicate so it counts only once.
        for family_name in set(get_name_entry_strings(ttFont,
                                                      NameID.FONT_FAMILY_NAME)):
            combo_counter[(family_name, bold_italic)] += 1
    failed = False
    for (family_name, bold_italic), count in combo_counter.items():
        if count > 1:
            failed = True
            yield FAIL, (f"Family '{family_name}' has {count} fonts"
                         f" (should be no more than 1) with the"
                         f" same OS/2.fsSelection bold & italic settings:"
                         f" {bold_italic}")
    if not failed:
        yield PASS, ("The OS/2.fsSelection bold & italic settings were unique "
                     "within each compatible family group.")
@check(
  id = 'com.google.fonts/check/code_pages',
  rationale = """
    At least some programs (such as Word and Sublime Text) under Windows 7 do not recognize fonts unless code page bits are properly set on the ulCodePageRange1 (and/or ulCodePageRange2) fields of the OS/2 table.
    More specifically, the fonts are selectable in the font menu, but whichever Windows API these applications use considers them unsuitable for any character set, so anything set in these fonts is rendered with a fallback font of Arial.
    This check currently does not identify which code pages should be set. Auto-detecting coverage is not trivial since the OpenType specification leaves the interpretation of whether a given code page is "functional" or not open to the font developer to decide.
    So here we simply detect as a FAIL when a given font has no code page declared at all.
  """
)
def com_google_fonts_check_code_pages(ttFont):
  """Check code page character ranges"""
  if "OS/2" not in ttFont:
    # Both code-page range fields live on the OS/2 table.
    yield FAIL,\
          Message("lacks-OS/2",
                  "The required OS/2 table is missing.")
    return
  # FAIL when the font declares no code page coverage at all,
  # i.e. both range fields are absent or zeroed out.
  if not hasattr(ttFont['OS/2'], "ulCodePageRange1") or \
     not hasattr(ttFont['OS/2'], "ulCodePageRange2") or \
     (ttFont['OS/2'].ulCodePageRange1 == 0 and \
      ttFont['OS/2'].ulCodePageRange2 == 0):
    # Bug fix: the message previously misnamed the second field as
    # "CodePageRange2" (missing the "ul" prefix used by the actual field).
    yield FAIL, ("No code pages defined in the OS/2 table"
                 " ulCodePageRange1 and ulCodePageRange2 fields.")
  else:
    yield PASS, "At least one code page is defined."
| {
"repo_name": "moyogo/fontbakery",
"path": "Lib/fontbakery/profiles/os2.py",
"copies": "2",
"size": "11839",
"license": "apache-2.0",
"hash": -2867745990990617600,
"line_mean": 38.4633333333,
"line_max": 266,
"alpha_frac": 0.5883942901,
"autogenerated": false,
"ratio": 3.96483590087073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.555323019097073,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
# Conditions pulled in from the sibling shared_conditions module;
# get_module_profile resolves these dotted imports when building the profile.
profile_imports = [
    ('.shared_conditions', ('is_variable_font'
        , 'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord'
        , 'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord'))
]
@check(
  id = 'com.google.fonts/check/varfont/regular_wght_coord',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'wght' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wght
    If a variable font has a 'wght' (Weight) axis, then the coordinate of its 'Regular' instance is required to be 400.
  """,
  conditions = ['is_variable_font',
                'regular_wght_coord'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
  }
)
def com_google_fonts_check_varfont_regular_wght_coord(ttFont, regular_wght_coord):
  """The variable font 'wght' (Weight) axis coordinate must be 400 on the
  'Regular' instance."""
  # The spec mandates wght=400 on the 'Regular' named instance.
  if regular_wght_coord != 400:
    yield FAIL,\
          Message("not-400",
                  f'The "wght" axis coordinate of the "Regular" instance'
                  f' must be 400. Got {regular_wght_coord} instead.')
  else:
    yield PASS, "Regular:wght is 400."
@check(
  id = 'com.google.fonts/check/varfont/regular_wdth_coord',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'wdth' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wdth
    If a variable font has a 'wdth' (Width) axis, then the coordinate of its 'Regular' instance is required to be 100.
  """,
  conditions = ['is_variable_font',
                'regular_wdth_coord'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
  }
)
def com_google_fonts_check_varfont_regular_wdth_coord(ttFont, regular_wdth_coord):
  """The variable font 'wdth' (Width) axis coordinate must be 100 on the 'Regular' instance."""
  # The spec mandates wdth=100 on the 'Regular' named instance.
  if regular_wdth_coord != 100:
    yield FAIL,\
          Message("not-100",
                  f'The "wdth" coordinate of the "Regular" instance must be'
                  f' 100. Got {regular_wdth_coord} as a default value instead.')
  else:
    yield PASS, "Regular:wdth is 100."
@check(
  id = 'com.google.fonts/check/varfont/regular_slnt_coord',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'slnt' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_slnt
    If a variable font has a 'slnt' (Slant) axis, then the coordinate of its 'Regular' instance is required to be zero.
  """,
  conditions = ['is_variable_font',
                'regular_slnt_coord'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
  }
)
def com_google_fonts_check_varfont_regular_slnt_coord(ttFont, regular_slnt_coord):
  """The variable font 'slnt' (Slant) axis coordinate must be zero on the 'Regular' instance."""
  # The spec mandates slnt=0 on the 'Regular' named instance.
  if regular_slnt_coord != 0:
    yield FAIL,\
          Message("non-zero",
                  f'The "slnt" coordinate of the "Regular" instance must be'
                  f' zero. Got {regular_slnt_coord} as a default value instead.')
  else:
    yield PASS, "Regular:slnt is zero."
@check(
  id = 'com.google.fonts/check/varfont/regular_ital_coord',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'ital' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_ital
    If a variable font has a 'ital' (Italic) axis, then the coordinate of its 'Regular' instance is required to be zero.
  """,
  conditions = ['is_variable_font',
                'regular_ital_coord'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
  }
)
def com_google_fonts_check_varfont_regular_ital_coord(ttFont, regular_ital_coord):
  """The variable font 'ital' (Italic) axis coordinate must be zero on the 'Regular' instance."""
  # The spec mandates ital=0 on the 'Regular' named instance.
  if regular_ital_coord != 0:
    yield FAIL,\
          Message("non-zero",
                  f'The "ital" coordinate of the "Regular" instance must be'
                  f' zero. Got {regular_ital_coord} as a default value instead.')
  else:
    yield PASS, "Regular:ital is zero."
@check(
  id = 'com.google.fonts/check/varfont/regular_opsz_coord',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'opsz' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_opsz
    If a variable font has a 'opsz' (Optical Size) axis, then the coordinate of its 'Regular' instance is recommended to be a value in the range 9 to 13.
  """,
  conditions = ['is_variable_font',
                'regular_opsz_coord'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
  }
)
def com_google_fonts_check_varfont_regular_opsz_coord(ttFont, regular_opsz_coord):
  """The variable font 'opsz' (Optical Size) axis coordinate should be between 9 and 13 on the 'Regular' instance."""
  if 9 <= regular_opsz_coord <= 13:
    # Bug fix: this string was missing the f-prefix, so the literal text
    # "{regular_opsz_coord}" was emitted instead of the coordinate value.
    yield PASS, (f"Regular:opsz coordinate ({regular_opsz_coord}) looks good.")
  else:
    yield WARN,\
          Message("out-of-range",
                  f'The "opsz" (Optical Size) coordinate'
                  f' on the "Regular" instance is recommended'
                  f' to be a value in the range 9 to 13.'
                  f' Got {regular_opsz_coord} instead.')
@check(
  id = 'com.google.fonts/check/varfont/bold_wght_coord',
  rationale = """
    The Open-Type spec's registered design-variation tag 'wght' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wght does not specify a required value for the 'Bold' instance of a variable font.
    But Dave Crossland suggested that we should enforce a required value of 700 in this case.
  """,
  conditions = ['is_variable_font',
                'bold_wght_coord'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
  }
)
def com_google_fonts_check_varfont_bold_wght_coord(ttFont, bold_wght_coord):
  """The variable font 'wght' (Weight) axis coordinate must be 700 on the 'Bold' instance."""
  # Project convention (not spec-mandated): Bold named instance at wght=700.
  if bold_wght_coord != 700:
    yield FAIL,\
          Message("not-700",
                  f'The "wght" axis coordinate of the "Bold" instance'
                  f' must be 700. Got {bold_wght_coord} instead.')
  else:
    yield PASS, "Bold:wght is 700."
@check(
  id = 'com.google.fonts/check/varfont/wght_valid_range',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'wght' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wght
    On the 'wght' (Weight) axis, the valid coordinate range is 1-1000.
  """,
  conditions = ['is_variable_font'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2264'
  }
)
def com_google_fonts_check_varfont_wght_valid_range(ttFont):
  """The variable font 'wght' (Weight) axis coordinate
  must be within spec range of 1 to 1000 on all instances."""
  # Report only the first offending named instance, then stop.
  for instance in ttFont['fvar'].instances:
    if 'wght' not in instance.coordinates:
      continue
    coord = instance.coordinates['wght']
    if not (1 <= coord <= 1000):
      yield FAIL,\
            Message("out-of-range",
                    f'Found a bad "wght" coordinate with value {coord}'
                    f' outside of the valid range from 1 to 1000.')
      return
  yield PASS, "OK"
@check(
  id = 'com.google.fonts/check/varfont/wdth_valid_range',
  rationale = """
    According to the Open-Type spec's registered design-variation tag 'wdth' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wdth
    On the 'wdth' (Width) axis, the valid coordinate range is 1-1000
  """,
  conditions = ['is_variable_font']
)
def com_google_fonts_check_varfont_wdth_valid_range(ttFont):
  """The variable font 'wdth' (Width) axis coordinate
  must be within spec range of 1 to 1000 on all instances."""
  # Bug fix: the docstring previously labeled the 'wdth' axis "(Weight)",
  # which is the 'wght' axis; 'wdth' is Width.
  # Report only the first offending named instance, then stop.
  for instance in ttFont['fvar'].instances:
    if 'wdth' not in instance.coordinates:
      continue
    coord = instance.coordinates['wdth']
    if not (1 <= coord <= 1000):
      yield FAIL,\
            Message("out-of-range",
                    f'Found a bad "wdth" coordinate with value {coord}'
                    f' outside of the valid range from 1 to 1000.')
      return
  yield PASS, "OK"
@check(
  id = 'com.google.fonts/check/varfont/slnt_range',
  rationale = """
    The OpenType spec says at https://docs.microsoft.com/en-us/typography/opentype/spec/dvaraxistag_slnt that:
    [...] the scale for the Slant axis is interpreted as the angle of slant in counter-clockwise degrees from upright. This means that a typical, right-leaning oblique design will have a negative slant value. This matches the scale used for the italicAngle field in the post table.
  """,
  conditions = ['is_variable_font',
                'slnt_axis'],
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2572'
  }
)
def com_google_fonts_check_varfont_slnt_range(ttFont, slnt_axis):
  """ The variable font 'slnt' (Slant) axis coordinate
  specifies positive values in its range? """
  # A typical right-leaning oblique has negative slnt values, so we expect
  # the axis range to reach into negative territory.
  covers_negative_values = slnt_axis.minValue < 0 and slnt_axis.maxValue >= 0
  if not covers_negative_values:
    yield WARN,\
          Message("unusual-range",
                  f'The range of values for the "slnt" axis in this font'
                  f' only allows positive coordinates'
                  f' (from {slnt_axis.minValue} to {slnt_axis.maxValue}),'
                  f' indicating that this may be a back slanted design,'
                  f" which is rare. If that's not the case, then"
                  f' the "slant" axis should be a range of'
                  f' negative values instead.')
  else:
    yield PASS, "Looks good!"
| {
"repo_name": "graphicore/fontbakery",
"path": "Lib/fontbakery/profiles/fvar.py",
"copies": "2",
"size": "11057",
"license": "apache-2.0",
"hash": -5346988027309577000,
"line_mean": 41.5269230769,
"line_max": 285,
"alpha_frac": 0.616261192,
"autogenerated": false,
"ratio": 3.737998647734956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5354259839734956,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS, WARN, SKIP
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
def _is_non_mark_char(charcode):
  """Classify a Unicode codepoint: True for a non-mark character,
  False for a combining mark ("M*" categories), None for control
  characters ("C*" categories), which callers skip entirely."""
  from fontTools import unicodedata
  cat = unicodedata.category(chr(charcode))
  if cat.startswith("C"):
    # skip control characters
    return None
  return not cat.startswith("M")
def _get_mark_class_glyphnames(ttFont):
  """Return the set of glyph names assigned to the GDEF MARK glyph class."""
  from fontbakery.constants import GlyphClass
  classdef_items = ttFont["GDEF"].table.GlyphClassDef.classDefs.items()
  return {glyph_name
          for glyph_name, glyph_class in classdef_items
          if glyph_class == GlyphClass.MARK}
@check(
  id = 'com.google.fonts/check/gdef_spacing_marks',
  rationale = """
    Glyphs in the GDEF mark glyph class should be non-spacing.
    Spacing glyphs in the GDEF mark glyph class may have incorrect anchor positioning that was only intended for building composite glyphs during design.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2877'
  }
)
def com_google_fonts_check_gdef_spacing_marks(ttFont):
  """Check glyphs in mark glyph class are non-spacing."""
  # Bug fix: the docstring previously described the gdef_mark_chars check
  # (and carried a stray closing paren); this check looks for *spacing*
  # glyphs wrongly placed in the GDEF mark glyph class.
  from fontbakery.utils import pretty_print_list
  if "GDEF" in ttFont and ttFont["GDEF"].table.GlyphClassDef:
    # A glyph is "spacing" when its hmtx advance width is positive.
    spacing_glyphnames = {name
                          for (name, (width, lsb)) in ttFont["hmtx"].metrics.items()
                          if width > 0}
    mark_class_glyphnames = _get_mark_class_glyphnames(ttFont)
    spacing_glyphnames_in_mark_glyph_class = spacing_glyphnames & mark_class_glyphnames
    if spacing_glyphnames_in_mark_glyph_class:
      formatted_list = "\t " +\
                       pretty_print_list(sorted(spacing_glyphnames_in_mark_glyph_class),
                                         shorten=10,
                                         sep=", ")
      yield WARN,\
            Message('spacing-mark-glyphs',
                    f"The following spacing glyphs may be in"
                    f" the GDEF mark glyph class by mistake:\n"
                    f"{formatted_list}")
    else:
      # Bug fix: message grammar ("does not has" -> "does not have").
      yield PASS, ('Font does not have spacing glyphs'
                   ' in the GDEF mark glyph class.')
  else:
    # Bug fix: the previous message read "...or has any GDEF glyph class
    # definition", which stated the opposite of the skip condition.
    yield SKIP, ('Font does not declare an optional "GDEF" table'
                 ' or does not have any GDEF glyph class definition.')
@check(
  id = 'com.google.fonts/check/gdef_mark_chars',
  rationale = """
    Mark characters should be in the GDEF mark glyph class.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2877'
  }
)
def com_google_fonts_check_gdef_mark_chars(ttFont):
  """Check mark characters are in GDEF mark glyph class"""
  from fontbakery.utils import pretty_print_list
  if "GDEF" in ttFont and ttFont["GDEF"].table.GlyphClassDef:
    cmap = ttFont.getBestCmap()
    mark_class_glyphnames = _get_mark_class_glyphnames(ttFont)
    # Mark characters (_is_non_mark_char returns False; None means control
    # char and is skipped) whose glyph is not in the GDEF mark glyph class:
    mark_chars_not_in_mark_class = {
      charcode for charcode in cmap
      if _is_non_mark_char(charcode) is False and
      cmap[charcode] not in mark_class_glyphnames
    }
    if mark_chars_not_in_mark_class:
      formatted_marks = "\t " +\
                        pretty_print_list(sorted("U+%04X" % c for c in
                                                 mark_chars_not_in_mark_class),
                                          shorten=None,
                                          sep=", ")
      yield WARN,\
            Message('mark-chars',
                    f"The following mark characters could be"
                    f" in the GDEF mark glyph class:\n"
                    f"{formatted_marks}")
    else:
      yield PASS, ('Font does not have mark characters'
                   ' not in the GDEF mark glyph class.')
  else:
    # Bug fix: the previous message read "...or has any GDEF glyph class
    # definition", which stated the opposite of the skip condition.
    yield SKIP, ('Font does not declare an optional "GDEF" table'
                 ' or does not have any GDEF glyph class definition.')
@check(
  id = 'com.google.fonts/check/gdef_non_mark_chars',
  rationale = """
    Glyphs in the GDEF mark glyph class become non-spacing and may be repositioned if they have mark anchors.
    Only combining mark glyphs should be in that class. Any non-mark glyph must not be in that class, in particular spacing glyphs.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/issues/2877'
  }
)
def com_google_fonts_check_gdef_non_mark_chars(ttFont):
  """Check GDEF mark glyph class doesn't have characters that are not marks"""
  # Bug fix: removed a stray closing paren at the end of the docstring.
  from fontbakery.utils import pretty_print_list
  if "GDEF" in ttFont and ttFont["GDEF"].table.GlyphClassDef:
    cmap = ttFont.getBestCmap()
    # Codepoints that are definitely not marks (None = control char, skipped).
    nonmark_chars = {
      charcode for charcode in cmap
      if _is_non_mark_char(charcode) is True
    }
    nonmark_char_glyphnames = {cmap[c] for c in nonmark_chars}
    # Reverse mapping: glyph name -> set of codepoints mapped to it
    # (a glyph may be the target of several cmap entries).
    glyphname_to_char_mapping = dict()
    for k, v in cmap.items():
      if v in glyphname_to_char_mapping:
        glyphname_to_char_mapping[v].add(k)
      else:
        glyphname_to_char_mapping[v] = {k}
    mark_class_glyphnames = _get_mark_class_glyphnames(ttFont)
    nonmark_char_glyphnames_in_mark_class = nonmark_char_glyphnames & mark_class_glyphnames
    if nonmark_char_glyphnames_in_mark_class:
      # Collect the offending codepoints backing those glyph names.
      nonmark_chars_in_mark_class = set()
      for glyphname in nonmark_char_glyphnames_in_mark_class:
        chars = glyphname_to_char_mapping[glyphname]
        for char in chars:
          if char in nonmark_chars:
            nonmark_chars_in_mark_class.add(char)
      formatted_nonmarks = "\t " +\
                           pretty_print_list(sorted("U+%04X" % c for c in
                                                    nonmark_chars_in_mark_class),
                                             shorten=None,
                                             sep=", ")
      yield WARN,\
            Message('non-mark-chars',
                    f"The following non-mark characters should"
                    f" not be in the GDEF mark glyph class:\n"
                    f"{formatted_nonmarks}")
    else:
      yield PASS, ('Font does not have non-mark characters'
                   ' in the GDEF mark glyph class.')
  else:
    # Bug fix: the previous message read "...or has any GDEF glyph class
    # definition", which stated the opposite of the skip condition.
    yield SKIP, ('Font does not declare an optional "GDEF" table'
                 ' or does not have any GDEF glyph class definition.')
| {
"repo_name": "graphicore/fontbakery",
"path": "Lib/fontbakery/profiles/gdef.py",
"copies": "1",
"size": "6860",
"license": "apache-2.0",
"hash": -7023560101261480000,
"line_mean": 42.9743589744,
"line_max": 157,
"alpha_frac": 0.5683673469,
"autogenerated": false,
"ratio": 4.135021097046414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203388443946413,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.status import ERROR, FAIL, PASS, WARN, INFO
from fontbakery.message import Message
from fontbakery.constants import (NameID,
PlatformID,
WindowsEncodingID,
WindowsLanguageID)
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
# Conditions pulled in from the sibling shared_conditions module;
# get_module_profile resolves these dotted imports when building the profile.
profile_imports = [
    ('.shared_conditions', ('glyph_metrics_stats', 'is_ttf', 'is_cff'))
]
@check(
  id = 'com.adobe.fonts/check/name/empty_records',
  rationale = """
    Check the name table for empty records, as this can cause problems in Adobe apps.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/pull/2369'
  }
)
def com_adobe_fonts_check_name_empty_records(ttFont):
  """Check name table for empty records."""
  found_empty = False
  for name_record in ttFont['name'].names:
    # A record is "empty" when it decodes to whitespace only.
    if not name_record.toUnicode().strip():
      found_empty = True
      record_key = (name_record.platformID,
                    name_record.platEncID,
                    name_record.langID,
                    name_record.nameID)
      yield FAIL,\
            Message("empty-record",
                    f'"name" table record with key={record_key} is'
                    f' empty and should be removed.')
  if not found_empty:
    yield PASS, "No empty name table records found."
@check(
  id = 'com.google.fonts/check/name/no_copyright_on_description'
)
def com_google_fonts_check_name_no_copyright_on_description(ttFont):
  """Description strings in the name table must not contain copyright info."""
  found_copyright = False
  for entry in ttFont['name'].names:
    # 'opyright' matches both "Copyright" and "copyright".
    decoded = entry.string.decode(entry.getEncoding())
    if 'opyright' in decoded and entry.nameID == NameID.DESCRIPTION:
      found_copyright = True
  if found_copyright:
    yield FAIL,\
          Message("copyright-on-description",
                  f"Some namerecords with"
                  f" ID={NameID.DESCRIPTION} (NameID.DESCRIPTION)"
                  f" containing copyright info should be removed"
                  f" (perhaps these were added by a longstanding"
                  f" FontLab Studio 5.x bug that copied"
                  f" copyright notices to them.)")
  else:
    yield PASS, ("Description strings in the name table"
                 " do not contain any copyright string.")
@check(
  id = 'com.google.fonts/check/monospace',
  conditions = ['glyph_metrics_stats',
                'is_ttf'],
  rationale = """
    There are various metadata in the OpenType spec to specify if a font is monospaced or not. If the font is not truly monospaced, then no monospaced metadata should be set (as sometimes they mistakenly are...)
    Requirements for monospace fonts:
    * post.isFixedPitch - "Set to 0 if the font is proportionally spaced, non-zero if the font is not proportionally spaced (monospaced)"
      www.microsoft.com/typography/otspec/post.htm
    * hhea.advanceWidthMax must be correct, meaning no glyph's width value is greater.
      www.microsoft.com/typography/otspec/hhea.htm
    * OS/2.panose.bProportion must be set to 9 (monospace). Spec says: "The PANOSE definition contains ten digits each of which currently describes up to sixteen variations. Windows uses bFamilyType, bSerifStyle and bProportion in the font mapper to determine family type. It also uses bProportion to determine if the font is monospaced."
      www.microsoft.com/typography/otspec/os2.htm#pan
      monotypecom-test.monotype.de/services/pan2
    * OS/2.xAvgCharWidth must be set accurately.
      "OS/2.xAvgCharWidth is used when rendering monospaced fonts, at least by Windows GDI"
      http://typedrawers.com/discussion/comment/15397/#Comment_15397
    Also we should report an error for glyphs not of average width.
    Please also note:
    Thomas Phinney told us that a few years ago (as of December 2019), if you gave a font a monospace flag in Panose, Microsoft Word would ignore the actual advance widths and treat it as monospaced. Source: https://typedrawers.com/discussion/comment/45140/#Comment_45140
  """
)
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats):
  """Checking correctness of monospaced metadata."""
  # glyph_metrics_stats is a dict produced by the shared_conditions module;
  # this check only reads the three keys extracted below.
  from fontbakery.constants import (IsFixedWidth,
                                    PANOSE_Proportion)
  failed = False
  # Note: These values are read from the dict here only to
  # reduce the max line length in the check implementation below:
  seems_monospaced = glyph_metrics_stats["seems_monospaced"]
  most_common_width = glyph_metrics_stats["most_common_width"]
  width_max = glyph_metrics_stats['width_max']

  # Check for missing tables before indexing them
  missing_tables = False
  required = ["glyf", "hhea", "hmtx", "OS/2", "post"]
  for key in required:
    if key not in ttFont:
      missing_tables = True
      yield FAIL,\
            Message(f'lacks-{key}',
                    f"Font file lacks a '{key}' table.")
  if missing_tables:
    return

  # Regardless of monospacedness, hhea.advanceWidthMax must equal the
  # widest advance width actually found in the hmtx metrics.
  if ttFont['hhea'].advanceWidthMax != width_max:
    failed = True
    yield FAIL,\
          Message("bad-advanceWidthMax",
                  f"Value of hhea.advanceWidthMax"
                  f" should be set to {width_max}"
                  f" but got {ttFont['hhea'].advanceWidthMax} instead.")
  if seems_monospaced:
    # Monospaced case: post.isFixedPitch and PANOSE proportion must both
    # declare the font as monospaced.
    if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED:
      failed = True
      yield FAIL,\
            Message("mono-bad-post-isFixedPitch",
                    f"On monospaced fonts, the value of post.isFixedPitch"
                    f" must be set to a non-zero value"
                    f" (meaning 'fixed width monospaced'),"
                    f" but got {ttFont['post'].isFixedPitch} instead.")
    if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED:
      failed = True
      yield FAIL,\
            Message("mono-bad-panose-proportion",
                    f"On monospaced fonts, the value of"
                    f" OS/2.panose.bProportion"
                    f" must be set to {PANOSE_Proportion.MONOSPACED}"
                    f" (proportion: monospaced),"
                    f" but got {ttFont['OS/2'].panose.bProportion} instead.")
    # WARN about glyphs that deviate from the most common advance width
    # (a few outliers may be legitimate, so this is not a FAIL).
    num_glyphs = len(ttFont['glyf'].glyphs)
    unusually_spaced_glyphs = [
      g for g in ttFont['glyf'].glyphs
      if g not in ['.notdef', '.null', 'NULL'] and
      ttFont['hmtx'].metrics[g][0] != 0 and
      ttFont['hmtx'].metrics[g][0] != most_common_width
    ]
    outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs
    if outliers_ratio > 0:
      failed = True
      yield WARN,\
            Message("mono-outliers",
                    f"Font is monospaced"
                    f" but {len(unusually_spaced_glyphs)} glyphs"
                    f" ({100.0 * outliers_ratio:.2f}%)"
                    f" have a different width."
                    f" You should check the widths of:"
                    f" {unusually_spaced_glyphs}")
    if not failed:
      yield PASS,\
            Message("mono-good",
                    "Font is monospaced and all related metadata look good.")
  else:
    # it is a non-monospaced font, so lets make sure
    # that all monospace-related metadata is properly unset.
    if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED:
      failed = True
      yield FAIL,\
            Message("bad-post-isFixedPitch",
                    f"On non-monospaced fonts,"
                    f" the post.isFixedPitch value must be set to"
                    f" {IsFixedWidth.NOT_MONOSPACED} (not monospaced),"
                    f" but got {ttFont['post'].isFixedPitch} instead.")
    if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED:
      failed = True
      yield FAIL,\
            Message("bad-panose-proportion",
                    "On non-monospaced fonts,"
                    " the OS/2.panose.bProportion value can be set to"
                    " any value except 9 (proportion: monospaced)"
                    " which is the bad value we got in this font.")
    if not failed:
      yield PASS,\
            Message("good",
                    "Font is not monospaced and"
                    " all related metadata look good.")
@check(
  id = 'com.google.fonts/check/name/match_familyname_fullfont'
)
def com_google_fonts_check_name_match_familyname_fullfont(ttFont):
  """Does full font name begin with the font family name?"""
  from fontbakery.utils import get_name_entry_strings
  familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
  fullfontname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)

  if len(familyname) == 0:
    yield FAIL,\
          Message("no-font-family-name",
                  "Font lacks a NameID.FONT_FAMILY_NAME"
                  " entry in the 'name' table.")
  elif len(fullfontname) == 0:
    yield FAIL,\
          Message("no-full-font-name",
                  "Font lacks a NameID.FULL_FONT_NAME"
                  " entry in the 'name' table.")
  else:
    # we probably should check all found values are equivalent.
    # and, in that case, then performing the rest of the check
    # with only the first occurences of the name entries
    # will suffice:
    fullfontname = fullfontname[0]
    familyname = familyname[0]

    if not fullfontname.startswith(familyname):
      # Bug fix: the message previously swapped the two values, printing
      # the family name under FULL_FONT_NAME and vice versa.
      yield FAIL,\
            Message("does-not",
                    f"On the 'name' table, the full font name"
                    f" (NameID {NameID.FULL_FONT_NAME}"
                    f" - FULL_FONT_NAME: '{fullfontname}')"
                    f" does not begin with font family name"
                    f" (NameID {NameID.FONT_FAMILY_NAME}"
                    f" - FONT_FAMILY_NAME: '{familyname}')")
    else:
      yield PASS, "Full font name begins with the font family name."
@check(
  id = 'com.google.fonts/check/family_naming_recommendations'
)
def com_google_fonts_check_family_naming_recommendations(ttFont):
  """Font follows the family naming recommendations?"""
  # See http://forum.fontlab.com/index.php?topic=313.0
  import re
  from fontbakery.utils import get_name_entry_strings
  bad_entries = []

  # <Postscript name> may contain only a-zA-Z0-9
  # and one hyphen
  bad_psname = re.compile("[^A-Za-z0-9-]")
  for string in get_name_entry_strings(ttFont,
                                       NameID.POSTSCRIPT_NAME):
    if bad_psname.search(string):
      bad_entries.append({
        'field': 'PostScript Name',
        'value': string,
        'rec': ('May contain only a-zA-Z0-9'
                ' characters and an hyphen.')
      })
    if string.count('-') > 1:
      # Consistency fix: this entry previously used 'Postscript Name'
      # (lowercase 's') while the regex check above used 'PostScript Name'.
      # Also fixed the grammar ("not more" -> "no more").
      bad_entries.append({
        'field': 'PostScript Name',
        'value': string,
        'rec': ('May contain no more'
                ' than a single hyphen')
      })

  # Length limits per the naming recommendations, as
  # (name ID, human-readable field label, maximum allowed length) tuples.
  # This replaces six copy-pasted loops with one table-driven loop,
  # preserving the original order of the reported entries.
  length_limits = [
    (NameID.FULL_FONT_NAME, 'Full Font Name', 63),
    (NameID.POSTSCRIPT_NAME, 'PostScript Name', 63),
    (NameID.FONT_FAMILY_NAME, 'Family Name', 31),
    (NameID.FONT_SUBFAMILY_NAME, 'Style Name', 31),
    (NameID.TYPOGRAPHIC_FAMILY_NAME, 'OT Family Name', 31),
    (NameID.TYPOGRAPHIC_SUBFAMILY_NAME, 'OT Style Name', 31),
  ]
  for name_id, field, max_length in length_limits:
    for string in get_name_entry_strings(ttFont, name_id):
      if len(string) > max_length:
        bad_entries.append({
          'field': field,
          'value': string,
          'rec': f'exceeds max length ({max_length})'
        })

  if len(bad_entries) > 0:
    table = "| Field | Value | Recommendation |\n"
    table += "|:----- |:----- |:-------------- |\n"
    for bad in bad_entries:
      table += "| {} | {} | {} |\n".format(bad["field"],
                                           bad["value"],
                                           bad["rec"])
    yield INFO,\
          Message("bad-entries",
                  f"Font does not follow "
                  f"some family naming recommendations:\n"
                  f"\n"
                  f"{table}")
  else:
    yield PASS, "Font follows the family naming recommendations."
@check(
  id = 'com.adobe.fonts/check/name/postscript_vs_cff',
  conditions = ['is_cff'],
  rationale = """
    The PostScript name entries in the font's 'name' table should match the FontName string in the 'CFF ' table.
    The 'CFF ' table has a lot of information that is duplicated in other tables. This information should be consistent across tables, because there's no guarantee which table an app will get the data from.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/pull/2229'
  }
)
def com_adobe_fonts_check_name_postscript_vs_cff(ttFont):
  """CFF table FontName must match name table ID 6 (PostScript name)."""
  font_names = ttFont['CFF '].cff.fontNames
  # A well-formed (non-collection) CFF carries exactly one FontName.
  if len(font_names) != 1:
    yield ERROR, "Unexpected number of font names in CFF table."
    return

  cff_name = font_names[0]
  mismatch_found = False
  for record in ttFont['name'].names:
    if record.nameID != NameID.POSTSCRIPT_NAME:
      continue
    ps_name = record.toUnicode()
    if ps_name != cff_name:
      mismatch_found = True
      yield FAIL,\
            Message("mismatch",
                    f"Name table PostScript name '{ps_name}' "
                    f"does not match CFF table FontName '{cff_name}'.")
  if not mismatch_found:
    yield PASS, "Name table PostScript name matches CFF table FontName."
@check(
  id = 'com.adobe.fonts/check/name/postscript_name_consistency',
  conditions = ['not is_cff'], # e.g. TTF or CFF2
  rationale = """
    The PostScript name entries in the font's 'name' table should be consistent across platforms.
    This is the TTF/CFF2 equivalent of the CFF 'name/postscript_vs_cff' check.
  """,
  misc_metadata = {
    'request': 'https://github.com/googlefonts/fontbakery/pull/2394'
  }
)
def com_adobe_fonts_check_name_postscript_name_consistency(ttFont):
  """Name table ID 6 (PostScript name) must be consistent across platforms."""
  # Gather every distinct nameID 6 string across all platform records.
  postscript_names = {entry.toUnicode()
                      for entry in ttFont['name'].names
                      if entry.nameID == NameID.POSTSCRIPT_NAME}
  if len(postscript_names) > 1:
    yield FAIL,\
          Message("inconsistency",
                  f'Entries in the "name" table for ID 6'
                  f' (PostScript name) are not consistent.'
                  f' Names found: {sorted(postscript_names)}.')
  else:
    yield PASS, ('Entries in the "name" table for ID 6 '
                 '(PostScript name) are consistent.')
@check(
  id = 'com.adobe.fonts/check/family/max_4_fonts_per_family_name',
  rationale = """
    Per the OpenType spec:
    'The Font Family name [...] should be shared among at most four fonts that differ only in weight or style [...]'
  """,
)
def com_adobe_fonts_check_family_max_4_fonts_per_family_name(ttFonts):
  """Verify that each group of fonts with the same nameID 1 has maximum of 4 fonts"""
  from collections import Counter
  from fontbakery.utils import get_name_entry_strings

  # Count how many fonts use each family name (Windows/Unicode/US-English
  # records only), tallying each name at most once per font.
  family_name_counter = Counter()
  for ttFont in ttFonts:
    names_list = get_name_entry_strings(ttFont,
                                        NameID.FONT_FAMILY_NAME,
                                        PlatformID.WINDOWS,
                                        WindowsEncodingID.UNICODE_BMP,
                                        WindowsLanguageID.ENGLISH_USA)
    # names_list may contain multiple entries.
    # We use set() below to remove the duplicates and only store
    # the unique family name(s) used for a given font
    family_name_counter.update(set(names_list))

  any_failures = False
  for family_name, count in family_name_counter.items():
    if count > 4:
      any_failures = True
      yield FAIL,\
            Message("too-many",
                    f"Family '{family_name}' has {count} fonts"
                    f" (should be 4 or fewer).")
  if not any_failures:
    yield PASS, "There were no more than 4 fonts per family name."
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/name.py",
"copies": "1",
"size": "18726",
"license": "apache-2.0",
"hash": 1105248275282360400,
"line_mean": 41.3665158371,
"line_max": 342,
"alpha_frac": 0.5499839795,
"autogenerated": false,
"ratio": 4.109282422646478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003717997986355035,
"num_lines": 442
} |
from fontbakery.callable import check
from fontbakery.status import FAIL, INFO, PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@check(
    id = 'com.google.fonts/check/kern_table',
    rationale = """
    Even though all fonts should have their kerning implemented in the GPOS table, there may be kerning info at the kern table as well.
    Some applications such as MS PowerPoint require kerning info on the kern table. More specifically, they require a format 0 kern subtable from a kern table version 0 with only glyphs defined in the cmap table, which is the only one that Windows understands (and which is also the simplest and more limited of all the kern subtables).
    Google Fonts ingests fonts made for download and use on desktops, and does all web font optimizations in the serving pipeline (using libre libraries that anyone can replicate.)
    Ideally, TTFs intended for desktop users (and thus the ones intended for Google Fonts) should have both KERN and GPOS tables.
    Given all of the above, we currently treat kerning on a v0 kern table as a good-to-have (but optional) feature.
    """,
    misc_metadata = {
        'request': [
            'https://github.com/googlefonts/fontbakery/issues/1675',
            'https://github.com/googlefonts/fontbakery/issues/3148'
        ]
    }
)
def com_google_fonts_check_kern_table(ttFont):
    """Is there a usable "kern" table declared in the font?"""
    kern = ttFont.get("kern")
    if not kern:
        yield PASS, 'Font does not declare an optional "kern" table.'
        return

    encoded_glyphs = set(ttFont.getBestCmap().values())
    format0_subtables = [subtable for subtable in kern.kernTables
                         if subtable.format == 0]

    # Glyphs kerned by a format-0 subtable but not reachable via the cmap:
    # Windows only honours format-0 entries for encoded glyphs.
    unencoded = set()
    for subtable in format0_subtables:
        for left, right in subtable.kernTable.keys():
            if left not in encoded_glyphs:
                unencoded.add(left)
            if right not in encoded_glyphs:
                unencoded.add(right)

    if not format0_subtables:
        yield WARN,\
              Message("kern-unknown-format",
                      'The "kern" table does not have any format-0 subtable '
                      'and will not work in a few programs that may require '
                      'the table.')
    elif unencoded:
        yield FAIL,\
              Message("kern-non-character-glyphs",
                      'The following glyphs should not be used in the "kern" '
                      'table because they are not in the "cmap" table: %s'
                      % ', '.join(sorted(unencoded)))
    else:
        yield INFO,\
              Message("kern-found",
                      'Only a few programs may require the kerning'
                      ' info that this font provides on its "kern" table.')
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/kern.py",
"copies": "1",
"size": "3075",
"license": "apache-2.0",
"hash": 8378634742455424000,
"line_mean": 51.1186440678,
"line_max": 340,
"alpha_frac": 0.6305691057,
"autogenerated": false,
"ratio": 4.300699300699301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009229758614901978,
"num_lines": 59
} |
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
@check(
    id = 'com.google.fonts/check/family/equal_unicode_encodings'
)
def com_google_fonts_check_family_equal_unicode_encodings(ttFonts):
    """Fonts have equal unicode encodings?"""
    encoding = None
    failed = False
    for ttFont in ttFonts:
        # Locate this font's format-4 cmap subtable.
        cmap = None
        for table in ttFont['cmap'].tables:
            if table.format == 4:
                cmap = table
                break
        # Could a font lack a format 4 cmap table ?
        # If we ever find one of those, it would crash the check here.
        # Then we'd have to yield a FAIL regarding the missing table entry.
        if encoding is None:
            # Bugfix: this used to be `if not encoding`, which re-captured
            # the encoding on *every* font whenever platEncID was 0 (a falsy
            # but valid encoding ID), so real mismatches could go undetected.
            encoding = cmap.platEncID
        if encoding != cmap.platEncID:
            failed = True
    if failed:
        yield FAIL,\
              Message("mismatch",
                      "Fonts have different unicode encodings.")
    else:
        yield PASS, "Fonts have equal unicode encodings."
# This check was originally ported from
# Mekkablue Preflight Checks available at:
# https://github.com/mekkablue/Glyphs-Scripts/blob/master/Test/Preflight%20Font.py
@check(
    id = 'com.google.fonts/check/all_glyphs_have_codepoints',
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/735'
    }
)
def com_google_fonts_check_all_glyphs_have_codepoints(ttFont):
    """Check all glyphs have codepoints assigned."""
    failed = False
    for subtable in ttFont['cmap'].tables:
        if subtable.isUnicode():
            for codepoint, glyph_name in subtable.cmap.items():
                if codepoint is None:
                    failed = True
                    # Bugfix: identify the glyph by name. The previous message
                    # interpolated `codepoint`, which is always None in this
                    # branch, producing the useless text "Glyph None".
                    yield FAIL,\
                          Message("glyph-lacks-codepoint",
                                  f"Glyph {glyph_name} lacks a unicode"
                                  f" codepoint assignment.")
    if not failed:
        yield PASS, "All glyphs have a codepoint value assigned."
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/cmap.py",
"copies": "1",
"size": "2233",
"license": "apache-2.0",
"hash": 3617692398178430500,
"line_mean": 36.8474576271,
"line_max": 89,
"alpha_frac": 0.6171070309,
"autogenerated": false,
"ratio": 4.173831775700934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5290938806600934,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS, SKIP, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('glyph_metrics_stats', 'is_ttf'))
]
@check(
    id = 'com.google.fonts/check/linegaps'
)
def com_google_fonts_check_linegaps(ttFont):
    """Checking Vertical Metric Linegaps."""
    # Bugfix: the two fields were previously tested with if/elif, so a
    # non-zero hhea.lineGap silently hid a simultaneously non-zero
    # OS/2.sTypoLineGap. Test both independently and warn for each.
    passed = True
    if ttFont["hhea"].lineGap != 0:
        passed = False
        yield WARN,\
              Message("hhea",
                      "hhea lineGap is not equal to 0.")
    if ttFont["OS/2"].sTypoLineGap != 0:
        passed = False
        yield WARN,\
              Message("OS/2",
                      "OS/2 sTypoLineGap is not equal to 0.")
    if passed:
        yield PASS, "OS/2 sTypoLineGap and hhea lineGap are both 0."
@check(
    id = 'com.google.fonts/check/maxadvancewidth'
)
def com_google_fonts_check_maxadvancewidth(ttFont):
    """MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?"""
    declared_max = ttFont['hhea'].advanceWidthMax

    # Recompute the true maximum advance width from the hmtx metrics.
    computed_max = None
    for advance, _lsb in ttFont['hmtx'].metrics.values():
        if computed_max is None:
            computed_max = max(0, advance)
        else:
            computed_max = max(advance, computed_max)

    if computed_max != declared_max:
        yield FAIL,\
              Message("mismatch",
                      f"AdvanceWidthMax mismatch:"
                      f" expected {computed_max} (from hmtx);"
                      f" got {declared_max} (from hhea)")
    else:
        yield PASS, ("MaxAdvanceWidth is consistent"
                     " with values in the Hmtx and Hhea tables.")
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/hhea.py",
"copies": "1",
"size": "1847",
"license": "apache-2.0",
"hash": 5519955415518569000,
"line_mean": 36.693877551,
"line_max": 89,
"alpha_frac": 0.6199242014,
"autogenerated": false,
"ratio": 3.4203703703703705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.454029457177037,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS, WARN, INFO
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('vmetrics', )),
('.googlefonts_conditions', ('RIBBI_ttFonts', ))
]
@check(
    id = 'com.google.fonts/check/family/panose_proportion'
)
def com_google_fonts_check_family_panose_proportion(ttFonts):
    """Fonts have consistent PANOSE proportion?"""
    reference = None
    lacks_os2 = False
    consistent = True
    for font in ttFonts:
        if "OS/2" not in font:
            # A font without OS/2 both triggers its own FAIL and
            # counts as an inconsistency (matching legacy behavior).
            lacks_os2 = True
            consistent = False
            continue
        value = font['OS/2'].panose.bProportion
        if reference is None:
            reference = value
        elif value != reference:
            consistent = False
    if lacks_os2:
        yield FAIL,\
              Message("lacks-OS/2",
                      "One or more fonts lack the required OS/2 table.")
    if not consistent:
        yield FAIL,\
              Message("inconsistency",
                      "PANOSE proportion is not the same across this family."
                      " In order to fix this, please make sure that"
                      " the panose.bProportion value is the same"
                      " in the OS/2 table of all of this family font files.")
    else:
        yield PASS, "Fonts have consistent PANOSE proportion."
@check(
    id = 'com.google.fonts/check/family/panose_familytype'
)
def com_google_fonts_check_family_panose_familytype(ttFonts):
    """Fonts have consistent PANOSE family type?"""
    reference = None
    lacks_os2 = False
    consistent = True
    for font in ttFonts:
        if "OS/2" not in font:
            # A font without OS/2 both triggers its own FAIL and
            # counts as an inconsistency (matching legacy behavior).
            lacks_os2 = True
            consistent = False
            continue
        value = font['OS/2'].panose.bFamilyType
        if reference is None:
            reference = value
        elif value != reference:
            consistent = False
    if lacks_os2:
        yield FAIL,\
              Message("lacks-OS/2",
                      "One or more fonts lack the required OS/2 table.")
    if not consistent:
        yield FAIL,\
              Message("inconsistency",
                      "PANOSE family type is not the same across this family."
                      " In order to fix this, please make sure that"
                      " the panose.bFamilyType value is the same"
                      " in the OS/2 table of all of this family font files.")
    else:
        yield PASS, "Fonts have consistent PANOSE family type."
@check(
    id = 'com.google.fonts/check/xavgcharwidth',
    conditions = ['is_ttf']
)
def com_google_fonts_check_xavgcharwidth(ttFont):
    """Check if OS/2 xAvgCharWidth is correct."""
    # Recomputes the expected average character width per the rules for
    # the font's OS/2 table version and compares it against the stored
    # value, tolerating small rounding deviations.
    if "OS/2" not in ttFont:
        yield FAIL,\
              Message("lacks-OS/2",
                      "Required OS/2 table is missing.")
        return
    current_value = ttFont['OS/2'].xAvgCharWidth
    ACCEPTABLE_ERROR = 10 # Width deviation tolerance in font units
    # Since version 3, the average is computed using _all_ glyphs in a font.
    if ttFont['OS/2'].version >= 3:
        calculation_rule = "the average of the widths of all glyphs in the font"
        if not ttFont['hmtx'].metrics: # May contain just '.notdef', which is valid.
            yield FAIL,\
                  Message("missing-glyphs",
                          "CRITICAL: Found no glyph width data in the hmtx table!")
            return
        width_sum = 0
        count = 0
        for glyph_id in ttFont['glyf'].glyphs: # At least .notdef must be present.
            width = ttFont['hmtx'].metrics[glyph_id][0]
            # The OpenType spec doesn't exclude negative widths, but only positive
            # widths seems to be the assumption in the wild?
            if width > 0:
                count += 1
                width_sum += width
        # Plain arithmetic mean over the positive-width glyphs.
        expected_value = int(round(width_sum / count))
    else: # Version 2 and below only consider lowercase latin glyphs and space.
        calculation_rule = ("the weighted average of the widths of the latin"
                            " lowercase glyphs in the font")
        # Per-glyph weights; presumably the factors prescribed by the OS/2
        # spec for pre-v3 tables -- TODO confirm against the spec.
        weightFactors = {
            'a': 64,
            'b': 14,
            'c': 27,
            'd': 35,
            'e': 100,
            'f': 20,
            'g': 14,
            'h': 42,
            'i': 63,
            'j': 3,
            'k': 6,
            'l': 35,
            'm': 20,
            'n': 56,
            'o': 56,
            'p': 17,
            'q': 4,
            'r': 49,
            's': 56,
            't': 71,
            'u': 31,
            'v': 10,
            'w': 18,
            'x': 3,
            'y': 18,
            'z': 2,
            'space': 166
        }
        # All weighted glyphs must exist, otherwise the average is undefined.
        glyph_order = ttFont.getGlyphOrder()
        if not all(character in glyph_order for character in weightFactors):
            yield FAIL,\
                  Message("missing-glyphs",
                          "Font is missing the required"
                          " latin lowercase letters and/or space.")
            return
        width_sum = 0
        for glyph_id in weightFactors:
            width = ttFont['hmtx'].metrics[glyph_id][0]
            width_sum += (width * weightFactors[glyph_id])
        # The weight factors sum to 1000, hence the division by 1000.0.
        expected_value = int(width_sum / 1000.0 + 0.5) # round to closest int
    difference = abs(current_value - expected_value)
    # We accept matches and off-by-ones due to rounding as correct.
    if current_value == expected_value or difference == 1:
        yield PASS, "OS/2 xAvgCharWidth value is correct."
    elif difference < ACCEPTABLE_ERROR:
        # Close but not exact: most likely a font-editor rounding quirk.
        yield INFO, \
              Message("xAvgCharWidth-close",
                      f"OS/2 xAvgCharWidth is {current_value} but it should be"
                      f" {expected_value} which corresponds to {calculation_rule}."
                      f" These are similar values, which"
                      f" may be a symptom of the slightly different"
                      f" calculation of the xAvgCharWidth value in"
                      f" font editors. There's further discussion on"
                      f" this at https://github.com/googlefonts/fontbakery"
                      f"/issues/1622")
    else:
        yield WARN, \
              Message("xAvgCharWidth-wrong",
                      f"OS/2 xAvgCharWidth is {current_value} but it should be"
                      f" {expected_value} which corresponds to {calculation_rule}.")
@check(
    id = 'com.adobe.fonts/check/fsselection_matches_macstyle',
    rationale = """
    The bold and italic bits in OS/2.fsSelection must match the bold and italic bits in head.macStyle per the OpenType spec.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/pull/2382'
    }
)
def com_adobe_fonts_check_fsselection_matches_macstyle(ttFont):
    """Check if OS/2 fsSelection matches head macStyle bold and italic bits."""
    # Both tables must be present before the bits can be compared.
    missing = [table for table in ("OS/2", "head") if table not in ttFont]
    for table in missing:
        yield FAIL,\
              Message(f'lacks-{table}',
                      f"The '{table}' table is missing.")
    if missing:
        return
    from fontbakery.constants import FsSelection, MacStyle
    mac_style = ttFont['head'].macStyle
    fs_selection = ttFont['OS/2'].fsSelection
    mismatched = False
    if bool(mac_style & MacStyle.BOLD) != bool(fs_selection & FsSelection.BOLD):
        mismatched = True
        yield FAIL, \
              Message("fsselection-macstyle-bold",
                      "The OS/2.fsSelection and head.macStyle " \
                      "bold settings do not match.")
    if bool(mac_style & MacStyle.ITALIC) != bool(fs_selection & FsSelection.ITALIC):
        mismatched = True
        yield FAIL, \
              Message("fsselection-macstyle-italic",
                      "The OS/2.fsSelection and head.macStyle " \
                      "italic settings do not match.")
    if not mismatched:
        yield PASS, ("The OS/2.fsSelection and head.macStyle "
                     "bold and italic settings match.")
@check(
    id = 'com.adobe.fonts/check/family/bold_italic_unique_for_nameid1',
    conditions=['RIBBI_ttFonts'],
    rationale = """
    Per the OpenType spec: name ID 1 'is used in combination with Font Subfamily name (name ID 2), and should be shared among at most four fonts that differ only in weight or style...
    This four-way distinction should also be reflected in the OS/2.fsSelection field, using bits 0 and 5.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/pull/2388'
    }
)
def com_adobe_fonts_check_family_bold_italic_unique_for_nameid1(RIBBI_ttFonts):
    """Check that OS/2.fsSelection bold & italic settings are unique
    for each NameID1"""
    from collections import Counter
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import NameID, FsSelection

    # Each entry pairs a family name with the (bold, italic) style key
    # of the font that declares it.
    observed_pairs = []
    for font in RIBBI_ttFonts:
        fs_selection = font['OS/2'].fsSelection
        is_bold = (fs_selection & FsSelection.BOLD) != 0
        is_italic = (fs_selection & FsSelection.ITALIC) != 0
        style_key = 'Bold=%r, Italic=%r' % (is_bold, is_italic)
        # De-duplicate the name records so each font contributes each of
        # its distinct family names exactly once.
        for family in set(get_name_entry_strings(font, NameID.FONT_FAMILY_NAME)):
            observed_pairs.append((family, style_key,))

    duplicates_found = False
    for (family, style_key), count in Counter(observed_pairs).items():
        if count > 1:
            duplicates_found = True
            yield FAIL, \
                  Message("unique-fsselection",
                          f"Family '{family}' has {count} fonts"
                          f" (should be no more than 1) with the"
                          f" same OS/2.fsSelection bold & italic settings:"
                          f" {style_key}")
    if not duplicates_found:
        yield PASS, ("The OS/2.fsSelection bold & italic settings were unique "
                     "within each compatible family group.")
@check(
    id = 'com.google.fonts/check/code_pages',
    rationale = """
    At least some programs (such as Word and Sublime Text) under Windows 7 do not recognize fonts unless code page bits are properly set on the ulCodePageRange1 (and/or ulCodePageRange2) fields of the OS/2 table.
    More specifically, the fonts are selectable in the font menu, but whichever Windows API these applications use considers them unsuitable for any character set, so anything set in these fonts is rendered with a fallback font of Arial.
    This check currently does not identify which code pages should be set. Auto-detecting coverage is not trivial since the OpenType specification leaves the interpretation of whether a given code page is "functional" or not open to the font developer to decide.
    So here we simply detect as a FAIL when a given font has no code page declared at all.
    """
)
def com_google_fonts_check_code_pages(ttFont):
    """Check code page character ranges"""
    if "OS/2" not in ttFont:
        yield FAIL,\
              Message("lacks-OS/2",
                      "The required OS/2 table is missing.")
        return
    os2 = ttFont['OS/2']
    # Fonts with an older OS/2 table version may lack these fields
    # entirely; treat that the same as having no code page bits set.
    if not hasattr(os2, "ulCodePageRange1") or \
       not hasattr(os2, "ulCodePageRange2") or \
       (os2.ulCodePageRange1 == 0 and
        os2.ulCodePageRange2 == 0):
        # Bugfix: the message previously referred to "CodePageRange2",
        # which is not the actual field name (ulCodePageRange2).
        yield FAIL, \
              Message("no-code-pages",
                      "No code pages defined in the OS/2 table"
                      " ulCodePageRange1 and ulCodePageRange2 fields.")
    else:
        yield PASS, "At least one code page is defined."
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/os2.py",
"copies": "1",
"size": "12482",
"license": "apache-2.0",
"hash": 6095906120533072000,
"line_mean": 38.251572327,
"line_max": 266,
"alpha_frac": 0.5799551354,
"autogenerated": false,
"ratio": 4.00834938985228,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5088304525252281,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS, WARN
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
profile_imports = [
('.shared_conditions', ('is_variable_font'
, 'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord'
, 'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord'))
]
@check(
    id = 'com.google.fonts/check/varfont/regular_wght_coord',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'wght' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wght
    If a variable font has a 'wght' (Weight) axis, then the coordinate of its 'Regular' instance is required to be 400.
    """,
    conditions = ['is_variable_font',
                  'regular_wght_coord'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
    }
)
def com_google_fonts_check_varfont_regular_wght_coord(ttFont, regular_wght_coord):
    """The variable font 'wght' (Weight) axis coordinate must be 400 on the
    'Regular' instance."""
    # Any value other than the spec-mandated 400 is a failure.
    if regular_wght_coord != 400:
        yield FAIL,\
              Message("not-400",
                      f'The "wght" axis coordinate of'
                      f' the "Regular" instance must be 400.'
                      f' Got {regular_wght_coord} instead.')
    else:
        yield PASS, "Regular:wght is 400."
@check(
    id = 'com.google.fonts/check/varfont/regular_wdth_coord',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'wdth' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wdth
    If a variable font has a 'wdth' (Width) axis, then the coordinate of its 'Regular' instance is required to be 100.
    """,
    conditions = ['is_variable_font',
                  'regular_wdth_coord'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
    }
)
def com_google_fonts_check_varfont_regular_wdth_coord(ttFont, regular_wdth_coord):
    """The variable font 'wdth' (Width) axis coordinate must be 100 on the 'Regular' instance."""
    # Any value other than the spec-mandated 100 is a failure.
    if regular_wdth_coord != 100:
        yield FAIL,\
              Message("not-100",
                      f'The "wdth" coordinate of'
                      f' the "Regular" instance must be 100.'
                      f' Got {regular_wdth_coord} as a default value instead.')
    else:
        yield PASS, "Regular:wdth is 100."
@check(
    id = 'com.google.fonts/check/varfont/regular_slnt_coord',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'slnt' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_slnt
    If a variable font has a 'slnt' (Slant) axis, then the coordinate of its 'Regular' instance is required to be zero.
    """,
    conditions = ['is_variable_font',
                  'regular_slnt_coord'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
    }
)
def com_google_fonts_check_varfont_regular_slnt_coord(ttFont, regular_slnt_coord):
    """The variable font 'slnt' (Slant) axis coordinate must be zero on the 'Regular' instance."""
    # Any non-zero slant on the Regular instance is a failure.
    if regular_slnt_coord != 0:
        yield FAIL,\
              Message("non-zero",
                      f'The "slnt" coordinate of'
                      f' the "Regular" instance must be zero.'
                      f' Got {regular_slnt_coord} as a default value instead.')
    else:
        yield PASS, "Regular:slnt is zero."
@check(
    id = 'com.google.fonts/check/varfont/regular_ital_coord',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'ital' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_ital
    If a variable font has a 'ital' (Italic) axis, then the coordinate of its 'Regular' instance is required to be zero.
    """,
    conditions = ['is_variable_font',
                  'regular_ital_coord'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
    }
)
def com_google_fonts_check_varfont_regular_ital_coord(ttFont, regular_ital_coord):
    """The variable font 'ital' (Italic) axis coordinate must be zero on the 'Regular' instance."""
    # Any non-zero italic value on the Regular instance is a failure.
    if regular_ital_coord != 0:
        yield FAIL,\
              Message("non-zero",
                      f'The "ital" coordinate of'
                      f' the "Regular" instance must be zero.'
                      f' Got {regular_ital_coord} as a default value instead.')
    else:
        yield PASS, "Regular:ital is zero."
@check(
    id = 'com.google.fonts/check/varfont/regular_opsz_coord',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'opsz' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_opsz
    If a variable font has an 'opsz' (Optical Size) axis, then the coordinate of its 'Regular' instance is recommended to be a value in the range 10 to 16.
    """,
    conditions = ['is_variable_font',
                  'regular_opsz_coord'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
    }
)
def com_google_fonts_check_varfont_regular_opsz_coord(ttFont, regular_opsz_coord):
    """The variable font 'opsz' (Optical Size) axis coordinate should be between 10 and 16 on the 'Regular' instance."""
    if 10 <= regular_opsz_coord <= 16:
        # Bugfix: this message was missing its f-string prefix, so the
        # literal text "{regular_opsz_coord}" was shown instead of the value.
        yield PASS, (f"Regular:opsz coordinate ({regular_opsz_coord}) looks good.")
    else:
        yield WARN,\
              Message("out-of-range",
                      f'The "opsz" (Optical Size) coordinate'
                      f' on the "Regular" instance is recommended'
                      f' to be a value in the range 10 to 16.'
                      f' Got {regular_opsz_coord} instead.')
@check(
    id = 'com.google.fonts/check/varfont/bold_wght_coord',
    rationale = """
    The Open-Type spec's registered design-variation tag 'wght' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wght does not specify a required value for the 'Bold' instance of a variable font.
    But Dave Crossland suggested that we should enforce a required value of 700 in this case.
    """,
    conditions = ['is_variable_font',
                  'bold_wght_coord'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/1707'
    }
)
def com_google_fonts_check_varfont_bold_wght_coord(ttFont, bold_wght_coord):
    """The variable font 'wght' (Weight) axis coordinate must be 700 on the 'Bold' instance."""
    # Any value other than the enforced 700 is a failure.
    if bold_wght_coord != 700:
        yield FAIL,\
              Message("not-700",
                      f'The "wght" axis coordinate of'
                      f' the "Bold" instance must be 700.'
                      f' Got {bold_wght_coord} instead.')
    else:
        yield PASS, "Bold:wght is 700."
@check(
    id = 'com.google.fonts/check/varfont/wght_valid_range',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'wght' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wght
    On the 'wght' (Weight) axis, the valid coordinate range is 1-1000.
    """,
    conditions = ['is_variable_font'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2264'
    }
)
def com_google_fonts_check_varfont_wght_valid_range(ttFont):
    """The variable font 'wght' (Weight) axis coordinate
    must be within spec range of 1 to 1000 on all instances."""
    for instance in ttFont['fvar'].instances:
        coord = instance.coordinates.get('wght')
        # Only the first out-of-range coordinate is reported.
        if coord is not None and not 1 <= coord <= 1000:
            yield FAIL,\
                  Message("out-of-range",
                          f'Found a bad "wght" coordinate with value {coord}'
                          f' outside of the valid range from 1 to 1000.')
            return
    yield PASS, "OK"
@check(
    id = 'com.google.fonts/check/varfont/wdth_valid_range',
    rationale = """
    According to the Open-Type spec's registered design-variation tag 'wdth' available at https://docs.microsoft.com/en-gb/typography/opentype/spec/dvaraxistag_wdth
    On the 'wdth' (Width) axis, the valid coordinate range is 1-1000
    """,
    conditions = ['is_variable_font']
)
def com_google_fonts_check_varfont_wdth_valid_range(ttFont):
    """The variable font 'wdth' (Width) axis coordinate
    must be within spec range of 1 to 1000 on all instances."""
    for instance in ttFont['fvar'].instances:
        coord = instance.coordinates.get('wdth')
        # Only the first out-of-range coordinate is reported.
        if coord is not None and not 1 <= coord <= 1000:
            yield FAIL,\
                  Message("out-of-range",
                          f'Found a bad "wdth" coordinate with value {coord}'
                          f' outside of the valid range from 1 to 1000.')
            return
    yield PASS, "OK"
@check(
    id = 'com.google.fonts/check/varfont/slnt_range',
    rationale = """
    The OpenType spec says at https://docs.microsoft.com/en-us/typography/opentype/spec/dvaraxistag_slnt that:
    [...] the scale for the Slant axis is interpreted as the angle of slant in counter-clockwise degrees from upright. This means that a typical, right-leaning oblique design will have a negative slant value. This matches the scale used for the italicAngle field in the post table.
    """,
    conditions = ['is_variable_font',
                  'slnt_axis'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2572'
    }
)
def com_google_fonts_check_varfont_slnt_range(ttFont, slnt_axis):
    """ The variable font 'slnt' (Slant) axis coordinate
    specifies positive values in its range? """
    # A typical right-leaning oblique needs negative values, so the
    # range is expected to extend below zero while still covering zero.
    includes_negative_slants = (slnt_axis.minValue < 0
                                and slnt_axis.maxValue >= 0)
    if includes_negative_slants:
        yield PASS, "Looks good!"
    else:
        yield WARN,\
              Message("unusual-range",
                      f'The range of values for the "slnt" axis in'
                      f' this font only allows positive coordinates'
                      f' (from {slnt_axis.minValue} to {slnt_axis.maxValue}),'
                      f' indicating that this may be a back slanted design,'
                      f' which is rare. If that\'s not the case, then'
                      f' the "slant" axis should be a range of'
                      f' negative values instead.')
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/fvar.py",
"copies": "1",
"size": "11057",
"license": "apache-2.0",
"hash": 7585205092668191000,
"line_mean": 41.5269230769,
"line_max": 285,
"alpha_frac": 0.616261192,
"autogenerated": false,
"ratio": 3.7392627663172133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48555239583172133,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS, WARN, SKIP
from fontbakery.message import Message
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
def _is_non_spacing_mark_char(charcode):
    """Classify a codepoint: True for a non-spacing mark, False for any
    other non-control character, None for control characters."""
    from fontTools import unicodedata
    cat = unicodedata.category(chr(charcode))
    # Control characters are neither marks nor non-marks; return None so
    # callers can distinguish "not applicable" from False.
    if cat.startswith("C"):
        return None
    # Non-spacing marks carry the Unicode General_Category
    # Mn (Nonspacing_Mark) or Me (Enclosing_Mark).
    # Mc (Spacing_Mark) is deliberately excluded: those glyphs advance.
    return cat in ("Mn", "Me")
def _get_mark_class_glyphnames(ttFont):
    """Return the set of glyph names registered in the GDEF mark glyph class."""
    from fontbakery.constants import GlyphClass
    glyph_classes = ttFont["GDEF"].table.GlyphClassDef.classDefs
    return {glyph_name
            for glyph_name, glyph_class in glyph_classes.items()
            if glyph_class == GlyphClass.MARK}
@check(
    id = 'com.google.fonts/check/gdef_spacing_marks',
    rationale = """
    Glyphs in the GDEF mark glyph class should be non-spacing.
    Spacing glyphs in the GDEF mark glyph class may have incorrect anchor positioning that was only intended for building composite glyphs during design.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2877'
    }
)
def com_google_fonts_check_gdef_spacing_marks(ttFont):
    """Check glyphs in the GDEF mark glyph class are non-spacing."""
    # NOTE: the docstring previously duplicated the gdef_mark_chars check's
    # wording (with a stray closing paren); fixed to describe this check.
    from fontbakery.utils import pretty_print_list
    if "GDEF" in ttFont and ttFont["GDEF"].table.GlyphClassDef:
        # Glyphs with a positive advance width are "spacing" glyphs.
        spacing_glyphnames = {name
                              for (name, (width, lsb)) in ttFont["hmtx"].metrics.items()
                              if width > 0}
        mark_class_glyphnames = _get_mark_class_glyphnames(ttFont)
        spacing_glyphnames_in_mark_glyph_class = spacing_glyphnames & mark_class_glyphnames
        if spacing_glyphnames_in_mark_glyph_class:
            formatted_list = "\t " +\
                pretty_print_list(sorted(spacing_glyphnames_in_mark_glyph_class),
                                  shorten=10,
                                  sep=", ")
            yield WARN,\
                  Message('spacing-mark-glyphs',
                          f"The following spacing glyphs may be in"
                          f" the GDEF mark glyph class by mistake:\n"
                          f"{formatted_list}")
        else:
            # Bugfix: grammar of the PASS message ("does not has" -> "have").
            yield PASS, ('Font does not have spacing glyphs'
                         ' in the GDEF mark glyph class.')
    else:
        # Bugfix: grammar of the SKIP message ("or has any" -> "or has no").
        yield SKIP, ('Font does not declare an optional "GDEF" table'
                     ' or has no GDEF glyph class definition.')
@check(
    id = 'com.google.fonts/check/gdef_mark_chars',
    rationale = """
    Mark characters should be in the GDEF mark glyph class.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2877'
    }
)
def com_google_fonts_check_gdef_mark_chars(ttFont):
    """Check mark characters are in GDEF mark glyph class"""
    from fontbakery.utils import pretty_print_list
    # Guard clause: nothing to check without a populated GDEF GlyphClassDef.
    if "GDEF" not in ttFont or not ttFont["GDEF"].table.GlyphClassDef:
        yield SKIP, ('Font does not declare an optional "GDEF" table'
                     ' or has any GDEF glyph class definition.')
        return
    cmap = ttFont.getBestCmap()
    mark_glyphs = _get_mark_class_glyphnames(ttFont)
    # Encoded characters that Unicode classifies as non-spacing marks
    # but whose glyphs are missing from the GDEF mark class.
    misclassified = {
        cp for cp in cmap
        if _is_non_spacing_mark_char(cp) is True and cmap[cp] not in mark_glyphs
    }
    if misclassified:
        formatted_marks = "\t " +\
            pretty_print_list(sorted("U+%04X" % cp for cp in misclassified),
                              shorten=None,
                              sep=", ")
        yield WARN,\
              Message('mark-chars',
                      f"The following mark characters could be"
                      f" in the GDEF mark glyph class:\n"
                      f"{formatted_marks}")
    else:
        yield PASS, ('Font does not have mark characters'
                     ' not in the GDEF mark glyph class.')
@check(
    id = 'com.google.fonts/check/gdef_non_mark_chars',
    rationale = """
    Glyphs in the GDEF mark glyph class become non-spacing and may be repositioned if they have mark anchors.
    Only combining mark glyphs should be in that class. Any non-mark glyph must not be in that class, in particular spacing glyphs.
    """,
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2877'
    }
)
def com_google_fonts_check_gdef_non_mark_chars(ttFont):
    """Check GDEF mark glyph class doesn't have characters that are not marks)"""
    from fontbakery.utils import pretty_print_list
    # Guard clause: nothing to check without a populated GDEF GlyphClassDef.
    if "GDEF" not in ttFont or not ttFont["GDEF"].table.GlyphClassDef:
        yield SKIP, ('Font does not declare an optional "GDEF" table'
                     ' or has any GDEF glyph class definition.')
        return
    cmap = ttFont.getBestCmap()
    # Characters that are definitely NOT non-spacing marks. Control
    # characters map to None and are excluded by the `is False` test.
    nonmark_chars = {cp for cp in cmap
                     if _is_non_spacing_mark_char(cp) is False}
    nonmark_glyphnames = {cmap[cp] for cp in nonmark_chars}
    # Reverse mapping: glyph name -> set of codepoints encoded to it
    # (several codepoints may share one glyph).
    chars_by_glyphname = {}
    for cp, glyphname in cmap.items():
        chars_by_glyphname.setdefault(glyphname, set()).add(cp)
    offending_glyphnames = nonmark_glyphnames & _get_mark_class_glyphnames(ttFont)
    if offending_glyphnames:
        offending_chars = {
            cp
            for glyphname in offending_glyphnames
            for cp in chars_by_glyphname[glyphname]
            if cp in nonmark_chars
        }
        formatted_nonmarks = "\t " +\
            pretty_print_list(sorted("U+%04X" % cp for cp in offending_chars),
                              shorten=None,
                              sep=", ")
        yield WARN,\
              Message('non-mark-chars',
                      f"The following non-mark characters should"
                      f" not be in the GDEF mark glyph class:\n"
                      f"{formatted_nonmarks}")
    else:
        yield PASS, ('Font does not have non-mark characters'
                     ' in the GDEF mark glyph class.')
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/gdef.py",
"copies": "1",
"size": "7117",
"license": "apache-2.0",
"hash": 3950665092659042300,
"line_mean": 43.2049689441,
"line_max": 157,
"alpha_frac": 0.5710271182,
"autogenerated": false,
"ratio": 4.1377906976744185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5208817815874418,
"avg_score": null,
"num_lines": null
} |
from fontbakery.callable import check
from fontbakery.status import PASS, FAIL
from fontbakery.message import Message
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
from opentypespec.tags import FEATURE_TAGS, SCRIPT_TAGS, LANGUAGE_TAGS
def feature_tags(ttFont):
    """Return the set of OpenType feature tags present in GSUB and GPOS."""
    tags = set()
    for table_tag in ("GSUB", "GPOS"):
        layout_table = ttFont.get(table_tag)
        if layout_table and layout_table.table.FeatureList:
            tags.update(record.FeatureTag
                        for record in layout_table.table.FeatureList.FeatureRecord)
    return tags
# Registered OpenType feature tags that are deprecated in the spec.
DEPRECATED_TAGS = ["hngl", "opbd", "size"]
@check(
    id = "com.google.fonts/check/layout_valid_feature_tags",
    rationale = """
    Incorrect tags can be indications of typos, leftover debugging code or questionable approaches, or user error in the font editor. Such typos can cause features and language support to fail to work as intended.
    """,
    misc_metadata = {
        "request": ["https://github.com/googlefonts/fontbakery/issues/3355"],
        "severity": 8,
    },
)
def com_google_fonts_check_layout_valid_feature_tags(ttFont):
    """Does the font have any invalid feature tags?"""
    # Any tag not present in the registered FEATURE_TAGS mapping is invalid.
    bad_tags = {tag for tag in feature_tags(ttFont) if tag not in FEATURE_TAGS}
    if bad_tags:
        yield FAIL, \
            Message("bad-feature-tags",
                    "The following invalid feature tags were found in the font: "
                    + ", ".join(bad_tags))
    else:
        yield PASS, "No invalid feature tags were found"
def script_tags(ttFont):
    """Return the set of OpenType script tags present in GSUB and GPOS."""
    tags = set()
    for table_tag in ("GSUB", "GPOS"):
        layout_table = ttFont.get(table_tag)
        if layout_table and layout_table.table.ScriptList:
            tags.update(record.ScriptTag
                        for record in layout_table.table.ScriptList.ScriptRecord)
    return tags
@check(
    id = "com.google.fonts/check/layout_valid_script_tags",
    rationale = """
    Incorrect script tags can be indications of typos, leftover debugging code or questionable approaches, or user error in the font editor. Such typos can cause features and language support to fail to work as intended.
    """,
    misc_metadata = {
        "request": ["https://github.com/googlefonts/fontbakery/issues/3355"],
        "severity": 8,
    },
)
def com_google_fonts_check_layout_valid_script_tags(ttFont):
    """Does the font have any invalid script tags?"""
    # Any tag not present in the registered SCRIPT_TAGS mapping is invalid.
    bad_tags = {tag for tag in script_tags(ttFont) if tag not in SCRIPT_TAGS}
    if bad_tags:
        yield FAIL, \
            Message("bad-script-tags",
                    "The following invalid script tags were found in the font: "
                    + ", ".join(bad_tags))
    else:
        yield PASS, "No invalid script tags were found"
def language_tags(ttFont):
    """Return the set of OpenType language-system tags present in GSUB and GPOS."""
    tags = set()
    for table_tag in ("GSUB", "GPOS"):
        layout_table = ttFont.get(table_tag)
        if layout_table and layout_table.table.ScriptList:
            for script_record in layout_table.table.ScriptList.ScriptRecord:
                for langsys_record in script_record.Script.LangSysRecord:
                    tags.add(langsys_record.LangSysTag)
    return tags
@check(
    id = "com.google.fonts/check/layout_valid_language_tags",
    rationale = """
    Incorrect language tags can be indications of typos, leftover debugging code or questionable approaches, or user error in the font editor. Such typos can cause features and language support to fail to work as intended.
    """,
    misc_metadata = {
        "request": ["https://github.com/googlefonts/fontbakery/issues/3355"],
        "severity": 8,
    },
)
def com_google_fonts_check_layout_valid_language_tags(ttFont):
    """Does the font have any invalid language tags?"""
    # Any tag not present in the registered LANGUAGE_TAGS mapping is invalid.
    bad_tags = {tag for tag in language_tags(ttFont) if tag not in LANGUAGE_TAGS}
    if bad_tags:
        yield FAIL, \
            Message("bad-language-tags",
                    "The following invalid language tags were found in the font: "
                    + ", ".join(bad_tags))
    else:
        yield PASS, "No invalid language tags were found"
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/layout.py",
"copies": "1",
"size": "4202",
"license": "apache-2.0",
"hash": -3288724009701863400,
"line_mean": 36.185840708,
"line_max": 226,
"alpha_frac": 0.6320799619,
"autogenerated": false,
"ratio": 3.7618621307072515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9861580534454892,
"avg_score": 0.0064723116304717405,
"num_lines": 113
} |
from fontbakery.checkrunner import FAIL
from fontbakery.codetesting import (assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE)
from fontbakery.profiles import iso15008 as iso15008_profile
def test_check_iso15008_proportions():
    """Check if 0.65 => (H width / H height) => 0.80"""
    check = CheckTester(iso15008_profile,
                        "com.google.fonts/check/iso15008_proportions")
    # Cabin's 'H' proportion is 0.7, which is within the allowed range.
    assert_PASS(check(TEST_FILE("cabin/Cabin-Regular.ttf")),
                "with a good font...")
    # Wonky Paths lacks an 'H' glyph entirely.
    assert_results_contain(check(TEST_FILE("wonky_paths/WonkySourceSansPro-Regular.otf")),
                           FAIL, "glyph-not-present",
                           "with a font that does not have an 'H' glyph...")
    # Covered By Your Grace is really tall (proportion 0.39).
    assert_results_contain(check(TEST_FILE("coveredbyyourgrace/CoveredByYourGrace.ttf")),
                           FAIL, "invalid-proportion",
                           "with a very tall font (proportion of 'H' width to 'H' height)...")
def test_check_iso15008_stem_width():
    """Check if 0.10 <= (stem width / ascender) <= 0.82"""
    check = CheckTester(iso15008_profile,
                        "com.google.fonts/check/iso15008_stem_width")
    assert_PASS(check(TEST_FILE("cabin/Cabin-SemiBold.ttf")),
                "with a good font...")
    # Wonky Paths lacks an 'l' glyph, from which the stem width is measured.
    assert_results_contain(check(TEST_FILE("wonky_paths/WonkySourceSansPro-Regular.otf")),
                           FAIL, "no-stem-width",
                           "with a font lacking an 'l' glyph...")
    # Cabin Regular is actually slightly too thin for displays.
    assert_results_contain(check(TEST_FILE("cabin/Cabin-Regular.ttf")),
                           FAIL, "invalid-proportion",
                           "with a too thin font (proportion of stem width to ascender)...")
def test_check_iso15008_intercharacter_spacing():
    """Check if spacing between characters is adequate for display use"""
    check = CheckTester(iso15008_profile,
                        "com.google.fonts/check/iso15008_intercharacter_spacing")
    assert_PASS(check(TEST_FILE("cabin/Cabin-Regular.ttf")),
                "with a good font...")
    semibold = TEST_FILE("cabin/Cabin-SemiBold.ttf")
    # l stem width is 111, LSB at x-height is 59, RSB at x-Height is 83
    # 142 / 111 = 128%, so this font is too tight.
    assert_results_contain(check(semibold),
                           FAIL, "bad-vertical-vertical-spacing",
                           "with a too tight font (space between vertical strokes)...")
    # v LSB is 5, lv kern is -6 (!) so lv distance is 83+5-6 = 82
    # 82 / 111 = 0.73%, so that fails too.
    assert_results_contain(check(semibold),
                           FAIL, "bad-vertical-diagonal-spacing",
                           "with bad spacing between vertical and diagonal strokes...")
    # In Montserrat Black the two diagonals of "vv" touch.
    assert_results_contain(check(TEST_FILE("montserrat/Montserrat-Black.ttf")),
                           FAIL, "bad-diagonal-diagonal-spacing",
                           "with diagonal strokes (vv) that are touching...")
def test_check_iso15008_interword_spacing():
    """Check if spacing between words is adequate for display use"""
    check = CheckTester(iso15008_profile,
                        "com.google.fonts/check/iso15008_interword_spacing")
    # lm space is 112; m+space+l space is 286; 286/112 = 255%
    assert_PASS(check(TEST_FILE("cabin/CabinCondensed-Bold.ttf")),
                "with a good font...")
    # lm space is 147; m+space+l space is 341; 341/147 = 232%
    assert_results_contain(check(TEST_FILE("cabin/Cabin-Regular.ttf")),
                           FAIL, "bad-interword-spacing",
                           "with bad interword space...")
def test_check_iso15008_interline_spacing():
    """Check if spacing between lines is adequate for display use"""
    check = CheckTester(iso15008_profile,
                        "com.google.fonts/check/iso15008_interline_spacing")
    assert_PASS(check(TEST_FILE("cabin/Cabin-Regular.ttf")),
                "with a good font...")
    # 39 units at bottom of g + 49 units at top of h + no typolinegap = 88
    # stem width = 147
    assert_results_contain(check(TEST_FILE("source-sans-pro/TTF/SourceSansPro-Bold.ttf")),
                           FAIL, "bad-interline-spacing",
                           "with bad interline space...")
| {
"repo_name": "googlefonts/fontbakery",
"path": "tests/profiles/iso15008_test.py",
"copies": "1",
"size": "4828",
"license": "apache-2.0",
"hash": 8523669189090395000,
"line_mean": 40.9826086957,
"line_max": 94,
"alpha_frac": 0.5828500414,
"autogenerated": false,
"ratio": 3.7167051578137027,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47995551992137025,
"avg_score": null,
"num_lines": null
} |
from fontbakery.checkrunner import Section
from fontbakery.fonts_profile import profile_factory
def check_filter(item_type, item_id, item):
    """Profile filter that rejects checks requiring external tools (for testing)."""
    external_tool_checks = (
        "com.google.fonts/check/ftxvalidator",
        "com.google.fonts/check/ots",
        "com.google.fonts/check/fontvalidator",
    )
    return not (item_type == "check" and item_id in external_tool_checks)
def test_external_profile():
    """Test the creation of external profiles."""
    profile = profile_factory(default_section=Section("Dalton Maag OpenType"))
    profile.auto_register(globals(),
                          profile_imports=["fontbakery.profiles.opentype"],
                          filter_func=check_filter)
    # A couple of checks that must have been registered...
    profile.test_expected_checks(
        ["com.google.fonts/check/family/panose_proportion",
         "com.google.fonts/check/varfont/regular_opsz_coord"])
    # ...and one that check_filter must have rejected.
    assert "com.google.fonts/check/ftxvalidator" not in profile._check_registry.keys()
    assert len(profile.sections) > 1
def test_profile_imports():
    """
    When a names array in profile_imports contained sub module names, the import
    would fail.
    https://github.com/googlefonts/fontbakery/issues/1886
    """
    def _test(profile_imports, expected_tests, expected_conditions=tuple()):
        # Builds a throwaway profile from `profile_imports` and asserts that
        # the expected checks (and, optionally, conditions) got registered.
        profile = profile_factory(default_section=Section("Testing"))
        profile.auto_register({}, profile_imports=profile_imports)
        profile.test_expected_checks(expected_tests)
        if expected_conditions:
            registered_conditions = profile.conditions.keys()
            for name in expected_conditions:
                assert name in registered_conditions, \
                    f'"{name}" is expected to be registered as a condition.'

    # Scenario 1: nested list form, as shown in docs/writing profiles.
    profile_imports = [
        ['fontbakery.profiles', ['cmap', 'head']]
    ]
    # Probe some tests
    expected_tests = [
        "com.google.fonts/check/all_glyphs_have_codepoints",  # in cmap
        "com.google.fonts/check/unitsperem"  # in head
    ]
    _test(profile_imports, expected_tests)

    # Scenario 2: the example from issue #1886 (tuple of (package, submodules)).
    profile_imports = (
        (
            "fontbakery.profiles",
            (
                "cmap",
                "head",
                "os2",
                "post",
                "name",
                "hhea",
                "dsig",
                "hmtx",
                "gpos",
                "kern",
                "glyf",
                "fvar",
                "shared_conditions",
            ),
        ),
    )
    # Probe some tests
    expected_tests = [
        "com.google.fonts/check/all_glyphs_have_codepoints",  # in cmap
        "com.google.fonts/check/unitsperem"  # in head
    ]
    _test(profile_imports, expected_tests)

    # Scenario 3: make sure the suggested workaround still works:
    # https://github.com/googlefonts/fontbakery/issues/1886#issuecomment-392535435
    profile_imports = (
        "fontbakery.profiles.cmap",
        "fontbakery.profiles.head",
        "fontbakery.profiles.os2",
        "fontbakery.profiles.post",
        "fontbakery.profiles.name",
        "fontbakery.profiles.hhea",
        "fontbakery.profiles.dsig",
        "fontbakery.profiles.hmtx",
        "fontbakery.profiles.gpos",
        "fontbakery.profiles.kern",
        "fontbakery.profiles.glyf",
        "fontbakery.profiles.fvar",
        "fontbakery.profiles.shared_conditions"
    )
    # Probe some tests
    expected_tests = [
        "com.google.fonts/check/all_glyphs_have_codepoints",  # in cmap
        "com.google.fonts/check/unitsperem"  # in head
    ]
    _test(profile_imports, expected_tests)

    # Scenario 4: cherry pick attributes from a module (instead of getting
    # submodules); also from docs/writing profiles.
    # Import just certain attributes from modules.
    # Also, using absolute import module names:
    profile_imports = [
        # like we do in fontbakery.profiles.fvar
        ('fontbakery.profiles.shared_conditions', ('is_variable_font',
            'regular_wght_coord', 'regular_wdth_coord', 'regular_slnt_coord',
            'regular_ital_coord', 'regular_opsz_coord', 'bold_wght_coord')),
        # just as an example: import a check and a dependency/condition of
        # that check from the googlefonts specific profile:
        ('fontbakery.profiles.googlefonts', (
            # "License URL matches License text on name table?"
            'com_google_fonts_check_name_license_url',
            # This condition is a dependency of the check above:
            'familyname',
        ))
    ]
    # Probe some tests
    expected_tests = [
        "com.google.fonts/check/name/license_url"  # in googlefonts
    ]
    expected_conditions = ('is_variable_font', 'regular_wght_coord',
        'regular_wdth_coord', 'regular_slnt_coord', 'regular_ital_coord',
        'regular_opsz_coord', 'bold_wght_coord', 'familyname')
    _test(profile_imports, expected_tests, expected_conditions)
def test_opentype_checks_load():
    """The OpenType profile loads with all check dependencies resolvable."""
    profile = profile_factory(default_section=Section("OpenType Testing"))
    profile.auto_register({}, profile_imports=("fontbakery.profiles.opentype",))
    profile.test_dependencies()
def test_googlefonts_checks_load():
    """The Google Fonts profile loads with all check dependencies resolvable."""
    profile = profile_factory(default_section=Section("Google Fonts Testing"))
    profile.auto_register({}, profile_imports=("fontbakery.profiles.googlefonts",))
    profile.test_dependencies()
def test_in_and_exclude_checks():
    """execution_order honours explicit_checks / exclude_checks substring filters."""
    profile = profile_factory(default_section=Section("OpenType Testing"))
    profile.auto_register({}, profile_imports=("fontbakery.profiles.opentype",))
    profile.test_dependencies()
    explicit_checks = ["06", "07"]  # "06" or "07" in check ID
    exclude_checks = ["065", "079"]  # "065" or "079" in check ID
    iterargs = {"font": 1}
    scheduled = {
        c[1].id
        for c in profile.execution_order(iterargs,
                                         explicit_checks=explicit_checks,
                                         exclude_checks=exclude_checks)
    }
    expected = set()
    for section in profile.sections:
        for check in section.checks:
            included = any(fragment in check.id for fragment in explicit_checks)
            excluded = any(fragment in check.id for fragment in exclude_checks)
            if included and not excluded:
                expected.add(check.id)
    assert scheduled == expected
def test_in_and_exclude_checks_default():
    """With no filters, execution_order schedules every registered check."""
    profile = profile_factory(default_section=Section("OpenType Testing"))
    profile.auto_register({}, profile_imports=("fontbakery.profiles.opentype",))
    profile.test_dependencies()
    iterargs = {"font": 1}
    # "All checks aboard" / "No checks left behind"
    scheduled = {
        c[1].id
        for c in profile.execution_order(iterargs,
                                         explicit_checks=None,
                                         exclude_checks=None)
    }
    expected = {check.id
                for section in profile.sections
                for check in section.checks}
    assert scheduled == expected
| {
"repo_name": "moyogo/fontbakery",
"path": "tests/profiles/external_profile_test.py",
"copies": "2",
"size": "7478",
"license": "apache-2.0",
"hash": -8639232076793543000,
"line_mean": 35.8374384236,
"line_max": 86,
"alpha_frac": 0.6171436213,
"autogenerated": false,
"ratio": 3.954521417239556,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5571665038539556,
"avg_score": null,
"num_lines": null
} |
from fontbakery.commands.check_profile import get_module
from fontbakery.reporters import FontbakeryReporter
from fontbakery.reporters.html import HTMLReporter
from fontbakery.reporters.ghmarkdown import GHMarkdownReporter
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from fontbakery.checkrunner import (
get_module_profile,
CheckRunner,
START,
ENDCHECK,
distribute_generator,
)
class ProgressReporter(FontbakeryReporter):
    """Reporter that forwards overall run progress (0..100) to a Qt signal."""

    def __init__(self, signal, is_async=False, runner=None):
        # `signal` is expected to be a pyqtSignal(float); see FontbakeryRunner,
        # which passes its progressStatus signal here.
        self.signal = signal
        super().__init__(is_async, runner)

    def receive(self, event):
        # Events are (status, message, identity) triples from the check runner.
        status, message, identity = event
        if status == START:
            # At START, `message` is the collection of checks about to run,
            # so its length is the total number of checks.
            self.count = len(message)
        elif status == ENDCHECK:
            # NOTE(review): _tick is presumably initialized by the
            # FontbakeryReporter base class — confirm.
            self._tick += 1
            self.signal.emit(100 * self._tick / float(self.count))
class FontbakeryRunner(QObject):
    """Runs a fontbakery profile over font paths inside a Qt worker object.

    Results are delivered asynchronously: progressStatus emits a completion
    percentage after each finished check, and signalStatus emits the final
    HTML and GitHub-markdown reports when the whole run is done.
    """
    signalStatus = pyqtSignal(str, str)  # (html_report, markdown_report)
    progressStatus = pyqtSignal(float)   # completion percentage, 0..100

    def __init__(self, profilename, loglevels, paths, checks=None, parent=None):
        # Bug fix: was `super(self.__class__, self).__init__(parent)`, which
        # recurses infinitely if this class is ever subclassed; zero-argument
        # super() is the correct Python 3 form.
        super().__init__(parent)
        self.paths = paths
        self.profilename = profilename
        self.loglevels = loglevels
        self.checks = checks  # optional explicit check-id filter, or None for all

    @pyqtSlot()
    def start(self):
        """Run the profile and emit the reports (invoked as a Qt slot)."""
        profile = get_module_profile(
            get_module("fontbakery.profiles." + self.profilename)
        )
        # NOTE(review): the print() calls below look like leftover debugging
        # output; kept to preserve behavior.
        print(self.checks)
        runner = CheckRunner(profile, values={"fonts": self.paths},
                             config={
                                 "custom_order": None,
                                 "explicit_checks": self.checks,
                                 "exclude_checks": None
                             }
                             )
        print("Log levels: ", self.loglevels)
        hr = HTMLReporter(runner=runner, loglevels=self.loglevels)
        ghmd = GHMarkdownReporter(runner=runner, loglevels=self.loglevels)
        prog = ProgressReporter(self.progressStatus, runner=runner)
        reporters = [hr.receive, prog.receive, ghmd.receive]
        status_generator = runner.run()
        print("Starting distribute_generator")
        # Feeds every runner event to all three reporters.
        distribute_generator(status_generator, reporters)
        print("Done with distribute_generator")
        self.signalStatus.emit(hr.get_html(), ghmd.get_markdown())
| {
"repo_name": "googlefonts/fontbakery-ui",
"path": "qfontbakery/fbinterface.py",
"copies": "1",
"size": "2224",
"license": "apache-2.0",
"hash": -6836607833945711000,
"line_mean": 34.8709677419,
"line_max": 80,
"alpha_frac": 0.648381295,
"autogenerated": false,
"ratio": 4.073260073260073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5221641368260073,
"avg_score": null,
"num_lines": null
} |
from fontbakery.errors import SetupError
class Section:
    """An ordered set of checks.

    Used to structure checks in a profile. A profile consists
    of one or more sections.
    """

    def __init__(self, name, checks=None, order=None, description=None):
        self.name = name
        self.description = description
        # Optional single gatekeeper callbacks; see on_add_check/on_remove_check.
        self._add_check_callback = None
        self._remove_check_callback = None
        self._checks = [] if checks is None else list(checks)
        # Maps check id -> position in self._checks, for O(1) lookup.
        self._checkid2index = {check.id: i for i, check in enumerate(self._checks)}
        # a list of iterarg-names
        self._order = order or []

    def clone(self, filter_func=None):
        """Return a new Section with the same checks, optionally filtered."""
        checks = self.checks if not filter_func else filter(filter_func, self.checks)
        return Section(self.name,
                       checks=checks,
                       order=self.order,
                       description=self.description)

    def __repr__(self):
        return f'<Section: {self.name}>'

    # This was problematic. See: https://github.com/googlefonts/fontbakery/issues/2194
    # def __str__(self):
    #   return self.name

    def __eq__(self, other):
        """True if other.checks has the same checks in the same order."""
        if hasattr(other, "checks"):
            return self._checks == other.checks
        else:
            return False

    @property
    def order(self):
        # Returns a copy so callers can't mutate the internal list.
        return self._order[:]

    @property
    def checks(self):
        return self._checks

    def on_add_check(self, callback):
        """Register the single callback that may veto add_check/replace_check."""
        if self._add_check_callback is not None:
            # allow only one, otherwise, skipping registration in
            # add_check becomes problematic, can't skip just for some
            # callbacks.
            raise Exception(f'{self} already has an on_add_check callback')
        self._add_check_callback = callback

    def on_remove_check(self, callback):
        """Register the single callback that may veto remove_check/replace_check."""
        if self._remove_check_callback is not None:
            # allow only one, otherwise, skipping un-registration in
            # remove_check becomes problematic, can't skip just for some
            # callbacks.
            # Bug fix: this message previously said "on_add_check".
            raise Exception(f'{self} already has an on_remove_check callback')
        self._remove_check_callback = callback

    def add_check(self, check):
        """Append `check` to this section; returns False if vetoed.

        Please use rather `register_check` as a decorator.
        """
        if self._add_check_callback is not None:
            if not self._add_check_callback(self, check):
                # rejected, skip!
                return False
        self._checkid2index[check.id] = len(self._checks)
        self._checks.append(check)
        return True

    def remove_check(self, check_id):
        """Remove the check registered as `check_id`; returns False if vetoed."""
        index = self._checkid2index[check_id]
        if self._remove_check_callback is not None:
            if not self._remove_check_callback(self, check_id):
                # rejected, skip!
                return False
        del self._checks[index]
        # Fixing the index, maybe an ordered dict would work here the same
        # but simpler. Could also rebuild the entire index.
        del self._checkid2index[check_id]
        for cid, idx in self._checkid2index.items():
            if idx > index:
                self._checkid2index[cid] = idx - 1
        return True

    def replace_check(self, override_check_id, new_check):
        """Swap the check registered as `override_check_id` for `new_check` in place."""
        index = self._checkid2index[override_check_id]
        if self._remove_check_callback is not None:
            if not self._remove_check_callback(self, override_check_id):
                # rejected, skip!
                return False
        if self._add_check_callback is not None:
            if not self._add_check_callback(self, new_check):
                # rejected, skip!
                # But first restore the old check registration.
                # Maybe a resource manager would be nice here.
                # Also, raising and failing could be an option.
                self._add_check_callback(self, self.get_check(override_check_id))
                return False
        del self._checkid2index[override_check_id]
        self._checkid2index[new_check.id] = index
        self._checks[index] = new_check
        return True

    def merge_section(self, section, filter_func=None):
        """
        Add section.checks to self, if not skipped by self._add_check_callback.
        order, description, etc. are not updated.
        """
        for check in section.checks:
            if filter_func and not filter_func(check):
                continue
            self.add_check(check)

    def get_check(self, check_id):
        """Return the check registered as `check_id` (KeyError if unknown)."""
        index = self._checkid2index[check_id]
        return self._checks[index]

    def register_check(self, func):
        """
        # register in `special_section`
        @my_section.register_check
        @check(id='com.example.fontbakery/check/0')
        def my_check():
            yield PASS, 'example'
        """
        if not self.add_check(func):
            raise SetupError(f'Can\'t add check {func} to section {self}.')
        return func
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/section.py",
"copies": "1",
"size": "5029",
"license": "apache-2.0",
"hash": -8798079821639177000,
"line_mean": 35.4420289855,
"line_max": 86,
"alpha_frac": 0.5871942732,
"autogenerated": false,
"ratio": 4.197829716193656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00043053382133112744,
"num_lines": 138
} |
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
from fontbakery.checkrunner import Section
# Submodules of this package ("." is the relative package prefix) whose checks
# and conditions are assembled into the OpenType profile below.
OPENTYPE_PROFILE_IMPORTS = (
    ".",
    (
        "cff",
        "cmap",
        "head",
        "os2",
        "post",
        "name",
        "loca",
        "hhea",
        "dsig",
        "hmtx",
        "gdef",
        "gpos",
        "kern",
        "glyf",
        "fvar",
        "stat",
        "shared_conditions",
    ),
)
# Module-level name consumed by the profile auto-registration machinery
# (see profile.auto_register below).
profile_imports = (OPENTYPE_PROFILE_IMPORTS, )
profile = profile_factory(default_section=Section("OpenType Specification Checks"))

# The complete, closed list of check ids this profile is expected to provide.
OPENTYPE_PROFILE_CHECKS = [
    'com.google.fonts/check/family/underline_thickness',
    'com.google.fonts/check/family/panose_proportion',
    'com.google.fonts/check/family/panose_familytype',
    'com.google.fonts/check/family/equal_unicode_encodings',
    'com.google.fonts/check/family/equal_font_versions',
    'com.adobe.fonts/check/family/bold_italic_unique_for_nameid1',
    'com.adobe.fonts/check/family/max_4_fonts_per_family_name',
    'com.adobe.fonts/check/name/postscript_vs_cff',
    'com.adobe.fonts/check/name/postscript_name_consistency',
    'com.adobe.fonts/check/name/empty_records',
    'com.google.fonts/check/name/no_copyright_on_description',
    'com.google.fonts/check/name/match_familyname_fullfont',
    'com.google.fonts/check/varfont/regular_wght_coord',
    'com.google.fonts/check/varfont/regular_wdth_coord',
    'com.google.fonts/check/varfont/regular_slnt_coord',
    'com.google.fonts/check/varfont/regular_ital_coord',
    'com.google.fonts/check/varfont/regular_opsz_coord',
    'com.google.fonts/check/varfont/bold_wght_coord',
    'com.google.fonts/check/varfont/slnt_range',
    'com.google.fonts/check/varfont/wght_valid_range',
    'com.google.fonts/check/varfont/wdth_valid_range',
    'com.google.fonts/check/varfont/stat_axis_record_for_each_axis',
    'com.google.fonts/check/loca/maxp_num_glyphs',
    'com.adobe.fonts/check/cff2_call_depth',
    'com.adobe.fonts/check/cff_call_depth',
    'com.adobe.fonts/check/cff_deprecated_operators',
    'com.google.fonts/check/font_version',
    'com.google.fonts/check/post_table_version',
    'com.google.fonts/check/monospace',
    'com.google.fonts/check/xavgcharwidth',
    'com.adobe.fonts/check/fsselection_matches_macstyle',
    'com.google.fonts/check/linegaps',
    'com.google.fonts/check/unitsperem',
    'com.google.fonts/check/dsig',
    'com.google.fonts/check/whitespace_widths',
    'com.google.fonts/check/gdef_spacing_marks',
    'com.google.fonts/check/gdef_mark_chars',
    'com.google.fonts/check/gdef_non_mark_chars',
    'com.google.fonts/check/gpos_kerning_info',
    'com.google.fonts/check/kern_table',
    'com.google.fonts/check/glyf_unused_data',
    'com.google.fonts/check/family_naming_recommendations',
    'com.google.fonts/check/maxadvancewidth',
    'com.google.fonts/check/points_out_of_bounds',
    'com.google.fonts/check/glyf_non_transformed_duplicate_components',
    'com.google.fonts/check/all_glyphs_have_codepoints',
    'com.google.fonts/check/code_pages',
]

profile.auto_register(globals())
# `exclusive=True` asserts the registered checks are exactly the list above.
profile.test_expected_checks(OPENTYPE_PROFILE_CHECKS, exclusive=True)
| {
"repo_name": "graphicore/fontbakery",
"path": "Lib/fontbakery/profiles/opentype.py",
"copies": "1",
"size": "3205",
"license": "apache-2.0",
"hash": -567360232447593300,
"line_mean": 39.0625,
"line_max": 90,
"alpha_frac": 0.6920436817,
"autogenerated": false,
"ratio": 3.012218045112782,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.42042617268127813,
"avg_score": null,
"num_lines": null
} |
from fontbakery.message import Message
from fontbakery.callable import check
from fontbakery.checkrunner import FAIL, PASS, WARN
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory # NOQA pylint: disable=unused-import
import fontTools.ttLib
@check(
    id = 'com.google.fonts/check/glyf_unused_data',
    conditions = ['is_ttf']
)
def com_google_fonts_check_glyf_unused_data(ttFont):
    """Is there any unused data at the end of the glyf table?"""
    try:
        # Number of glyphs the font declares (via the glyph order)...
        expected_glyphs = len(ttFont.getGlyphOrder())
        # ...versus the number of glyphs fontTools actually parsed from glyf.
        actual_glyphs = len(ttFont['glyf'].glyphs)
        diff = actual_glyphs - expected_glyphs
        if diff < 0:
            yield FAIL,\
                  Message("unreachable-data",
                          f"Glyf table has unreachable data at the end of the table."
                          f" Expected glyf table length {expected_glyphs} (from loca"
                          f" table), got length {actual_glyphs}"
                          f" (difference: {diff})")
        elif not diff:  # diff == 0: glyf and the glyph order agree -> fine.
            yield PASS, "There is no unused data at the end of the glyf table."
        else:
            # diff > 0 is assumed unreachable: fontTools should already have
            # raised the TTLibError handled below while decompiling such a font.
            raise Exception("Bug: fontTools did not raise an expected exception.")
    except fontTools.ttLib.TTLibError as error:
        # Match on the error text to distinguish truncated-glyf from other issues.
        if "not enough 'glyf' table data" in format(error):
            yield FAIL,\
                  Message("missing-data",
                          f"Loca table references data beyond"
                          f" the end of the glyf table."
                          f" Expected glyf table length {expected_glyphs}"
                          f" (from loca table).")
        else:
            raise Exception("Bug: Unexpected fontTools exception.")
# This check was originally ported from
# Mekkablue Preflight Checks available at:
# https://github.com/mekkablue/Glyphs-Scripts/blob/master/Test/Preflight%20Font.py
@check(
    id = 'com.google.fonts/check/points_out_of_bounds',
    conditions = ['is_ttf'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/735'
    }
)
def com_google_fonts_check_points_out_of_bounds(ttFont):
    """Check for points out of bounds."""
    from fontbakery.utils import pretty_print_list
    glyf_table = ttFont['glyf']
    out_of_bounds = []
    for glyph_name in glyf_table.keys():
        glyph = glyf_table[glyph_name]
        for x, y in glyph.getCoordinates(glyf_table)[0]:
            # A point is flagged when (rounded) it falls outside the glyph's
            # declared bounding box, or exceeds the practical coordinate limit.
            outside_bbox = (round(x) < glyph.xMin or round(x) > glyph.xMax or
                            round(y) < glyph.yMin or round(y) > glyph.yMax)
            if outside_bbox or abs(x) > 32766 or abs(y) > 32766:
                out_of_bounds.append((glyph_name, x, y))
    if out_of_bounds:
        formatted_list = "\t* " + pretty_print_list(out_of_bounds,
                                                    shorten=10,
                                                    sep="\n\t* ")
        yield WARN,\
              Message("points-out-of-bounds",
                      f"The following glyphs have coordinates"
                      f" which are out of bounds:\n"
                      f"{formatted_list}\n"
                      f"\n"
                      f"This happens a lot when points are not extremes,"
                      f" which is usually bad. However, fixing this alert"
                      f" by adding points on extremes may do more harm"
                      f" than good, especially with italics,"
                      f" calligraphic-script, handwriting, rounded and"
                      f" other fonts. So it is common to ignore this message.")
    else:
        yield PASS, "All glyph paths have coordinates within bounds!"
@check(
    id = 'com.google.fonts/check/glyf_non_transformed_duplicate_components',
    rationale = """
    There have been cases in which fonts had faulty double quote marks, with each of them containing two single quote marks as components with the same x, y coordinates which makes them visually look like single quote marks.
    This check ensures that glyphs do not contain duplicate components which have the same x,y coordinates.
    """,
    conditions = ['is_ttf']
)
def com_google_fonts_check_glyf_non_transformed_duplicate_components(ttFont):
    """Check glyphs do not have duplicate components which have the same x,y coordinates."""
    from fontbakery.utils import pretty_print_list
    failed = []
    for glyph_name in ttFont['glyf'].keys():
        glyph = ttFont['glyf'][glyph_name]
        if not glyph.isComposite():
            continue
        # Track each (component, offset) combination already encountered
        # within this glyph; a repeat is a duplicate.
        previously_seen = []
        for component in glyph.components:
            record = {
                "glyph": glyph_name,
                "component": component.glyphName,
                "x": component.x,
                "y": component.y
            }
            if record in previously_seen:
                failed.append(record)
            else:
                previously_seen.append(record)
    if failed:
        formatted_list = "\t* " + pretty_print_list(failed,
                                                    shorten=10,
                                                    sep="\n\t* ")
        yield FAIL, \
            Message('found-duplicates',
                    f"The following glyphs have duplicate components which"
                    f" have the same x,y coordinates:\n"
                    f"{formatted_list}")
    else:
        yield PASS, ("Glyphs do not contain duplicate components which have"
                     " the same x,y coordinates.")
@check(
    id = 'com.google.fonts/check/glyf_nested_components',
    rationale = """
    There have been bugs rendering variable fonts with nested components. Additionally, some static fonts with nested components have been reported to have rendering and printing issues.
    For more info, see:
    * https://github.com/googlefonts/fontbakery/issues/2961
    * https://github.com/arrowtype/recursive/issues/412
    """,
    conditions = ['is_ttf'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2961'
    }
)
def com_google_fonts_check_glyf_nested_components(ttFont):
    """Check glyphs do not have components which are themselves component glyphs."""
    from fontbakery.utils import pretty_print_list
    glyf_table = ttFont['glyf']
    failed = []
    for glyph_name in glyf_table.keys():
        glyph = glyf_table[glyph_name]
        if not glyph.isComposite():
            continue
        # A glyph is reported once per nested component it references.
        for component in glyph.components:
            if glyf_table[component.glyphName].isComposite():
                failed.append(glyph_name)
    if failed:
        formatted_list = "\t* " + pretty_print_list(failed,
                                                    shorten=10,
                                                    sep="\n\t* ")
        yield FAIL, \
            Message('found-nested-components',
                    f"The following glyphs have components which"
                    f" themselves are component glyphs:\n"
                    f"{formatted_list}")
    else:
        yield PASS, ("Glyphs do not contain nested components.")
| {
"repo_name": "moyogo/fontbakery",
"path": "Lib/fontbakery/profiles/glyf.py",
"copies": "1",
"size": "7203",
"license": "apache-2.0",
"hash": 6122703778527910000,
"line_mean": 42.3915662651,
"line_max": 228,
"alpha_frac": 0.5658753297,
"autogenerated": false,
"ratio": 4.305439330543933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007162015525276199,
"num_lines": 166
} |
from fontbakery.message import Message
from fontbakery.callable import check
from fontbakery.status import FAIL, PASS, WARN
# used to inform get_module_profile whether and how to create a profile
from fontbakery.fonts_profile import profile_factory  # NOQA pylint: disable=unused-import
import fontTools.ttLib


@check(
    id = 'com.google.fonts/check/glyf_unused_data',
    conditions = ['is_ttf']
)
def com_google_fonts_check_glyf_unused_data(ttFont):
    """Is there any unused data at the end of the glyf table?"""
    try:
        # Number of glyphs the font declares (via the glyph order)...
        expected_glyphs = len(ttFont.getGlyphOrder())
        # ...versus the number of glyphs fontTools actually parsed from glyf.
        actual_glyphs = len(ttFont['glyf'].glyphs)
        diff = actual_glyphs - expected_glyphs
        if diff < 0:
            yield FAIL,\
                  Message("unreachable-data",
                          f"Glyf table has unreachable data at the end of the table."
                          f" Expected glyf table length {expected_glyphs} (from loca"
                          f" table), got length {actual_glyphs}"
                          f" (difference: {diff})")
        elif not diff:  # diff == 0: glyf and the glyph order agree -> fine.
            yield PASS, "There is no unused data at the end of the glyf table."
        else:
            # diff > 0 is assumed unreachable: fontTools should already have
            # raised the TTLibError handled below while decompiling such a font.
            raise Exception("Bug: fontTools did not raise an expected exception.")
    except fontTools.ttLib.TTLibError as error:
        # Match on the error text to distinguish truncated-glyf from other issues.
        if "not enough 'glyf' table data" in format(error):
            yield FAIL,\
                  Message("missing-data",
                          f"Loca table references data beyond"
                          f" the end of the glyf table."
                          f" Expected glyf table length {expected_glyphs}"
                          f" (from loca table).")
        else:
            raise Exception("Bug: Unexpected fontTools exception.")
# This check was originally ported from
# Mekkablue Preflight Checks available at:
# https://github.com/mekkablue/Glyphs-Scripts/blob/master/Test/Preflight%20Font.py
@check(
    id = 'com.google.fonts/check/points_out_of_bounds',
    conditions = ['is_ttf'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/735'
    }
)
def com_google_fonts_check_points_out_of_bounds(ttFont):
    """Check for points out of bounds."""
    from fontbakery.utils import pretty_print_list
    glyf_table = ttFont['glyf']
    out_of_bounds = []
    for glyph_name in glyf_table.keys():
        glyph = glyf_table[glyph_name]
        for x, y in glyph.getCoordinates(glyf_table)[0]:
            # A point is flagged when (rounded) it falls outside the glyph's
            # declared bounding box, or exceeds the practical coordinate limit.
            outside_bbox = (round(x) < glyph.xMin or round(x) > glyph.xMax or
                            round(y) < glyph.yMin or round(y) > glyph.yMax)
            if outside_bbox or abs(x) > 32766 or abs(y) > 32766:
                out_of_bounds.append((glyph_name, x, y))
    if out_of_bounds:
        formatted_list = "\t* " + pretty_print_list(out_of_bounds,
                                                    shorten=10,
                                                    sep="\n\t* ")
        yield WARN,\
              Message("points-out-of-bounds",
                      f"The following glyphs have coordinates"
                      f" which are out of bounds:\n"
                      f"{formatted_list}\n"
                      f"\n"
                      f"This happens a lot when points are not extremes,"
                      f" which is usually bad. However, fixing this alert"
                      f" by adding points on extremes may do more harm"
                      f" than good, especially with italics,"
                      f" calligraphic-script, handwriting, rounded and"
                      f" other fonts. So it is common to ignore this message.")
    else:
        yield PASS, "All glyph paths have coordinates within bounds!"
@check(
    id = 'com.google.fonts/check/glyf_non_transformed_duplicate_components',
    rationale = """
    There have been cases in which fonts had faulty double quote marks, with each of them containing two single quote marks as components with the same x, y coordinates which makes them visually look like single quote marks.

    This check ensures that glyphs do not contain duplicate components which have the same x,y coordinates.
    """,
    conditions = ['is_ttf']
)
def com_google_fonts_check_glyf_non_transformed_duplicate_components(ttFont):
    """Check glyphs do not have duplicate components which have the same x,y coordinates."""
    from fontbakery.utils import pretty_print_list

    failed = []
    for glyph_name in ttFont['glyf'].keys():
        outline = ttFont['glyf'][glyph_name]
        if not outline.isComposite():
            continue
        # Track each (component, offset) already seen within this glyph;
        # any repeat is reported as a duplicate.
        encountered = []
        for component in outline.components:
            entry = {
                "glyph": glyph_name,
                "component": component.glyphName,
                "x": component.x,
                "y": component.y
            }
            if entry in encountered:
                failed.append(entry)
            else:
                encountered.append(entry)

    if not failed:
        yield PASS, ("Glyphs do not contain duplicate components which have"
                     " the same x,y coordinates.")
    else:
        formatted_list = "\t* " + pretty_print_list(failed,
                                                    shorten=10,
                                                    sep="\n\t* ")
        yield FAIL, \
              Message('found-duplicates',
                      f"The following glyphs have duplicate components which"
                      f" have the same x,y coordinates:\n"
                      f"{formatted_list}")
@check(
    id = 'com.google.fonts/check/glyf_nested_components',
    rationale = """
    There have been bugs rendering variable fonts with nested components. Additionally, some static fonts with nested components have been reported to have rendering and printing issues.

    For more info, see:
    * https://github.com/googlefonts/fontbakery/issues/2961
    * https://github.com/arrowtype/recursive/issues/412
    """,
    conditions = ['is_ttf'],
    misc_metadata = {
        'request': 'https://github.com/googlefonts/fontbakery/issues/2961'
    }
)
def com_google_fonts_check_glyf_nested_components(ttFont):
    """Check glyphs do not have components which are themselves components."""
    from fontbakery.utils import pretty_print_list

    glyf = ttFont['glyf']
    failed = []
    # A glyph is reported once per nested component it references.
    for glyph_name in glyf.keys():
        outline = glyf[glyph_name]
        if outline.isComposite():
            failed.extend(
                glyph_name
                for component in outline.components
                if glyf[component.glyphName].isComposite()
            )

    if not failed:
        yield PASS, ("Glyphs do not contain nested components.")
    else:
        formatted_list = "\t* " + pretty_print_list(failed,
                                                    shorten=10,
                                                    sep="\n\t* ")
        yield FAIL, \
              Message('found-nested-components',
                      f"The following glyphs have components which"
                      f" themselves are component glyphs:\n"
                      f"{formatted_list}")
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/glyf.py",
"copies": "1",
"size": "7200",
"license": "apache-2.0",
"hash": 7835126607458483000,
"line_mean": 41.8571428571,
"line_max": 228,
"alpha_frac": 0.5654166667,
"autogenerated": false,
"ratio": 4.30622009569378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.537163676239378,
"avg_score": null,
"num_lines": null
} |
from fontbakery.profiles.universal import UNIVERSAL_PROFILE_CHECKS
from fontbakery.section import Section
from fontbakery.status import WARN, PASS #, INFO, ERROR, SKIP, FAIL
from fontbakery.callable import check #, disable
from fontbakery.message import Message
from fontbakery.fonts_profile import profile_factory
from fontbakery.constants import (PlatformID,
WindowsEncodingID,
UnicodeEncodingID,
MacintoshEncodingID)
from .googlefonts_conditions import * # pylint: disable=wildcard-import,unused-wildcard-import
profile_imports = ('fontbakery.profiles.universal',) # Maybe this should be .googlefonts instead...
profile = profile_factory(default_section=Section("Noto Fonts"))

# Check IDs contributed by this module, grouped by the table they inspect.
CMAP_TABLE_CHECKS = [
    'com.google.fonts/check/cmap/unexpected_subtables',
]

OS2_TABLE_CHECKS = [
    'com.google.fonts/check/unicode_range_bits',
]

# Maybe this should be GOOGLEFONTS_PROFILE_CHECKS instead...
# Full check set for this profile: everything from the universal profile
# plus the table-specific checks declared above.
NOTOFONTS_PROFILE_CHECKS = \
    UNIVERSAL_PROFILE_CHECKS + \
    CMAP_TABLE_CHECKS + \
    OS2_TABLE_CHECKS
@check(
    id = 'com.google.fonts/check/cmap/unexpected_subtables',
    rationale = """
    There are just a few typical types of cmap subtables that are used in fonts.
    If anything different is declared in a font, it will be treated as a FAIL.
    """
)
def com_google_fonts_check_cmap_unexpected_subtables(ttFont):
    """Ensure all cmap subtables are the typical types expected in a font."""
    from fontbakery.profiles.shared_conditions import is_cjk_font

    # NOTE(review): the rationale says unexpected subtables "will be treated
    # as a FAIL", but the loop below yields WARN — confirm intended severity.
    passed = True
    # Note:
    # Format 0 = Byte encoding table
    # Format 4 = Segment mapping to delta values
    # Format 6 = Trimmed table mapping
    # Format 12 = Segmented coverage
    # Format 14 = Unicode Variation Sequences
    #
    # Each tuple is (subtable format, platformID, platEncID).
    EXPECTED_SUBTABLES = [
        ( 0, PlatformID.MACINTOSH, MacintoshEncodingID.ROMAN), # 13.7% of GFonts TTFs (389 files)
        #( 4, PlatformID.MACINTOSH, MacintoshEncodingID.ROMAN), # only the Sansation family has this on GFonts
        ( 6, PlatformID.MACINTOSH, MacintoshEncodingID.ROMAN), # 38.1% of GFonts TTFs (1.082 files)
        #( 4, PlatformID.UNICODE, UnicodeEncodingID.UNICODE_1_0), # only the Gentium family has this on GFonts
        #(12, PlatformID.UNICODE, 10), # INVALID? - only the Overpass family and SawarabiGothic-Regular has this on GFonts
        # -----------------------------------------------------------------------
        ( 4, PlatformID.WINDOWS, WindowsEncodingID.UNICODE_BMP), # Absolutely all GFonts TTFs have this table :-)
        (12, PlatformID.WINDOWS, WindowsEncodingID.UNICODE_FULL_REPERTOIRE), # 5.7% of GFonts TTFs (162 files)
        (14, PlatformID.UNICODE, UnicodeEncodingID.UNICODE_VARIATION_SEQUENCES), # 1.1% - Only 4 families (30 TTFs),
                                                                                 # including SourceCodePro, have this on GFonts
        ( 4, PlatformID.UNICODE, UnicodeEncodingID.UNICODE_2_0_BMP_ONLY), # 97.0% of GFonts TTFs (only 84 files lack it)
        (12, PlatformID.UNICODE, UnicodeEncodingID.UNICODE_2_0_FULL) # 2.9% of GFonts TTFs (82 files)
    ]
    if is_cjk_font(ttFont):
        EXPECTED_SUBTABLES.extend([
            # Adobe says historically some programs used these to identify
            # the script in the font. The encodingID is the quickdraw
            # script manager code. These are dummy tables.
            (6, PlatformID.MACINTOSH, MacintoshEncodingID.JAPANESE),
            (6, PlatformID.MACINTOSH, MacintoshEncodingID.CHINESE_TRADITIONAL),
            (6, PlatformID.MACINTOSH, MacintoshEncodingID.KOREAN),
            (6, PlatformID.MACINTOSH, MacintoshEncodingID.CHINESE_SIMPLIFIED)
        ])
    for subtable in ttFont['cmap'].tables:
        if (subtable.format,
            subtable.platformID,
            subtable.platEncID) not in EXPECTED_SUBTABLES:
            passed = False
            yield WARN,\
                  Message("unexpected-subtable",
                          f"'cmap' has a subtable of"
                          f" (format={subtable.format}, platform={subtable.platformID},"
                          f" encoding={subtable.platEncID}), which it shouldn't have.")
    if passed:
        yield PASS, "All cmap subtables look good!"
@check(
    id = 'com.google.fonts/check/unicode_range_bits',
    rationale = """
    When the UnicodeRange bits on the OS/2 table are not properly set, some programs running on Windows may not recognize the font and use a system fallback font instead. For that reason, this check calculates the proper settings by inspecting the glyphs declared on the cmap table and then ensures that their corresponding ranges are enabled.
    """,
    conditions = ["unicoderange",
                  "preferred_cmap"]
)
def com_google_fonts_check_unicode_range_bits(ttFont, unicoderange, preferred_cmap):
    """Ensure UnicodeRange bits are properly set."""
    from fontbakery.constants import UNICODERANGE_DATA
    from fontbakery.utils import (compute_unicoderange_bits,
                                  unicoderange_bit_name,
                                  chars_in_range)
    expected_unicoderange = compute_unicoderange_bits(ttFont)
    # XOR isolates the bits where the declared OS/2 value disagrees with
    # the value computed from the cmap coverage.
    difference = unicoderange ^ expected_unicoderange
    if not difference:
        yield PASS, "Looks good!"
    else:
        # OS/2 ulUnicodeRange1..4 together form a 128-bit field.
        for bit in range(128):
            if difference & (1 << bit):
                range_name = unicoderange_bit_name(bit)
                num_chars = len(chars_in_range(ttFont, bit))
                # Total codepoints covered by this range, summed across all
                # of its (start, end) sub-ranges in UNICODERANGE_DATA.
                range_size = sum(entry[3] - entry[2] + 1 for entry in UNICODERANGE_DATA[bit])
                # The bit should be set iff the cmap actually covers some
                # characters in the range.
                set_unset = "1"
                if num_chars == 0:
                    set_unset = "0"
                    num_chars = "none"
                yield WARN, \
                      Message("bad-range-bit",
                              f'UnicodeRange bit {bit} "{range_name}" should be {set_unset} because'
                              f' cmap has {num_chars} of the {range_size} codepoints in this range.')
# Register every check defined in this module and assert that the profile
# ends up with exactly the declared check set.
profile.auto_register(globals())
profile.test_expected_checks(NOTOFONTS_PROFILE_CHECKS, exclusive=True)
| {
"repo_name": "googlefonts/fontbakery",
"path": "Lib/fontbakery/profiles/notofonts.py",
"copies": "1",
"size": "6257",
"license": "apache-2.0",
"hash": 7293592882590449000,
"line_mean": 50.2868852459,
"line_max": 347,
"alpha_frac": 0.6260188589,
"autogenerated": false,
"ratio": 3.8222357971899816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49482546560899815,
"avg_score": null,
"num_lines": null
} |
from font import font
class zschemaname( font ):
    """
    Displays a header name for a Z Schema. It may contain text, images,
    equations, etc... but the width of it should be kept to a minimum so
    it isn't wider than the containing Z Schema box. See
    <a href="zschema.html"><zschema></a> for proper usage.
    """

    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties.
        -*args, arguments for the font constructor.
        """
        font.__init__( self, *args )
        # Remember whether the document supplied an explicit color so
        # render() knows whether to inherit the schema's border color.
        self.setColorDefined( self.hasProperty("color") )

    def render( self, app, x, y ):
        """
        -app, SlideApplication object
        -x, x coordinate to start drawing at
        -y, y coordinate to start drawing at
        Returns x, y coordinates where the rendering left off.
        """
        #
        # Don't draw anything if this isn't a direct child of a
        # <zschema> tag in the XML document.
        #
        from zschema import zschema
        if not isinstance(self.getContainer(), zschema):
            return x, y
        # Default the text color to the schema's border color when no
        # explicit color was given.
        if not self.colorDefined():
            borderQColor = self.getContainer().getBorderColor()
            self.setProperty( "color", str(borderQColor.name()) )
        container = self.getContainer()
        # Indent the name 16px past the schema's left edge and keep the
        # right margin inside the schema box.
        self.setProperty( "marginleft",
                          container.getProperty("marginleft") + \
                          container.getProperty("cellspacing") + \
                          16 )
        self.setProperty( "marginright",
                          app.getWidth() - \
                          (x + container.getWidth()) + \
                          self.getProperty("cellpadding") + \
                          16)
        x = self.getProperty( "marginleft" )
        return font.render( self, app, x, y )

    def move( self, x, y ):
        """
        Move the rendered name, preserving the 16px schema indent.
        """
        font.move( self, x + 16, y )

    def setColorDefined( self, colorDefined ):
        """
        Record whether an explicit "color" property was supplied.
        """
        self.__colorDefined = colorDefined

    def colorDefined( self ):
        """
        Return whether an explicit "color" property was supplied.
        Defaults to false (0) if setColorDefined() was never called.
        """
        try:
            return self.__colorDefined
        except AttributeError:
            # Bug fix: this module never defined the name "false", so the
            # previous code raised NameError here instead of returning a
            # default. Use 0, the package's conventional false value.
            self.__colorDefined = 0
            return 0

    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        return font.getHtml( self )
| {
"repo_name": "derekmd/opentag-presenter",
"path": "tags/zschemaname.py",
"copies": "1",
"size": "2160",
"license": "bsd-2-clause",
"hash": -2531222877805202400,
"line_mean": 22.2258064516,
"line_max": 69,
"alpha_frac": 0.6106481481,
"autogenerated": false,
"ratio": 3.4838709677419355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9140438706274296,
"avg_score": 0.09081608191352768,
"num_lines": 93
} |
from font import Font
# Running totals of how many "ink" and "background" cells have been emitted
# so far; reset by print_word() and read by callbacks like
# sentence_within_word().
current_black_print = 0
current_white_print = 0
def sentence_within_word(sentence):
    """Build a black_func that spells *sentence* across every inked cell of
    the whole rendered word, cycling when the sentence runs out."""
    def pick_char(arg):
        # current_black_print advances as cells are printed, walking us
        # through the sentence one inked cell at a time.
        return sentence[current_black_print % len(sentence)]
    return pick_char
def sentence_within_char(sentence, word, font):
    """Build a black_func that spells *sentence* through the inked pixels of
    *word* (in reading order), leaving background pixels as spaces.

    The replacement characters are precomputed per (char, row, x) cell, so
    the returned callback is a plain table lookup.
    """
    rendered = []
    cursor = 0
    for char in word:
        char_rows = []
        for pixel_row in font.character_map[char]:
            out_row = []
            for filled in pixel_row:
                if filled:
                    out_row.append(sentence[cursor % len(sentence)])
                    cursor += 1
                else:
                    out_row.append(' ')
            char_rows.append(out_row)
        rendered.append(char_rows)

    def lookup(arg):
        return rendered[arg['char_index']][arg['row']][arg['x']]
    return lookup
def default_black_func(arg):
    """Default ink renderer: the character currently being drawn."""
    word = arg['word']
    return word[arg['char_index']]


def default_white_func(arg):
    """Default background renderer: a single space."""
    return ' '
def print_word(word, font, black_func=default_black_func, white_func=default_white_func):
    """Render *word* to stdout as ASCII art using *font*'s bitmap data.

    black_func/white_func map a context dict (word, char_index, font, row,
    x, ...) to the character printed for an inked / background cell.
    """
    # Reset the shared cell counters so callbacks such as
    # sentence_within_word() start from the beginning of their sentence.
    global current_black_print
    current_black_print = 0
    global current_white_print
    current_white_print = 0
    # Assemble the text row by row and print it
    for row in range(font.char_height):
        s = ''
        for char_index in range(len(word)):
            s += single_row_char(word, char_index, font, row, black_func=black_func, white_func=white_func)
        print(s.rstrip(' '))
def single_row_char(word, char_index, font, row, black_func=default_black_func, white_func=default_white_func):
    """Render one pixel row of word[char_index], returning the rendered cells
    plus trailing letter-spacing as a string."""
    # NOTE: the callback context is captured with locals(), so the parameter
    # names above are part of the contract seen by black_func/white_func
    # (keys: word, char_index, font, row, black_func, white_func, later 'x').
    arg = locals()
    # Writes each character out of its own character if no black specified
    char = word[char_index]
    c_map = font.character_map[char]
    # Use spacing or the character data
    if char == ' ':
        # A space is pure background: word spacing minus the letter spacing
        # that every character (including this one) gets appended below... no
        # pixels are consulted.
        return white_func(arg) * (font.word_spacing - font.letter_spacing)
    else:
        s = ''
        for x in range(font.character_widths[char]):
            arg['x'] = x
            if c_map[row][x]:
                s += black_func(arg)
                # Advance the module-level ink counter consumed by
                # sentence_within_word().
                global current_black_print
                current_black_print += 1
            else:
                s += white_func(arg)
                global current_white_print
                current_white_print += 1
        return s + white_func(arg) * font.letter_spacing
if __name__ == '__main__':
    # Demo driver: prompt for text and render it with several callback styles.
    default_font = Font("letters5x5.png", monospaced=False)
    in_text = input("Input text: ")
    # Example displays:
    print("\n-- Default display --")
    print_word(in_text, default_font)
    print("\n-- Sentence within characters --")
    print_word(in_text, default_font, black_func=sentence_within_char('SENTENCEWITHINCHARACTERS', in_text, default_font))
    print("\n-- Sentence within the entire word(s) --")
    print_word(in_text, default_font, black_func=sentence_within_word('SENTENCEWITHINWORDS'))
    print("\n-- Negative --")
print_word(in_text, default_font, black_func=lambda arg: ' ', white_func=lambda arg: 'X') | {
"repo_name": "jacobcheatley/AsciiPrinter",
"path": "advanced_character_printer.py",
"copies": "1",
"size": "3119",
"license": "mit",
"hash": -121393335203966260,
"line_mean": 33.6666666667,
"line_max": 121,
"alpha_frac": 0.6014748317,
"autogenerated": false,
"ratio": 3.597462514417532,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9692357286103852,
"avg_score": 0.0013160120027359292,
"num_lines": 90
} |
from font import font
# Python 2-era boolean/status aliases used throughout this package.
true = success = 1
false = failure = 0
class pre( font ):
    """
    <pre> is used to preformat text, such as source code.
    Automatic text wrapping is disabled, and \n in the source will
    translate to a line break.
    <p>
    <b>Properties:</b>
    <br>
    See <a href="font.html"><font></a> for properties.
    """

    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties, preserving
        the source document's whitespace.
        -*args, arguments for the font constructor.
        """
        font.__init__( self, *args )
        self.setKeepWhitespace( true )

    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        htmlLines = [ "<pre>" ]
        #
        # Rejoin the inherited HTML fragments so that explicit "\n"
        # markers become real line breaks, as required inside <pre>.
        #
        pending = ""
        for fragment in font.getHtml(self):
            if fragment == "\n":
                htmlLines.append( pending )
                pending = ""
            else:
                pending = pending + fragment
        if len(pending):
            htmlLines.append( pending )
        htmlLines.append( "</pre>" )
        return htmlLines
| {
"repo_name": "derekmd/opentag-presenter",
"path": "tags/pre.py",
"copies": "1",
"size": "1120",
"license": "bsd-2-clause",
"hash": -3286338519338541000,
"line_mean": 19.7407407407,
"line_max": 63,
"alpha_frac": 0.6232142857,
"autogenerated": false,
"ratio": 3.0853994490358128,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42086137347358127,
"avg_score": null,
"num_lines": null
} |
from font import font
# Python 2-era boolean/status aliases used throughout this package.
true = success = 1
false = failure = 0
class subscript( font ):
    """
    <subscript> makes text aligned to the bottom of the current line.
    <br>
    <br>
    For example:
    <pre>
    A<subscript>3,1</subscript>
    </pre>
    ... would produce A<sub>3,1</sub>
    <p>
    <b>Properties:</b>
    <br>
    See <a href="font.html"><font></a> for properties.
    """

    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties.
        -*args, arguments for the font constructor.
        """
        apply( font.__init__, (self,) + args )

    def renderWord( self, canvasWord, app, x, y, wordWrap=true ):
        """
        Renders a string within a certain pixel width, justifying
        words as necessary to fit on the line as best as possible.
        -canvasWord, QCanvasText object
        -app, SlideApplication object
        -x, x pixel to try to draw the QCanvasText object at
        -y, y pixel to try to draw the QCanvasText object at
        -wordWrap, boolean value, set to false to turn automatic word
        wrap off (which is the purpose of the <pre> tag)
        Returns the topright x, y coordinates of the now drawn
        QCanvasText.
        """
        #
        # Ensure the proper sized font is displayed. Subscripts are
        # drawn at roughly 57% (1/1.75) of the normal font height.
        #
        try:
            qFont = self.getFont( app.getHeight() )
            qFont.setPixelSize( self.getFontHeight(app.getHeight()) / 1.75 )
            canvasWord.setFont( qFont )
        except AttributeError:
            # No font information available; keep the canvas item's font.
            pass
        #
        # Set the greatest height font for the current line.
        #
        height = canvasWord.boundingRect().height()
        if height > app.getSlide().getFontHeight():
            app.getSlide().setFontHeight( height )
            self.alignPrevious(y)
        #
        # If the current word can't fit on the end of this line
        # without going off the page or intersecting the right
        # margin, go to the start of the next line to render the
        # word.
        #
        if wordWrap:
            width = canvasWord.boundingRect().width()
            if self.beyondMargin(x, width):
                x, y = self.lineBreak( app, y )
                app.getSlide().setFontHeight( height )
        #
        # Display the word on the current line, pushed down so it sits
        # below the surrounding text (the +4 mirrors superscript's -4).
        #
        width = canvasWord.boundingRect().width()
        # NOTE(review): mixes self.getSlide() here with app.getSlide()
        # above -- presumably both return the same slide; confirm.
        alignY = y + \
                 self.getSlide().getFontHeight() - \
                 height + \
                 4
        canvasWord.move( x, alignY )
        canvasWord.show()
        x = x + width
        return x, y

    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        htmlList = [ "<sub>" ]
        htmlList = htmlList + font.getHtml( self )
        htmlList = htmlList + [ "</sub>" ]
        return htmlList
| {
"repo_name": "derekmd/opentag-presenter",
"path": "tags/subscript.py",
"copies": "1",
"size": "2489",
"license": "bsd-2-clause",
"hash": -6894447438412045000,
"line_mean": 23.1650485437,
"line_max": 72,
"alpha_frac": 0.6347930896,
"autogenerated": false,
"ratio": 3.1347607052896724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.886647406571934,
"avg_score": 0.08061594583406645,
"num_lines": 103
} |
from font import font
# Python 2-era boolean/status aliases used throughout this package.
true = success = 1
false = failure = 0
class superscript( font ):
    """
    <superscript> makes text aligned to the top of the current line.
    It is most often used for exponents.<br>
    <br>
    For example:
    <pre>
    8
    <superscript>
    <u>3</u>
    </superscript>
    = 512
    </pre>
    ... would produce 8<sup><u>3</u></sup> = 512
    <p>
    <b>Properties:</b>
    <br>
    See <a href="font.html"><font></a> for properties.
    """

    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties.
        -*args, arguments for the font constructor.
        """
        apply( font.__init__, (self,) + args )

    def renderWord( self, canvasWord, app, x, y, wordWrap=true ):
        """
        Renders a string within a certain pixel width, justifying
        words as necessary to fit on the line as best as possible.
        -canvasWord, QCanvasText object
        -app, SlideApplication object
        -x, x pixel to try to draw the QCanvasText object at
        -y, y pixel to try to draw the QCanvasText object at
        -wordWrap, boolean value, set to false to turn automatic word
        wrap off (which is the purpose of the <pre> tag)
        Returns the topright x, y coordinates of the now drawn
        QCanvasText.
        """
        #
        # Ensure the proper sized font is displayed. Superscripts are
        # drawn at roughly 57% (1/1.75) of the normal font height.
        #
        try:
            qFont = self.getFont( app.getHeight() )
            qFont.setPixelSize( self.getFontHeight(app.getHeight()) / 1.75 )
            canvasWord.setFont( qFont )
        except AttributeError:
            # No font information available; keep the canvas item's font.
            pass
        #
        # Set the greatest height font for the current line.
        #
        height = canvasWord.boundingRect().height()
        if height > app.getSlide().getFontHeight():
            app.getSlide().setFontHeight( height )
            self.alignPrevious(y)
        #
        # If the current word can't fit on the end of this line
        # without going off the page or intersecting the right
        # margin, go to the start of the next line to render the
        # word.
        #
        if wordWrap:
            width = canvasWord.boundingRect().width()
            if self.beyondMargin(x, width):
                x, y = self.lineBreak( app, y )
                app.getSlide().setFontHeight( height )
        #
        # Display the word on the current line, raised 4px so it sits
        # above the surrounding text (mirrors subscript's +4).
        #
        width = canvasWord.boundingRect().width()
        canvasWord.move( x, y-4 )
        x = x + width
        canvasWord.show()
        return x, y

    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        htmlList = [ "<sup>" ]
        htmlList = htmlList + font.getHtml( self )
        htmlList = htmlList + [ "</sup>" ]
        return htmlList
| {
"repo_name": "derekmd/opentag-presenter",
"path": "tags/superscript.py",
"copies": "1",
"size": "2424",
"license": "bsd-2-clause",
"hash": -4118162794070049300,
"line_mean": 22.5339805825,
"line_max": 71,
"alpha_frac": 0.6547029703,
"autogenerated": false,
"ratio": 3.0452261306532664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41999291009532663,
"avg_score": null,
"num_lines": null
} |
from .font import getFont
from reportlab.lib.rl_accel import escapePDF
from reportlab.pdfbase import pdfmetrics
class PDFText:
    """Low-level PDF text object: wraps a ``BT ... ET`` block on *page*.

    Tracks the currently selected font and writes raw PDF text operators
    (Tm/Td/Tf/TL/Tj) through the owning page's content stream.
    """

    def __init__(self, page, x=0, y=0):
        self.page = page
        self.write("BT\n")       # begin text object
        self.set_position(x, y)
        self.font_name = None    # logical font name chosen via set_font()
        self.font_size = None
        self.font_leading = None
        self.font_subset = None  # last dynamic-font subset selected in draw()

    def write(self, code):
        """Append raw PDF code to the page's content stream."""
        self.page.write(code)

    def move_position(self, dx=0, dy=0):
        """Move the text cursor relative to the current line start (Td)."""
        self.write("{} {} Td\n".format(dx, dy))

    def set_position(self, x=0, y=0):
        """Place the text cursor absolutely via the text matrix (Tm)."""
        self.write("1 0 0 1 {} {} Tm\n".format(x, y))

    def set_font(self, font_name, size, leading):
        """Select the font used by subsequent draw() calls.

        The Tf operator itself is deferred to draw(), once the concrete
        (possibly subsetted) PDF font resource is known.
        """
        self.font_name = font_name
        self.font_size = size
        self.font_leading = leading
        self.font_subset = None

    def _select_font_resource(self, name):
        """Register *name* in the document/page font resources (if not
        already present) and emit the Tf/TL operators for it."""
        doc = self.page.document
        if name not in doc.font_references:
            doc.font_references[name] = doc.ref()
        if name not in self.page.font:
            self.page.font[name] = doc.font_references[name]
        self.write("{} {} Tf {} TL\n".format(
            name, self.font_size, self.font_leading
        ))

    def draw(self, txt, new_line=False):
        """Write *txt* with the current font.

        Handles the three reportlab font families: dynamic (subsetted
        TrueType), multi-byte (CID), and Type1 with substitution fonts.
        """
        doc = self.page.document
        font = getFont(self.font_name)
        if font._dynamicFont:
            # Subsetted TrueType: each run of text may come from a different
            # subset, which is its own PDF font resource.
            for subset, t in font.splitString(txt, doc):
                if self.font_subset != subset:
                    self._select_font_resource(
                        font.getSubsetInternalName(subset, doc))
                    # Bug fix: remember the subset so we don't re-emit an
                    # identical Tf for every chunk (the guard above was
                    # otherwise always true).
                    self.font_subset = subset
                self.write("({}) Tj ".format(escapePDF(t)))
        elif font._multiByte:
            name = doc.fontMapping.get(font.fontName)
            if name is None:
                name = doc.fontMapping[font.fontName] = '/F{}'.format(len(doc.fontMapping) + 1)
                doc.delayedFonts.append(font)
            # Bug fix: *name* already carries its leading "/" (the original
            # wrote "//F1"), and the text operand must be spliced with
            # str.format (the original emitted a literal "%s").
            self._select_font_resource(name)
            self.write("({}) Tj ".format(font.formatForPdf(txt)))
        else:
            # Type1: unicode2T1 splits the text into runs per (substitution)
            # font; switch resources whenever the run's font changes.
            current = None
            for f, t in pdfmetrics.unicode2T1(txt, [font] + font.substitutionFonts):
                if f != current:
                    name = doc.fontMapping.get(f.fontName)
                    if name is None:
                        name = doc.fontMapping[f.fontName] = '/F{}'.format(len(doc.fontMapping) + 1)
                        doc.delayedFonts.append(f)
                    self._select_font_resource(name)
                    current = f
                self.write("({}) Tj ".format(escapePDF(t)))
        if new_line:
            self.write('T*\n')

    def close(self):
        """Terminate the text object (ET)."""
        self.write("ET\n")
| {
"repo_name": "systori/bericht",
"path": "bericht/pdf/text.py",
"copies": "1",
"size": "3119",
"license": "bsd-3-clause",
"hash": -2788084669134624300,
"line_mean": 38.9871794872,
"line_max": 98,
"alpha_frac": 0.5190766271,
"autogenerated": false,
"ratio": 3.7175208581644816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9733151454747633,
"avg_score": 0.0006892061033696958,
"num_lines": 78
} |
from fontMath.mathFunctions import factorAngle, _roundNumber
# Guideline math helpers shared by fontMath's MathGlyph/MathInfo objects.
__all__ = [
    "_expandGuideline",
    "_compressGuideline",
    "_pairGuidelines",
    "_processMathOneGuidelines",
    "_processMathTwoGuidelines",
    "_roundGuidelines"
]
def _expandGuideline(guideline):
guideline = dict(guideline)
x = guideline.get("x")
y = guideline.get("y")
# horizontal
if x is None:
guideline["x"] = 0
guideline["angle"] = 0
# vertical
elif y is None:
guideline["y"] = 0
guideline["angle"] = 90
return guideline
def _compressGuideline(guideline):
guideline = dict(guideline)
x = guideline["x"]
y = guideline["y"]
angle = guideline["angle"]
# horizontal
if x == 0 and angle in (0, 180):
guideline["x"] = None
guideline["angle"] = None
# vertical
elif y == 0 and angle in (90, 270):
guideline["y"] = None
guideline["angle"] = None
return guideline
def _pairGuidelines(guidelines1, guidelines2):
guidelines1 = list(guidelines1)
guidelines2 = list(guidelines2)
pairs = []
# name + identifier + (x, y, angle)
_findPair(guidelines1, guidelines2, pairs, ("name", "identifier", "x", "y", "angle"))
# name + identifier matches
_findPair(guidelines1, guidelines2, pairs, ("name", "identifier"))
# name + (x, y, angle)
_findPair(guidelines1, guidelines2, pairs, ("name", "x", "y", "angle"))
# identifier + (x, y, angle)
_findPair(guidelines1, guidelines2, pairs, ("identifier", "x", "y", "angle"))
# name matches
if guidelines1 and guidelines2:
_findPair(guidelines1, guidelines2, pairs, ("name",))
# identifier matches
if guidelines1 and guidelines2:
_findPair(guidelines1, guidelines2, pairs, ("identifier",))
# done
return pairs
def _findPair(guidelines1, guidelines2, pairs, attrs):
removeFromGuidelines1 = []
for guideline1 in guidelines1:
match = None
for guideline2 in guidelines2:
attrMatch = False not in [guideline1.get(attr) == guideline2.get(attr) for attr in attrs]
if attrMatch:
match = guideline2
break
if match is not None:
guideline2 = match
removeFromGuidelines1.append(guideline1)
guidelines2.remove(guideline2)
pairs.append((guideline1, guideline2))
def _processMathOneGuidelines(guidelinePairs, ptFunc, func):
result = []
for guideline1, guideline2 in guidelinePairs:
guideline = dict(guideline1)
pt1 = (guideline1["x"], guideline1["y"])
pt2 = (guideline2["x"], guideline2["y"])
guideline["x"], guideline["y"] = ptFunc(pt1, pt2)
angle1 = guideline1["angle"]
angle2 = guideline2["angle"]
guideline["angle"] = func(angle1, angle2) % 360
result.append(guideline)
return result
def _processMathTwoGuidelines(guidelines, factor, func):
    """Apply *func* with the (x, y) *factor* to every guideline.

    Angles are transformed through factorAngle and normalized into
    [0, 360). Returns new dicts; the inputs are not modified.
    """
    result = []
    for source in guidelines:
        transformed = dict(source)
        transformed["x"] = func(transformed["x"], factor[0])
        transformed["y"] = func(transformed["y"], factor[1])
        transformed["angle"] = factorAngle(source["angle"], factor, func) % 360
        result.append(transformed)
    return result
def _roundGuidelines(guidelines, digits=None):
    """Return copies of *guidelines* with x and y rounded to *digits*
    decimal places (integers when digits is None)."""
    rounded = []
    for source in guidelines:
        g = dict(source)
        g['x'] = _roundNumber(g['x'], digits)
        g['y'] = _roundNumber(g['y'], digits)
        rounded.append(g)
    return rounded
if __name__ == "__main__":
    # Run the module's doctests; exit status is the number of failures.
    import sys
    import doctest
    sys.exit(doctest.testmod().failed)
| {
"repo_name": "anthrotype/fontMath",
"path": "Lib/fontMath/mathGuideline.py",
"copies": "1",
"size": "3731",
"license": "mit",
"hash": 1188208854885928000,
"line_mean": 31.4434782609,
"line_max": 101,
"alpha_frac": 0.6161886894,
"autogenerated": false,
"ratio": 3.519811320754717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9626274185698817,
"avg_score": 0.0019451648911800652,
"num_lines": 115
} |
from fontParts.base.base import BaseDict, dynamicProperty, reference
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedGroups, RemovedGroups
class BaseGroups(BaseDict, DeprecatedGroups, RemovedGroups):
    """
    A Groups object. This object normally created as part of a
    :class:`BaseFont`. An orphan Groups object can be created like this::

        >>> groups = RGroups()

    This object behaves like a Python dictionary. Most of the dictionary
    functionality comes from :class:`BaseDict`, look at that object for the
    required environment implementation details.

    Groups uses :func:`normalizers.normalizeGroupKey` to normalize the key of
    the ``dict``, and :func:`normalizers.normalizeGroupValue` to normalize the
    value of the ``dict``.
    """

    # Hooks used by BaseDict to validate keys (group names) and values
    # (glyph-name lists) on every read and write.
    keyNormalizer = normalizers.normalizeGroupKey
    valueNormalizer = normalizers.normalizeGroupValue
def _reprContents(self):
contents = []
if self.font is not None:
contents.append("for font")
contents += self.font._reprContents()
return contents
    # -------
    # Parents
    # -------

    # Font

    # Weak reference (or None) to the owning font; see the ``font`` property.
    _font = None

    font = dynamicProperty("font", "The Groups' parent :class:`BaseFont`.")

    def _get_font(self):
        # Dereference the stored weak reference; None if never parented.
        if self._font is None:
            return None
        return self._font()

    def _set_font(self, font):
        # A Groups object may belong to only one font for its lifetime.
        if self._font is not None and self._font != font:
            raise AssertionError("font for groups already set and is not same as font")
        if font is not None:
            # Hold the parent weakly to avoid reference cycles.
            font = reference(font)
        self._font = font
    # ---------
    # Searching
    # ---------

    def findGlyph(self, glyphName):
        """
        Returns a ``list`` of the group or groups associated with
        **glyphName**.
        **glyphName** will be an :ref:`type-string`. If no group is found
        to contain **glyphName** an empty ``list`` will be returned. ::

            >>> font.groups.findGlyph("A")
            ["A_accented"]
        """
        glyphName = normalizers.normalizeGlyphName(glyphName)
        groupNames = self._findGlyph(glyphName)
        # keyNormalizer is stored as a class attribute wrapping a function;
        # __func__ retrieves the plain callable so it can be applied here.
        groupNames = [self.keyNormalizer.__func__(
            groupName) for groupName in groupNames]
        return groupNames
def _findGlyph(self, glyphName):
"""
This is the environment implementation of
:meth:`BaseGroups.findGlyph`. **glyphName** will be
an :ref:`type-string`.
Subclasses may override this method.
"""
found = []
for key, groupList in self.items():
if glyphName in groupList:
found.append(key)
return found
# --------------
# Kerning Groups
# --------------
side1KerningGroups = dynamicProperty(
"base_side1KerningGroups",
"""
All groups marked as potential side 1
kerning members.
>>> side1Groups = groups.side1KerningGroups
The value will be a :ref:`dict` with
:ref:`string` keys representing group names
and :ref:`tuple` contaning glyph names.
"""
)
def _get_base_side1KerningGroups(self):
kerningGroups = self._get_side1KerningGroups()
normalized = {}
for name, members in kerningGroups.items():
name = normalizers.normalizeGroupKey(name)
members = normalizers.normalizeGroupValue(members)
normalized[name] = members
return normalized
def _get_side1KerningGroups(self):
"""
Subclasses may override this method.
"""
found = {}
for name, contents in self.items():
if name.startswith("public.kern1."):
found[name] = contents
return found
side2KerningGroups = dynamicProperty(
"base_side2KerningGroups",
"""
All groups marked as potential side 1
kerning members.
>>> side2Groups = groups.side2KerningGroups
The value will be a :ref:`dict` with
:ref:`string` keys representing group names
and :ref:`tuple` contaning glyph names.
"""
)
def _get_base_side2KerningGroups(self):
kerningGroups = self._get_side2KerningGroups()
normalized = {}
for name, members in kerningGroups.items():
name = normalizers.normalizeGroupKey(name)
members = normalizers.normalizeGroupValue(members)
normalized[name] = members
return normalized
def _get_side2KerningGroups(self):
"""
Subclasses may override this method.
"""
found = {}
for name, contents in self.items():
if name.startswith("public.kern2."):
found[name] = contents
return found
# ---------------------
# RoboFab Compatibility
# ---------------------

def remove(self, groupName):
    """
    Removes the group named **groupName** (a :ref:`type-string`)
    from the Groups.
    This is a backwards compatibility method; it is equivalent to
    ``del groups[groupName]``.
    """
    self.__delitem__(groupName)
def asDict(self):
    """
    Return the Groups contents as a plain ``dict``.
    This is a backwards compatibility method.
    """
    return dict(self.items())
# -------------------
# Inherited Functions
# -------------------
# Thin wrappers over BaseDict; each method exists only to attach
# Groups-specific documentation to the inherited dict behavior.

def __contains__(self, groupName):
    """
    Tests to see if a group name is in the Groups.
    **groupName** will be a :ref:`type-string`.
    This returns a ``bool`` indicating if the **groupName**
    is in the Groups. ::
        >>> "myGroup" in font.groups
        True
    """
    return super(BaseGroups, self).__contains__(groupName)

def __delitem__(self, groupName):
    """
    Removes **groupName** from the Groups. **groupName** is a
    :ref:`type-string`.::
        >>> del font.groups["myGroup"]
    """
    super(BaseGroups, self).__delitem__(groupName)

def __getitem__(self, groupName):
    """
    Returns the contents of the named group. **groupName** is a
    :ref:`type-string`. The returned value will be a
    :ref:`type-immutable-list` of the group contents.::
        >>> font.groups["myGroup"]
        ("A", "B", "C")
    It is important to understand that any changes to the returned group
    contents will not be reflected in the Groups object. If one wants to
    make a change to the group contents, one should do the following::
        >>> group = font.groups["myGroup"]
        >>> group.remove("A")
        >>> font.groups["myGroup"] = group
    """
    return super(BaseGroups, self).__getitem__(groupName)

def __iter__(self):
    """
    Iterates through the Groups, giving the key for each iteration. The
    order that the Groups will iterate though is not fixed nor is it
    ordered.::
        >>> for groupName in font.groups:
        >>>     print(groupName)
        "myGroup"
        "myGroup3"
        "myGroup2"
    """
    return super(BaseGroups, self).__iter__()

def __len__(self):
    """
    Returns the number of groups in Groups as an ``int``.::
        >>> len(font.groups)
        5
    """
    return super(BaseGroups, self).__len__()

def __setitem__(self, groupName, glyphNames):
    """
    Sets the **groupName** to the list of **glyphNames**. **groupName**
    is the group name as a :ref:`type-string` and **glyphNames** is a
    ``list`` of glyph names as :ref:`type-string`.
        >>> font.groups["myGroup"] = ["A", "B", "C"]
    """
    super(BaseGroups, self).__setitem__(groupName, glyphNames)

def clear(self):
    """
    Removes all group information from Groups,
    resetting the Groups to an empty dictionary. ::
        >>> font.groups.clear()
    """
    super(BaseGroups, self).clear()

def get(self, groupName, default=None):
    """
    Returns the contents of the named group.
    **groupName** is a :ref:`type-string`, and the returned values will
    either be :ref:`type-immutable-list` of group contents or ``None``
    if no group was found. ::
        >>> font.groups["myGroup"]
        ("A", "B", "C")
    It is important to understand that any changes to the returned group
    contents will not be reflected in the Groups object. If one wants to
    make a change to the group contents, one should do the following::
        >>> group = font.groups["myGroup"]
        >>> group.remove("A")
        >>> font.groups["myGroup"] = group
    """
    return super(BaseGroups, self).get(groupName, default)

def items(self):
    """
    Returns a list of ``tuple`` of each group name and group members.
    Group names are :ref:`type-string` and group members are a
    :ref:`type-immutable-list` of :ref:`type-string`. The initial
    list will be unordered.
        >>> font.groups.items()
        [("myGroup", ("A", "B", "C")), ("myGroup2", ("D", "E", "F"))]
    """
    return super(BaseGroups, self).items()

def keys(self):
    """
    Returns a ``list`` of all the group names in Groups. This list will be
    unordered.::
        >>> font.groups.keys()
        ["myGroup4", "myGroup1", "myGroup5"]
    """
    return super(BaseGroups, self).keys()

def pop(self, groupName, default=None):
    """
    Removes the **groupName** from the Groups and returns the list of
    group members. If no group is found, **default** is returned.
    **groupName** is a :ref:`type-string`. This must return either
    **default** or a :ref:`type-immutable-list` of glyph names as
    :ref:`type-string`.
        >>> font.groups.pop("myGroup")
        ("A", "B", "C")
    """
    return super(BaseGroups, self).pop(groupName, default)

def update(self, otherGroups):
    """
    Updates the Groups based on **otherGroups**. **otherGroups** is a
    ``dict`` of groups information. If a group from **otherGroups** is in
    Groups, the group members will be replaced by the group members from
    **otherGroups**. If a group from **otherGroups** is not in the Groups,
    it is added to the Groups. If Groups contain a group name that is not
    in **otherGroups**, it is not changed.
        >>> font.groups.update(newGroups)
    """
    super(BaseGroups, self).update(otherGroups)

def values(self):
    """
    Returns a ``list`` of each named group's members.
    This will be a list of lists, the group members will be a
    :ref:`type-immutable-list` of :ref:`type-string`. The initial
    list will be unordered.
        >>> font.groups.values()
        [("A", "B", "C"), ("D", "E", "F")]
    """
    return super(BaseGroups, self).values()
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/groups.py",
"copies": "1",
"size": "11147",
"license": "mit",
"hash": 7354033429578311000,
"line_mean": 31.0316091954,
"line_max": 87,
"alpha_frac": 0.5652641966,
"autogenerated": false,
"ratio": 4.364526233359436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5429790429959437,
"avg_score": null,
"num_lines": null
} |
from fontParts.base.base import BaseDict, dynamicProperty, reference
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedLib, RemovedLib
class BaseLib(BaseDict, DeprecatedLib, RemovedLib):
    """
    A Lib object. This object is normally created as part of a
    :class:`BaseFont`. An orphan Lib object can be created like this::
        >>> lib = RLib()
    This object behaves like a Python dictionary. Most of the dictionary
    functionality comes from :class:`BaseDict`, look at that object for the
    required environment implementation details.
    Lib uses :func:`normalizers.normalizeLibKey` to normalize the key of
    the ``dict``, and :func:`normalizers.normalizeLibValue` to normalize the
    value of the ``dict``.
    """

    # Normalizers applied by BaseDict to every key/value passing
    # through this mapping.
    keyNormalizer = normalizers.normalizeLibKey
    valueNormalizer = normalizers.normalizeLibValue

    def _reprContents(self):
        # Describe the parent (glyph or font) for __repr__.
        contents = []
        if self.glyph is not None:
            contents.append("in glyph")
            contents += self.glyph._reprContents()
        if self.font:
            contents.append("in font")
            contents += self.font._reprContents()
        return contents

    # -------
    # Parents
    # -------
    # A lib belongs to either a glyph or a font, never both; the
    # setters below enforce this invariant with assertions.

    # Glyph

    # Callable weak back-reference to the owning glyph (see ``reference``),
    # or None when unparented or owned by a font.
    _glyph = None

    glyph = dynamicProperty("glyph", "The lib's parent glyph.")

    def _get_glyph(self):
        if self._glyph is None:
            return None
        # _glyph stores a callable reference; call it to resolve.
        return self._glyph()

    def _set_glyph(self, glyph):
        if self._font is not None:
            raise AssertionError("font for lib already set")
        # Re-parenting to a different glyph is an error; setting the
        # same glyph again (or clearing with None) is allowed.
        if self._glyph is not None and self._glyph() != glyph:
            raise AssertionError("glyph for lib already set and is not same as glyph")
        if glyph is not None:
            glyph = reference(glyph)
        self._glyph = glyph

    # Font

    # Callable weak back-reference to the owning font, or None.
    _font = None

    font = dynamicProperty("font", "The lib's parent font.")

    def _get_font(self):
        # Direct font parent wins; otherwise walk up through the glyph.
        if self._font is not None:
            return self._font()
        elif self._glyph is not None:
            return self.glyph.font
        return None

    def _set_font(self, font):
        if self._font is not None and self._font() != font:
            raise AssertionError("font for lib already set and is not same as font")
        if self._glyph is not None:
            raise AssertionError("glyph for lib already set")
        if font is not None:
            font = reference(font)
        self._font = font

    # Layer

    layer = dynamicProperty("layer", "The lib's parent layer.")

    def _get_layer(self):
        # A lib only has a layer via its glyph; a font-owned lib has none.
        if self._glyph is None:
            return None
        return self.glyph.layer

    # ---------------------
    # RoboFab Compatibility
    # ---------------------

    def remove(self, key):
        """
        Removes a key from the Lib. **key** will be
        a :ref:`type-string` that is the key to
        be removed.
        This is a backwards compatibility method.
        """
        del self[key]

    def asDict(self):
        """
        Return the Lib as a ``dict``.
        This is a backwards compatibility method.
        """
        d = {}
        for k, v in self.items():
            d[k] = v
        return d

    # -------------------
    # Inherited Functions
    # -------------------
    # Thin wrappers over BaseDict; each method exists only to attach
    # Lib-specific documentation to the inherited dict behavior.

    def __contains__(self, key):
        """
        Tests to see if a lib name is in the Lib.
        **key** will be a :ref:`type-string`.
        This returns a ``bool`` indicating if the **key**
        is in the Lib. ::
            >>> "public.glyphOrder" in font.lib
            True
        """
        return super(BaseLib, self).__contains__(key)

    def __delitem__(self, key):
        """
        Removes **key** from the Lib. **key** is a :ref:`type-string`.::
            >>> del font.lib["public.glyphOrder"]
        """
        super(BaseLib, self).__delitem__(key)

    def __getitem__(self, key):
        """
        Returns the contents of the named lib. **key** is a
        :ref:`type-string`.
        The returned value will be a ``list`` of the lib contents.::
            >>> font.lib["public.glyphOrder"]
            ["A", "B", "C"]
        It is important to understand that any changes to the returned lib
        contents will not be reflected in the Lib object. If one wants to
        make a change to the lib contents, one should do the following::
            >>> lib = font.lib["public.glyphOrder"]
            >>> lib.remove("A")
            >>> font.lib["public.glyphOrder"] = lib
        """
        return super(BaseLib, self).__getitem__(key)

    def __iter__(self):
        """
        Iterates through the Lib, giving the key for each iteration. The
        order that the Lib will iterate though is not fixed nor is it
        ordered.::
            >>> for key in font.lib:
            >>>     print(key)
            "public.glyphOrder"
            "org.robofab.scripts.SomeData"
            "public.postscriptNames"
        """
        return super(BaseLib, self).__iter__()

    def __len__(self):
        """
        Returns the number of keys in Lib as an ``int``.::
            >>> len(font.lib)
            5
        """
        return super(BaseLib, self).__len__()

    def __setitem__(self, key, items):
        """
        Sets the **key** to the list of **items**. **key**
        is the lib name as a :ref:`type-string` and **items** is a
        ``list`` of items as :ref:`type-string`.
            >>> font.lib["public.glyphOrder"] = ["A", "B", "C"]
        """
        super(BaseLib, self).__setitem__(key, items)

    def clear(self):
        """
        Removes all keys from Lib,
        resetting the Lib to an empty dictionary. ::
            >>> font.lib.clear()
        """
        super(BaseLib, self).clear()

    def get(self, key, default=None):
        """
        Returns the contents of the named key.
        **key** is a :ref:`type-string`, and the returned values will
        either be ``list`` of key contents or ``None`` if no key was
        found. ::
            >>> font.lib["public.glyphOrder"]
            ["A", "B", "C"]
        It is important to understand that any changes to the returned key
        contents will not be reflected in the Lib object. If one wants to
        make a change to the key contents, one should do the following::
            >>> lib = font.lib["public.glyphOrder"]
            >>> lib.remove("A")
            >>> font.lib["public.glyphOrder"] = lib
        """
        return super(BaseLib, self).get(key, default)

    def items(self):
        """
        Returns a list of ``tuple`` of each key name and key items.
        Keys are :ref:`type-string` and key members are a ``list``
        of :ref:`type-string`. The initial list will be unordered.
            >>> font.lib.items()
            [("public.glyphOrder", ["A", "B", "C"]),
             ("public.postscriptNames", {'be': 'uni0431', 'ze': 'uni0437'})]
        """
        return super(BaseLib, self).items()

    def keys(self):
        """
        Returns a ``list`` of all the key names in Lib. This list will be
        unordered.::
            >>> font.lib.keys()
            ["public.glyphOrder", "org.robofab.scripts.SomeData",
             "public.postscriptNames"]
        """
        return super(BaseLib, self).keys()

    def pop(self, key, default=None):
        """
        Removes the **key** from the Lib and returns the ``list`` of
        key members. If no key is found, **default** is returned.
        **key** is a :ref:`type-string`. This must return either
        **default** or a ``list`` of items as :ref:`type-string`.
            >>> font.lib.pop("public.glyphOrder")
            ["A", "B", "C"]
        """
        return super(BaseLib, self).pop(key, default)

    def update(self, otherLib):
        """
        Updates the Lib based on **otherLib**. **otherLib** is a
        ``dict`` of keys. If a key from **otherLib** is in Lib
        the key members will be replaced by the key members from
        **otherLib**. If a key from **otherLib** is not in the Lib,
        it is added to the Lib. If Lib contain a key name that is not
        in **otherLib**, it is not changed.
            >>> font.lib.update(newLib)
        """
        super(BaseLib, self).update(otherLib)

    def values(self):
        """
        Returns a ``list`` of each named key's members. This will be a list
        of lists, the key members will be a ``list`` of :ref:`type-string`.
        The initial list will be unordered.
            >>> font.lib.values()
            [["A", "B", "C"], {'be': 'uni0431', 'ze': 'uni0437'}]
        """
        return super(BaseLib, self).values()
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/lib.py",
"copies": "1",
"size": "8671",
"license": "mit",
"hash": -270218474893853600,
"line_mean": 30.190647482,
"line_max": 86,
"alpha_frac": 0.5450351747,
"autogenerated": false,
"ratio": 4.209223300970874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008366531926588546,
"num_lines": 278
} |
from fontParts.base.base import BaseObject, dynamicProperty, reference
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedFeatures, RemovedFeatures
class BaseFeatures(BaseObject, DeprecatedFeatures, RemovedFeatures):
    """
    A Features object, holding a font's OpenType feature definitions
    as ``.fea`` formatted text (see the :attr:`text` attribute).
    Normally created as part of a :class:`BaseFont`.
    Subclasses must implement :meth:`_get_text` and :meth:`_set_text`.
    """

    # Attributes copied when the object is duplicated by BaseObject.copy.
    copyAttributes = ("text",)

    def _reprContents(self):
        # Describe the parent font (when one is set) for __repr__.
        contents = []
        if self.font is not None:
            contents.append("for font")
            contents += self.font._reprContents()
        return contents

    # -------
    # Parents
    # -------

    # Font

    # Callable weak back-reference to the owning font (see ``reference``),
    # or None when unparented.
    _font = None

    font = dynamicProperty("font", "The features' parent :class:`BaseFont`.")

    def _get_font(self):
        if self._font is None:
            return None
        # _font stores a callable reference; call it to resolve.
        return self._font()

    def _set_font(self, font):
        # Re-parenting to a different font is an error; setting the same
        # font again (or clearing with None) is allowed.
        if self._font is not None and self._font() != font:
            raise AssertionError("font for features already set and is not same as font")
        if font is not None:
            font = reference(font)
        self._font = font

    # ----
    # Text
    # ----

    text = dynamicProperty(
        "base_text",
        """
        The `.fea formated
        <http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html>`_
        text representing the features.
        It must be a :ref:`type-string`.
        """
    )

    def _get_base_text(self):
        # Public getter: normalize the raw value (None passes through).
        value = self._get_text()
        if value is not None:
            value = normalizers.normalizeFeatureText(value)
        return value

    def _set_base_text(self, value):
        # Public setter: normalize before handing off to the environment.
        if value is not None:
            value = normalizers.normalizeFeatureText(value)
        self._set_text(value)

    def _get_text(self):
        """
        This is the environment implementation of
        :attr:`BaseFeatures.text`. This must return a
        :ref:`type-string`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()

    def _set_text(self, value):
        """
        This is the environment implementation of
        :attr:`BaseFeatures.text`. **value** will be
        a :ref:`type-string`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/features.py",
"copies": "1",
"size": "2207",
"license": "mit",
"hash": 5402496910509630000,
"line_mean": 25.9146341463,
"line_max": 89,
"alpha_frac": 0.5899410965,
"autogenerated": false,
"ratio": 4.335952848722986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013550135501355014,
"num_lines": 82
} |
from fontParts.base.base import dynamicProperty
# ----
# Base
# ----
class BaseCompatibilityReporter(object):
    """
    Base class for interpolation-compatibility reporters. Holds a pair
    of objects being compared, a ``fatal``/``warning`` status, and
    shared helpers for formatting the report text. Subclasses override
    :meth:`report`.
    """

    # Label used in report prefixes, e.g. "[Fatal] Glyph: ...".
    objectName = "Base"

    def __init__(self, obj1, obj2):
        self._object1 = obj1
        self._object2 = obj2

    # status
    # Set to True by the comparison machinery when a fatal/warning
    # incompatibility is found.
    fatal = False
    warning = False

    def _get_title(self):
        # Title line, prefixed according to the worst status found.
        title = "{object1Name} + {object2Name}".format(
            object1Name=self.object1Name,
            object2Name=self.object2Name
        )
        if self.fatal:
            return self.formatFatalString(title)
        elif self.warning:
            return self.formatWarningString(title)
        else:
            return self.formatOKString(title)

    title = dynamicProperty("title")

    # objects

    object1 = dynamicProperty("object1")
    object1Name = dynamicProperty("object1Name")

    def _get_object1(self):
        return self._object1

    def _get_object1Name(self):
        return self._getObjectName(self._object1)

    object2 = dynamicProperty("object2")
    object2Name = dynamicProperty("object2Name")

    def _get_object2(self):
        return self._object2

    def _get_object2Name(self):
        return self._getObjectName(self._object2)

    @staticmethod
    def _getObjectName(obj):
        # Best available identifier: name, then identifier, then index,
        # then the Python object id as a last resort.
        if hasattr(obj, "name") and obj.name is not None:
            return "\"%s\"" % obj.name
        elif hasattr(obj, "identifier") and obj.identifier is not None:
            return "\"%s\"" % obj.identifier
        elif hasattr(obj, "index"):
            return "[%s]" % obj.index
        else:
            return "<%s>" % id(obj)

    # Report

    def __repr__(self):
        return self.report()

    def report(self, showOK=False, showWarnings=False):
        """Return the report text. Subclasses must override."""
        raise NotImplementedError

    def formatFatalString(self, text):
        return "[Fatal] {objectName}: ".format(objectName=self.objectName) + text

    def formatWarningString(self, text):
        return "[Warning] {objectName}: ".format(objectName=self.objectName) + text

    def formatOKString(self, text):
        return "[OK] {objectName}: ".format(objectName=self.objectName) + text

    @staticmethod
    def reportSubObjects(reporters, showOK=True, showWarnings=True):
        # Collect the reports of child reporters, filtering by status.
        report = []
        for reporter in reporters:
            if showOK or reporter.fatal or (showWarnings and reporter.warning):
                report.append(repr(reporter))
        return report

    @staticmethod
    def reportCountDifference(subObjectName,
                              object1Name, object1Count,
                              object2Name, object2Count):
        # "X contains N foos | Y contains M foos"
        text = ("{object1Name} contains {object1Count} {subObjectName} | "
                "{object2Name} contains {object2Count} {subObjectName}").format(
            subObjectName=subObjectName,
            object1Name=object1Name,
            object1Count=object1Count,
            object2Name=object2Name,
            object2Count=object2Count
        )
        return text

    @staticmethod
    def reportOrderDifference(subObjectName,
                              object1Name, object1Order,
                              object2Name, object2Order):
        # "X has foos ordered [...] | Y has [...]"
        text = ("{object1Name} has {subObjectName} ordered {object1Order} | "
                "{object2Name} has {object2Order}").format(
            subObjectName=subObjectName,
            object1Name=object1Name,
            object1Order=object1Order,
            object2Name=object2Name,
            object2Order=object2Order
        )
        return text

    @staticmethod
    def reportDifferences(object1Name, subObjectName,
                          subObjectID, object2Name):
        # "X contains foo <id> not in Y"
        text = ("{object1Name} contains {subObjectName} {subObjectID} "
                "not in {object2Name}").format(
            object1Name=object1Name,
            subObjectName=subObjectName,
            subObjectID=subObjectID,
            object2Name=object2Name,
        )
        return text
# ----
# Font
# ----
class FontCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two fonts: guideline
    and layer count/membership differences (warnings), plus the
    accumulated per-layer reports.
    """

    objectName = "Font"

    def __init__(self, font1, font2):
        super(FontCompatibilityReporter, self).__init__(font1, font2)
        # Flags/lists below are filled in by the comparison machinery
        # before report() is called.
        self.guidelineCountDifference = False
        self.layerCountDifference = False
        self.guidelinesMissingFromFont2 = []
        self.guidelinesMissingInFont1 = []
        self.layersMissingFromFont2 = []
        self.layersMissingInFont1 = []
        # Child LayerCompatibilityReporter instances.
        self.layers = []

    # Aliases of the generic object1/object2 accessors.
    font1 = dynamicProperty("object1")
    font1Name = dynamicProperty("object1Name")
    font2 = dynamicProperty("object2")
    font2Name = dynamicProperty("object2Name")

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the font-level report text."""
        font1 = self.font1
        font2 = self.font2
        report = []
        if self.guidelineCountDifference:
            text = self.reportCountDifference(
                subObjectName="guidelines",
                object1Name=self.font1Name,
                object1Count=len(font1.guidelines),
                object2Name=self.font2Name,
                object2Count=len(font2.guidelines)
            )
            report.append(self.formatWarningString(text))
        for name in self.guidelinesMissingFromFont2:
            text = self.reportDifferences(
                object1Name=self.font1Name,
                subObjectName="guideline",
                subObjectID=name,
                object2Name=self.font2Name,
            )
            report.append(self.formatWarningString(text))
        for name in self.guidelinesMissingInFont1:
            text = self.reportDifferences(
                object1Name=self.font2Name,
                subObjectName="guideline",
                subObjectID=name,
                object2Name=self.font1Name,
            )
            report.append(self.formatWarningString(text))
        if self.layerCountDifference:
            text = self.reportCountDifference(
                subObjectName="layers",
                object1Name=self.font1Name,
                object1Count=len(font1.layerOrder),
                object2Name=self.font2Name,
                object2Count=len(font2.layerOrder)
            )
            report.append(self.formatWarningString(text))
        for name in self.layersMissingFromFont2:
            text = self.reportDifferences(
                object1Name=self.font1Name,
                subObjectName="layer",
                subObjectID=name,
                object2Name=self.font2Name,
            )
            report.append(self.formatWarningString(text))
        for name in self.layersMissingInFont1:
            text = self.reportDifferences(
                object1Name=self.font2Name,
                subObjectName="layer",
                subObjectID=name,
                object2Name=self.font1Name,
            )
            report.append(self.formatWarningString(text))
        # Append the child layer reports, then prefix with the title.
        report += self.reportSubObjects(self.layers, showOK=showOK,
                                        showWarnings=showWarnings)
        if report or showOK:
            report.insert(0, self.title)
        return "\n".join(report)
# -----
# Layer
# -----
class LayerCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two layers: glyph
    count/membership differences (warnings), plus the accumulated
    per-glyph reports.
    """

    objectName = "Layer"

    def __init__(self, layer1, layer2):
        super(LayerCompatibilityReporter, self).__init__(layer1, layer2)
        # Flags/lists below are filled in by the comparison machinery
        # before report() is called.
        self.glyphCountDifference = False
        self.glyphsMissingFromLayer2 = []
        self.glyphsMissingInLayer1 = []
        # Child GlyphCompatibilityReporter instances.
        self.glyphs = []

    # Aliases of the generic object1/object2 accessors.
    layer1 = dynamicProperty("object1")
    layer1Name = dynamicProperty("object1Name")
    layer2 = dynamicProperty("object2")
    layer2Name = dynamicProperty("object2Name")

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the layer-level report text."""
        layer1 = self.layer1
        layer2 = self.layer2
        report = []
        if self.glyphCountDifference:
            text = self.reportCountDifference(
                subObjectName="glyphs",
                object1Name=self.layer1Name,
                object1Count=len(layer1),
                object2Name=self.layer2Name,
                object2Count=len(layer2)
            )
            report.append(self.formatWarningString(text))
        for name in self.glyphsMissingFromLayer2:
            text = self.reportDifferences(
                object1Name=self.layer1Name,
                subObjectName="glyph",
                subObjectID=name,
                object2Name=self.layer2Name,
            )
            report.append(self.formatWarningString(text))
        for name in self.glyphsMissingInLayer1:
            text = self.reportDifferences(
                object1Name=self.layer2Name,
                subObjectName="glyph",
                subObjectID=name,
                object2Name=self.layer1Name,
            )
            report.append(self.formatWarningString(text))
        # Append the child glyph reports, then prefix with the title.
        report += self.reportSubObjects(self.glyphs,
                                        showOK=showOK,
                                        showWarnings=showWarnings)
        if report or showOK:
            report.insert(0, self.title)
        return "\n".join(report)
# -----
# Glyph
# -----
class GlyphCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two glyphs: contour
    count (fatal), component/anchor count, order and membership, and
    guideline count/membership (warnings), plus the accumulated
    per-contour reports.
    """

    objectName = "Glyph"

    def __init__(self, glyph1, glyph2):
        super(GlyphCompatibilityReporter, self).__init__(glyph1, glyph2)
        # Flags/lists below are filled in by the comparison machinery
        # before report() is called.
        self.contourCountDifference = False
        self.componentCountDifference = False
        self.guidelineCountDifference = False
        self.anchorDifferences = []
        self.anchorCountDifference = False
        self.anchorOrderDifference = False
        self.anchorsMissingFromGlyph1 = []
        self.anchorsMissingFromGlyph2 = []
        self.componentDifferences = []
        self.componentOrderDifference = False
        self.componentsMissingFromGlyph1 = []
        self.componentsMissingFromGlyph2 = []
        self.guidelinesMissingFromGlyph1 = []
        self.guidelinesMissingFromGlyph2 = []
        # Child ContourCompatibilityReporter instances.
        self.contours = []

    # Aliases of the generic object1/object2 accessors.
    glyph1 = dynamicProperty("object1")
    glyph1Name = dynamicProperty("object1Name")
    glyph2 = dynamicProperty("object2")
    glyph2Name = dynamicProperty("object2Name")

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the glyph-level report text."""
        glyph1 = self.glyph1
        glyph2 = self.glyph2
        report = []
        # Contour test
        # A contour count mismatch makes interpolation impossible: fatal.
        if self.contourCountDifference:
            text = self.reportCountDifference(
                subObjectName="contours",
                object1Name=self.glyph1Name,
                object1Count=len(glyph1),
                object2Name=self.glyph2Name,
                object2Count=len(glyph2)
            )
            report.append(self.formatFatalString(text))
        report += self.reportSubObjects(self.contours,
                                        showOK=showOK,
                                        showWarnings=showWarnings)
        # Component test
        # Count mismatch is fatal; a mere ordering difference is only
        # reported when the counts match (elif).
        if self.componentCountDifference:
            text = self.reportCountDifference(
                subObjectName="components",
                object1Name=self.glyph1Name,
                object1Count=len(glyph1.components),
                object2Name=self.glyph2Name,
                object2Count=len(glyph2.components)
            )
            report.append(self.formatFatalString(text))
        elif self.componentOrderDifference:
            text = self.reportOrderDifference(
                subObjectName="components",
                object1Name=self.glyph1Name,
                object1Order=[c.baseGlyph for c in glyph1.components],
                object2Name=self.glyph2Name,
                object2Order=[c.baseGlyph for c in glyph2.components]
            )
            report.append(self.formatWarningString(text))
        for name in self.componentsMissingFromGlyph2:
            text = self.reportDifferences(
                object1Name=self.glyph1Name,
                subObjectName="component",
                subObjectID=name,
                object2Name=self.glyph2Name,
            )
            report.append(self.formatWarningString(text))
        for name in self.componentsMissingFromGlyph1:
            text = self.reportDifferences(
                object1Name=self.glyph2Name,
                subObjectName="component",
                subObjectID=name,
                object2Name=self.glyph1Name,
            )
            report.append(self.formatWarningString(text))
        # Anchor test
        # Anchor differences never block interpolation: warnings only.
        if self.anchorCountDifference:
            text = self.reportCountDifference(
                subObjectName="anchors",
                object1Name=self.glyph1Name,
                object1Count=len(glyph1.anchors),
                object2Name=self.glyph2Name,
                object2Count=len(glyph2.anchors)
            )
            report.append(self.formatWarningString(text))
        elif self.anchorOrderDifference:
            text = self.reportOrderDifference(
                subObjectName="anchors",
                object1Name=self.glyph1Name,
                object1Order=[a.name for a in glyph1.anchors],
                object2Name=self.glyph2Name,
                object2Order=[a.name for a in glyph2.anchors]
            )
            report.append(self.formatWarningString(text))
        for name in self.anchorsMissingFromGlyph2:
            text = self.reportDifferences(
                object1Name=self.glyph1Name,
                subObjectName="anchor",
                subObjectID=name,
                object2Name=self.glyph2Name,
            )
            report.append(self.formatWarningString(text))
        for name in self.anchorsMissingFromGlyph1:
            text = self.reportDifferences(
                object1Name=self.glyph2Name,
                subObjectName="anchor",
                subObjectID=name,
                object2Name=self.glyph1Name,
            )
            report.append(self.formatWarningString(text))
        # Guideline test
        if self.guidelineCountDifference:
            text = self.reportCountDifference(
                subObjectName="guidelines",
                object1Name=self.glyph1Name,
                object1Count=len(glyph1.guidelines),
                object2Name=self.glyph2Name,
                object2Count=len(glyph2.guidelines)
            )
            report.append(self.formatWarningString(text))
        for name in self.guidelinesMissingFromGlyph2:
            text = self.reportDifferences(
                object1Name=self.glyph1Name,
                subObjectName="guideline",
                subObjectID=name,
                object2Name=self.glyph2Name,
            )
            report.append(self.formatWarningString(text))
        for name in self.guidelinesMissingFromGlyph1:
            text = self.reportDifferences(
                object1Name=self.glyph2Name,
                subObjectName="guideline",
                subObjectID=name,
                object2Name=self.glyph1Name,
            )
            report.append(self.formatWarningString(text))
        if report or showOK:
            report.insert(0, self.title)
        return "\n".join(report)
# -------
# Contour
# -------
class ContourCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two contours: segment
    count, open/closed state and winding direction (all fatal), plus
    the accumulated per-segment reports.
    """

    objectName = "Contour"

    def __init__(self, contour1, contour2):
        super(ContourCompatibilityReporter, self).__init__(contour1, contour2)
        # Flags/lists below are filled in by the comparison machinery
        # before report() is called.
        self.openDifference = False
        self.directionDifference = False
        self.segmentCountDifference = False
        # Child SegmentCompatibilityReporter instances.
        self.segments = []

    # Aliases of the generic object1/object2 accessors.
    contour1 = dynamicProperty("object1")
    contour1Name = dynamicProperty("object1Name")
    contour2 = dynamicProperty("object2")
    contour2Name = dynamicProperty("object2Name")

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the contour-level report text."""
        contour1 = self.contour1
        contour2 = self.contour2
        report = []
        if self.segmentCountDifference:
            text = self.reportCountDifference(
                subObjectName="segments",
                object1Name=self.contour1Name,
                object1Count=len(contour1),
                object2Name=self.contour2Name,
                object2Count=len(contour2)
            )
            report.append(self.formatFatalString(text))
        if self.openDifference:
            # One contour open, the other closed.
            state1 = state2 = "closed"
            if contour1.open:
                state1 = "open"
            if contour2.open:
                state2 = "open"
            text = "{contour1Name} is {state1} | {contour2Name} is {state2}".format(
                contour1Name=self.contour1Name,
                state1=state1,
                contour2Name=self.contour2Name,
                state2=state2
            )
            report.append(self.formatFatalString(text))
        if self.directionDifference:
            # Opposite winding directions.
            state1 = state2 = "counter-clockwise"
            if contour1.clockwise:
                state1 = "clockwise"
            if contour2.clockwise:
                state2 = "clockwise"
            text = "{contour1Name} is {state1} | {contour2Name} is {state2}".format(
                contour1Name=self.contour1Name,
                state1=state1,
                contour2Name=self.contour2Name,
                state2=state2
            )
            report.append(self.formatFatalString(text))
        # Append the child segment reports, then prefix with the title.
        report += self.reportSubObjects(self.segments,
                                        showOK=showOK,
                                        showWarnings=showWarnings)
        if report or showOK:
            report.insert(0, self.title)
        return "\n".join(report)
# -------
# Segment
# -------
class SegmentCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two segments: a
    difference in segment type (line/curve/qcurve) is fatal.
    """

    objectName = "Segment"

    # NOTE(review): the parameter names "contour1"/"contour2" look
    # copy-pasted from ContourCompatibilityReporter; the arguments are
    # actually segments. Left unchanged because renaming would break
    # any caller using keyword arguments.
    def __init__(self, contour1, contour2):
        super(SegmentCompatibilityReporter, self).__init__(contour1, contour2)
        # Set by the comparison machinery before report() is called.
        self.typeDifference = False

    # Aliases of the generic object1/object2 accessors.
    segment1 = dynamicProperty("object1")
    segment1Name = dynamicProperty("object1Name")
    segment2 = dynamicProperty("object2")
    segment2Name = dynamicProperty("object2Name")

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the segment-level report text."""
        segment1 = self.segment1
        segment2 = self.segment2
        report = []
        if self.typeDifference:
            type1 = segment1.type
            type2 = segment2.type
            text = "{segment1Name} is {type1} | {segment2Name} is {type2}".format(
                segment1Name=self.segment1Name,
                type1=type1,
                segment2Name=self.segment2Name,
                type2=type2
            )
            report.append(self.formatFatalString(text))
        if report or showOK:
            report.insert(0, self.title)
        return "\n".join(report)
# ---------
# Component
# ---------
class ComponentCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two components: a
    difference in base glyph is reported as a warning.
    """

    objectName = "Component"

    # Aliases of the generic object1/object2 accessors.
    component1 = dynamicProperty("object1")
    component1Name = dynamicProperty("object1Name")
    component2 = dynamicProperty("object2")
    component2Name = dynamicProperty("object2Name")

    def __init__(self, component1, component2):
        super(ComponentCompatibilityReporter, self).__init__(component1,
                                                             component2)
        # Set by the comparison machinery before report() is called.
        self.baseDifference = False

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the component-level report text."""
        lines = []
        if self.baseDifference:
            template = ("{component1Name} has base glyph {name1} | "
                        "{component2Name} has base glyph {name2}")
            message = template.format(
                component1Name=self.component1Name,
                name1=self.component1.baseName,
                component2Name=self.component2Name,
                name2=self.component2.baseName
            )
            lines.append(self.formatWarningString(message))
        if lines or showOK:
            lines.insert(0, self.title)
        return "\n".join(lines)
# ------
# Anchor
# ------
class AnchorCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two anchors: a
    difference in anchor name is reported as a warning.
    """

    objectName = "Anchor"

    # Aliases of the generic object1/object2 accessors.
    anchor1 = dynamicProperty("object1")
    anchor1Name = dynamicProperty("object1Name")
    anchor2 = dynamicProperty("object2")
    anchor2Name = dynamicProperty("object2Name")

    def __init__(self, anchor1, anchor2):
        super(AnchorCompatibilityReporter, self).__init__(anchor1, anchor2)
        # Set by the comparison machinery before report() is called.
        self.nameDifference = False

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the anchor-level report text."""
        lines = []
        if self.nameDifference:
            template = ("{anchor1Name} has name {name1} | "
                        "{anchor2Name} has name {name2}")
            message = template.format(
                anchor1Name=self.anchor1Name,
                name1=self.anchor1.name,
                anchor2Name=self.anchor2Name,
                name2=self.anchor2.name
            )
            lines.append(self.formatWarningString(message))
        if lines or showOK:
            lines.insert(0, self.title)
        return "\n".join(lines)
# ---------
# Guideline
# ---------
class GuidelineCompatibilityReporter(BaseCompatibilityReporter):
    """
    Reports interpolation compatibility between two guidelines: a
    difference in guideline name is reported as a warning.
    """

    objectName = "Guideline"

    # Aliases of the generic object1/object2 accessors.
    guideline1 = dynamicProperty("object1")
    guideline1Name = dynamicProperty("object1Name")
    guideline2 = dynamicProperty("object2")
    guideline2Name = dynamicProperty("object2Name")

    def __init__(self, guideline1, guideline2):
        super(GuidelineCompatibilityReporter, self).__init__(guideline1,
                                                             guideline2)
        # Set by the comparison machinery before report() is called.
        self.nameDifference = False

    def report(self, showOK=True, showWarnings=True):
        """Assemble and return the guideline-level report text."""
        lines = []
        if self.nameDifference:
            template = ("{guideline1Name} has name {name1} | "
                        "{guideline2Name} has name {name2}")
            message = template.format(
                guideline1Name=self.guideline1Name,
                name1=self.guideline1.name,
                guideline2Name=self.guideline2Name,
                name2=self.guideline2.name
            )
            lines.append(self.formatWarningString(message))
        if lines or showOK:
            lines.insert(0, self.title)
        return "\n".join(lines)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/compatibility.py",
"copies": "1",
"size": "22274",
"license": "mit",
"hash": 3124990156880493600,
"line_mean": 33.6947040498,
"line_max": 84,
"alpha_frac": 0.5840890725,
"autogenerated": false,
"ratio": 4.363173359451518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5447262431951518,
"avg_score": null,
"num_lines": null
} |
from fontParts.base.base import (
BaseDict,
dynamicProperty,
interpolate,
reference
)
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedKerning, RemovedKerning
class BaseKerning(BaseDict, DeprecatedKerning, RemovedKerning):
    r"""
    A Kerning object. This object normally created as part of a
    :class:`BaseFont`. An orphan Kerning object can be created
    like this::

        >>> groups = RKerning()

    This object behaves like a Python dictionary. Most of the
    dictionary functionality comes from :class:`BaseDict`, look at
    that object for the required environment implementation details.

    Kerning uses :func:`normalizers.normalizeKerningKey` to normalize the
    key of the ``dict``, and :func:`normalizers.normalizeKerningValue`
    to normalize the value of the ``dict``.
    """

    # Hooks used by BaseDict to validate every key (pair) and value.
    keyNormalizer = normalizers.normalizeKerningKey
    valueNormalizer = normalizers.normalizeKerningValue

    def _reprContents(self):
        # Contribute "for font ..." to this object's repr when it
        # belongs to a font.
        contents = []
        if self.font is not None:
            contents.append("for font")
            contents += self.font._reprContents()
        return contents

    # -------
    # Parents
    # -------

    # Font

    # Weak reference wrapper to the parent font; resolved in _get_font.
    _font = None

    font = dynamicProperty("font", "The Kerning's parent :class:`BaseFont`.")

    def _get_font(self):
        if self._font is None:
            return None
        # self._font is a callable reference; call it to get the font.
        return self._font()

    def _set_font(self, font):
        # The parent font may only be assigned once (re-assigning the
        # same font is allowed and is a no-op re-wrap).
        if self._font is not None and self._font() != font:
            raise AssertionError("font for kerning already set and is not same as font")
        if font is not None:
            font = reference(font)
        self._font = font

    # --------------
    # Transformation
    # --------------

    def scaleBy(self, factor):
        """
        Scales all kerning values by **factor**. **factor** will be an
        :ref:`type-int-float`, ``tuple`` or ``list``. The first value of the
        **factor** will be used to scale the kerning values.

            >>> myKerning.scaleBy(2)
            >>> myKerning.scaleBy((2,3))
        """
        factor = normalizers.normalizeTransformationScale(factor)
        self._scale(factor)

    def _scale(self, factor):
        """
        This is the environment implementation of :meth:`BaseKerning.scaleBy`.

        **factor** will be a ``tuple``.

        Subclasses may override this method.
        """
        # Only the horizontal component of the scale applies to kerning.
        factor = factor[0]
        for k, v in self.items():
            v *= factor
            self[k] = v

    # -------------
    # Normalization
    # -------------

    def round(self, multiple=1):
        """
        Rounds the kerning values to increments of **multiple**,
        which will be an ``int``.

        The default behavior is to round to increments of 1.
        """
        if not isinstance(multiple, int):
            raise TypeError("The round multiple must be an int not %s."
                            % multiple.__class__.__name__)
        self._round(multiple)

    def _round(self, multiple=1):
        """
        This is the environment implementation of
        :meth:`BaseKerning.round`. **multiple** will be an ``int``.

        Subclasses may override this method.
        """
        for pair, value in self.items():
            value = int(normalizers.normalizeRounding(
                value / float(multiple))) * multiple
            self[pair] = value

    # -------------
    # Interpolation
    # -------------

    def interpolate(self, factor, minKerning, maxKerning, round=True, suppressError=True):
        r"""
        Interpolates all pairs between two :class:`BaseKerning` objects:

        **minKerning** and **maxKerning**. The interpolation occurs on a
        0 to 1.0 range where **minKerning** is located at 0 and
        **maxKerning** is located at 1.0. The kerning data is replaced by
        the interpolated kerning.

        * **factor** is the interpolation value. It may be less than 0
          and greater than 1.0. It may be an :ref:`type-int-float`,
          ``tuple`` or ``list``. If it is a ``tuple`` or ``list``,
          the first number indicates the x factor and the second number
          indicates the y factor.
        * **round** is a ``bool`` indicating if the result should be rounded to
          ``int``\s. The default behavior is to round interpolated kerning.
        * **suppressError** is a ``bool`` indicating if incompatible data should
          be ignored or if an error should be raised when such incompatibilities
          are found. The default behavior is to ignore incompatible data.

            >>> myKerning.interpolate(kerningOne, kerningTwo)
        """
        factor = normalizers.normalizeInterpolationFactor(factor)
        if not isinstance(minKerning, BaseKerning):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.") % (
                self.__class__.__name__, minKerning.__class__.__name__))
        if not isinstance(maxKerning, BaseKerning):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.") % (
                self.__class__.__name__, maxKerning.__class__.__name__))
        round = normalizers.normalizeBoolean(round)
        suppressError = normalizers.normalizeBoolean(suppressError)
        self._interpolate(factor, minKerning, maxKerning,
                          round=round, suppressError=suppressError)

    def _interpolate(self, factor, minKerning, maxKerning,
                     round=True, suppressError=True):
        """
        This is the environment implementation of :meth:`BaseKerning.interpolate`.

        * **factor** will be an :ref:`type-int-float`, ``tuple`` or ``list``.
        * **minKerning** will be a :class:`BaseKerning` object.
        * **maxKerning** will be a :class:`BaseKerning` object.
        * **round** will be a ``bool`` indicating if the interpolated kerning
          should be rounded.
        * **suppressError** will be a ``bool`` indicating if incompatible data
          should be ignored.

        Subclasses may override this method.
        """
        import fontMath
        # Group kerning only interpolates safely when both fonts define
        # identical kerning groups; otherwise clear (or raise, above).
        kerningGroupCompatibility = self._testKerningGroupCompatibility(
            minKerning,
            maxKerning,
            suppressError=suppressError
        )
        if not kerningGroupCompatibility:
            self.clear()
        else:
            minKerning = fontMath.MathKerning(
                kerning=minKerning, groups=minKerning.font.groups)
            maxKerning = fontMath.MathKerning(
                kerning=maxKerning, groups=maxKerning.font.groups)
            result = interpolate(minKerning, maxKerning, factor)
            if round:
                result.round()
            self.clear()
            result.extractKerning(self.font)

    @staticmethod
    def _testKerningGroupCompatibility(minKerning, maxKerning, suppressError=False):
        # Compare the side 1 and side 2 kerning group definitions of both
        # fonts: the group names and the glyph membership of each group
        # must match exactly.
        # BUGFIX: the previous implementation wrapped this in a
        # ``while match:`` pseudo-block that never terminated when all
        # groups matched; a plain loop is used instead.
        minGroups = minKerning.font.groups
        maxGroups = maxKerning.font.groups
        match = True
        for _, sideAttr in (
            ("side 1", "side1KerningGroups"),
            ("side 2", "side2KerningGroups")
        ):
            minSideGroups = getattr(minGroups, sideAttr)
            maxSideGroups = getattr(maxGroups, sideAttr)
            if minSideGroups.keys() != maxSideGroups.keys():
                match = False
            else:
                for name in minSideGroups.keys():
                    minGroup = minSideGroups[name]
                    maxGroup = maxSideGroups[name]
                    if set(minGroup) != set(maxGroup):
                        match = False
                        break
        if not match and not suppressError:
            raise ValueError("The kerning groups must be exactly the same.")
        return match

    # ---------------------
    # RoboFab Compatibility
    # ---------------------

    def remove(self, pair):
        r"""
        Removes a pair from the Kerning. **pair** will
        be a ``tuple`` of two :ref:`type-string`\s.

        This is a backwards compatibility method.
        """
        del self[pair]

    def asDict(self, returnIntegers=True):
        """
        Return the Kerning as a ``dict``.

        This is a backwards compatibility method.
        """
        d = {}
        for k, v in self.items():
            d[k] = v if not returnIntegers else normalizers.normalizeRounding(v)
        return d

    # -------------------
    # Inherited Functions
    # -------------------

    def __contains__(self, pair):
        r"""
        Tests to see if a pair is in the Kerning.
        **pair** will be a ``tuple`` of two :ref:`type-string`\s.

        This returns a ``bool`` indicating if the **pair**
        is in the Kerning. ::

            >>> ("A", "V") in font.kerning
            True
        """
        return super(BaseKerning, self).__contains__(pair)

    def __delitem__(self, pair):
        r"""
        Removes **pair** from the Kerning. **pair** is a ``tuple`` of two
        :ref:`type-string`\s.::

            >>> del font.kerning[("A","V")]
        """
        super(BaseKerning, self).__delitem__(pair)

    def __getitem__(self, pair):
        r"""
        Returns the kerning value of the pair. **pair** is a ``tuple`` of
        two :ref:`type-string`\s.

        The returned value will be a :ref:`type-int-float`.::

            >>> font.kerning[("A", "V")]
            -15

        It is important to understand that any changes to the returned value
        will not be reflected in the Kerning object. If one wants to make a change to
        the value, one should do the following::

            >>> value = font.kerning[("A", "V")]
            >>> value += 10
            >>> font.kerning[("A", "V")] = value
        """
        return super(BaseKerning, self).__getitem__(pair)

    def __iter__(self):
        """
        Iterates through the Kerning, giving the pair for each iteration. The order that
        the Kerning will iterate though is not fixed nor is it ordered.::

            >>> for pair in font.kerning:
            >>>     print pair
            ("A", "Y")
            ("A", "V")
            ("A", "W")
        """
        return super(BaseKerning, self).__iter__()

    def __len__(self):
        """
        Returns the number of pairs in Kerning as an ``int``.::

            >>> len(font.kerning)
            5
        """
        return super(BaseKerning, self).__len__()

    def __setitem__(self, pair, value):
        r"""
        Sets the **pair** to the list of **value**. **pair** is the
        pair as a ``tuple`` of two :ref:`type-string`\s and **value**
        is a :ref:`type-int-float`.

            >>> font.kerning[("A", "V")] = -20
            >>> font.kerning[("A", "W")] = -10.5
        """
        super(BaseKerning, self).__setitem__(pair, value)

    def clear(self):
        """
        Removes all information from Kerning,
        resetting the Kerning to an empty dictionary. ::

            >>> font.kerning.clear()
        """
        super(BaseKerning, self).clear()

    def get(self, pair, default=None):
        r"""
        Returns the value for the kerning pair.
        **pair** is a ``tuple`` of two :ref:`type-string`\s, and the returned
        values will either be :ref:`type-int-float` or ``None``
        if no pair was found. ::

            >>> font.kerning[("A", "V")]
            -25

        It is important to understand that any changes to the returned value
        will not be reflected in the Kerning object. If one wants to make a change to
        the value, one should do the following::

            >>> value = font.kerning[("A", "V")]
            >>> value += 10
            >>> font.kerning[("A", "V")] = value
        """
        return super(BaseKerning, self).get(pair, default)

    def find(self, pair, default=None):
        r"""
        Returns the value for the kerning pair - even if the pair only exists
        implicitly through group kerning.
        **pair** is a ``tuple`` of two :ref:`type-string`\s, and the returned
        values will either be :ref:`type-int-float` or ``None``
        if no pair was found. ::

            >>> font.kerning[("A", "V")]
            -25
        """
        pair = normalizers.normalizeKerningKey(pair)
        value = self._find(pair, default)
        if value != default:
            value = normalizers.normalizeKerningValue(value)
        return value

    def _find(self, pair, default=None):
        """
        This is the environment implementation of
        :attr:`BaseKerning.find`. This must return an
        :ref:`type-int-float` or `default`.
        """
        from fontTools.ufoLib.kerning import lookupKerningValue
        font = self.font
        groups = font.groups
        return lookupKerningValue(pair, self, groups, fallback=default)

    def items(self):
        r"""
        Returns a list of ``tuple``\s of each pair and value. Pairs are a
        ``tuple`` of two :ref:`type-string`\s and values are :ref:`type-int-float`.

        The initial list will be unordered.

            >>> font.kerning.items()
            [(("A", "V"), -30), (("A", "W"), -10)]
        """
        return super(BaseKerning, self).items()

    def keys(self):
        """
        Returns a ``list`` of all the pairs in Kerning. This list will be
        unordered.::

            >>> font.kerning.keys()
            [("A", "Y"), ("A", "V"), ("A", "W")]
        """
        return super(BaseKerning, self).keys()

    def pop(self, pair, default=None):
        r"""
        Removes the **pair** from the Kerning and returns the value as an ``int``.
        If no pair is found, **default** is returned. **pair** is a
        ``tuple`` of two :ref:`type-string`\s. This must return either
        **default** or a :ref:`type-int-float`.

            >>> font.kerning.pop(("A", "V"))
            -20
            >>> font.kerning.pop(("A", "W"))
            -10.5
        """
        return super(BaseKerning, self).pop(pair, default)

    def update(self, otherKerning):
        """
        Updates the Kerning based on **otherKerning**. **otherKerning** is a ``dict`` of
        kerning information. If a pair from **otherKerning** is in Kerning, the pair
        value will be replaced by the value from **otherKerning**. If a pair
        from **otherKerning** is not in the Kerning, it is added to the pairs. If Kerning
        contains a pair that is not in **otherKerning**, it is not changed.

            >>> font.kerning.update(newKerning)
        """
        super(BaseKerning, self).update(otherKerning)

    def values(self):
        r"""
        Returns a ``list`` of each pair's values, the values will be
        :ref:`type-int-float`\s.

        The list will be unordered.

            >>> font.kerning.items()
            [-20, -15, 5, 3.5]
        """
        return super(BaseKerning, self).values()
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/kerning.py",
"copies": "1",
"size": "15208",
"license": "mit",
"hash": 1767238896258897400,
"line_mean": 33.9609195402,
"line_max": 90,
"alpha_frac": 0.5466859548,
"autogenerated": false,
"ratio": 4.184920198128784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5231606152928784,
"avg_score": null,
"num_lines": null
} |
from fontParts.base.base import (
BaseObject,
dynamicProperty,
interpolate,
reference
)
from fontParts.base import normalizers
from fontParts.base.errors import FontPartsError
from fontParts.base.deprecated import DeprecatedInfo, RemovedInfo
class BaseInfo(BaseObject, DeprecatedInfo, RemovedInfo):
    """
    A font Info object. Holds the UFO 3 font info attributes and
    delegates storage to environment subclasses via ``_get_*`` /
    ``_set_*`` methods. Guidelines are handled by the parent font,
    not by this object.
    """

    # Copy every UFO 3 font info attribute except guidelines
    # (those live on the font).
    from fontTools.ufoLib import fontInfoAttributesVersion3
    copyAttributes = set(fontInfoAttributesVersion3)
    copyAttributes.remove("guidelines")
    copyAttributes = tuple(copyAttributes)

    def _reprContents(self):
        # Contribute "for font ..." to the repr when this info
        # belongs to a font.
        contents = []
        if self.font is not None:
            contents.append("for font")
            contents += self.font._reprContents()
        return contents

    # -------
    # Parents
    # -------

    # Font

    # Weak reference wrapper to the parent font; resolved in _get_font.
    _font = None

    font = dynamicProperty("font", "The info's parent font.")

    def _get_font(self):
        if self._font is None:
            return None
        # self._font is a callable reference; call it to get the font.
        return self._font()

    def _set_font(self, font):
        # The parent font may only be assigned once. BUGFIX: resolve the
        # stored reference (self._font()) before comparing — comparing
        # the wrapper itself to the font always differed, matching the
        # sibling implementation in BaseKerning._set_font.
        if self._font is not None and self._font() != font:
            raise AssertionError("font for info already set and is not same as font")
        if font is not None:
            font = reference(font)
        self._font = font

    # ----------
    # Validation
    # ----------

    @staticmethod
    def _validateFontInfoAttributeValue(attr, value):
        """
        Validate **value** for the UFO 3 info attribute **attr**
        using fontTools' validator. Returns the value unchanged or
        raises ``ValueError``.
        """
        from fontTools.ufoLib import validateFontInfoVersion3ValueForAttribute
        valid = validateFontInfoVersion3ValueForAttribute(attr, value)
        if not valid:
            raise ValueError("Invalid value %s for attribute '%s'."
                             % (value, attr))
        return value

    # ----------
    # Attributes
    # ----------

    # has

    def __hasattr__(self, attr):
        # NOTE(review): __hasattr__ is not a real Python protocol hook;
        # presumably BaseObject dispatches to it explicitly — confirm.
        from fontTools.ufoLib import fontInfoAttributesVersion3
        if attr in fontInfoAttributesVersion3:
            return True
        return super(BaseInfo, self).__hasattr__(attr)

    # get

    def __getattribute__(self, attr):
        # Route all UFO 3 info attributes (except guidelines) through
        # the _getAttr dispatcher and validate the returned value.
        from fontTools.ufoLib import fontInfoAttributesVersion3
        if attr != "guidelines" and attr in fontInfoAttributesVersion3:
            value = self._getAttr(attr)
            if value is not None:
                value = self._validateFontInfoAttributeValue(attr, value)
            return value
        return super(BaseInfo, self).__getattribute__(attr)

    def _getAttr(self, attr):
        """
        Subclasses may override this method.

        If a subclass does not override this method,
        it must implement '_get_attributeName' methods
        for all Info methods.
        """
        meth = "_get_%s" % attr
        if not hasattr(self, meth):
            raise AttributeError("No getter for attribute '%s'." % attr)
        meth = getattr(self, meth)
        value = meth()
        return value

    # set

    def __setattr__(self, attr, value):
        # Route all UFO 3 info attributes (except guidelines) through
        # the _setAttr dispatcher, validating incoming values.
        from fontTools.ufoLib import fontInfoAttributesVersion3
        if attr != "guidelines" and attr in fontInfoAttributesVersion3:
            if value is not None:
                value = self._validateFontInfoAttributeValue(attr, value)
            return self._setAttr(attr, value)
        return super(BaseInfo, self).__setattr__(attr, value)

    def _setAttr(self, attr, value):
        """
        Subclasses may override this method.

        If a subclass does not override this method,
        it must implement '_set_attributeName' methods
        for all Info methods.
        """
        meth = "_set_%s" % attr
        if not hasattr(self, meth):
            raise AttributeError("No setter for attribute '%s'." % attr)
        meth = getattr(self, meth)
        meth(value)

    # -------------
    # Normalization
    # -------------

    def round(self):
        """
        Round the following attributes to integers:

        - unitsPerEm
        - descender
        - xHeight
        - capHeight
        - ascender
        - openTypeHeadLowestRecPPEM
        - openTypeHheaAscender
        - openTypeHheaDescender
        - openTypeHheaLineGap
        - openTypeHheaCaretSlopeRise
        - openTypeHheaCaretSlopeRun
        - openTypeHheaCaretOffset
        - openTypeOS2WidthClass
        - openTypeOS2WeightClass
        - openTypeOS2TypoAscender
        - openTypeOS2TypoDescender
        - openTypeOS2TypoLineGap
        - openTypeOS2WinAscent
        - openTypeOS2WinDescent
        - openTypeOS2SubscriptXSize
        - openTypeOS2SubscriptYSize
        - openTypeOS2SubscriptXOffset
        - openTypeOS2SubscriptYOffset
        - openTypeOS2SuperscriptXSize
        - openTypeOS2SuperscriptYSize
        - openTypeOS2SuperscriptXOffset
        - openTypeOS2SuperscriptYOffset
        - openTypeOS2StrikeoutSize
        - openTypeOS2StrikeoutPosition
        - openTypeVheaVertTypoAscender
        - openTypeVheaVertTypoDescender
        - openTypeVheaVertTypoLineGap
        - openTypeVheaCaretSlopeRise
        - openTypeVheaCaretSlopeRun
        - openTypeVheaCaretOffset
        - postscriptSlantAngle
        - postscriptUnderlineThickness
        - postscriptUnderlinePosition
        - postscriptBlueValues
        - postscriptOtherBlues
        - postscriptFamilyBlues
        - postscriptFamilyOtherBlues
        - postscriptStemSnapH
        - postscriptStemSnapV
        - postscriptBlueFuzz
        - postscriptBlueShift
        - postscriptDefaultWidthX
        - postscriptNominalWidthX
        """
        self._round()

    def _round(self, **kwargs):
        """
        Subclasses may override this method.
        """
        mathInfo = self._toMathInfo(guidelines=False)
        mathInfo = mathInfo.round()
        self._fromMathInfo(mathInfo, guidelines=False)

    # -------------
    # Interpolation
    # -------------

    def toMathInfo(self, guidelines=True):
        """
        Returns the info as an object that follows the
        `MathGlyph protocol <https://github.com/typesupply/fontMath>`_.

            >>> mg = font.info.toMathInfo()
        """
        return self._toMathInfo(guidelines=guidelines)

    def fromMathInfo(self, mathInfo, guidelines=True):
        """
        Replaces the contents of this info object with the contents of ``mathInfo``.

            >>> font.fromMathInfo(mg)

        ``mathInfo`` must be an object following the
        `MathInfo protocol <https://github.com/typesupply/fontMath>`_.
        """
        return self._fromMathInfo(mathInfo, guidelines=guidelines)

    def _toMathInfo(self, guidelines=True):
        """
        Subclasses may override this method.
        """
        import fontMath
        # A little trickery is needed here because MathInfo
        # handles font level guidelines. Those are not in this
        # object so we temporarily fake them just enough for
        # MathInfo and then move them back to the proper place.
        self.guidelines = []
        if guidelines:
            for guideline in self.font.guidelines:
                d = dict(
                    x=guideline.x,
                    y=guideline.y,
                    angle=guideline.angle,
                    name=guideline.name,
                    identifier=guideline.identifier,
                    color=guideline.color
                )
                self.guidelines.append(d)
        info = fontMath.MathInfo(self)
        del self.guidelines
        return info

    def _fromMathInfo(self, mathInfo, guidelines=True):
        """
        Subclasses may override this method.
        """
        self.guidelines = []
        mathInfo.extractInfo(self)
        font = self.font
        if guidelines:
            for guideline in self.guidelines:
                font.appendGuideline(
                    position=(guideline["x"], guideline["y"]),
                    angle=guideline["angle"],
                    name=guideline["name"],
                    color=guideline["color"]
                    # XXX identifier is lost
                )
        del self.guidelines

    def interpolate(self, factor, minInfo, maxInfo, round=True, suppressError=True):
        """
        Interpolate all pairs between minInfo and maxInfo.
        The interpolation occurs on a 0 to 1.0 range where minInfo
        is located at 0 and maxInfo is located at 1.0.

        factor is the interpolation value. It may be less than 0
        and greater than 1.0. It may be a number (integer, float)
        or a tuple of two numbers. If it is a tuple, the first
        number indicates the x factor and the second number
        indicates the y factor.

        round indicates if the result should be rounded to integers.

        suppressError indicates if incompatible data should be ignored
        or if an error should be raised when such incompatibilities are found.
        """
        factor = normalizers.normalizeInterpolationFactor(factor)
        if not isinstance(minInfo, BaseInfo):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.") %
                            (self.__class__.__name__, minInfo.__class__.__name__))
        if not isinstance(maxInfo, BaseInfo):
            raise TypeError(("Interpolation to an instance of %r can not be "
                             "performed from an instance of %r.") %
                            (self.__class__.__name__, maxInfo.__class__.__name__))
        round = normalizers.normalizeBoolean(round)
        suppressError = normalizers.normalizeBoolean(suppressError)
        self._interpolate(factor, minInfo, maxInfo,
                          round=round, suppressError=suppressError)

    def _interpolate(self, factor, minInfo, maxInfo, round=True, suppressError=True):
        """
        Subclasses may override this method.
        """
        # BUGFIX: keep the original info objects for error reporting;
        # the MathInfo wrappers do not expose the parent font, so the
        # previous code raised AttributeError instead of the intended
        # FontPartsError message.
        minMath = minInfo._toMathInfo()
        maxMath = maxInfo._toMathInfo()
        result = interpolate(minMath, maxMath, factor)
        if result is None and not suppressError:
            raise FontPartsError(("Info from font '%s' and font '%s' could not be "
                                  "interpolated.")
                                 % (minInfo.font.name, maxInfo.font.name))
        if round:
            result = result.round()
        self._fromMathInfo(result)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/info.py",
"copies": "1",
"size": "10257",
"license": "mit",
"hash": -9099165899920838000,
"line_mean": 33.0764119601,
"line_max": 85,
"alpha_frac": 0.5957882422,
"autogenerated": false,
"ratio": 4.4479618386816995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.55437500808817,
"avg_score": null,
"num_lines": null
} |
from fontParts.base.base import (
BaseObject,
InterpolationMixin,
SelectionMixin,
dynamicProperty,
reference
)
from fontParts.base import normalizers
from fontParts.base.compatibility import LayerCompatibilityReporter
from fontParts.base.color import Color
from fontParts.base.deprecated import DeprecatedLayer, RemovedLayer
class _BaseGlyphVendor(
    BaseObject,
    SelectionMixin,
):
    """
    This class exists to provide common glyph
    interaction code to BaseFont and BaseLayer.
    It should not be directly subclassed.
    """

    # -----------------
    # Glyph Interaction
    # -----------------

    def _setLayerInGlyph(self, glyph):
        # Parent the glyph: a layer parents itself; a font parents
        # its default layer. Glyphs that already have a layer are
        # left untouched.
        if glyph.layer is None:
            if isinstance(self, BaseLayer):
                layer = self
            else:
                layer = self.defaultLayer
            glyph.layer = layer

    def __len__(self):
        """
        An ``int`` representing number of glyphs in the layer. ::

            >>> len(layer)
            256
        """
        return self._len()

    def _len(self, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.__len__` and :meth:`BaseFont.__len__`
        This must return an ``int`` indicating
        the number of glyphs in the layer.

        Subclasses may override this method.
        """
        return len(self.keys())

    def __iter__(self):
        """
        Iterate through the :class:`BaseGlyph` objects in the layer. ::

            >>> for glyph in layer:
            ...     glyph.name
            "A"
            "B"
            "C"
        """
        return self._iter()

    def _iter(self, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.__iter__` and :meth:`BaseFont.__iter__`
        This must return an iterator that returns
        instances of a :class:`BaseGlyph` subclass.

        Subclasses may override this method.
        """
        for name in self.keys():
            yield self[name]

    def __getitem__(self, name):
        """
        Get the :class:`BaseGlyph` with name from the layer. ::

            >>> glyph = layer["A"]

        Raises ``KeyError`` if no glyph with **name** exists.
        """
        name = normalizers.normalizeGlyphName(name)
        if name not in self:
            raise KeyError("No glyph named '%s'." % name)
        glyph = self._getItem(name)
        self._setLayerInGlyph(glyph)
        return glyph

    def _getItem(self, name, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.__getitem__` and :meth:`BaseFont.__getitem__`
        This must return an instance of a :class:`BaseGlyph`
        subclass. **name** will be a :ref:`type-string` representing
        a name of a glyph that is in the layer. It will have been
        normalized with :func:`normalizers.normalizeGlyphName`.

        Subclasses must override this method.
        """
        self.raiseNotImplementedError()

    def __setitem__(self, name, glyph):
        """
        Insert **glyph** into the layer. ::

            >>> glyph = layer["A"] = otherGlyph

        This will not insert the glyph directly. Rather, a
        new glyph will be created and the data from **glyph**
        will be copied to the new glyph. **name** indicates
        the name that should be assigned to the glyph after
        insertion. If **name** is not given, the glyph's original
        name must be used. If the glyph does not have a name,
        an error must be raised. The data that will be inserted
        from **glyph** is the same data as documented in
        :meth:`BaseGlyph.copy`.
        """
        name = normalizers.normalizeGlyphName(name)
        # Replace an existing glyph of the same name.
        if name in self:
            del self[name]
        return self._insertGlyph(glyph, name=name)

    def __delitem__(self, name):
        """
        Remove the glyph with name from the layer. ::

            >>> del layer["A"]

        Raises ``KeyError`` if no glyph with **name** exists.
        """
        name = normalizers.normalizeGlyphName(name)
        if name not in self:
            raise KeyError("No glyph named '%s'." % name)
        self._removeGlyph(name)

    def keys(self):
        """
        Get a list of all glyphs in the layer. ::

            >>> layer.keys()
            ["B", "C", "A"]

        The order of the glyphs is undefined.
        """
        return self._keys()

    def _keys(self, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.keys` and :meth:`BaseFont.keys`
        This must return an :ref:`type-immutable-list`
        of the names representing all glyphs in the layer.
        The order is not defined.

        Subclasses must override this method.
        """
        self.raiseNotImplementedError()

    def __contains__(self, name):
        """
        Test if the layer contains a glyph with **name**. ::

            >>> "A" in layer
            True
        """
        name = normalizers.normalizeGlyphName(name)
        return self._contains(name)

    def _contains(self, name, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.__contains__` and :meth:`BaseFont.__contains__`
        This must return ``bool`` indicating if the
        layer has a glyph with the defined name.
        **name** will be a :ref:`type-string` representing
        a glyph name. It will have been normalized with
        :func:`normalizers.normalizeGlyphName`.

        Subclasses may override this method.
        """
        return name in self.keys()

    def newGlyph(self, name, clear=True):
        """
        Make a new glyph with **name** in the layer. ::

            >>> glyph = layer.newGlyph("A")

        The newly created :class:`BaseGlyph` will be returned.

        If the glyph exists in the layer and clear is set to ``False``,
        the existing glyph will be returned, otherwise the default
        behavior is to clear the existing glyph.
        """
        name = normalizers.normalizeGlyphName(name)
        if name not in self:
            glyph = self._newGlyph(name)
        elif clear:
            # Recreate the glyph from scratch.
            self.removeGlyph(name)
            glyph = self._newGlyph(name)
        else:
            glyph = self._getItem(name)
        self._setLayerInGlyph(glyph)
        return glyph

    def _newGlyph(self, name, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.newGlyph` and :meth:`BaseFont.newGlyph`
        This must return an instance of a :class:`BaseGlyph` subclass.
        **name** will be a :ref:`type-string` representing
        a glyph name. It will have been normalized with
        :func:`normalizers.normalizeGlyphName`. The
        name will have been tested to make sure that
        no glyph with the same name exists in the layer.

        Subclasses must override this method.
        """
        self.raiseNotImplementedError()

    def removeGlyph(self, name):
        """
        Remove the glyph with name from the layer. ::

            >>> layer.removeGlyph("A")

        This method is deprecated. Use :meth:`BaseFont.__delitem__` instead.
        """
        del self[name]

    def _removeGlyph(self, name, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.removeGlyph` and :meth:`BaseFont.removeGlyph`.
        **name** will be a :ref:`type-string` representing a
        glyph name of a glyph that is in the layer. It will
        have been normalized with :func:`normalizers.normalizeGlyphName`.
        The glyph with this name must be removed from the layer.

        Subclasses must override this method.
        """
        self.raiseNotImplementedError()

    def insertGlyph(self, glyph, name=None):
        """
        Insert **glyph** into the layer. ::

            >>> glyph = layer.insertGlyph(otherGlyph, name="A")

        This method is deprecated. Use :meth:`BaseFont.__setitem__` instead.
        """
        if name is None:
            name = glyph.name
        self[name] = glyph

    def _insertGlyph(self, glyph, name, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseLayer.__setitem__` and :meth:`BaseFont.__setitem__`.
        This must return an instance of a :class:`BaseGlyph` subclass.
        **glyph** will be a glyph object with the attributes necessary
        for copying as defined in :meth:`BaseGlyph.copy` An environment
        must not insert **glyph** directly. Instead the data from
        **glyph** should be copied to a new glyph instead. **name**
        will be a :ref:`type-string` representing a glyph name. It
        will have been normalized with :func:`normalizers.normalizeGlyphName`.
        **name** will have been tested to make sure that no glyph with
        the same name exists in the layer.

        Subclasses may override this method.
        """
        # Copy first if the source glyph is unnamed or would collide
        # with an existing glyph under a different name.
        if glyph.name is None or (name != glyph.name and glyph.name in self):
            glyph = glyph.copy()
            glyph.name = name
        dest = self.newGlyph(name, clear=kwargs.get("clear", True))
        dest.copyData(glyph)
        return dest

    # ---------
    # Selection
    # ---------

    selectedGlyphs = dynamicProperty(
        "base_selectedGlyphs",
        """
        A list of glyphs selected in the layer.

        Getting selected glyph objects:

            >>> for glyph in layer.selectedGlyphs:
            ...     glyph.markColor = (1, 0, 0, 0.5)

        Setting selected glyph objects:

            >>> layer.selectedGlyphs = someGlyphs
        """
    )

    def _get_base_selectedGlyphs(self):
        selected = tuple([normalizers.normalizeGlyph(glyph) for glyph in
                          self._get_selectedGlyphs()])
        return selected

    def _get_selectedGlyphs(self):
        """
        Subclasses may override this method.
        """
        return self._getSelectedSubObjects(self)

    def _set_base_selectedGlyphs(self, value):
        normalized = [normalizers.normalizeGlyph(glyph) for glyph in value]
        self._set_selectedGlyphs(normalized)

    def _set_selectedGlyphs(self, value):
        """
        Subclasses may override this method.
        """
        return self._setSelectedSubObjects(self, value)

    selectedGlyphNames = dynamicProperty(
        "base_selectedGlyphNames",
        """
        A list of names of glyphs selected in the layer.

        Getting selected glyph names:

            >>> for name in layer.selectedGlyphNames:
            ...     print(name)

        Setting selected glyph names:

            >>> layer.selectedGlyphNames = ["A", "B", "C"]
        """
    )

    def _get_base_selectedGlyphNames(self):
        selected = tuple([normalizers.normalizeGlyphName(name) for name in
                          self._get_selectedGlyphNames()])
        return selected

    def _get_selectedGlyphNames(self):
        """
        Subclasses may override this method.
        """
        selected = [glyph.name for glyph in self.selectedGlyphs]
        return selected

    def _set_base_selectedGlyphNames(self, value):
        normalized = [normalizers.normalizeGlyphName(name) for name in value]
        self._set_selectedGlyphNames(normalized)

    def _set_selectedGlyphNames(self, value):
        """
        Subclasses may override this method.
        """
        select = [self[name] for name in value]
        self.selectedGlyphs = select

    # --------------------
    # Legacy Compatibility
    # --------------------

    # RoboFab-era alias for dict-style membership testing.
    has_key = __contains__
class BaseLayer(_BaseGlyphVendor, InterpolationMixin, DeprecatedLayer, RemovedLayer):
def _reprContents(self):
    """Return repr fragments: the layer name and, when set, its color."""
    fragments = ["'%s'" % self.name]
    color = self.color
    if color:
        fragments.append("color=%r" % str(color))
    return fragments
# ----
# Copy
# ----

# Attributes copied automatically by the base copy machinery.
# Glyphs are handled separately in copyData below.
copyAttributes = (
    "name",
    "color",
    "lib"
)

def copy(self):
    """
    Copy the layer into a new layer that does not
    belong to a font. ::

        >>> copiedLayer = layer.copy()

    This will copy:

    * name
    * color
    * lib
    * glyphs
    """
    return super(BaseLayer, self).copy()

def copyData(self, source):
    """
    Copy data from **source** into this layer.
    Refer to :meth:`BaseLayer.copy` for a list
    of values that will be copied.
    """
    super(BaseLayer, self).copyData(source)
    # Duplicate every glyph from the source layer into this layer.
    for name in source.keys():
        glyph = self.newGlyph(name)
        glyph.copyData(source[name])
# -------
# Parents
# -------

# Font

# Weak reference wrapper to the parent font; resolved in _get_font.
_font = None

font = dynamicProperty(
    "font",
    """
    The layer's parent :class:`BaseFont`. ::

        >>> font = layer.font
    """
)

def _get_font(self):
    if self._font is None:
        return None
    # self._font is a callable reference; call it to get the font.
    return self._font()

def _set_font(self, font):
    # A layer's parent font may only be assigned once.
    if self._font is not None:
        raise AssertionError("font for layer already set")
    if font is not None:
        font = reference(font)
    self._font = font
# --------------
# Identification
# --------------

# name

name = dynamicProperty(
    "base_name",
    """
    The name of the layer. ::

        >>> layer.name
        "foreground"
        >>> layer.name = "top"
    """
)

def _get_base_name(self):
    value = self._get_name()
    if value is not None:
        value = normalizers.normalizeLayerName(value)
    return value

def _set_base_name(self, value):
    # Re-assigning the current name is a no-op.
    if value == self.name:
        return
    value = normalizers.normalizeLayerName(value)
    font = self.font
    if font is not None:
        # Layer names must be unique within the parent font.
        existing = self.font.layerOrder
        if value in existing:
            raise ValueError("A layer with the name '%s' already exists."
                             % value)
    self._set_name(value)

def _get_name(self):
    """
    This is the environment implementation of :attr:`BaseLayer.name`.
    This must return a :ref:`type-string` defining the name of the
    layer. If the layer is the default layer, the returned value
    must be ``None``. It will be normalized with
    :func:`normalizers.normalizeLayerName`.

    Subclasses must override this method.
    """
    self.raiseNotImplementedError()

def _set_name(self, value, **kwargs):
    """
    This is the environment implementation of :attr:`BaseLayer.name`.
    **value** will be a :ref:`type-string` defining the name of the
    layer. It will have been normalized with
    :func:`normalizers.normalizeLayerName`.
    No layer with the same name will exist.

    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# color
color = dynamicProperty(
    "base_color",
    """
    The layer's color. ::

        >>> layer.color
        None
        >>> layer.color = (1, 0, 0, 0.5)
    """
)
def _get_base_color(self):
    # Normalize the raw value and wrap it in a Color object.
    value = self._get_color()
    if value is not None:
        value = normalizers.normalizeColor(value)
        value = Color(value)
    return value
def _set_base_color(self, value):
    # None clears the color; anything else is normalized first.
    if value is not None:
        value = normalizers.normalizeColor(value)
    self._set_color(value)
def _get_color(self):
    """
    This is the environment implementation of :attr:`BaseLayer.color`.
    This must return a :ref:`type-color` defining the
    color assigned to the layer. If the layer does not
    have an assigned color, the returned value must be
    ``None``. It will be normalized with
    :func:`normalizers.normalizeColor`.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
def _set_color(self, value, **kwargs):
    """
    This is the environment implementation of :attr:`BaseLayer.color`.
    **value** will be a :ref:`type-color` or ``None`` defining the
    color to assign to the layer. It will have been normalized with
    :func:`normalizers.normalizeColor`.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# -----------
# Sub-Objects
# -----------
# lib
lib = dynamicProperty(
    # Bug fix: this was registered as "lib", which dispatched straight
    # to _get_lib and skipped the _get_base_lib wrapper below (so the
    # lib was handed out without being parented). Every sibling
    # property here ("base_name", "base_color") routes through a
    # "base_*" wrapper; "base_lib" restores that pattern.
    "base_lib",
    """
    The layer's :class:`BaseLib` object. ::

        >>> layer.lib["org.robofab.hello"]
        "world"
    """
)
def _get_base_lib(self):
    lib = self._get_lib()
    # Parent the lib before handing it out. NOTE(review): the parent
    # is assigned via ``.font`` even though ``self`` is a layer —
    # confirm this matches the BaseLib parenting API.
    lib.font = self
    return lib
def _get_lib(self):
    """
    This is the environment implementation of :attr:`BaseLayer.lib`.
    This must return an instance of a :class:`BaseLib` subclass.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# -----------------
# Global Operations
# -----------------
def round(self):
    """
    Round all appropriate data to integers. ::

        >>> layer.round()

    This is the equivalent of calling the round method on:

    * all glyphs in the layer
    """
    self._round()
def _round(self):
    """
    This is the environment implementation of :meth:`BaseLayer.round`.
    Subclasses may override this method.
    """
    # Delegate to every glyph in the layer.
    for layerGlyph in self:
        layerGlyph.round()
def autoUnicodes(self):
    """
    Use heuristics to set Unicode values in all glyphs. ::

        >>> layer.autoUnicodes()

    Environments will define their own heuristics for
    automatically determining values.
    """
    self._autoUnicodes()
def _autoUnicodes(self):
    """
    This is the environment implementation of
    :meth:`BaseLayer.autoUnicodes`.
    Subclasses may override this method.
    """
    # Delegate to every glyph in the layer.
    for layerGlyph in self:
        layerGlyph.autoUnicodes()
# -------------
# Interpolation
# -------------
def interpolate(self, factor, minLayer, maxLayer, round=True,
                suppressError=True):
    """
    Interpolate all possible data in the layer. ::

        >>> layer.interpolate(0.5, otherLayer1, otherLayer2)
        >>> layer.interpolate((0.5, 2.0), otherLayer1, otherLayer2, round=False)

    The interpolation occurs on a 0 to 1.0 range where **minLayer**
    is located at 0 and **maxLayer** is located at 1.0. **factor**
    is the interpolation value. It may be less than 0 and greater
    than 1.0. It may be a :ref:`type-int-float` or a tuple of
    two :ref:`type-int-float`. If it is a tuple, the first
    number indicates the x factor and the second number indicates
    the y factor. **round** indicates if the result should be
    rounded to integers. **suppressError** indicates if incompatible
    data should be ignored or if an error should be raised when
    such incompatibilities are found.

    Raises ``TypeError`` if either source layer is not a BaseLayer.
    """
    # Validate and normalize all arguments before delegating.
    factor = normalizers.normalizeInterpolationFactor(factor)
    if not isinstance(minLayer, BaseLayer):
        raise TypeError(("Interpolation to an instance of %r can not be "
                         "performed from an instance of %r.")
                        % (self.__class__.__name__, minLayer.__class__.__name__))
    if not isinstance(maxLayer, BaseLayer):
        raise TypeError(("Interpolation to an instance of %r can not be "
                         "performed from an instance of %r.")
                        % (self.__class__.__name__, maxLayer.__class__.__name__))
    round = normalizers.normalizeBoolean(round)
    suppressError = normalizers.normalizeBoolean(suppressError)
    self._interpolate(factor, minLayer, maxLayer,
                      round=round, suppressError=suppressError)
def _interpolate(self, factor, minLayer, maxLayer, round=True,
                 suppressError=True):
    """
    This is the environment implementation of
    :meth:`BaseLayer.interpolate`.
    Subclasses may override this method.
    """
    # Clear this layer, then rebuild it from glyphs common to both
    # source layers; glyphs present in only one source are skipped.
    for glyphName in self.keys():
        del self[glyphName]
    for glyphName in minLayer.keys():
        if glyphName not in maxLayer:
            continue
        minGlyph = minLayer[glyphName]
        maxGlyph = maxLayer[glyphName]
        dstGlyph = self.newGlyph(glyphName)
        dstGlyph.interpolate(factor, minGlyph, maxGlyph,
                             round=round, suppressError=suppressError)
# Reporter class used by the base isCompatible machinery.
compatibilityReporterClass = LayerCompatibilityReporter
def isCompatible(self, other):
    """
    Evaluate interpolation compatibility with **other**. ::

        >>> compat, report = self.isCompatible(otherLayer)
        >>> compat
        False
        >>> report
        A
        -
        [Fatal] The glyphs do not contain the same number of contours.

    This will return a ``bool`` indicating if the layer is
    compatible for interpolation with **other** and a
    :ref:`type-string` of compatibility notes.
    """
    return super(BaseLayer, self).isCompatible(other, BaseLayer)
def _isCompatible(self, other, reporter):
    """
    This is the environment implementation of
    :meth:`BaseLayer.isCompatible`.
    Subclasses may override this method.
    """
    layer1 = self
    layer2 = other
    # incompatible number of glyphs
    glyphs1 = set(layer1.keys())
    glyphs2 = set(layer2.keys())
    if len(glyphs1) != len(glyphs2):
        reporter.glyphCountDifference = True
        reporter.warning = True
    if len(glyphs1.difference(glyphs2)) != 0:
        reporter.warning = True
        reporter.glyphsMissingFromLayer2 = list(glyphs1.difference(glyphs2))
    if len(glyphs2.difference(glyphs1)) != 0:
        reporter.warning = True
        reporter.glyphsMissingInLayer1 = list(glyphs2.difference(glyphs1))
    # test glyphs common to both layers
    for glyphName in sorted(glyphs1.intersection(glyphs2)):
        glyph1 = layer1[glyphName]
        glyph2 = layer2[glyphName]
        glyphCompatibility = glyph1.isCompatible(glyph2)[1]
        if glyphCompatibility.fatal or glyphCompatibility.warning:
            if glyphCompatibility.fatal:
                reporter.fatal = True
            if glyphCompatibility.warning:
                reporter.warning = True
            reporter.glyphs.append(glyphCompatibility)
# -------
# mapping
# -------
# NOTE: the docstrings of the two public methods below were swapped in
# the original source; they have been restored to the correct methods.
def getReverseComponentMapping(self):
    """
    Get a reversed map of component references in the layer.
    {
    'A' : ['Aacute', 'Aring']
    'acute' : ['Aacute']
    'ring' : ['Aring']
    etc.
    }
    """
    return self._getReverseComponentMapping()
def _getReverseComponentMapping(self):
    """
    This is the environment implementation of
    :meth:`BaseFont.getReverseComponentMapping`.
    Subclasses may override this method.
    """
    self.raiseNotImplementedError()
def getCharacterMapping(self):
    """
    Create a dictionary of unicode -> [glyphname, ...] mappings.
    All glyphs are loaded. Note that one glyph can have multiple
    unicode values, and a unicode value can have multiple glyphs
    pointing to it.
    """
    return self._getCharacterMapping()
def _getCharacterMapping(self):
    """
    This is the environment implementation of
    :meth:`BaseFont.getCharacterMapping`.
    Subclasses may override this method.
    """
    self.raiseNotImplementedError()
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/layer.py",
"copies": "1",
"size": "23959",
"license": "mit",
"hash": -1683322198796777700,
"line_mean": 29.7956298201,
"line_max": 85,
"alpha_frac": 0.5683876623,
"autogenerated": false,
"ratio": 4.602189781021898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5670577443321898,
"avg_score": null,
"num_lines": null
} |
from fontParts.base.errors import FontPartsError
from fontParts.base.base import (
BaseObject,
TransformationMixin,
InterpolationMixin,
SelectionMixin,
dynamicProperty,
reference
)
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedSegment, RemovedSegment
from fontParts.base.compatibility import SegmentCompatibilityReporter
class BaseSegment(
        BaseObject,
        TransformationMixin,
        InterpolationMixin,
        SelectionMixin,
        DeprecatedSegment,
        RemovedSegment
        ):
    """
    A segment of a contour: zero or more off-curve points followed by
    one on-curve point. Instances are created dynamically by
    :class:`BaseContour` and wrap that contour's points.
    """
    def _setPoints(self, points):
        # The points backing a segment may only be assigned once.
        if hasattr(self, "_points"):
            raise AssertionError("segment has points")
        self._points = points
    def _reprContents(self):
        # Fragments used by the base repr machinery.
        contents = [
            "%s" % self.type,
        ]
        if self.index is not None:
            contents.append("index='%r'" % self.index)
        return contents
    # this class should not be used in hashable
    # collections since it is dynamically generated.
    __hash__ = None
    # -------
    # Parents
    # -------
    # Contour
    # Weak reference (or None) to the owning contour; see _set_contour.
    _contour = None
    contour = dynamicProperty("contour", "The segment's parent contour.")
    def _get_contour(self):
        # Dereference the stored weak reference; None if never parented.
        if self._contour is None:
            return None
        return self._contour()
    def _set_contour(self, contour):
        # A segment may only be parented once.
        if self._contour is not None:
            raise AssertionError("contour for segment already set")
        if contour is not None:
            # Store a weak reference to avoid a retain cycle.
            contour = reference(contour)
        self._contour = contour
    # Glyph
    glyph = dynamicProperty("glyph", "The segment's parent glyph.")
    def _get_glyph(self):
        if self._contour is None:
            return None
        return self.contour.glyph
    # Layer
    layer = dynamicProperty("layer", "The segment's parent layer.")
    def _get_layer(self):
        if self._contour is None:
            return None
        return self.glyph.layer
    # Font
    font = dynamicProperty("font", "The segment's parent font.")
    def _get_font(self):
        if self._contour is None:
            return None
        return self.glyph.font
    # --------
    # Equality
    # --------
    def __eq__(self, other):
        """
        The :meth:`BaseObject.__eq__` method can't be used here
        because the :class:`BaseContour` implementation constructs
        segment objects without assigning an underlying ``naked``
        object. Therefore, comparisons will always fail. This
        method overrides the base method and compares the
        :class:`BasePoint` contained by the segment.
        Subclasses may override this method.
        """
        if isinstance(other, self.__class__):
            return self.points == other.points
        return NotImplemented
    # --------------
    # Identification
    # --------------
    index = dynamicProperty("base_index",
                            ("The index of the segment within the ordered "
                             "list of the parent contour's segments.")
                            )
    def _get_base_index(self):
        # An orphan segment has no index.
        if self.contour is None:
            return None
        value = self._get_index()
        value = normalizers.normalizeIndex(value)
        return value
    def _get_index(self):
        """
        Subclasses may override this method.
        """
        contour = self.contour
        value = contour.segments.index(self)
        return value
    # ----------
    # Attributes
    # ----------
    type = dynamicProperty("base_type",
                           ("The segment type. The possible types are "
                            "move, line, curve, qcurve.")
                           )
    def _get_base_type(self):
        value = self._get_type()
        value = normalizers.normalizeSegmentType(value)
        return value
    def _set_base_type(self, value):
        value = normalizers.normalizeSegmentType(value)
        self._set_type(value)
    def _get_type(self):
        """
        Subclasses may override this method.
        """
        # The segment type is the type of its on-curve point.
        value = self.onCurve.type
        return value
    def _set_type(self, newType):
        """
        Change the segment type, adding or removing off-curve points
        as needed.
        Subclasses may override this method.
        """
        oldType = self.type
        if oldType == newType:
            return
        contour = self.contour
        if contour is None:
            raise FontPartsError("The segment does not belong to a contour.")
        # converting line <-> move: point structure is unchanged
        if newType in ("move", "line") and oldType in ("move", "line"):
            pass
        # converting to a move or line: drop the off-curves
        elif newType not in ("curve", "qcurve"):
            offCurves = self.offCurve
            for point in offCurves:
                contour.removePoint(point)
        # converting a line/move to a curve/qcurve: insert two
        # off-curves collapsed onto the previous and current on-curves
        else:
            segments = contour.segments
            i = segments.index(self)
            prev = segments[i - 1].onCurve
            on = self.onCurve
            x = on.x
            y = on.y
            points = contour.points
            i = points.index(on)
            contour.insertPoint(i, (x, y), "offcurve")
            off2 = contour.points[i]
            contour.insertPoint(i, (prev.x, prev.y), "offcurve")
            off1 = contour.points[i]
            # Rebind this segment to the new point triplet.
            del self._points
            self._setPoints((off1, off2, on))
        self.onCurve.type = newType
    smooth = dynamicProperty("base_smooth",
                             ("Boolean indicating if the segment is "
                              "smooth or not.")
                             )
    def _get_base_smooth(self):
        value = self._get_smooth()
        value = normalizers.normalizeBoolean(value)
        return value
    def _set_base_smooth(self, value):
        value = normalizers.normalizeBoolean(value)
        self._set_smooth(value)
    def _get_smooth(self):
        """
        Subclasses may override this method.
        """
        # Smoothness is carried by the on-curve point.
        return self.onCurve.smooth
    def _set_smooth(self, value):
        """
        Subclasses may override this method.
        """
        self.onCurve.smooth = value
    # ------
    # Points
    # ------
    def __getitem__(self, index):
        return self._getItem(index)
    def _getItem(self, index):
        """
        Subclasses may override this method.
        """
        return self.points[index]
    def __iter__(self):
        return self._iterPoints()
    def _iterPoints(self, **kwargs):
        """
        Subclasses may override this method.
        """
        # Snapshot the point tuple once, then walk it front to back.
        points = self.points
        count = len(points)
        index = 0
        while count:
            yield points[index]
            count -= 1
            index += 1
    def __len__(self):
        return self._len()
    def _len(self, **kwargs):
        """
        Subclasses may override this method.
        """
        return len(self.points)
    points = dynamicProperty("base_points",
                             "A list of points in the segment.")
    def _get_base_points(self):
        return tuple(self._get_points())
    def _get_points(self):
        """
        Subclasses may override this method.
        """
        # A freshly constructed segment may not have points yet.
        if not hasattr(self, "_points"):
            return tuple()
        return tuple(self._points)
    onCurve = dynamicProperty("base_onCurve",
                              "The on curve point in the segment.")
    def _get_base_onCurve(self):
        return self._get_onCurve()
    def _get_onCurve(self):
        """
        Subclasses may override this method.
        """
        # The on-curve point is always last in the segment.
        return self.points[-1]
    offCurve = dynamicProperty("base_offCurve",
                               "The off curve points in the segment.")
    def _get_base_offCurve(self):
        """
        Public wrapper for :attr:`BaseSegment.offCurve`.
        """
        return self._get_offCurve()
    def _get_offCurve(self):
        """
        Subclasses may override this method.
        """
        # Everything before the on-curve point.
        return self.points[:-1]
    # --------------
    # Transformation
    # --------------
    def _transformBy(self, matrix, **kwargs):
        """
        Subclasses may override this method.
        """
        for point in self.points:
            point.transformBy(matrix)
    # -------------
    # Interpolation
    # -------------
    # Reporter class used by the base isCompatible machinery.
    compatibilityReporterClass = SegmentCompatibilityReporter
    def isCompatible(self, other):
        """
        Evaluate interpolation compatibility with **other**. ::

            >>> compatible, report = self.isCompatible(otherSegment)
            >>> compatible
            False
            >>> compatible
            [Fatal] Segment: [0] + [0]
            [Fatal] Segment: [0] is line | [0] is move
            [Fatal] Segment: [1] + [1]
            [Fatal] Segment: [1] is line | [1] is qcurve

        This will return a ``bool`` indicating if the segment is
        compatible for interpolation with **other** and a
        :ref:`type-string` of compatibility notes.
        """
        return super(BaseSegment, self).isCompatible(other, BaseSegment)
    def _isCompatible(self, other, reporter):
        """
        This is the environment implementation of
        :meth:`BaseSegment.isCompatible`.
        Subclasses may override this method.
        """
        segment1 = self
        segment2 = other
        # type
        if segment1.type != segment2.type:
            # line <-> curve can be converted
            if set((segment1.type, segment2.type)) != set(("curve", "line")):
                reporter.typeDifference = True
                reporter.fatal = True
    # ----
    # Misc
    # ----
    def round(self):
        """
        Round coordinates in all points.
        """
        for point in self.points:
            point.round()
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/segment.py",
"copies": "1",
"size": "9867",
"license": "mit",
"hash": 3677537587085465600,
"line_mean": 26.4083333333,
"line_max": 77,
"alpha_frac": 0.5356237965,
"autogenerated": false,
"ratio": 4.654245283018868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 360
} |
from fontParts.base.errors import FontPartsError
from fontParts.base.base import (
BaseObject,
TransformationMixin,
InterpolationMixin,
SelectionMixin,
IdentifierMixin,
dynamicProperty,
reference
)
from fontParts.base import normalizers
from fontParts.base.compatibility import ContourCompatibilityReporter
from fontParts.base.bPoint import absoluteBCPIn, absoluteBCPOut
from fontParts.base.deprecated import DeprecatedContour, RemovedContour
class BaseContour(
BaseObject,
TransformationMixin,
InterpolationMixin,
SelectionMixin,
IdentifierMixin,
DeprecatedContour,
RemovedContour
):
segmentClass = None
bPointClass = None
def _reprContents(self):
    """
    Fragments describing this contour for its repr.
    """
    parts = []
    identifier = self.identifier
    if identifier is not None:
        parts.append("identifier='%r'" % identifier)
    glyph = self.glyph
    if glyph is not None:
        parts.append("in glyph")
        parts.extend(glyph._reprContents())
    return parts
def copyData(self, source):
    """
    Copy data from *source* into this contour, point by point.
    """
    super(BaseContour, self).copyData(source)
    for srcPoint in source.points:
        # Create a placeholder point, then clone the source point's
        # data into it.
        self.appendPoint((0, 0))
        self.points[-1].copyData(srcPoint)
# -------
# Parents
# -------
# Glyph
# Weak reference (or None) to the owning glyph; see _set_glyph.
_glyph = None
glyph = dynamicProperty("glyph",
                        "The contour's parent :class:`BaseGlyph`.")
def _get_glyph(self):
    # Dereference the stored weak reference; None if never parented.
    if self._glyph is None:
        return None
    return self._glyph()
def _set_glyph(self, glyph):
    # A contour may only be parented once.
    if self._glyph is not None:
        raise AssertionError("glyph for contour already set")
    if glyph is not None:
        # Store a weak reference to avoid a retain cycle.
        glyph = reference(glyph)
    self._glyph = glyph
# Font
font = dynamicProperty("font", "The contour's parent font.")
def _get_font(self):
    if self._glyph is None:
        return None
    return self.glyph.font
# Layer
layer = dynamicProperty("layer", "The contour's parent layer.")
def _get_layer(self):
    if self._glyph is None:
        return None
    return self.glyph.layer
# --------------
# Identification
# --------------
# index
index = dynamicProperty(
    "base_index",
    """
    The index of the contour within the parent glyph's contours.

        >>> contour.index
        1
        >>> contour.index = 0

    The value will always be a :ref:`type-int`.
    """
)
def _get_base_index(self):
    glyph = self.glyph
    if glyph is None:
        # An orphan contour has no index.
        return None
    value = self._get_index()
    value = normalizers.normalizeIndex(value)
    return value
def _set_base_index(self, value):
    glyph = self.glyph
    if glyph is None:
        raise FontPartsError("The contour does not belong to a glyph.")
    value = normalizers.normalizeIndex(value)
    contourCount = len(glyph.contours)
    if value < 0:
        # NOTE(review): this maps a negative index to -(value % count),
        # which is not standard Python negative-index semantics —
        # confirm intended behavior.
        value = -(value % contourCount)
    if value >= contourCount:
        # Clamp to the append position.
        value = contourCount
    self._set_index(value)
def _get_index(self):
    """
    Subclasses may override this method.
    """
    glyph = self.glyph
    return glyph.contours.index(self)
def _set_index(self, value):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# identifier
def getIdentifierForPoint(self, point):
    """
    Create a unique identifier for and assign it to ``point``.
    If the point already has an identifier, the existing
    identifier will be returned.

        >>> contour.getIdentifierForPoint(point)
        'ILHGJlygfds'

    ``point`` must be a :class:`BasePoint`. The returned value
    will be a :ref:`type-identifier`.
    """
    point = normalizers.normalizePoint(point)
    return self._getIdentifierforPoint(point)
def _getIdentifierforPoint(self, point):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# ----
# Pens
# ----
def draw(self, pen):
    """
    Draw the contour's outline data to the given :ref:`type-pen`.

        >>> contour.draw(pen)
    """
    self._draw(pen)
def _draw(self, pen, **kwargs):
    """
    Subclasses may override this method.
    """
    # Adapt the segment pen to the point-pen protocol and reuse
    # drawPoints.
    from fontTools.ufoLib.pointPen import PointToSegmentPen
    adapter = PointToSegmentPen(pen)
    self.drawPoints(adapter)
def drawPoints(self, pen):
    """
    Draw the contour's outline data to the given :ref:`type-point-pen`.

        >>> contour.drawPoints(pointPen)
    """
    self._drawPoints(pen)
def _drawPoints(self, pen, **kwargs):
    """
    Subclasses may override this method.
    """
    # The try: ... except TypeError: ...
    # handles backwards compatibility with
    # point pens that have not been upgraded
    # to point pen protocol 2.
    try:
        pen.beginPath(self.identifier)
    except TypeError:
        pen.beginPath()
    for point in self.points:
        typ = point.type
        # The point-pen protocol uses None for off-curve points.
        if typ == "offcurve":
            typ = None
        try:
            pen.addPoint(pt=(point.x, point.y), segmentType=typ,
                         smooth=point.smooth, name=point.name,
                         identifier=point.identifier)
        except TypeError:
            pen.addPoint(pt=(point.x, point.y), segmentType=typ,
                         smooth=point.smooth, name=point.name)
    pen.endPath()
# ------------------
# Data normalization
# ------------------
def autoStartSegment(self):
    """
    Automatically calculate and set the first segment
    in this contour.
    The behavior of this may vary accross environments.
    """
    self._autoStartSegment()
def _autoStartSegment(self, **kwargs):
    """
    Subclasses may override this method.
    XXX port this from robofab
    """
    self.raiseNotImplementedError()
def round(self):
    """
    Round coordinates in all points to integers.
    """
    self._round()
def _round(self, **kwargs):
    """
    Subclasses may override this method.
    """
    for point in self.points:
        point.round()
# --------------
# Transformation
# --------------
def _transformBy(self, matrix, **kwargs):
    """
    Apply *matrix* to every point in the contour.
    Subclasses may override this method.
    """
    for contourPoint in self.points:
        contourPoint.transformBy(matrix)
# -------------
# Interpolation
# -------------
# Reporter class used by the base isCompatible machinery.
compatibilityReporterClass = ContourCompatibilityReporter
def isCompatible(self, other):
    """
    Evaluate interpolation compatibility with **other**. ::

        >>> compatible, report = self.isCompatible(otherContour)
        >>> compatible
        False
        >>> compatible
        [Fatal] Contour: [0] + [0]
        [Fatal] Contour: [0] contains 4 segments | [0] contains 3 segments
        [Fatal] Contour: [0] is closed | [0] is open

    This will return a ``bool`` indicating if the contour is
    compatible for interpolation with **other** and a
    :ref:`type-string` of compatibility notes.
    """
    return super(BaseContour, self).isCompatible(other, BaseContour)
def _isCompatible(self, other, reporter):
    """
    This is the environment implementation of
    :meth:`BaseContour.isCompatible`.
    Subclasses may override this method.
    """
    contour1 = self
    contour2 = other
    # open/closed
    if contour1.open != contour2.open:
        reporter.openDifference = True
    # direction
    if contour1.clockwise != contour2.clockwise:
        reporter.directionDifference = True
    # segment count
    if len(contour1) != len(contour2.segments):
        reporter.segmentCountDifference = True
        reporter.fatal = True
    # segment pairs: compare up to the shorter contour's length
    for i in range(min(len(contour1), len(contour2))):
        segment1 = contour1[i]
        segment2 = contour2[i]
        segmentCompatibility = segment1.isCompatible(segment2)[1]
        if segmentCompatibility.fatal or segmentCompatibility.warning:
            if segmentCompatibility.fatal:
                reporter.fatal = True
            if segmentCompatibility.warning:
                reporter.warning = True
            reporter.segments.append(segmentCompatibility)
# ----
# Open
# ----
open = dynamicProperty("base_open",
                       "Boolean indicating if the contour is open.")
def _get_base_open(self):
    value = self._get_open()
    value = normalizers.normalizeBoolean(value)
    return value
def _get_open(self):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# ---------
# Direction
# ---------
clockwise = dynamicProperty("base_clockwise",
                            ("Boolean indicating if the contour's "
                             "winding direction is clockwise."))
def _get_base_clockwise(self):
    value = self._get_clockwise()
    value = normalizers.normalizeBoolean(value)
    return value
def _set_base_clockwise(self, value):
    value = normalizers.normalizeBoolean(value)
    self._set_clockwise(value)
def _get_clockwise(self):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
def _set_clockwise(self, value):
    """
    Subclasses may override this method.
    """
    # Setting the direction only reverses when it actually changes.
    if self.clockwise != value:
        self.reverse()
def reverse(self):
    """
    Reverse the direction of the contour.
    """
    # Bug fix: this previously called self._reverseContour(), which
    # does not exist anywhere in the class; the environment hook
    # defined below is _reverse, so every call raised AttributeError.
    self._reverse()
def _reverse(self, **kwargs):
    """
    This is the environment implementation of
    :meth:`BaseContour.reverse`.
    Subclasses may override this method.
    """
    self.raiseNotImplementedError()
# ------------------------
# Point and Contour Inside
# ------------------------
def pointInside(self, point):
    """
    Determine if ``point`` is in the black or white of the contour.

        >>> contour.pointInside((40, 65))
        True

    ``point`` must be a :ref:`type-coordinate`.
    """
    point = normalizers.normalizeCoordinateTuple(point)
    return self._pointInside(point)
def _pointInside(self, point):
    """
    Subclasses may override this method.
    """
    # Use fontTools' winding test (non-zero fill rule: evenOdd=False).
    from fontTools.pens.pointInsidePen import PointInsidePen
    pen = PointInsidePen(glyphSet=None, testPoint=point, evenOdd=False)
    self.draw(pen)
    return pen.getResult()
def contourInside(self, otherContour):
    """
    Determine if ``otherContour`` is in the black or white of this contour.

        >>> contour.contourInside(otherContour)
        True

    ``contour`` must be a :class:`BaseContour`.
    """
    otherContour = normalizers.normalizeContour(otherContour)
    return self._contourInside(otherContour)
def _contourInside(self, otherContour):
    """
    Subclasses may override this method.
    """
    self.raiseNotImplementedError()
# ---------------
# Bounds and Area
# ---------------
bounds = dynamicProperty("bounds",
                         ("The bounds of the contour: "
                          "(xMin, yMin, xMax, yMax) or None."))
def _get_base_bounds(self):
    value = self._get_bounds()
    if value is not None:
        value = normalizers.normalizeBoundingBox(value)
    return value
def _get_bounds(self):
    """
    Subclasses may override this method.
    """
    from fontTools.pens.boundsPen import BoundsPen
    pen = BoundsPen(self.layer)
    self.draw(pen)
    return pen.bounds
area = dynamicProperty("area",
                       ("The area of the contour: "
                        "A positive number or None."))
def _get_base_area(self):
    value = self._get_area()
    if value is not None:
        value = normalizers.normalizeArea(value)
    return value
def _get_area(self):
    """
    Subclasses may override this method.
    """
    from fontTools.pens.areaPen import AreaPen
    pen = AreaPen(self.layer)
    self.draw(pen)
    # AreaPen's value is signed by winding direction; return magnitude.
    return abs(pen.value)
# --------
# Segments
# --------
# The base class implements the full segment interaction API.
# Subclasses do not need to override anything within the contour
# other than registering segmentClass. Subclasses may choose to
# implement this API independently if desired.
def _setContourInSegment(self, segment):
    # Parent the dynamically created segment, once only.
    if segment.contour is None:
        segment.contour = self
segments = dynamicProperty("segments")
def _get_segments(self):
    """
    Build segment objects on the fly from the contour's points.
    Subclasses may override this method.
    """
    points = list(self.points)
    # Group points: each group is the off-curves followed by one
    # on-curve (any non-offcurve point ends the current group).
    segments = [[]]
    lastWasOffCurve = False
    firstIsMove = points[0].type == "move"
    for point in points:
        segments[-1].append(point)
        if point.type != "offcurve":
            segments.append([])
        lastWasOffCurve = point.type == "offcurve"
    if len(segments[-1]) == 0:
        del segments[-1]
    if lastWasOffCurve and firstIsMove:
        # ignore trailing off curves
        del segments[-1]
    if lastWasOffCurve and not firstIsMove:
        # closed contour: trailing off-curves wrap around to the
        # first on-curve point
        segment = segments.pop(-1)
        segment.extend(segments[0])
        del segments[0]
        segments.append(segment)
    if not lastWasOffCurve and not firstIsMove:
        # closed contour: rotate so the first on-curve closes the loop
        segment = segments.pop(0)
        segments.append(segment)
    # wrap into segments
    wrapped = []
    for points in segments:
        s = self.segmentClass()
        s._setPoints(points)
        self._setContourInSegment(s)
        wrapped.append(s)
    return wrapped
def __getitem__(self, index):
    """Return the segment at *index*."""
    return self.segments[index]
def __iter__(self):
    """Iterate over the contour's segments."""
    return self._iterSegments()
def _iterSegments(self):
    # Snapshot the segment list once, then walk it front to back.
    for segment in list(self.segments):
        yield segment
def __len__(self):
    """Return the number of segments in the contour."""
    return self._len__segments()
def _len__segments(self, **kwargs):
    """
    Subclasses may override this method.
    """
    return len(self.segments)
def appendSegment(self, type=None, points=None, smooth=False, segment=None):
    """
    Append a segment to the contour.
    When a **segment** object is given, **type** and **points** that
    were not explicitly provided are taken from it, and **smooth**
    always is.
    """
    if segment is not None:
        # Bug fix: the condition was inverted (`if type is not None`),
        # which overwrote an explicitly passed type and left a missing
        # one as None for normalizeSegmentType to choke on. Sibling
        # appendBPoint uses the `is None` fallback pattern restored
        # here.
        if type is None:
            type = segment.type
        if points is None:
            points = [(point.x, point.y) for point in segment.points]
        smooth = segment.smooth
    type = normalizers.normalizeSegmentType(type)
    pts = []
    for pt in points:
        pt = normalizers.normalizeCoordinateTuple(pt)
        pts.append(pt)
    points = pts
    smooth = normalizers.normalizeBoolean(smooth)
    self._appendSegment(type=type, points=points, smooth=smooth)
def _appendSegment(self, type=None, points=None, smooth=False, **kwargs):
    """
    Subclasses may override this method.
    """
    # Appending is inserting at the end.
    self._insertSegment(len(self), type=type, points=points,
                        smooth=smooth, **kwargs)
def insertSegment(self, index, type=None, points=None, smooth=False, segment=None):
    """
    Insert a segment into the contour at **index**.
    When a **segment** object is given, **type** and **points** that
    were not explicitly provided are taken from it, and **smooth**
    always is.
    """
    if segment is not None:
        # Bug fix: the condition was inverted (`if type is not None`),
        # which overwrote an explicitly passed type and left a missing
        # one as None for normalizeSegmentType to choke on.
        if type is None:
            type = segment.type
        if points is None:
            points = [(point.x, point.y) for point in segment.points]
        smooth = segment.smooth
    index = normalizers.normalizeIndex(index)
    type = normalizers.normalizeSegmentType(type)
    pts = []
    for pt in points:
        pt = normalizers.normalizeCoordinateTuple(pt)
        pts.append(pt)
    points = pts
    smooth = normalizers.normalizeBoolean(smooth)
    self._insertSegment(index=index, type=type,
                        points=points, smooth=smooth)
def _insertSegment(self, index=None, type=None, points=None,
                   smooth=False, **kwargs):
    """
    Subclasses may override this method.
    """
    onCurve = points[-1]
    offCurve = points[:-1]
    segments = self.segments
    # Point index just past the preceding segments' points.
    ptCount = sum([len(segments[s].points) for s in range(index)]) + 1
    self.insertPoint(ptCount, onCurve, type=type, smooth=smooth)
    # Insert off-curves in reverse so they end up in source order
    # ahead of the on-curve point.
    for offCurvePoint in reversed(offCurve):
        self.insertPoint(ptCount, offCurvePoint, type="offcurve")
def removeSegment(self, segment, preserveCurve=False):
    """
    Remove segment from the contour.
    ``segment`` may be a segment object or an index.
    If ``preserveCurve`` is set to ``True`` an attempt
    will be made to preserve the shape of the curve
    if the environment supports that functionality.
    """
    # Resolve a segment object to its index.
    if not isinstance(segment, int):
        segment = self.segments.index(segment)
    segment = normalizers.normalizeIndex(segment)
    if segment >= self._len__segments():
        raise ValueError("No segment located at index %d." % segment)
    preserveCurve = normalizers.normalizeBoolean(preserveCurve)
    self._removeSegment(segment, preserveCurve)
def _removeSegment(self, segment, preserveCurve, **kwargs):
    """
    segment will be a valid segment index.
    preserveCurve will be a boolean.
    Subclasses may override this method.
    """
    # Removing a segment removes all of its points.
    segment = self.segments[segment]
    for point in segment.points:
        self.removePoint(point, preserveCurve)
def setStartSegment(self, segment):
    """
    Set the first segment on the contour.
    segment can be a segment object or an index.
    """
    segments = self.segments
    # Resolve a segment object to its index.
    if not isinstance(segment, int):
        segmentIndex = segments.index(segment)
    else:
        segmentIndex = segment
    # Nothing to do for degenerate contours or a no-op rotation.
    if len(self.segments) < 2:
        return
    if segmentIndex == 0:
        return
    if segmentIndex >= len(segments):
        raise ValueError(("The contour does not contain a segment "
                          "at index %d" % segmentIndex))
    self._setStartSegment(segmentIndex)
def _setStartSegment(self, segmentIndex, **kwargs):
    """
    Rotate the contour's points so the segment at ``segmentIndex``
    becomes the first segment.
    Subclasses may override this method.
    """
    segments = self.segments
    oldStart = segments[-1]
    oldLast = segments[0]
    # If the contour ends with a curve on top of a move,
    # delete the move.
    if oldLast.type == "curve" or oldLast.type == "qcurve":
        startOn = oldStart.onCurve
        lastOn = oldLast.onCurve
        if startOn.x == lastOn.x and startOn.y == lastOn.y:
            self.removeSegment(0)
            # Shift new the start index.
            segmentIndex = segmentIndex - 1
            segments = self.segments
    # If the first point is a move, convert it to a line.
    if segments[0].type == "move":
        segments[0].type = "line"
    # Reorder the points internally.
    segments = segments[segmentIndex - 1:] + segments[:segmentIndex - 1]
    points = []
    for segment in segments:
        for point in segment:
            points.append(((point.x, point.y), point.type,
                           point.smooth, point.name, point.identifier))
    # Clear the points.
    for point in self.points:
        self.removePoint(point)
    # Add the points.
    for point in points:
        position, type, smooth, name, identifier = point
        self.appendPoint(
            position,
            type=type,
            smooth=smooth,
            name=name,
            identifier=identifier
        )
# -------
# bPoints
# -------
bPoints = dynamicProperty("bPoints")
def _get_bPoints(self):
    # Wrap each cubic on-curve point in a bPoint object.
    bPoints = []
    for point in self.points:
        # NOTE(review): "qcurve" on-curves are excluded here, so
        # quadratic contours yield no bPoints — confirm intended.
        if point.type not in ("move", "line", "curve"):
            continue
        bPoint = self.bPointClass()
        bPoint.contour = self
        bPoint._setPoint(point)
        bPoints.append(bPoint)
    return tuple(bPoints)
def appendBPoint(self, type=None, anchor=None, bcpIn=None, bcpOut=None, bPoint=None):
    """
    Append a bPoint to the contour.
    When a **bPoint** object is given, any of **type**, **anchor**,
    **bcpIn** and **bcpOut** that were not explicitly provided are
    taken from it.
    """
    if bPoint is not None:
        if type is None:
            type = bPoint.type
        if anchor is None:
            anchor = bPoint.anchor
        if bcpIn is None:
            bcpIn = bPoint.bcpIn
        if bcpOut is None:
            bcpOut = bPoint.bcpOut
    type = normalizers.normalizeBPointType(type)
    anchor = normalizers.normalizeCoordinateTuple(anchor)
    # Missing control handles default to the anchor (zero offset).
    if bcpIn is None:
        bcpIn = (0, 0)
    bcpIn = normalizers.normalizeCoordinateTuple(bcpIn)
    if bcpOut is None:
        bcpOut = (0, 0)
    bcpOut = normalizers.normalizeCoordinateTuple(bcpOut)
    self._appendBPoint(type, anchor, bcpIn=bcpIn, bcpOut=bcpOut)
def _appendBPoint(self, type, anchor, bcpIn=None, bcpOut=None, **kwargs):
    """
    Subclasses may override this method.
    """
    # Appending is inserting at the end.
    self.insertBPoint(
        len(self.bPoints),
        type,
        anchor,
        bcpIn=bcpIn,
        bcpOut=bcpOut
    )
def insertBPoint(self, index, type=None, anchor=None, bcpIn=None, bcpOut=None, bPoint=None):
"""
Insert a bPoint at index in the contour.
"""
if bPoint is not None:
if type is None:
type = bPoint.type
if anchor is None:
anchor = bPoint.anchor
if bcpIn is None:
bcpIn = bPoint.bcpIn
if bcpOut is None:
bcpOut = bPoint.bcpOut
index = normalizers.normalizeIndex(index)
type = normalizers.normalizeBPointType(type)
anchor = normalizers.normalizeCoordinateTuple(anchor)
if bcpIn is None:
bcpIn = (0, 0)
bcpIn = normalizers.normalizeCoordinateTuple(bcpIn)
if bcpOut is None:
bcpOut = (0, 0)
bcpOut = normalizers.normalizeCoordinateTuple(bcpOut)
self._insertBPoint(index=index, type=type, anchor=anchor,
bcpIn=bcpIn, bcpOut=bcpOut)
    def _insertBPoint(self, index, type, anchor, bcpIn, bcpOut, **kwargs):
        """
        Insert a bPoint at ``index``.

        ``type`` is a normalized bPoint type; ``anchor``, ``bcpIn`` and
        ``bcpOut`` are normalized coordinate tuples.

        Subclasses may override this method.
        """
        # insert a simple line segment at the given anchor
        # look it up as a bPoint and change the bcpIn and bcpOut there
        # this avoids code duplication
        self._insertSegment(index=index, type="line",
                            points=[anchor], smooth=False)
        bPoints = self.bPoints
        # NOTE(review): the freshly inserted anchor appears to land at
        # bPoint index + 1 — confirm against _insertSegment semantics.
        index += 1
        if index >= len(bPoints):
            # its an append instead of an insert
            # so take the last bPoint
            index = -1
        bPoint = bPoints[index]
        bPoint.bcpIn = bcpIn
        bPoint.bcpOut = bcpOut
        bPoint.type = type
def removeBPoint(self, bPoint):
"""
Remove the bpoint from the contour.
bpoint can be a point object or an index.
"""
if not isinstance(bPoint, int):
bPoint = bPoint.index
bPoint = normalizers.normalizeIndex(bPoint)
if bPoint >= self._len__points():
raise ValueError("No bPoint located at index %d." % bPoint)
self._removeBPoint(bPoint)
    def _removeBPoint(self, index, **kwargs):
        """
        index will be a valid index.
        Subclasses may override this method.
        """
        bPoint = self.bPoints[index]
        # Remove the first off-curve of the segment after this bPoint
        # (the outgoing control point), if there is one.
        nextSegment = bPoint._nextSegment
        offCurves = nextSegment.offCurve
        if offCurves:
            offCurve = offCurves[0]
            self.removePoint(offCurve)
        # Remove the last off-curve of the bPoint's own segment
        # (the incoming control point), if there is one.
        segment = bPoint._segment
        offCurves = segment.offCurve
        if offCurves:
            offCurve = offCurves[-1]
            self.removePoint(offCurve)
        # Finally remove the anchor point itself.
        self.removePoint(bPoint._point)
# ------
# Points
# ------
def _setContourInPoint(self, point):
if point.contour is None:
point.contour = self
points = dynamicProperty("points")
def _get_points(self):
"""
Subclasses may override this method.
"""
return tuple([self._getitem__points(i)
for i in range(self._len__points())])
def _len__points(self):
return self._lenPoints()
    def _lenPoints(self, **kwargs):
        """
        This must return an integer indicating
        the number of points in the contour.
        Subclasses must override this method.
        """
        # Abstract: implemented by environment-specific subclasses.
        self.raiseNotImplementedError()
    def _getitem__points(self, index):
        # Normalize and bounds-check the index, fetch the wrapped
        # point, and ensure it carries a back-reference to this contour.
        index = normalizers.normalizeIndex(index)
        if index >= self._len__points():
            raise ValueError("No point located at index %d." % index)
        point = self._getPoint(index)
        self._setContourInPoint(point)
        return point
    def _getPoint(self, index, **kwargs):
        """
        This must return a wrapped point.
        index will be a valid index.
        Subclasses must override this method.
        """
        # Abstract: implemented by environment-specific subclasses.
        self.raiseNotImplementedError()
def _getPointIndex(self, point):
for i, other in enumerate(self.points):
if point == other:
return i
raise FontPartsError("The point could not be found.")
def appendPoint(self, position=None, type="line", smooth=False, name=None, identifier=None, point=None):
"""
Append a point to the contour.
"""
if point is not None:
if position is None:
position = point.position
type = point.type
smooth = point.smooth
if name is None:
name = point.name
if identifier is not None:
identifier = point.identifier
self.insertPoint(
len(self.points),
position=position,
type=type,
smooth=smooth,
name=name,
identifier=identifier
)
def insertPoint(self, index, position=None, type="line", smooth=False, name=None, identifier=None, point=None):
"""
Insert a point into the contour.
"""
if point is not None:
if position is None:
position = point.position
type = point.type
smooth = point.smooth
if name is None:
name = point.name
if identifier is not None:
identifier = point.identifier
index = normalizers.normalizeIndex(index)
position = normalizers.normalizeCoordinateTuple(position)
type = normalizers.normalizePointType(type)
smooth = normalizers.normalizeBoolean(smooth)
if name is not None:
name = normalizers.normalizePointName(name)
if identifier is not None:
identifier = normalizers.normalizeIdentifier(identifier)
self._insertPoint(
index,
position=position,
type=type,
smooth=smooth,
name=name,
identifier=identifier
)
    def _insertPoint(self, index, position, type="line",
                     smooth=False, name=None, identifier=None, **kwargs):
        """
        position will be a valid position (x, y).
        type will be a valid type.
        smooth will be a valid boolean.
        name will be a valid name or None.
        identifier will be a valid identifier or None.
        The identifier will not have been tested for uniqueness.
        Subclasses must override this method.
        """
        # Abstract: implemented by environment-specific subclasses.
        self.raiseNotImplementedError()
def removePoint(self, point, preserveCurve=False):
"""
Remove the point from the contour.
point can be a point object or an index.
If ``preserveCurve`` is set to ``True`` an attempt
will be made to preserve the shape of the curve
if the environment supports that functionality.
"""
if not isinstance(point, int):
point = self.points.index(point)
point = normalizers.normalizeIndex(point)
if point >= self._len__points():
raise ValueError("No point located at index %d." % point)
preserveCurve = normalizers.normalizeBoolean(preserveCurve)
self._removePoint(point, preserveCurve)
    def _removePoint(self, index, preserveCurve, **kwargs):
        """
        index will be a valid index. preserveCurve will be a boolean.
        Subclasses must override this method.
        """
        # Abstract: implemented by environment-specific subclasses.
        self.raiseNotImplementedError()
# ---------
# Selection
# ---------
# segments
selectedSegments = dynamicProperty(
"base_selectedSegments",
"""
A list of segments selected in the contour.
Getting selected segment objects:
>>> for segment in contour.selectedSegments:
... segment.move((10, 20))
Setting selected segment objects:
>>> contour.selectedSegments = someSegments
Setting also supports segment indexes:
>>> contour.selectedSegments = [0, 2]
"""
)
def _get_base_selectedSegments(self):
selected = tuple([normalizers.normalizeSegment(segment)
for segment in self._get_selectedSegments()])
return selected
def _get_selectedSegments(self):
"""
Subclasses may override this method.
"""
return self._getSelectedSubObjects(self.segments)
def _set_base_selectedSegments(self, value):
normalized = []
for i in value:
if isinstance(i, int):
i = normalizers.normalizeSegmentIndex(i)
else:
i = normalizers.normalizeSegment(i)
normalized.append(i)
self._set_selectedSegments(normalized)
def _set_selectedSegments(self, value):
"""
Subclasses may override this method.
"""
return self._setSelectedSubObjects(self.segments, value)
# points
selectedPoints = dynamicProperty(
"base_selectedPoints",
"""
A list of points selected in the contour.
Getting selected point objects:
>>> for point in contour.selectedPoints:
... point.move((10, 20))
Setting selected point objects:
>>> contour.selectedPoints = somePoints
Setting also supports point indexes:
>>> contour.selectedPoints = [0, 2]
"""
)
def _get_base_selectedPoints(self):
selected = tuple([normalizers.normalizePoint(point)
for point in self._get_selectedPoints()])
return selected
def _get_selectedPoints(self):
"""
Subclasses may override this method.
"""
return self._getSelectedSubObjects(self.points)
def _set_base_selectedPoints(self, value):
normalized = []
for i in value:
if isinstance(i, int):
i = normalizers.normalizePointIndex(i)
else:
i = normalizers.normalizePoint(i)
normalized.append(i)
self._set_selectedPoints(normalized)
def _set_selectedPoints(self, value):
"""
Subclasses may override this method.
"""
return self._setSelectedSubObjects(self.points, value)
# bPoints
selectedBPoints = dynamicProperty(
"base_selectedBPoints",
"""
A list of bPoints selected in the contour.
Getting selected bPoint objects:
>>> for bPoint in contour.selectedBPoints:
... bPoint.move((10, 20))
Setting selected bPoint objects:
>>> contour.selectedBPoints = someBPoints
Setting also supports bPoint indexes:
>>> contour.selectedBPoints = [0, 2]
"""
)
def _get_base_selectedBPoints(self):
selected = tuple([normalizers.normalizeBPoint(bPoint)
for bPoint in self._get_selectedBPoints()])
return selected
def _get_selectedBPoints(self):
"""
Subclasses may override this method.
"""
return self._getSelectedSubObjects(self.bPoints)
def _set_base_selectedBPoints(self, value):
normalized = []
for i in value:
if isinstance(i, int):
i = normalizers.normalizeBPointIndex(i)
else:
i = normalizers.normalizeBPoint(i)
normalized.append(i)
self._set_selectedBPoints(normalized)
def _set_selectedBPoints(self, value):
"""
Subclasses may override this method.
"""
return self._setSelectedSubObjects(self.bPoints, value)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/contour.py",
"copies": "1",
"size": "33847",
"license": "mit",
"hash": -323010208646112060,
"line_mean": 29.6863100635,
"line_max": 115,
"alpha_frac": 0.5626791148,
"autogenerated": false,
"ratio": 4.547494289936854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005704869122337854,
"num_lines": 1103
} |
from fontParts.world import _EnvironmentDispatcher
def AskString(message, value='', title='FontParts'):
    """
    An ask-a-string dialog; a `message` is required.
    Optionally a `value` and `title` can be provided.

    ::

        from fontParts.ui import AskString
        print(AskString("who are you?"))
    """
    ask = dispatcher["AskString"]
    return ask(message=message, value=value, title=title)
def AskYesNoCancel(message, title='FontParts', default=0, informativeText=""):
    """
    An ask yes, no or cancel dialog; a `message` is required.
    Optionally a `title`, `default` and `informativeText` can be provided.
    The `default` option indicates which button is the default button.

    ::

        from fontParts.ui import AskYesNoCancel
        print(AskYesNoCancel("who are you?"))
    """
    ask = dispatcher["AskYesNoCancel"]
    return ask(message=message, title=title,
               default=default, informativeText=informativeText)
def FindGlyph(aFont, message="Search for a glyph:", title='FontParts'):
    """
    A dialog to search for a glyph in the provided font.
    Optionally a `message` and `title` can be provided.

    ::

        from fontParts.ui import FindGlyph
        from fontParts.world import CurrentFont
        glyph = FindGlyph(CurrentFont())
        print(glyph)
    """
    find = dispatcher["FindGlyph"]
    return find(aFont=aFont, message=message, title=title)
def GetFile(message=None, title=None, directory=None, fileName=None,
            allowsMultipleSelection=False, fileTypes=None):
    """
    A get file dialog.
    Optionally a `message`, `title`, `directory`, `fileName`,
    `allowsMultipleSelection` and `fileTypes` can be provided.

    ::

        from fontParts.ui import GetFile
        print(GetFile())
    """
    get = dispatcher["GetFile"]
    return get(message=message, title=title, directory=directory,
               fileName=fileName,
               allowsMultipleSelection=allowsMultipleSelection,
               fileTypes=fileTypes)
def GetFileOrFolder(message=None, title=None, directory=None, fileName=None,
                    allowsMultipleSelection=False, fileTypes=None):
    """
    A get file or folder dialog.
    Optionally a `message`, `title`, `directory`, `fileName`,
    `allowsMultipleSelection` and `fileTypes` can be provided.

    ::

        from fontParts.ui import GetFileOrFolder
        print(GetFileOrFolder())
    """
    get = dispatcher["GetFileOrFolder"]
    return get(message=message, title=title,
               directory=directory, fileName=fileName,
               allowsMultipleSelection=allowsMultipleSelection,
               fileTypes=fileTypes)
def Message(message, title='FontParts', informativeText=""):
    """
    A message dialog.
    Optionally a `title` and `informativeText` can be provided.

    ::

        from fontParts.ui import Message
        print(Message("This is a message"))
    """
    show = dispatcher["Message"]
    return show(message=message, title=title,
                informativeText=informativeText)
def PutFile(message=None, fileName=None):
    """
    A put file dialog.
    Optionally a `message` and `fileName` can be provided.

    ::

        from fontParts.ui import PutFile
        print(PutFile())
    """
    put = dispatcher["PutFile"]
    return put(message=message, fileName=fileName)
def SearchList(items, message="Select an item:", title='FontParts'):
    """
    A dialog to search a given list.
    Optionally a `message` and `title` can be provided.

    ::

        from fontParts.ui import SearchList
        result = SearchList(["a", "b", "c"])
        print(result)
    """
    search = dispatcher["SearchList"]
    return search(items=items, message=message, title=title)
def SelectFont(message="Select a font:", title='FontParts', allFonts=None):
    """
    Select a font from all open fonts.
    Optionally a `message`, `title` and `allFonts` can be provided.
    If `allFonts` is `None` it will list all open fonts.

    ::

        from fontParts.ui import SelectFont
        font = SelectFont()
        print(font)
    """
    select = dispatcher["SelectFont"]
    return select(message=message, title=title, allFonts=allFonts)
def SelectGlyph(aFont, message="Select a glyph:", title='FontParts'):
    """
    Select a glyph from a given font.
    Optionally a `message` and `title` can be provided.

    ::

        from fontParts.ui import SelectGlyph
        font = CurrentFont()
        glyph = SelectGlyph(font)
        print(glyph)
    """
    select = dispatcher["SelectGlyph"]
    return select(aFont=aFont, message=message, title=title)
def ProgressBar(title="RoboFab...", ticks=None, label=""):
    """
    A progress bar dialog.
    Optionally a `title`, `ticks` and `label` can be provided.

    ::

        from fontParts.ui import ProgressBar
        bar = ProgressBar()
        # do something
        bar.close()
    """
    bar = dispatcher["ProgressBar"]
    return bar(title=title, ticks=ticks, label=label)
# ----------
# Dispatcher
# ----------

# Registry of UI hook names; environments register concrete
# implementations via _EnvironmentDispatcher. Note that "GetFolder"
# and "OneList" are registered here although no module-level wrapper
# function for them is defined above.
dispatcher = _EnvironmentDispatcher([
    "AskString",
    "AskYesNoCancel",
    "FindGlyph",
    "GetFile",
    "GetFolder",
    "GetFileOrFolder",
    "Message",
    "OneList",
    "PutFile",
    "SearchList",
    "SelectFont",
    "SelectGlyph",
    "ProgressBar",
])
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/ui.py",
"copies": "1",
"size": "5377",
"license": "mit",
"hash": 7642763138063346000,
"line_mean": 26.1565656566,
"line_max": 89,
"alpha_frac": 0.6215361726,
"autogenerated": false,
"ratio": 4.257323832145685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5378860004745685,
"avg_score": null,
"num_lines": null
} |
from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
from fontTools.feaLib.location import FeatureLibLocation
import re
import os
class Lexer(object):
    """Tokenizer for OpenType feature files.

    Iterating yields ``(token_type, value, location)`` tuples, where
    ``location`` is a ``FeatureLibLocation``. NEWLINE tokens are
    produced internally (to track line numbers) but filtered out by
    ``__next__``.
    """

    # Token type tags.
    NUMBER = "NUMBER"
    HEXADECIMAL = "HEXADECIMAL"
    OCTAL = "OCTAL"
    NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
    FLOAT = "FLOAT"
    STRING = "STRING"
    NAME = "NAME"
    FILENAME = "FILENAME"
    GLYPHCLASS = "GLYPHCLASS"
    CID = "CID"
    SYMBOL = "SYMBOL"
    COMMENT = "COMMENT"
    NEWLINE = "NEWLINE"
    ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK"

    # Character classes driving the scanner.
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
    CHAR_DIGIT_ = "0123456789"
    CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
    CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
    CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-"

    RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$")

    # After an "include" NAME token the lexer switches to filename
    # mode, where "(...)" delimits a literal file name.
    MODE_NORMAL_ = "NORMAL"
    MODE_FILENAME_ = "FILENAME"

    def __init__(self, text, filename):
        # filename is used only for error locations; may be None.
        self.filename_ = filename
        self.line_ = 1
        self.pos_ = 0
        self.line_start_ = 0
        self.text_ = text
        self.text_length_ = len(text)
        self.mode_ = Lexer.MODE_NORMAL_

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        # Skip NEWLINE tokens; they exist only for location tracking.
        while True:
            token_type, token, location = self.next_()
            if token_type != Lexer.NEWLINE:
                return (token_type, token, location)

    def location_(self):
        # 1-based column of the current scan position.
        column = self.pos_ - self.line_start_ + 1
        return FeatureLibLocation(self.filename_ or "<features>", self.line_, column)

    def next_(self):
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        # NOTE(review): next_char is None at end-of-text; the "in"
        # membership tests below would raise TypeError if the input
        # ends with "0", "-" or "\\" — presumably unreachable for
        # well-formed files; confirm upstream.
        next_char = text[start + 1] if start + 1 < limit else None
        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            # "\r\n" counts as a single newline.
            self.pos_ += 2 if next_char == "\n" else 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "#":
            # Comments run to the end of the line.
            self.scan_until_(Lexer.CHAR_NEWLINE_)
            return (Lexer.COMMENT, text[start : self.pos_], location)

        if self.mode_ is Lexer.MODE_FILENAME_:
            # Inside include(...): the parenthesized text is a literal
            # file name.
            if cur_char != "(":
                raise FeatureLibError("Expected '(' before file name", location)
            self.scan_until_(")")
            cur_char = text[self.pos_] if self.pos_ < limit else None
            if cur_char != ")":
                raise FeatureLibError("Expected ')' after file name", location)
            self.pos_ += 1
            self.mode_ = Lexer.MODE_NORMAL_
            return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)

        if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
            # "\123" is a CID reference.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
        if cur_char == "@":
            # "@Name" is a glyph class reference.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            glyphclass = text[start + 1 : self.pos_]
            if len(glyphclass) < 1:
                raise FeatureLibError("Expected glyph class name", location)
            if len(glyphclass) > 63:
                raise FeatureLibError(
                    "Glyph class names must not be longer than 63 characters", location
                )
            if not Lexer.RE_GLYPHCLASS.match(glyphclass):
                raise FeatureLibError(
                    "Glyph class names must consist of letters, digits, "
                    "underscore, period or hyphen",
                    location,
                )
            return (Lexer.GLYPHCLASS, glyphclass, location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start : self.pos_]
            if token == "include":
                # The next token must be a parenthesized file name.
                self.mode_ = Lexer.MODE_FILENAME_
            return (Lexer.NAME, token, location)
        if cur_char == "0" and next_char in "xX":
            self.pos_ += 2
            self.scan_over_(Lexer.CHAR_HEXDIGIT_)
            return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location)
        if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_:
            # A leading zero marks an octal literal.
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.OCTAL, int(text[start : self.pos_], 8), location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            # Digits followed by "." form a float literal.
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
            # Negative number (integer or float).
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char in Lexer.CHAR_SYMBOL_:
            self.pos_ += 1
            return (Lexer.SYMBOL, cur_char, location)
        if cur_char == '"':
            self.pos_ += 1
            self.scan_until_('"')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                # strip newlines embedded within a string
                string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1])
                return (Lexer.STRING, string, location)
            else:
                raise FeatureLibError("Expected '\"' to terminate string", location)
        raise FeatureLibError("Unexpected character: %r" % cur_char, location)

    def scan_over_(self, valid):
        # Advance pos_ while characters belong to *valid*.
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        # Advance pos_ until a character in *stop_at* (or EOF).
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p

    def scan_anonymous_block(self, tag):
        # Capture raw text up to the matching "} tag;" terminator.
        location = self.location_()
        tag = tag.strip()
        self.scan_until_(Lexer.CHAR_NEWLINE_)
        self.scan_over_(Lexer.CHAR_NEWLINE_)
        regexp = r"}\s*" + tag + r"\s*;"
        split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1)
        if len(split) != 2:
            raise FeatureLibError(
                "Expected '} %s;' to terminate anonymous block" % tag, location
            )
        self.pos_ += len(split[0])
        return (Lexer.ANONYMOUS_BLOCK, split[0], location)
class IncludingLexer(object):
    """A Lexer that follows include statements.

    The OpenType feature file specification states that due to
    historical reasons, relative imports should be resolved in this
    order:

    1. If the source font is UFO format, then relative to the UFO's
       font directory
    2. relative to the top-level include file
    3. relative to the parent include file

    We only support 1 (via includeDir) and 2.
    """

    def __init__(self, featurefile, *, includeDir=None):
        """Initializes an IncludingLexer.

        Behavior:
        If includeDir is passed, it will be used to determine the top-level
        include directory to use for all encountered include statements. If it is
        not passed, ``os.path.dirname(featurefile)`` will be considered the
        include directory.
        """
        # Stack of active lexers; the innermost included file is on top.
        self.lexers_ = [self.make_lexer_(featurefile)]
        self.featurefilepath = self.lexers_[0].filename_
        self.includeDir = includeDir

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        while self.lexers_:
            lexer = self.lexers_[-1]
            try:
                token_type, token, location = next(lexer)
            except StopIteration:
                # Current file exhausted; resume the including file.
                self.lexers_.pop()
                continue
            if token_type is Lexer.NAME and token == "include":
                fname_type, fname_token, fname_location = lexer.next()
                if fname_type is not Lexer.FILENAME:
                    raise FeatureLibError("Expected file name", fname_location)
                # semi_type, semi_token, semi_location = lexer.next()
                # if semi_type is not Lexer.SYMBOL or semi_token != ";":
                #     raise FeatureLibError("Expected ';'", semi_location)
                if os.path.isabs(fname_token):
                    path = fname_token
                else:
                    if self.includeDir is not None:
                        curpath = self.includeDir
                    elif self.featurefilepath is not None:
                        curpath = os.path.dirname(self.featurefilepath)
                    else:
                        # if the IncludingLexer was initialized from an in-memory
                        # file-like stream, it doesn't have a 'name' pointing to
                        # its filesystem path, therefore we fall back to using the
                        # current working directory to resolve relative includes
                        curpath = os.getcwd()
                    path = os.path.join(curpath, fname_token)
                # Guard against include cycles / runaway nesting.
                if len(self.lexers_) >= 5:
                    raise FeatureLibError("Too many recursive includes", fname_location)
                try:
                    self.lexers_.append(self.make_lexer_(path))
                except FileNotFoundError as err:
                    raise IncludedFeaNotFound(fname_token, fname_location) from err
            else:
                return (token_type, token, location)
        raise StopIteration()

    @staticmethod
    def make_lexer_(file_or_path):
        # Accept either an open file-like object or a filesystem path.
        if hasattr(file_or_path, "read"):
            fileobj, closing = file_or_path, False
        else:
            filename, closing = file_or_path, True
            fileobj = open(filename, "r", encoding="utf-8")
        data = fileobj.read()
        filename = getattr(fileobj, "name", None)
        if closing:
            fileobj.close()
        return Lexer(data, filename)

    def scan_anonymous_block(self, tag):
        # Delegate to the innermost (current) lexer.
        return self.lexers_[-1].scan_anonymous_block(tag)
class NonIncludingLexer(IncludingLexer):
    """Lexer that does not follow `include` statements, emits them as-is."""

    def __next__(self):  # Python 3
        # Only the top-level file is ever tokenized.
        return self.lexers_[0].__next__()
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.py",
"copies": "5",
"size": "11139",
"license": "apache-2.0",
"hash": -843143872667458400,
"line_mean": 38.0842105263,
"line_max": 88,
"alpha_frac": 0.5454708681,
"autogenerated": false,
"ratio": 3.999640933572711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7045111801672711,
"avg_score": null,
"num_lines": null
} |
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer
from fontTools.misc.encodingTools import getEncoding
from fontTools.misc.py23 import bytechr, tobytes, tostr
import fontTools.feaLib.ast as ast
import logging
import os
import re
# Module-level logger (not referenced within this excerpt).
log = logging.getLogger(__name__)
class Parser(object):
    """Initializes a Parser object.

    Example:

        .. code:: python

            from fontTools.feaLib.parser import Parser
            parser = Parser(file, font.getReverseGlyphMap())
            parsetree = parser.parse()

    Note: the ``glyphNames`` iterable serves a double role to help distinguish
    glyph names from ranges in the presence of hyphens and to ensure that glyph
    names referenced in a feature file are actually part of a font's glyph set.
    If the iterable is left empty, no glyph name in glyph set checking takes
    place, and all glyph tokens containing hyphens are treated as literal glyph
    names, not as ranges. (Adding a space around the hyphen can, in any case,
    help to disambiguate ranges from glyph names containing hyphens.)

    By default, the parser will follow ``include()`` statements in the feature
    file. To turn this off, pass ``followIncludes=False``. Pass a directory string as
    ``includeDir`` to explicitly declare a directory to search included feature files
    in.
    """

    # Maps a NAME token to a callable that builds an AST statement;
    # plugins/subclasses may populate this to extend the syntax.
    extensions = {}
    # The AST module used to build nodes; subclasses may swap it out.
    ast = ast

    # Registered feature tags for Stylistic Sets (ss01-ss20) and
    # Character Variants (cv01-cv99).
    SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)}
    CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)}
    def __init__(
        self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs
    ):
        # "glyphMap" is the deprecated spelling of "glyphNames".
        if "glyphMap" in kwargs:
            from fontTools.misc.loggingTools import deprecateArgument

            deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead")
            if glyphNames:
                raise TypeError(
                    "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive"
                )
            glyphNames = kwargs.pop("glyphMap")
        # Any remaining keyword argument is unsupported.
        if kwargs:
            raise TypeError(
                "unsupported keyword argument%s: %s"
                % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs))
            )

        self.glyphNames_ = set(glyphNames)
        self.doc_ = self.ast.FeatureFile()
        # Symbol tables for named anchors, glyph classes, lookups and
        # value records defined in the feature file.
        self.anchors_ = SymbolTable()
        self.glyphclasses_ = SymbolTable()
        self.lookups_ = SymbolTable()
        self.valuerecords_ = SymbolTable()
        self.symbol_tables_ = {self.anchors_, self.valuerecords_}
        # One-token lookahead state maintained by advance_lexer_.
        self.next_token_type_, self.next_token_ = (None, None)
        self.cur_comments_ = []
        self.next_token_location_ = None
        lexerClass = IncludingLexer if followIncludes else NonIncludingLexer
        self.lexer_ = lexerClass(featurefile, includeDir=includeDir)
        self.advance_lexer_(comments=True)
    def parse(self):
        """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile`
        object representing the root of the abstract syntax tree containing the
        parsed contents of the file."""
        statements = self.doc_.statements
        # Top-level loop: dispatch on the current token to the matching
        # statement parser; comments become ast.Comment nodes.
        while self.next_token_type_ is not None or self.cur_comments_:
            self.advance_lexer_(comments=True)
            if self.cur_token_type_ is Lexer.COMMENT:
                statements.append(
                    self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
                )
            elif self.is_cur_keyword_("include"):
                statements.append(self.parse_include_())
            elif self.cur_token_type_ is Lexer.GLYPHCLASS:
                statements.append(self.parse_glyphclass_definition_())
            elif self.is_cur_keyword_(("anon", "anonymous")):
                statements.append(self.parse_anonymous_())
            elif self.is_cur_keyword_("anchorDef"):
                statements.append(self.parse_anchordef_())
            elif self.is_cur_keyword_("languagesystem"):
                statements.append(self.parse_languagesystem_())
            elif self.is_cur_keyword_("lookup"):
                statements.append(self.parse_lookup_(vertical=False))
            elif self.is_cur_keyword_("markClass"):
                statements.append(self.parse_markClass_())
            elif self.is_cur_keyword_("feature"):
                statements.append(self.parse_feature_block_())
            elif self.is_cur_keyword_("table"):
                statements.append(self.parse_table_())
            elif self.is_cur_keyword_("valueRecordDef"):
                statements.append(self.parse_valuerecord_definition_(vertical=False))
            elif (
                self.cur_token_type_ is Lexer.NAME
                and self.cur_token_ in self.extensions
            ):
                # Registered extension statements get first-class parsing.
                statements.append(self.extensions[self.cur_token_](self))
            elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";":
                # Tolerate stray/empty ";" statements.
                continue
            else:
                raise FeatureLibError(
                    "Expected feature, languagesystem, lookup, markClass, "
                    'table, or glyph class definition, got {} "{}"'.format(
                        self.cur_token_type_, self.cur_token_
                    ),
                    self.cur_token_location_,
                )
        return self.doc_
    def parse_anchor_(self):
        """Parse an ``<anchor ...>`` element; returns an ``ast.Anchor``
        or ``None`` for ``<anchor NULL>``."""
        # Parses an anchor in any of the four formats given in the feature
        # file specification (2.e.vii).
        self.expect_symbol_("<")
        self.expect_keyword_("anchor")
        location = self.cur_token_location_

        if self.next_token_ == "NULL":  # Format D
            self.expect_keyword_("NULL")
            self.expect_symbol_(">")
            return None

        if self.next_token_type_ == Lexer.NAME:  # Format E
            # Reference to a named anchor created with anchorDef.
            name = self.expect_name_()
            anchordef = self.anchors_.resolve(name)
            if anchordef is None:
                raise FeatureLibError(
                    'Unknown anchor "%s"' % name, self.cur_token_location_
                )
            self.expect_symbol_(">")
            return self.ast.Anchor(
                anchordef.x,
                anchordef.y,
                name=name,
                contourpoint=anchordef.contourpoint,
                xDeviceTable=None,
                yDeviceTable=None,
                location=location,
            )

        # Format A: plain "<anchor x y>".
        x, y = self.expect_number_(), self.expect_number_()
        contourpoint = None
        if self.next_token_ == "contourpoint":  # Format B
            self.expect_keyword_("contourpoint")
            contourpoint = self.expect_number_()
        if self.next_token_ == "<":  # Format C
            xDeviceTable = self.parse_device_()
            yDeviceTable = self.parse_device_()
        else:
            xDeviceTable, yDeviceTable = None, None
        self.expect_symbol_(">")
        return self.ast.Anchor(
            x,
            y,
            name=None,
            contourpoint=contourpoint,
            xDeviceTable=xDeviceTable,
            yDeviceTable=yDeviceTable,
            location=location,
        )
    def parse_anchor_marks_(self):
        """Parse a ``[<anchor> mark @MARKCLASS]*`` sequence; returns a
        list of ``(anchor, markClass)`` pairs."""
        # Parses a sequence of ``[<anchor> mark @MARKCLASS]*.``
        anchorMarks = []  # [(self.ast.Anchor, markClassName)*]
        while self.next_token_ == "<":
            anchor = self.parse_anchor_()
            if anchor is None and self.next_token_ != "mark":
                continue  # <anchor NULL> without mark, eg. in GPOS type 5
            self.expect_keyword_("mark")
            markClass = self.expect_markClass_reference_()
            anchorMarks.append((anchor, markClass))
        return anchorMarks
    def parse_anchordef_(self):
        """Parse a named ``anchorDef`` statement and register the anchor
        in ``self.anchors_`` for later ``<anchor NAME>`` references."""
        # Parses a named anchor definition (`section 2.e.viii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.vii>`_).
        assert self.is_cur_keyword_("anchorDef")
        location = self.cur_token_location_
        x, y = self.expect_number_(), self.expect_number_()
        contourpoint = None
        if self.next_token_ == "contourpoint":
            self.expect_keyword_("contourpoint")
            contourpoint = self.expect_number_()
        name = self.expect_name_()
        self.expect_symbol_(";")
        anchordef = self.ast.AnchorDefinition(
            name, x, y, contourpoint=contourpoint, location=location
        )
        self.anchors_.define(name, anchordef)
        return anchordef
    def parse_anonymous_(self):
        """Parse an ``anonymous``/``anon`` data block into an
        ``ast.AnonymousBlock``."""
        # Parses an anonymous data block (`section 10 <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#10>`_).
        assert self.is_cur_keyword_(("anon", "anonymous"))
        tag = self.expect_tag_()
        # The lexer captures everything up to "} tag;" verbatim.
        _, content, location = self.lexer_.scan_anonymous_block(tag)
        self.advance_lexer_()
        self.expect_symbol_("}")
        end_tag = self.expect_tag_()
        assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()"
        self.expect_symbol_(";")
        return self.ast.AnonymousBlock(tag, content, location=location)
    def parse_attach_(self):
        """Parse a GDEF ``Attach`` statement."""
        # Parses a GDEF Attach statement (`section 9.b <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.b>`_)
        assert self.is_cur_keyword_("Attach")
        location = self.cur_token_location_
        glyphs = self.parse_glyphclass_(accept_glyphname=True)
        # One or more contour point indexes, terminated by ";".
        contourPoints = {self.expect_number_()}
        while self.next_token_ != ";":
            contourPoints.add(self.expect_number_())
        self.expect_symbol_(";")
        return self.ast.AttachStatement(glyphs, contourPoints, location=location)
def parse_enumerate_(self, vertical):
# Parse an enumerated pair positioning rule (`section 6.b.ii <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_).
assert self.cur_token_ in {"enumerate", "enum"}
self.advance_lexer_()
return self.parse_position_(enumerated=True, vertical=vertical)
def parse_GlyphClassDef_(self):
# Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'
assert self.is_cur_keyword_("GlyphClassDef")
location = self.cur_token_location_
if self.next_token_ != ",":
baseGlyphs = self.parse_glyphclass_(accept_glyphname=False)
else:
baseGlyphs = None
self.expect_symbol_(",")
if self.next_token_ != ",":
ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False)
else:
ligatureGlyphs = None
self.expect_symbol_(",")
if self.next_token_ != ",":
markGlyphs = self.parse_glyphclass_(accept_glyphname=False)
else:
markGlyphs = None
self.expect_symbol_(",")
if self.next_token_ != ";":
componentGlyphs = self.parse_glyphclass_(accept_glyphname=False)
else:
componentGlyphs = None
self.expect_symbol_(";")
return self.ast.GlyphClassDefStatement(
baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=location
)
def parse_glyphclass_definition_(self):
    """Parse a glyph class definition such as '@UPPERCASE = [A-Z];' and
    register it so later rules can reference the class by name."""
    location, name = self.cur_token_location_, self.cur_token_
    self.expect_symbol_("=")
    members = self.parse_glyphclass_(accept_glyphname=False)
    self.expect_symbol_(";")
    definition = self.ast.GlyphClassDefinition(name, members, location=location)
    self.glyphclasses_.define(name, definition)
    return definition
def split_glyph_range_(self, name, location):
    """Resolve a dashed token like ``a-b-c`` into a (start, limit) range.

    Since v1.20 the OpenType Feature File specification allows dashes in
    glyph names, so such a token is ambiguous: it could name one glyph,
    or a range split at any of its dashes.  We try every split against
    the font's glyph set (``self.glyphNames_``) and accept only when
    exactly one split yields two known glyphs.  Otherwise we raise a
    FeatureLibError asking the author to disambiguate with spaces —
    erroring out is deliberate, since warnings tend to get ignored.

    Returns the unique ``(start, limit)`` pair.
    Raises FeatureLibError when zero or several splits are valid.
    """
    pieces = name.split("-")
    candidates = [
        ("-".join(pieces[:i]), "-".join(pieces[i:])) for i in range(len(pieces))
    ]
    valid = [
        (start, limit)
        for start, limit in candidates
        if start in self.glyphNames_ and limit in self.glyphNames_
    ]
    if len(valid) == 1:
        return valid[0]
    if not valid:
        raise FeatureLibError(
            '"%s" is not a glyph in the font, and it can not be split '
            "into a range of known glyphs" % name,
            location,
        )
    ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in valid])
    raise FeatureLibError(
        'Ambiguous glyph range "%s"; '
        "please use %s to clarify what you mean" % (name, ranges),
        location,
    )
def parse_glyphclass_(self, accept_glyphname, accept_null=False):
    """Parse a glyph class: a named class reference (``@Name``), an
    inline bracketed class (``[a b c-d @OTHER]``), or — when
    ``accept_glyphname`` is true — a bare glyph name or CID.  When
    ``accept_null`` is true, the special ``NULL`` glyph is also accepted.

    Returns an ast node (GlyphName, NullGlyph, MarkClassName,
    GlyphClassName, or GlyphClass).  Raises FeatureLibError on unknown
    class names, ambiguous ranges, or unexpected tokens.
    """
    if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID):
        if accept_null and self.next_token_ == "NULL":
            # If you want a glyph called NULL, you should escape it.
            self.advance_lexer_()
            return self.ast.NullGlyph(location=self.cur_token_location_)
        glyph = self.expect_glyph_()
        self.check_glyph_name_in_glyph_set(glyph)
        return self.ast.GlyphName(glyph, location=self.cur_token_location_)
    if self.next_token_type_ is Lexer.GLYPHCLASS:
        # Named class reference: the class must already be defined.
        self.advance_lexer_()
        gc = self.glyphclasses_.resolve(self.cur_token_)
        if gc is None:
            raise FeatureLibError(
                "Unknown glyph class @%s" % self.cur_token_,
                self.cur_token_location_,
            )
        if isinstance(gc, self.ast.MarkClass):
            return self.ast.MarkClassName(gc, location=self.cur_token_location_)
        else:
            return self.ast.GlyphClassName(gc, location=self.cur_token_location_)
    # Otherwise this must be an inline class: "[" ... "]".
    self.expect_symbol_("[")
    location = self.cur_token_location_
    glyphs = self.ast.GlyphClass(location=location)
    while self.next_token_ != "]":
        if self.next_token_type_ is Lexer.NAME:
            glyph = self.expect_glyph_()
            location = self.cur_token_location_
            if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_:
                # A dashed token that is not itself a known glyph name:
                # try to interpret it as a range (see split_glyph_range_).
                start, limit = self.split_glyph_range_(glyph, location)
                self.check_glyph_name_in_glyph_set(start, limit)
                glyphs.add_range(
                    start, limit, self.make_glyph_range_(location, start, limit)
                )
            elif self.next_token_ == "-":
                # Explicit range "start - limit" with spaces around "-".
                start = glyph
                self.expect_symbol_("-")
                limit = self.expect_glyph_()
                self.check_glyph_name_in_glyph_set(start, limit)
                glyphs.add_range(
                    start, limit, self.make_glyph_range_(location, start, limit)
                )
            else:
                if "-" in glyph and not self.glyphNames_:
                    # Without a glyph set we cannot tell a dashed glyph
                    # name from a range, so warn instead of raising.
                    log.warning(
                        str(
                            FeatureLibError(
                                f"Ambiguous glyph name that looks like a range: {glyph!r}",
                                location,
                            )
                        )
                    )
                self.check_glyph_name_in_glyph_set(glyph)
                glyphs.append(glyph)
        elif self.next_token_type_ is Lexer.CID:
            glyph = self.expect_glyph_()
            if self.next_token_ == "-":
                # CID range; synthesized names use the "cidNNNNN" form.
                range_location = self.cur_token_location_
                range_start = self.cur_token_
                self.expect_symbol_("-")
                range_end = self.expect_cid_()
                self.check_glyph_name_in_glyph_set(
                    f"cid{range_start:05d}",
                    f"cid{range_end:05d}",
                )
                glyphs.add_cid_range(
                    range_start,
                    range_end,
                    self.make_cid_range_(range_location, range_start, range_end),
                )
            else:
                glyph_name = f"cid{self.cur_token_:05d}"
                self.check_glyph_name_in_glyph_set(glyph_name)
                glyphs.append(glyph_name)
        elif self.next_token_type_ is Lexer.GLYPHCLASS:
            # Nested named class reference inside the brackets.
            self.advance_lexer_()
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_,
                )
            if isinstance(gc, self.ast.MarkClass):
                gc = self.ast.MarkClassName(gc, location=self.cur_token_location_)
            else:
                gc = self.ast.GlyphClassName(gc, location=self.cur_token_location_)
            glyphs.add_class(gc)
        else:
            raise FeatureLibError(
                "Expected glyph name, glyph range, "
                f"or glyph class reference, found {self.next_token_!r}",
                self.next_token_location_,
            )
    self.expect_symbol_("]")
    return glyphs
def parse_glyph_pattern_(self, vertical):
    """Parse a glyph pattern, including lookups and context, e.g.::

        a b
        a b c' d e
        a b c' lookup ChangeC d e

    Returns a 6-tuple ``(prefix, glyphs, lookups, values, suffix,
    hasMarks)``: ``glyphs`` is the run of marked (') positions,
    ``prefix``/``suffix`` the unmarked context around it, ``lookups``
    the per-marked-position lookup lists (or None), and ``values`` the
    value records (or None) per position.  A fully unmarked pattern is
    returned entirely in the ``glyphs`` slot with empty context.
    """
    prefix, glyphs, lookups, values, suffix = ([], [], [], [], [])
    hasMarks = False
    while self.next_token_ not in {"by", "from", ";", ","}:
        gc = self.parse_glyphclass_(accept_glyphname=True)
        marked = False
        if self.next_token_ == "'":
            self.expect_symbol_("'")
            hasMarks = marked = True
        if marked:
            if suffix:
                # makeotf also reports this as an error, while FontForge
                # silently inserts ' in all the intervening glyphs.
                # https://github.com/fonttools/fonttools/pull/1096
                raise FeatureLibError(
                    "Unsupported contextual target sequence: at most "
                    "one run of marked (') glyph/class names allowed",
                    self.cur_token_location_,
                )
            glyphs.append(gc)
        elif glyphs:
            # First unmarked glyph after the marked run starts the suffix.
            suffix.append(gc)
        else:
            prefix.append(gc)
        if self.is_next_value_():
            values.append(self.parse_valuerecord_(vertical))
        else:
            values.append(None)
        # Zero or more "lookup NAME" references may follow a marked glyph.
        lookuplist = None
        while self.next_token_ == "lookup":
            if lookuplist is None:
                lookuplist = []
            self.expect_keyword_("lookup")
            if not marked:
                raise FeatureLibError(
                    "Lookups can only follow marked glyphs",
                    self.cur_token_location_,
                )
            lookup_name = self.expect_name_()
            lookup = self.lookups_.resolve(lookup_name)
            if lookup is None:
                raise FeatureLibError(
                    'Unknown lookup "%s"' % lookup_name, self.cur_token_location_
                )
            lookuplist.append(lookup)
        if marked:
            lookups.append(lookuplist)
    if not glyphs and not suffix:  # eg., "sub f f i by"
        assert lookups == []
        return ([], prefix, [None] * len(prefix), values, [], hasMarks)
    else:
        # Values may be written either on the marked glyphs (format 1)
        # or on the suffix glyphs (format 2); prefer format 2 when it
        # actually carries value records.
        assert not any(values[: len(prefix)]), values
        format1 = values[len(prefix) :][: len(glyphs)]
        format2 = values[(len(prefix) + len(glyphs)) :][: len(suffix)]
        values = (
            format2
            if format2 and isinstance(format2[0], self.ast.ValueRecord)
            else format1
        )
        return (prefix, glyphs, lookups, values, suffix, hasMarks)
def parse_chain_context_(self):
    """Parse the comma-separated glyph patterns of a chained-context rule.

    Consumes one or more patterns followed by the terminating ";".
    Returns ``(chainContext, hasLookups)`` where ``chainContext`` is a
    list of (prefix, glyphs, suffix) tuples and ``hasLookups`` tells the
    caller whether any pattern carried explicit ``lookup`` references.

    (Cleanup: the previous version bound an unused ``location`` local.)
    """
    prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
        vertical=False
    )
    chainContext = [(prefix, glyphs, suffix)]
    hasLookups = any(lookups)
    while self.next_token_ == ",":
        self.expect_symbol_(",")
        (
            prefix,
            glyphs,
            lookups,
            values,
            suffix,
            hasMarks,
        ) = self.parse_glyph_pattern_(vertical=False)
        chainContext.append((prefix, glyphs, suffix))
        hasLookups = hasLookups or any(lookups)
    self.expect_symbol_(";")
    return chainContext, hasLookups
def parse_ignore_(self):
    """Parse an ``ignore sub ...;`` or ``ignore pos ...;`` rule.

    Raises FeatureLibError if the keyword after ``ignore`` is neither a
    substitute nor a position keyword, or if the chain context carries
    explicit lookup references (not allowed inside ``ignore``).
    """
    assert self.is_cur_keyword_("ignore")
    location = self.cur_token_location_
    self.advance_lexer_()
    keyword = self.cur_token_
    if keyword not in ("substitute", "sub", "position", "pos"):
        raise FeatureLibError(
            'Expected "substitute" or "position"', self.cur_token_location_
        )
    chainContext, hasLookups = self.parse_chain_context_()
    if keyword in ("substitute", "sub"):
        if hasLookups:
            raise FeatureLibError(
                'No lookups can be specified for "ignore sub"', location
            )
        return self.ast.IgnoreSubstStatement(chainContext, location=location)
    if hasLookups:
        raise FeatureLibError(
            'No lookups can be specified for "ignore pos"', location
        )
    return self.ast.IgnorePosStatement(chainContext, location=location)
def parse_include_(self):
    """Parse an ``include(<filename>)`` statement.

    NOTE(review): deliberately does not consume a trailing ";" — the
    closing paren of include() already ends the statement.
    """
    assert self.cur_token_ == "include"
    loc = self.cur_token_location_
    path = self.expect_filename_()
    return ast.IncludeStatement(path, location=loc)
def parse_language_(self):
    """Parse 'language TAG [exclude_dflt|include_dflt] [required];'."""
    assert self.is_cur_keyword_("language")
    loc = self.cur_token_location_
    tag = self.expect_language_tag_()
    # Default rules are included unless "exclude_dflt" is given.
    include_default = True
    if self.next_token_ in ("exclude_dflt", "include_dflt"):
        include_default = self.expect_name_() == "include_dflt"
    required = False
    if self.next_token_ == "required":
        self.expect_keyword_("required")
        required = True
    self.expect_symbol_(";")
    return self.ast.LanguageStatement(
        tag, include_default, required, location=loc
    )
def parse_ligatureCaretByIndex_(self):
    """Parse a GDEF 'LigatureCaretByIndex glyphs index+;' statement."""
    assert self.is_cur_keyword_("LigatureCaretByIndex")
    loc = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    # At least one caret index is required; collect until ";".
    indexes = [self.expect_number_()]
    while self.next_token_ != ";":
        indexes.append(self.expect_number_())
    self.expect_symbol_(";")
    return self.ast.LigatureCaretByIndexStatement(glyphs, indexes, location=loc)
def parse_ligatureCaretByPos_(self):
    """Parse a GDEF 'LigatureCaretByPos glyphs coordinate+;' statement."""
    assert self.is_cur_keyword_("LigatureCaretByPos")
    loc = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    # At least one caret coordinate is required; collect until ";".
    coords = [self.expect_number_()]
    while self.next_token_ != ";":
        coords.append(self.expect_number_())
    self.expect_symbol_(";")
    return self.ast.LigatureCaretByPosStatement(glyphs, coords, location=loc)
def parse_lookup_(self, vertical):
    """Parse a ``lookup`` construct: either a block definition or, when
    followed directly by ";", a reference to an existing lookup."""
    assert self.is_cur_keyword_("lookup")
    location, name = self.cur_token_location_, self.expect_name_()
    # "lookup NAME;" references a previously defined lookup.
    if self.next_token_ == ";":
        referenced = self.lookups_.resolve(name)
        if referenced is None:
            raise FeatureLibError(
                'Unknown lookup "%s"' % name, self.cur_token_location_
            )
        self.expect_symbol_(";")
        return self.ast.LookupReferenceStatement(referenced, location=location)
    # Otherwise: "lookup NAME [useExtension] { ... } NAME;".
    use_extension = self.next_token_ == "useExtension"
    if use_extension:
        self.expect_keyword_("useExtension")
    block = self.ast.LookupBlock(name, use_extension, location=location)
    self.parse_block_(block, vertical)
    self.lookups_.define(name, block)
    return block
def parse_lookupflag_(self):
    """Parse a ``lookupflag`` statement.

    Two spellings exist: numeric format B ("lookupflag 6;") and named
    format A ("lookupflag RightToLeft MarkAttachmentType @M;").
    Returns a LookupFlagStatement.  Raises FeatureLibError for repeated
    or unrecognized flag names, and for a named form with no content.
    """
    assert self.is_cur_keyword_("lookupflag")
    location = self.cur_token_location_
    # format B: "lookupflag 6;"
    if self.next_token_type_ == Lexer.NUMBER:
        value = self.expect_number_()
        self.expect_symbol_(";")
        return self.ast.LookupFlagStatement(value, location=location)
    # format A: "lookupflag RightToLeft MarkAttachmentType @M;"
    value_seen = False
    value, markAttachment, markFilteringSet = 0, None, None
    # Bit values as defined for the OpenType LookupFlag field.
    flags = {
        "RightToLeft": 1,
        "IgnoreBaseGlyphs": 2,
        "IgnoreLigatures": 4,
        "IgnoreMarks": 8,
    }
    seen = set()  # each name may appear at most once per statement
    while self.next_token_ != ";":
        if self.next_token_ in seen:
            raise FeatureLibError(
                "%s can be specified only once" % self.next_token_,
                self.next_token_location_,
            )
        seen.add(self.next_token_)
        if self.next_token_ == "MarkAttachmentType":
            self.expect_keyword_("MarkAttachmentType")
            markAttachment = self.parse_glyphclass_(accept_glyphname=False)
        elif self.next_token_ == "UseMarkFilteringSet":
            self.expect_keyword_("UseMarkFilteringSet")
            markFilteringSet = self.parse_glyphclass_(accept_glyphname=False)
        elif self.next_token_ in flags:
            value_seen = True
            value = value | flags[self.expect_name_()]
        else:
            raise FeatureLibError(
                '"%s" is not a recognized lookupflag' % self.next_token_,
                self.next_token_location_,
            )
    self.expect_symbol_(";")
    # The named form must contribute at least one flag bit, a
    # mark-attachment class, or a mark-filtering set.
    if not any([value_seen, markAttachment, markFilteringSet]):
        raise FeatureLibError(
            "lookupflag must have a value", self.next_token_location_
        )
    return self.ast.LookupFlagStatement(
        value,
        markAttachment=markAttachment,
        markFilteringSet=markFilteringSet,
        location=location,
    )
def parse_markClass_(self):
    """Parse 'markClass <glyphs> <anchor> @NAME;'.

    Mark classes accumulate definitions across statements, so an
    existing class with the same name is extended rather than replaced.
    Returns the new MarkClassDefinition.
    """
    assert self.is_cur_keyword_("markClass")
    loc = self.cur_token_location_
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    anchor = self.parse_anchor_()
    name = self.expect_class_name_()
    self.expect_symbol_(";")
    markClass = self.doc_.markClasses.get(name)
    if markClass is None:
        # First definition for this name: register the class globally.
        markClass = self.ast.MarkClass(name)
        self.doc_.markClasses[name] = markClass
        self.glyphclasses_.define(name, markClass)
    definition = self.ast.MarkClassDefinition(
        markClass, anchor, glyphs, location=loc
    )
    markClass.addDefinition(definition)
    return definition
def parse_position_(self, enumerated, vertical):
    """Parse a GPOS ``position``/``pos`` rule and dispatch on its kind.

    Cursive/base/ligature/mark attachment rules (GPOS types 3-6) are
    handled by dedicated helpers; the remainder is classified here as
    chaining contextual (type 8), pair (type 2), or single (type 1)
    positioning.  ``enumerated`` is True when the rule was introduced
    with "enum"/"enumerate" (only valid for pair positioning).
    """
    assert self.cur_token_ in {"position", "pos"}
    if self.next_token_ == "cursive":  # GPOS type 3
        return self.parse_position_cursive_(enumerated, vertical)
    elif self.next_token_ == "base":  # GPOS type 4
        return self.parse_position_base_(enumerated, vertical)
    elif self.next_token_ == "ligature":  # GPOS type 5
        return self.parse_position_ligature_(enumerated, vertical)
    elif self.next_token_ == "mark":  # GPOS type 6
        return self.parse_position_mark_(enumerated, vertical)
    location = self.cur_token_location_
    prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_(
        vertical
    )
    self.expect_symbol_(";")
    if any(lookups):
        # GPOS type 8: Chaining contextual positioning; explicit lookups
        if any(values):
            raise FeatureLibError(
                'If "lookup" is present, no values must be specified', location
            )
        return self.ast.ChainContextPosStatement(
            prefix, glyphs, suffix, lookups, location=location
        )
    # Pair positioning, format A: "pos V 10 A -10;"
    # Pair positioning, format B: "pos V A -20;"
    if not prefix and not suffix and len(glyphs) == 2 and not hasMarks:
        if values[0] is None:  # Format B: "pos V A -20;"
            # In format B the single value belongs to the second glyph;
            # reversing the two-element list moves it into slot 1.
            values.reverse()
        return self.ast.PairPosStatement(
            glyphs[0],
            values[0],
            glyphs[1],
            values[1],
            enumerated=enumerated,
            location=location,
        )
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is only allowed with pair positionings', location
        )
    # GPOS type 1 (single positioning), possibly forced into a chain
    # context when the pattern carried marked (') glyphs.
    return self.ast.SinglePosStatement(
        list(zip(glyphs, values)),
        prefix,
        suffix,
        forceChain=hasMarks,
        location=location,
    )
def parse_position_cursive_(self, enumerated, vertical):
    """Parse a GPOS type 3 cursive attachment positioning rule."""
    location = self.cur_token_location_
    self.expect_keyword_("cursive")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with ' "cursive attachment positioning",
            location,
        )
    glyphs = self.parse_glyphclass_(accept_glyphname=True)
    entry_anchor = self.parse_anchor_()
    exit_anchor = self.parse_anchor_()
    self.expect_symbol_(";")
    return self.ast.CursivePosStatement(
        glyphs, entry_anchor, exit_anchor, location=location
    )
def parse_position_base_(self, enumerated, vertical):
    """Parse a GPOS type 4 mark-to-base attachment positioning rule."""
    location = self.cur_token_location_
    self.expect_keyword_("base")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            "mark-to-base attachment positioning",
            location,
        )
    base_glyphs = self.parse_glyphclass_(accept_glyphname=True)
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return self.ast.MarkBasePosStatement(base_glyphs, anchor_marks, location=location)
def parse_position_ligature_(self, enumerated, vertical):
    """Parse a GPOS type 5 mark-to-ligature attachment positioning rule.

    Each ligature component contributes one anchor/mark group, with
    groups separated by the ``ligComponent`` keyword.
    """
    location = self.cur_token_location_
    self.expect_keyword_("ligature")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            "mark-to-ligature attachment positioning",
            location,
        )
    ligatures = self.parse_glyphclass_(accept_glyphname=True)
    component_marks = [self.parse_anchor_marks_()]
    while self.next_token_ == "ligComponent":
        self.expect_keyword_("ligComponent")
        component_marks.append(self.parse_anchor_marks_())
    self.expect_symbol_(";")
    return self.ast.MarkLigPosStatement(ligatures, component_marks, location=location)
def parse_position_mark_(self, enumerated, vertical):
    """Parse a GPOS type 6 mark-to-mark attachment positioning rule."""
    location = self.cur_token_location_
    self.expect_keyword_("mark")
    if enumerated:
        raise FeatureLibError(
            '"enumerate" is not allowed with '
            "mark-to-mark attachment positioning",
            location,
        )
    base_marks = self.parse_glyphclass_(accept_glyphname=True)
    anchor_marks = self.parse_anchor_marks_()
    self.expect_symbol_(";")
    return self.ast.MarkMarkPosStatement(base_marks, anchor_marks, location=location)
def parse_script_(self):
    """Parse a 'script TAG;' statement."""
    assert self.is_cur_keyword_("script")
    loc = self.cur_token_location_
    tag = self.expect_script_tag_()
    self.expect_symbol_(";")
    return self.ast.ScriptStatement(tag, location=loc)
def parse_substitute_(self):
    """Parse a GSUB ``sub``/``substitute`` (or ``rsub``/``reversesub``)
    rule and classify it into the matching GSUB lookup type:

    * type 1 single, type 2 multiple, type 3 alternate ("from"),
      type 4 ligature, type 6 chaining contextual with explicit lookups,
      or type 8 reverse chaining single substitution.

    Raises FeatureLibError for rules that fit none of the supported
    shapes, carry value records, or combine incompatible features
    (e.g. "from" together with reverse chaining).
    """
    assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
    location = self.cur_token_location_
    reverse = self.cur_token_ in {"reversesub", "rsub"}
    (
        old_prefix,
        old,
        lookups,
        values,
        old_suffix,
        hasMarks,
    ) = self.parse_glyph_pattern_(vertical=False)
    if any(values):
        raise FeatureLibError(
            "Substitution statements cannot contain values", location
        )
    # Collect the replacement side after "by" (one or more classes,
    # NULL allowed) or "from" (exactly one class, alternate subst).
    new = []
    if self.next_token_ == "by":
        keyword = self.expect_keyword_("by")
        while self.next_token_ != ";":
            gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True)
            new.append(gc)
    elif self.next_token_ == "from":
        keyword = self.expect_keyword_("from")
        new = [self.parse_glyphclass_(accept_glyphname=False)]
    else:
        keyword = None
    self.expect_symbol_(";")
    if len(new) == 0 and not any(lookups):
        raise FeatureLibError(
            'Expected "by", "from" or explicit lookup references',
            self.cur_token_location_,
        )
    # GSUB lookup type 3: Alternate substitution.
    # Format: "substitute a from [a.1 a.2 a.3];"
    if keyword == "from":
        if reverse:
            raise FeatureLibError(
                'Reverse chaining substitutions do not support "from"', location
            )
        if len(old) != 1 or len(old[0].glyphSet()) != 1:
            raise FeatureLibError('Expected a single glyph before "from"', location)
        if len(new) != 1:
            raise FeatureLibError(
                'Expected a single glyphclass after "from"', location
            )
        return self.ast.AlternateSubstStatement(
            old_prefix, old[0], old_suffix, new[0], location=location
        )
    num_lookups = len([l for l in lookups if l is not None])
    # "sub a by NULL;" deletes the glyph (multiple subst with no output).
    is_deletion = False
    if len(new) == 1 and len(new[0].glyphSet()) == 0:
        new = []  # Deletion
        is_deletion = True
    # GSUB lookup type 1: Single substitution.
    # Format A: "substitute a by a.sc;"
    # Format B: "substitute [one.fitted one.oldstyle] by one;"
    # Format C: "substitute [a-d] by [A.sc-D.sc];"
    if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0:
        glyphs = list(old[0].glyphSet())
        replacements = list(new[0].glyphSet())
        # A single replacement glyph applies to every input glyph.
        if len(replacements) == 1:
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                "but found a glyph class with %d elements"
                % (len(glyphs), len(replacements)),
                location,
            )
        return self.ast.SingleSubstStatement(
            old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location
        )
    # GSUB lookup type 2: Multiple substitution.
    # Format: "substitute f_f_i by f f i;"
    if (
        not reverse
        and len(old) == 1
        and len(old[0].glyphSet()) == 1
        and (
            (len(new) > 1 and max([len(n.glyphSet()) for n in new]) == 1)
            or len(new) == 0
        )
        and num_lookups == 0
    ):
        return self.ast.MultipleSubstStatement(
            old_prefix,
            tuple(old[0].glyphSet())[0],
            old_suffix,
            tuple([list(n.glyphSet())[0] for n in new]),
            forceChain=hasMarks,
            location=location,
        )
    # GSUB lookup type 4: Ligature substitution.
    # Format: "substitute f f i by f_f_i;"
    if (
        not reverse
        and len(old) > 1
        and len(new) == 1
        and len(new[0].glyphSet()) == 1
        and num_lookups == 0
    ):
        return self.ast.LigatureSubstStatement(
            old_prefix,
            old,
            old_suffix,
            list(new[0].glyphSet())[0],
            forceChain=hasMarks,
            location=location,
        )
    # GSUB lookup type 8: Reverse chaining substitution.
    if reverse:
        if len(old) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                "only a single glyph or glyph class can be replaced",
                location,
            )
        if len(new) != 1:
            raise FeatureLibError(
                "In reverse chaining single substitutions, "
                'the replacement (after "by") must be a single glyph '
                "or glyph class",
                location,
            )
        if num_lookups != 0:
            raise FeatureLibError(
                "Reverse chaining substitutions cannot call named lookups", location
            )
        # Sorted glyph sets are matched element-wise, so both sides must
        # have equal size (or a single replacement broadcast to all).
        glyphs = sorted(list(old[0].glyphSet()))
        replacements = sorted(list(new[0].glyphSet()))
        if len(replacements) == 1:
            replacements = replacements * len(glyphs)
        if len(glyphs) != len(replacements):
            raise FeatureLibError(
                'Expected a glyph class with %d elements after "by", '
                "but found a glyph class with %d elements"
                % (len(glyphs), len(replacements)),
                location,
            )
        return self.ast.ReverseChainSingleSubstStatement(
            old_prefix, old_suffix, old, new, location=location
        )
    if len(old) > 1 and len(new) > 1:
        raise FeatureLibError(
            "Direct substitution of multiple glyphs by multiple glyphs "
            "is not supported",
            location,
        )
    # If there are remaining glyphs to parse, this is an invalid GSUB statement
    if len(new) != 0 or is_deletion:
        raise FeatureLibError("Invalid substitution statement", location)
    # GSUB lookup type 6: Chaining contextual substitution.
    rule = self.ast.ChainContextSubstStatement(
        old_prefix, old, old_suffix, lookups, location=location
    )
    return rule
def parse_subtable_(self):
    """Parse a 'subtable;' break statement."""
    assert self.is_cur_keyword_("subtable")
    loc = self.cur_token_location_
    self.expect_symbol_(";")
    return self.ast.SubtableStatement(location=loc)
def parse_size_parameters_(self):
    """Parse a ``parameters`` statement in a ``size`` feature (spec 8.b).

    Grammar: ``parameters <design size> <subfamily id>
    [<range start> <range end>] ;`` — the range is required when the
    subfamily identifier is non-zero.
    """
    assert self.is_cur_keyword_("parameters")
    loc = self.cur_token_location_
    design_size = self.expect_decipoint_()
    subfamily_id = self.expect_number_()
    range_start = 0.0
    range_end = 0.0
    if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or subfamily_id != 0:
        range_start = self.expect_decipoint_()
        range_end = self.expect_decipoint_()
    self.expect_symbol_(";")
    return self.ast.SizeParameters(
        design_size, subfamily_id, range_start, range_end, location=loc
    )
def parse_size_menuname_(self):
    """Parse a 'sizemenuname' record inside a ``size`` feature."""
    assert self.is_cur_keyword_("sizemenuname")
    loc = self.cur_token_location_
    platformID, platEncID, langID, string = self.parse_name_()
    return self.ast.FeatureNameStatement(
        "size", platformID, platEncID, langID, string, location=loc
    )
def parse_table_(self):
    """Parse a 'table TAG { ... } TAG;' block, dispatching the body to a
    per-table handler and checking that the closing tag matches."""
    assert self.is_cur_keyword_("table")
    location, name = self.cur_token_location_, self.expect_tag_()
    table = self.ast.TableBlock(name, location=location)
    self.expect_symbol_("{")
    handlers = {
        "GDEF": self.parse_table_GDEF_,
        "head": self.parse_table_head_,
        "hhea": self.parse_table_hhea_,
        "vhea": self.parse_table_vhea_,
        "name": self.parse_table_name_,
        "BASE": self.parse_table_BASE_,
        "OS/2": self.parse_table_OS_2_,
        "STAT": self.parse_table_STAT_,
    }
    handler = handlers.get(name)
    if handler is None:
        raise FeatureLibError(
            '"table %s" is not supported' % name.strip(), location
        )
    handler(table)
    self.expect_symbol_("}")
    closing_tag = self.expect_tag_()
    if closing_tag != name:
        raise FeatureLibError(
            'Expected "%s"' % name.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
    return table
def parse_table_GDEF_(self, table):
    """Parse the body of a ``table GDEF { ... }`` block.

    Accepts Attach, GlyphClassDef, LigatureCaretByIndex and
    LigatureCaretByPos statements, plus comments and stray semicolons,
    until the closing "}".
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("Attach"):
            statements.append(self.parse_attach_())
        elif self.is_cur_keyword_("GlyphClassDef"):
            statements.append(self.parse_GlyphClassDef_())
        elif self.is_cur_keyword_("LigatureCaretByIndex"):
            statements.append(self.parse_ligatureCaretByIndex_())
        elif self.is_cur_keyword_("LigatureCaretByPos"):
            statements.append(self.parse_ligatureCaretByPos_())
        elif self.cur_token_ == ";":
            continue
        else:
            # Fixed: the message now lists every statement type this loop
            # actually accepts (GlyphClassDef used to be missing).
            raise FeatureLibError(
                "Expected Attach, GlyphClassDef, LigatureCaretByIndex, "
                "or LigatureCaretByPos",
                self.cur_token_location_,
            )
def parse_table_head_(self, table):
    """Parse the body of a ``table head { ... }`` block.

    Only FontRevision statements (plus comments and stray semicolons)
    are permitted here.
    """
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            comment = self.ast.Comment(
                self.cur_token_, location=self.cur_token_location_
            )
            statements.append(comment)
        elif self.is_cur_keyword_("FontRevision"):
            statements.append(self.parse_FontRevision_())
        elif self.cur_token_ != ";":
            raise FeatureLibError("Expected FontRevision", self.cur_token_location_)
def parse_table_hhea_(self, table):
    """Parse the body of a ``table hhea { ... }`` block.

    Recognized fields: CaretOffset, Ascender, Descender, LineGap — each
    becomes an HheaField statement keyed by the lowercased field name.
    """
    statements = table.statements
    fields = ("CaretOffset", "Ascender", "Descender", "LineGap")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        token_type, token = self.cur_token_type_, self.cur_token_
        if token_type is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(token, location=self.cur_token_location_)
            )
        elif token_type is Lexer.NAME and token in fields:
            number = self.expect_number_()
            statements.append(
                self.ast.HheaField(
                    token.lower(), number, location=self.cur_token_location_
                )
            )
            if self.next_token_ != ";":
                raise FeatureLibError(
                    "Incomplete statement", self.next_token_location_
                )
        elif token == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected CaretOffset, Ascender, " "Descender or LineGap",
                self.cur_token_location_,
            )
def parse_table_vhea_(self, table):
    """Parse the body of a ``table vhea { ... }`` block.

    Recognized fields: VertTypoAscender, VertTypoDescender,
    VertTypoLineGap — each becomes a VheaField statement keyed by the
    lowercased field name.
    """
    statements = table.statements
    fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        token_type, token = self.cur_token_type_, self.cur_token_
        if token_type is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(token, location=self.cur_token_location_)
            )
        elif token_type is Lexer.NAME and token in fields:
            number = self.expect_number_()
            statements.append(
                self.ast.VheaField(
                    token.lower(), number, location=self.cur_token_location_
                )
            )
            if self.next_token_ != ";":
                raise FeatureLibError(
                    "Incomplete statement", self.next_token_location_
                )
        elif token == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected VertTypoAscender, "
                "VertTypoDescender or VertTypoLineGap",
                self.cur_token_location_,
            )
def parse_table_name_(self, table):
    """Parse the body of a ``table name { ... }`` block (nameid records)."""
    statements = table.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("nameid"):
            # parse_nameid_ returns a falsy result for reserved name ids
            # (1-6), which are skipped rather than stored.
            record = self.parse_nameid_()
            if record:
                statements.append(record)
        elif self.cur_token_ != ";":
            raise FeatureLibError("Expected nameid", self.cur_token_location_)
def parse_name_(self):
    """Parses a name record. See `section 9.e <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_.

    Reads optional platform/encoding/language ids, then the quoted
    string and the trailing ";".  Omitted ids default to
    Windows/Unicode/US-English (or Mac/Roman/English on platform 1).
    Returns ``(platformID, platEncID, langID, unescaped_string)``.
    Raises FeatureLibError for unsupported platform ids or encodings.

    NOTE(review): parse_stat_name_ duplicates this logic minus the
    trailing ";" — keep the two in sync.
    """
    platEncID = None
    langID = None
    if self.next_token_type_ in Lexer.NUMBERS:
        platformID = self.expect_any_number_()
        location = self.cur_token_location_
        if platformID not in (1, 3):
            raise FeatureLibError("Expected platform id 1 or 3", location)
        if self.next_token_type_ in Lexer.NUMBERS:
            platEncID = self.expect_any_number_()
            langID = self.expect_any_number_()
    else:
        platformID = 3
        location = self.cur_token_location_
    # Fill in per-platform defaults for any omitted ids.
    if platformID == 1:  # Macintosh
        platEncID = platEncID or 0  # Roman
        langID = langID or 0  # English
    else:  # 3, Windows
        platEncID = platEncID or 1  # Unicode
        langID = langID or 0x0409  # English
    string = self.expect_string_()
    self.expect_symbol_(";")
    encoding = getEncoding(platformID, platEncID, langID)
    if encoding is None:
        raise FeatureLibError("Unsupported encoding", location)
    unescaped = self.unescape_string_(string, encoding)
    return platformID, platEncID, langID, unescaped
def parse_stat_name_(self):
    """Parse a STAT-table name record: like parse_name_, but without
    consuming a trailing ";" (STAT callers handle their own
    terminators).

    Returns ``(platformID, platEncID, langID, unescaped_string)``.
    Raises FeatureLibError for unsupported platform ids or encodings.

    NOTE(review): duplicated from parse_name_ — keep the two in sync.
    """
    platEncID = None
    langID = None
    if self.next_token_type_ in Lexer.NUMBERS:
        platformID = self.expect_any_number_()
        location = self.cur_token_location_
        if platformID not in (1, 3):
            raise FeatureLibError("Expected platform id 1 or 3", location)
        if self.next_token_type_ in Lexer.NUMBERS:
            platEncID = self.expect_any_number_()
            langID = self.expect_any_number_()
    else:
        platformID = 3
        location = self.cur_token_location_
    # Fill in per-platform defaults for any omitted ids.
    if platformID == 1:  # Macintosh
        platEncID = platEncID or 0  # Roman
        langID = langID or 0  # English
    else:  # 3, Windows
        platEncID = platEncID or 1  # Unicode
        langID = langID or 0x0409  # English
    string = self.expect_string_()
    encoding = getEncoding(platformID, platEncID, langID)
    if encoding is None:
        raise FeatureLibError("Unsupported encoding", location)
    unescaped = self.unescape_string_(string, encoding)
    return platformID, platEncID, langID, unescaped
def parse_nameid_(self):
    """Parse one 'nameid <id> [platform enc lang] "string";' record.

    Returns a NameRecord statement, or None for name ids 1-6, which are
    reserved and cannot be set from a feature file (the record is still
    parsed, then discarded with a warning).

    Raises FeatureLibError for ids above the OpenType maximum of 32767.
    """
    assert self.cur_token_ == "nameid", self.cur_token_
    location, nameID = self.cur_token_location_, self.expect_any_number_()
    if nameID > 32767:
        raise FeatureLibError(
            "Name id value cannot be greater than 32767", self.cur_token_location_
        )
    if 1 <= nameID <= 6:
        # Lazy %-style logging args avoid formatting when the warning is
        # filtered out (the previous code interpolated eagerly with %).
        log.warning(
            "Name id %d cannot be set from the feature file. "
            "Ignoring record",
            nameID,
        )
        self.parse_name_()  # skip to the next record
        return None
    platformID, platEncID, langID, string = self.parse_name_()
    return self.ast.NameRecord(
        nameID, platformID, platEncID, langID, string, location=location
    )
def unescape_string_(self, string, encoding):
    """Expand backslash escapes in a name string.

    For UTF-16BE strings, ``\\XXXX`` escapes encode UTF-16 code units;
    for single-byte encodings, ``\\XX`` escapes encode bytes in that
    encoding.
    """
    if encoding == "utf_16_be":
        expanded = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string)
    else:
        expanded = re.sub(
            r"\\[0-9a-fA-F]{2}",
            lambda match: self.unescape_byte_(match, encoding),
            string,
        )
    # The expansion may leave surrogate code points behind; round-trip
    # through UTF-16BE with "surrogatepass" to turn surrogate pairs into
    # the actual characters they encode.
    utf16 = tobytes(expanded, "utf_16_be", "surrogatepass")
    return tostr(utf16, "utf_16_be")
@staticmethod
def unescape_unichr_(match):
n = match.group(0)[1:]
return chr(int(n, 16))
@staticmethod
def unescape_byte_(match, encoding):
    """Convert a regex match of a ``\\XX`` hex escape to a one-character
    string, decoding the byte with the record's target encoding."""
    hex_digits = match.group(0)[1:]
    return bytechr(int(hex_digits, 16)).decode(encoding)
def parse_table_BASE_(self, table):
    """Parse the body of a ``table BASE { ... }`` block.

    Supports HorizAxis/VertAxis BaseTagList + BaseScriptList pairs; a
    script list is combined with the previously seen tag list of the
    same axis into a BaseAxis statement.

    Raises FeatureLibError if a BaseScriptList appears before its axis's
    BaseTagList (this used to crash with a NameError on the unbound
    tag-list variable).
    """
    statements = table.statements
    horiz_bases = None
    vert_bases = None
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("HorizAxis.BaseTagList"):
            horiz_bases = self.parse_base_tag_list_()
        elif self.is_cur_keyword_("HorizAxis.BaseScriptList"):
            if horiz_bases is None:
                raise FeatureLibError(
                    "HorizAxis.BaseScriptList must be preceded by "
                    "HorizAxis.BaseTagList",
                    self.cur_token_location_,
                )
            horiz_scripts = self.parse_base_script_list_(len(horiz_bases))
            statements.append(
                self.ast.BaseAxis(
                    horiz_bases,
                    horiz_scripts,
                    False,
                    location=self.cur_token_location_,
                )
            )
        elif self.is_cur_keyword_("VertAxis.BaseTagList"):
            vert_bases = self.parse_base_tag_list_()
        elif self.is_cur_keyword_("VertAxis.BaseScriptList"):
            if vert_bases is None:
                raise FeatureLibError(
                    "VertAxis.BaseScriptList must be preceded by "
                    "VertAxis.BaseTagList",
                    self.cur_token_location_,
                )
            vert_scripts = self.parse_base_script_list_(len(vert_bases))
            statements.append(
                self.ast.BaseAxis(
                    vert_bases,
                    vert_scripts,
                    True,
                    location=self.cur_token_location_,
                )
            )
        elif self.cur_token_ == ";":
            continue
def parse_table_OS_2_(self, table):
    """Parse the body of a ``table OS/2 { ... }`` block.

    Recognizes numeric fields, the 10-number Panose entry,
    Unicode/CodePage range lists, and the Vendor string.  Each becomes
    an OS2Field statement keyed by the lowercased field name.
    """
    statements = table.statements
    numbers = (
        "FSType",
        "TypoAscender",
        "TypoDescender",
        "TypoLineGap",
        "winAscent",
        "winDescent",
        "XHeight",
        "CapHeight",
        "WeightClass",
        "WidthClass",
        "LowerOpSize",
        "UpperOpSize",
    )
    ranges = ("UnicodeRange", "CodePageRange")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            key = self.cur_token_.lower()
            value = None
            if self.cur_token_ in numbers:
                value = self.expect_number_()
            elif self.is_cur_keyword_("Panose"):
                # Panose is always exactly ten numbers.
                value = []
                for i in range(10):
                    value.append(self.expect_number_())
            elif self.cur_token_ in ranges:
                value = []
                while self.next_token_ != ";":
                    value.append(self.expect_number_())
            elif self.is_cur_keyword_("Vendor"):
                value = self.expect_string_()
            # NOTE(review): a NAME token matching none of the groups
            # above falls through with value=None and still produces an
            # OS2Field — confirm whether that is intentional or should
            # raise a FeatureLibError instead.
            statements.append(
                self.ast.OS2Field(key, value, location=self.cur_token_location_)
            )
        elif self.cur_token_ == ";":
            continue
def parse_STAT_ElidedFallbackName(self):
    # Parses ``ElidedFallbackName { name ...; ... }`` inside a STAT table
    # block and returns the list of STATNameStatement records (at least one;
    # raises FeatureLibError if the block is empty).
    assert self.is_cur_keyword_("ElidedFallbackName")
    self.expect_symbol_("{")
    names = []
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_()
        if self.is_cur_keyword_("name"):
            platformID, platEncID, langID, string = self.parse_stat_name_()
            nameRecord = self.ast.STATNameStatement(
                "stat",
                platformID,
                platEncID,
                langID,
                string,
                location=self.cur_token_location_,
            )
            names.append(nameRecord)
        else:
            # Only "name" statements (and stray ";" separators) are allowed.
            if self.cur_token_ != ";":
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_} " f"in ElidedFallbackName",
                    self.cur_token_location_,
                )
    self.expect_symbol_("}")
    if not names:
        raise FeatureLibError('Expected "name"', self.cur_token_location_)
    return names
def parse_STAT_design_axis(self):
    # Parses ``DesignAxis <tag> <order> { name ...; ... }`` and returns a
    # STATDesignAxisStatement.
    assert self.is_cur_keyword_("DesignAxis")
    names = []
    axisTag = self.expect_tag_()
    # Tags other than the registered ital/opsz/slnt/wdth/wght axes are
    # private and should by convention be uppercase; warn but accept.
    if (
        axisTag not in ("ital", "opsz", "slnt", "wdth", "wght")
        and not axisTag.isupper()
    ):
        log.warning(f"Unregistered axis tag {axisTag} should be uppercase.")
    axisOrder = self.expect_number_()
    self.expect_symbol_("{")
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_()
        if self.cur_token_type_ is Lexer.COMMENT:
            # Comments inside a DesignAxis block are dropped, not kept in AST.
            continue
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_stat_name_()
            name = self.ast.STATNameStatement(
                "stat", platformID, platEncID, langID, string, location=location
            )
            names.append(name)
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                f'Expected "name", got {self.cur_token_}', self.cur_token_location_
            )
    self.expect_symbol_("}")
    return self.ast.STATDesignAxisStatement(
        axisTag, axisOrder, names, self.cur_token_location_
    )
def parse_STAT_axis_value_(self):
    # Parses ``AxisValue { location ...; name ...; [flag ...;] }`` and
    # returns a STATAxisValueStatement.  Requires at least one name and one
    # location; multiple locations form a Format 4 record.
    assert self.is_cur_keyword_("AxisValue")
    self.expect_symbol_("{")
    locations = []
    names = []
    flags = 0
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            # Comments inside an AxisValue block are dropped.
            continue
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_stat_name_()
            name = self.ast.STATNameStatement(
                "stat", platformID, platEncID, langID, string, location=location
            )
            names.append(name)
        elif self.is_cur_keyword_("location"):
            location = self.parse_STAT_location()
            locations.append(location)
        elif self.is_cur_keyword_("flag"):
            flags = self.expect_stat_flags()
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                f"Unexpected token {self.cur_token_} " f"in AxisValue",
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    if not names:
        raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_)
    if not locations:
        raise FeatureLibError('Expected "Axis location"', self.cur_token_location_)
    if len(locations) > 1:
        # Format 4: every location must carry exactly one value, and each
        # axis tag may appear only once.
        for location in locations:
            if len(location.values) > 1:
                raise FeatureLibError(
                    "Only one value is allowed in a "
                    "Format 4 Axis Value Record, but "
                    f"{len(location.values)} were found.",
                    self.cur_token_location_,
                )
        format4_tags = []
        for location in locations:
            tag = location.tag
            if tag in format4_tags:
                raise FeatureLibError(
                    f"Axis tag {tag} already " "defined.", self.cur_token_location_
                )
            format4_tags.append(tag)
    return self.ast.STATAxisValueStatement(
        names, locations, flags, self.cur_token_location_
    )
def parse_STAT_location(self):
    # Parses a ``location <tag> <value>...`` clause of an AxisValue block
    # and returns an AxisValueLocationStatement.  Values are collected until
    # the terminating ";".  A three-value form is (nominal, min, max).
    values = []
    tag = self.expect_tag_()
    # expect_tag_ pads short tags with spaces; strip() enforces that the
    # written tag had exactly four characters.
    if len(tag.strip()) != 4:
        raise FeatureLibError(
            f"Axis tag {self.cur_token_} must be 4 " "characters",
            self.cur_token_location_,
        )
    while self.next_token_ != ";":
        if self.next_token_type_ is Lexer.FLOAT:
            value = self.expect_float_()
            values.append(value)
        elif self.next_token_type_ is Lexer.NUMBER:
            value = self.expect_number_()
            values.append(value)
        else:
            raise FeatureLibError(
                f'Unexpected value "{self.next_token_}". '
                "Expected integer or float.",
                self.next_token_location_,
            )
    if len(values) == 3:
        # Range form: the nominal value must lie inside [min, max].
        nominal, min_val, max_val = values
        if nominal < min_val or nominal > max_val:
            raise FeatureLibError(
                f"Default value {nominal} is outside "
                f"of specified range "
                f"{min_val}-{max_val}.",
                self.next_token_location_,
            )
    return self.ast.AxisValueLocationStatement(tag, values)
def parse_table_STAT_(self, table):
    # Parses the body of a ``table STAT { ... }`` block.  Tracks the tags of
    # DesignAxis statements seen so far so that AxisValue records can be
    # validated against them.  The closing "}" is left for the caller.
    statements = table.statements
    design_axes = []
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.NAME:
            if self.is_cur_keyword_("ElidedFallbackName"):
                names = self.parse_STAT_ElidedFallbackName()
                statements.append(self.ast.ElidedFallbackName(names))
            elif self.is_cur_keyword_("ElidedFallbackNameID"):
                value = self.expect_number_()
                statements.append(self.ast.ElidedFallbackNameID(value))
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("DesignAxis"):
                designAxis = self.parse_STAT_design_axis()
                design_axes.append(designAxis.tag)
                statements.append(designAxis)
                self.expect_symbol_(";")
            elif self.is_cur_keyword_("AxisValue"):
                axisValueRecord = self.parse_STAT_axis_value_()
                for location in axisValueRecord.locations:
                    if location.tag not in design_axes:
                        # Tag must be defined in a DesignAxis before it
                        # can be referenced
                        raise FeatureLibError(
                            "DesignAxis not defined for " f"{location.tag}.",
                            self.cur_token_location_,
                        )
                statements.append(axisValueRecord)
                self.expect_symbol_(";")
            else:
                raise FeatureLibError(
                    f"Unexpected token {self.cur_token_}", self.cur_token_location_
                )
        elif self.cur_token_ == ";":
            continue
def parse_base_tag_list_(self):
    """Parse the baseline-tag list of a BASE table axis and return the
    script tags as a list.  (See `section 9.a
    <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.a>`_)
    """
    assert self.cur_token_ in (
        "HorizAxis.BaseTagList",
        "VertAxis.BaseTagList",
    ), self.cur_token_
    tags = []
    # Collect script tags until the terminating semicolon.
    while self.next_token_ != ";":
        tags.append(self.expect_script_tag_())
    self.expect_symbol_(";")
    return tags
def parse_base_script_list_(self, count):
    """Parse a comma-separated list of BASE script records; each record
    carries *count* coordinates (one per baseline tag)."""
    assert self.cur_token_ in (
        "HorizAxis.BaseScriptList",
        "VertAxis.BaseScriptList",
    ), self.cur_token_
    records = [self.parse_base_script_record_(count)]
    # Further records are separated by commas; the list ends with ";".
    while self.next_token_ == ",":
        self.expect_symbol_(",")
        records.append(self.parse_base_script_record_(count))
    self.expect_symbol_(";")
    return records
def parse_base_script_record_(self, count):
    """Parse one BASE script record: a script tag, a default baseline tag,
    and *count* coordinate numbers."""
    script = self.expect_script_tag_()
    baseline = self.expect_script_tag_()
    coordinates = [self.expect_number_() for _ in range(count)]
    return script, baseline, coordinates
def parse_device_(self):
    """Parse a ``<device ...>`` record.

    Returns None for ``<device NULL>``, otherwise a tuple of
    (size, delta) pairs.
    """
    self.expect_symbol_("<")
    self.expect_keyword_("device")
    if self.next_token_ == "NULL":
        self.expect_keyword_("NULL")
        deltas = None
    else:
        pairs = [(self.expect_number_(), self.expect_number_())]
        while self.next_token_ == ",":
            self.expect_symbol_(",")
            pairs.append((self.expect_number_(), self.expect_number_()))
        deltas = tuple(pairs)  # tuple so the record is hashable
    self.expect_symbol_(">")
    return deltas
def is_next_value_(self):
    """Return True if the upcoming token can start a value record
    (a bare number or a '<'-delimited record)."""
    if self.next_token_type_ is Lexer.NUMBER:
        return True
    return self.next_token_ == "<"
def parse_valuerecord_(self, vertical):
    """Parse a GPOS value record.

    Accepted forms:
      * a bare number — an advance in the dominant direction
        (yAdvance when *vertical*, else xAdvance);
      * ``<NULL>`` — an empty value record;
      * ``<NAME>`` — reference to a previous ``valueRecordDef``;
      * ``<xPla yPla xAdv yAdv [4 device tables]>`` — the full form.

    Raises FeatureLibError for unknown value-record names and for device
    deltas outside the valid signed-byte range.
    """
    if self.next_token_type_ is Lexer.NUMBER:
        number, location = self.expect_number_(), self.cur_token_location_
        if vertical:
            val = self.ast.ValueRecord(
                yAdvance=number, vertical=vertical, location=location
            )
        else:
            val = self.ast.ValueRecord(
                xAdvance=number, vertical=vertical, location=location
            )
        return val
    self.expect_symbol_("<")
    location = self.cur_token_location_
    if self.next_token_type_ is Lexer.NAME:
        name = self.expect_name_()
        if name == "NULL":
            self.expect_symbol_(">")
            return self.ast.ValueRecord()
        # Resolve a named value record defined earlier by valueRecordDef.
        vrd = self.valuerecords_.resolve(name)
        if vrd is None:
            raise FeatureLibError(
                'Unknown valueRecordDef "%s"' % name, self.cur_token_location_
            )
        value = vrd.value
        xPlacement, yPlacement = (value.xPlacement, value.yPlacement)
        xAdvance, yAdvance = (value.xAdvance, value.yAdvance)
    else:
        xPlacement, yPlacement, xAdvance, yAdvance = (
            self.expect_number_(),
            self.expect_number_(),
            self.expect_number_(),
            self.expect_number_(),
        )
    if self.next_token_ == "<":
        # Four device tables, in the same order as the numeric fields.
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
            self.parse_device_(),
        )
        allDeltas = sorted(
            [
                delta
                for size, delta in (xPlaDevice if xPlaDevice else ())
                + (yPlaDevice if yPlaDevice else ())
                + (xAdvDevice if xAdvDevice else ())
                + (yAdvDevice if yAdvDevice else ())
            ]
        )
        # Guard against all four tables being <device NULL>: allDeltas is
        # then empty and indexing it would raise IndexError.
        if allDeltas and (allDeltas[0] < -128 or allDeltas[-1] > 127):
            raise FeatureLibError(
                "Device value out of valid range (-128..127)",
                self.cur_token_location_,
            )
    else:
        xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None)
    self.expect_symbol_(">")
    return self.ast.ValueRecord(
        xPlacement,
        yPlacement,
        xAdvance,
        yAdvance,
        xPlaDevice,
        yPlaDevice,
        xAdvDevice,
        yAdvDevice,
        vertical=vertical,
        location=location,
    )
def parse_valuerecord_definition_(self, vertical):
    """Parse a named value record definition.  (See section `2.e.v
    <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#2.e.v>`_)
    """
    assert self.is_cur_keyword_("valueRecordDef")
    location = self.cur_token_location_
    value = self.parse_valuerecord_(vertical)
    name = self.expect_name_()
    self.expect_symbol_(";")
    definition = self.ast.ValueRecordDefinition(name, value, location=location)
    # Register the name so later <NAME> references can resolve it.
    self.valuerecords_.define(name, definition)
    return definition
def parse_languagesystem_(self):
    """Parse a top-level ``languagesystem <script> <language>;`` statement."""
    assert self.cur_token_ == "languagesystem"
    location = self.cur_token_location_
    script_tag = self.expect_script_tag_()
    language_tag = self.expect_language_tag_()
    self.expect_symbol_(";")
    return self.ast.LanguageSystemStatement(
        script_tag, language_tag, location=location
    )
def parse_feature_block_(self):
    """Parse a ``feature <tag> [useExtension] { ... } <tag>;`` block."""
    assert self.cur_token_ == "feature"
    location = self.cur_token_location_
    tag = self.expect_tag_()
    # Vertical features use vertical-direction value records.
    vertical = tag in {"vkrn", "vpal", "vhal", "valt"}

    # Stylistic-set, character-variant and size features permit extra
    # statements inside their block (featureNames / cvParameters /
    # parameters+sizemenuname respectively).
    stylisticset = None
    cv_feature = None
    size_feature = False
    if tag in self.SS_FEATURE_TAGS:
        stylisticset = tag
    elif tag in self.CV_FEATURE_TAGS:
        cv_feature = tag
    else:
        size_feature = tag == "size"

    use_extension = False
    if self.next_token_ == "useExtension":
        self.expect_keyword_("useExtension")
        use_extension = True

    block = self.ast.FeatureBlock(
        tag, use_extension=use_extension, location=location
    )
    self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature)
    return block
def parse_feature_reference_(self):
    """Parse a ``feature <tag>;`` reference inside another feature block."""
    assert self.cur_token_ == "feature", self.cur_token_
    location = self.cur_token_location_
    referenced_tag = self.expect_tag_()
    self.expect_symbol_(";")
    return self.ast.FeatureReferenceStatement(referenced_tag, location=location)
def parse_featureNames_(self, tag):
    """Parses a ``featureNames`` statement found in stylistic set features.
    See section `8.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.c>`_.

    Returns a NestedBlock holding FeatureNameStatement (and Comment) nodes.
    """
    assert self.cur_token_ == "featureNames", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    # The block opens a new lexical scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.FeatureNameStatement(
                    tag, platformID, platEncID, langID, string, location=location
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvParameters_(self, tag):
    # Parses a ``cvParameters`` block found in Character Variant features.
    # See section `8.d <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#8.d>`_.
    # Returns a NestedBlock containing the nested name-ID blocks and
    # Character statements.
    assert self.cur_token_ == "cvParameters", self.cur_token_
    block = self.ast.NestedBlock(
        tag, self.cur_token_, location=self.cur_token_location_
    )
    self.expect_symbol_("{")
    # The block opens a new lexical scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_(
            {
                "FeatUILabelNameID",
                "FeatUITooltipTextNameID",
                "SampleTextNameID",
                "ParamUILabelNameID",
            }
        ):
            # Each of these keywords opens a nested block of name records.
            statements.append(self.parse_cvNameIDs_(tag, self.cur_token_))
        elif self.is_cur_keyword_("Character"):
            statements.append(self.parse_cvCharacter_(tag))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvNameIDs_(self, tag, block_name):
    # Parses one of the name-ID sub-blocks of cvParameters (e.g.
    # ``FeatUILabelNameID { name ...; }``); *block_name* is the keyword that
    # opened the sub-block.  Returns a NestedBlock of name statements.
    assert self.cur_token_ == block_name, self.cur_token_
    block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_)
    self.expect_symbol_("{")
    # The sub-block opens a new lexical scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            block.statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.is_cur_keyword_("name"):
            location = self.cur_token_location_
            platformID, platEncID, langID, string = self.parse_name_()
            block.statements.append(
                self.ast.CVParametersNameStatement(
                    tag,
                    platformID,
                    platEncID,
                    langID,
                    string,
                    block_name,
                    location=location,
                )
            )
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError('Expected "name"', self.cur_token_location_)
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    self.expect_symbol_(";")
    return block
def parse_cvCharacter_(self, tag):
    """Parse a ``Character <number>;`` statement inside a cvParameters
    block; the value must be a valid Unicode code point (0..0xFFFFFF)."""
    assert self.cur_token_ == "Character", self.cur_token_
    location = self.cur_token_location_
    character = self.expect_any_number_()
    self.expect_symbol_(";")
    if character < 0 or character > 0xFFFFFF:
        raise FeatureLibError(
            "Character value must be between "
            "{:#x} and {:#x}".format(0, 0xFFFFFF),
            location,
        )
    return self.ast.CharacterStatement(character, tag, location=location)
def parse_FontRevision_(self):
    """Parse a ``FontRevision`` statement found in the head table.  See
    `section 9.c <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.c>`_.
    The revision number must be a positive float."""
    assert self.cur_token_ == "FontRevision", self.cur_token_
    location = self.cur_token_location_
    version = self.expect_float_()
    self.expect_symbol_(";")
    if version <= 0:
        raise FeatureLibError("Font revision numbers must be positive", location)
    return self.ast.FontRevisionStatement(version, location=location)
def parse_block_(
    self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None
):
    # Parses the ``{ ... } <name>;`` body of a feature or lookup block,
    # dispatching each statement keyword to its parser and appending the
    # results to block.statements.  The stylisticset / size_feature /
    # cv_feature arguments enable the extra statements those feature kinds
    # allow.  After parsing, mixed single/multiple substitutions are
    # normalized (see below).
    self.expect_symbol_("{")
    # The block opens a new lexical scope in every symbol table.
    for symtab in self.symbol_tables_:
        symtab.enter_scope()
    statements = block.statements
    while self.next_token_ != "}" or self.cur_comments_:
        self.advance_lexer_(comments=True)
        if self.cur_token_type_ is Lexer.COMMENT:
            statements.append(
                self.ast.Comment(self.cur_token_, location=self.cur_token_location_)
            )
        elif self.cur_token_type_ is Lexer.GLYPHCLASS:
            statements.append(self.parse_glyphclass_definition_())
        elif self.is_cur_keyword_("anchorDef"):
            statements.append(self.parse_anchordef_())
        elif self.is_cur_keyword_({"enum", "enumerate"}):
            statements.append(self.parse_enumerate_(vertical=vertical))
        elif self.is_cur_keyword_("feature"):
            statements.append(self.parse_feature_reference_())
        elif self.is_cur_keyword_("ignore"):
            statements.append(self.parse_ignore_())
        elif self.is_cur_keyword_("language"):
            statements.append(self.parse_language_())
        elif self.is_cur_keyword_("lookup"):
            statements.append(self.parse_lookup_(vertical))
        elif self.is_cur_keyword_("lookupflag"):
            statements.append(self.parse_lookupflag_())
        elif self.is_cur_keyword_("markClass"):
            statements.append(self.parse_markClass_())
        elif self.is_cur_keyword_({"pos", "position"}):
            statements.append(
                self.parse_position_(enumerated=False, vertical=vertical)
            )
        elif self.is_cur_keyword_("script"):
            statements.append(self.parse_script_())
        elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}):
            statements.append(self.parse_substitute_())
        elif self.is_cur_keyword_("subtable"):
            statements.append(self.parse_subtable_())
        elif self.is_cur_keyword_("valueRecordDef"):
            statements.append(self.parse_valuerecord_definition_(vertical))
        elif stylisticset and self.is_cur_keyword_("featureNames"):
            statements.append(self.parse_featureNames_(stylisticset))
        elif cv_feature and self.is_cur_keyword_("cvParameters"):
            statements.append(self.parse_cvParameters_(cv_feature))
        elif size_feature and self.is_cur_keyword_("parameters"):
            statements.append(self.parse_size_parameters_())
        elif size_feature and self.is_cur_keyword_("sizemenuname"):
            statements.append(self.parse_size_menuname_())
        elif (
            self.cur_token_type_ is Lexer.NAME
            and self.cur_token_ in self.extensions
        ):
            # User-registered extension keyword.
            statements.append(self.extensions[self.cur_token_](self))
        elif self.cur_token_ == ";":
            continue
        else:
            raise FeatureLibError(
                "Expected glyph class definition or statement: got {} {}".format(
                    self.cur_token_type_, self.cur_token_
                ),
                self.cur_token_location_,
            )
    self.expect_symbol_("}")
    for symtab in self.symbol_tables_:
        symtab.exit_scope()
    # The closing name must repeat the block's opening tag/name.
    name = self.expect_name_()
    if name != block.name.strip():
        raise FeatureLibError(
            'Expected "%s"' % block.name.strip(), self.cur_token_location_
        )
    self.expect_symbol_(";")
    # A multiple substitution may have a single destination, in which case
    # it will look just like a single substitution. So if there are both
    # multiple and single substitutions, upgrade all the single ones to
    # multiple substitutions.
    # Check if we have a mix of non-contextual singles and multiples.
    has_single = False
    has_multiple = False
    for s in statements:
        if isinstance(s, self.ast.SingleSubstStatement):
            # NOTE(review): later statements overwrite the flag rather than
            # OR into it, so only the *last* statement of each kind decides
            # whether it counts as non-contextual — confirm intended.
            has_single = not any([s.prefix, s.suffix, s.forceChain])
        elif isinstance(s, self.ast.MultipleSubstStatement):
            has_multiple = not any([s.prefix, s.suffix, s.forceChain])
    # Upgrade all single substitutions to multiple substitutions.
    if has_single and has_multiple:
        statements = []
        for s in block.statements:
            if isinstance(s, self.ast.SingleSubstStatement):
                glyphs = s.glyphs[0].glyphSet()
                replacements = s.replacements[0].glyphSet()
                if len(replacements) == 1:
                    # One replacement for a whole class: repeat it.
                    replacements *= len(glyphs)
                for i, glyph in enumerate(glyphs):
                    statements.append(
                        self.ast.MultipleSubstStatement(
                            s.prefix,
                            glyph,
                            s.suffix,
                            [replacements[i]],
                            s.forceChain,
                            location=s.location,
                        )
                    )
            else:
                statements.append(s)
        block.statements = statements
def is_cur_keyword_(self, k):
    """Return True if the current token is the NAME keyword *k*, or, when
    *k* is a collection, one of the keywords it contains."""
    if self.cur_token_type_ is not Lexer.NAME:
        return False
    if isinstance(k, str):
        return self.cur_token_ == k
    return self.cur_token_ in k
def expect_class_name_(self):
    """Advance the lexer and return the next token, which must be a glyph
    class name (``@NAME``)."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.GLYPHCLASS:
        return self.cur_token_
    raise FeatureLibError("Expected @NAME", self.cur_token_location_)
def expect_cid_(self):
    """Advance the lexer and return the next token, which must be a CID."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.CID:
        raise FeatureLibError("Expected a CID", self.cur_token_location_)
    return self.cur_token_
def expect_filename_(self):
    """Advance the lexer and return the next token, which must be a
    file name."""
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.FILENAME:
        return self.cur_token_
    raise FeatureLibError("Expected file name", self.cur_token_location_)
def expect_glyph_(self):
    """Advance and return a glyph name.

    A leading backslash (used to escape keyword-like names) is stripped;
    CID tokens map to their canonical ``cidNNNNN`` form.  Names longer
    than 63 characters are rejected.
    """
    self.advance_lexer_()
    if self.cur_token_type_ is Lexer.CID:
        return "cid%05d" % self.cur_token_
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError(
            "Expected a glyph name or CID", self.cur_token_location_
        )
    name = self.cur_token_.lstrip("\\")
    self.cur_token_ = name  # keep the normalized form as the current token
    if len(name) > 63:
        raise FeatureLibError(
            "Glyph names must not be longer than 63 characters",
            self.cur_token_location_,
        )
    return name
def check_glyph_name_in_glyph_set(self, *names):
    """Raise if any of *names* (a single glyph, or the start/end of a
    range) is absent from the glyph set.

    When no glyph set was supplied, this is a no-op.
    """
    if not self.glyphNames_:
        return
    missing = [g for g in names if g not in self.glyphNames_]
    if missing:
        raise FeatureLibError(
            "The following glyph names are referenced but are missing from the "
            f"glyph set: {', '.join(missing)}",
            self.cur_token_location_,
        )
def expect_markClass_reference_(self):
    """Parse an ``@NAME`` reference and resolve it to a MarkClass; raise if
    the name is unknown or resolves to something else."""
    name = self.expect_class_name_()
    resolved = self.glyphclasses_.resolve(name)
    if resolved is None:
        raise FeatureLibError(
            "Unknown markClass @%s" % name, self.cur_token_location_
        )
    if not isinstance(resolved, self.ast.MarkClass):
        raise FeatureLibError(
            "@%s is not a markClass" % name, self.cur_token_location_
        )
    return resolved
def expect_tag_(self):
    """Advance and return the next token as a 4-character tag, padding
    shorter tags with trailing spaces."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError("Expected a tag", self.cur_token_location_)
    if len(self.cur_token_) > 4:
        raise FeatureLibError(
            "Tags cannot be longer than 4 characters", self.cur_token_location_
        )
    padded = self.cur_token_ + " "
    return padded[:4]
def expect_script_tag_(self):
    """Parse a script tag; lowercase "dflt" is rejected in favour of the
    canonical "DFLT"."""
    tag = self.expect_tag_()
    if tag != "dflt":
        return tag
    raise FeatureLibError(
        '"dflt" is not a valid script tag; use "DFLT" instead',
        self.cur_token_location_,
    )
def expect_language_tag_(self):
    """Parse a language tag; uppercase "DFLT" is rejected in favour of the
    canonical "dflt"."""
    tag = self.expect_tag_()
    if tag != "DFLT":
        return tag
    raise FeatureLibError(
        '"DFLT" is not a valid language tag; use "dflt" instead',
        self.cur_token_location_,
    )
def expect_symbol_(self, symbol):
    """Advance and require the next token to be exactly *symbol*."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.SYMBOL or self.cur_token_ != symbol:
        raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_)
    return symbol
def expect_keyword_(self, keyword):
    """Advance and require the next token to be the NAME *keyword*."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME or self.cur_token_ != keyword:
        raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_)
    return self.cur_token_
def expect_name_(self):
    """Advance and return the next token, which must be a NAME."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NAME:
        raise FeatureLibError("Expected a name", self.cur_token_location_)
    return self.cur_token_
def expect_number_(self):
    """Advance and return the next token, which must be a decimal number."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.NUMBER:
        raise FeatureLibError("Expected a number", self.cur_token_location_)
    return self.cur_token_
def expect_any_number_(self):
    """Advance and return the next token, which must be a number in any
    supported base (decimal, hexadecimal or octal)."""
    self.advance_lexer_()
    if self.cur_token_type_ not in Lexer.NUMBERS:
        raise FeatureLibError(
            "Expected a decimal, hexadecimal or octal number", self.cur_token_location_
        )
    return self.cur_token_
def expect_float_(self):
    """Advance and return the next token, which must be a float."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.FLOAT:
        raise FeatureLibError(
            "Expected a floating-point number", self.cur_token_location_
        )
    return self.cur_token_
def expect_decipoint_(self):
    """Parse a size value: a float is taken as-is, while an integer is
    interpreted in decipoints (divided by 10)."""
    if self.next_token_type_ == Lexer.FLOAT:
        return self.expect_float_()
    if self.next_token_type_ is Lexer.NUMBER:
        return self.expect_number_() / 10
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
def expect_stat_flags(self):
    """Parse the flag names of a STAT AxisValue ``flag`` statement up to
    the terminating ";" and return the combined bitmask."""
    flags = {
        "OlderSiblingFontAttribute": 1,
        "ElidableAxisValueName": 2,
    }
    value = 0
    while self.next_token_ != ";":
        if self.next_token_ not in flags:
            raise FeatureLibError(
                f"Unexpected STAT flag {self.cur_token_}", self.cur_token_location_
            )
        value |= flags[self.expect_name_()]
    return value
def expect_stat_values_(self):
    """Parse a STAT axis value, which may be either a float or an
    integer."""
    if self.next_token_type_ == Lexer.FLOAT:
        return self.expect_float_()
    if self.next_token_type_ is Lexer.NUMBER:
        return self.expect_number_()
    raise FeatureLibError(
        "Expected an integer or floating-point number", self.cur_token_location_
    )
def expect_string_(self):
    """Advance and return the next token, which must be a quoted string."""
    self.advance_lexer_()
    if self.cur_token_type_ is not Lexer.STRING:
        raise FeatureLibError("Expected a string", self.cur_token_location_)
    return self.cur_token_
def advance_lexer_(self, comments=False):
    # Moves the parser one token forward: cur_* take the values of next_*,
    # and next_* is refilled from the lexer.  Comment tokens are never
    # handed out as next_*; they are buffered in self.cur_comments_ and,
    # when *comments* is True, drained one per call (taking priority over
    # real tokens) so callers can preserve them in the AST.
    if comments and self.cur_comments_:
        self.cur_token_type_ = Lexer.COMMENT
        self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0)
        return
    else:
        self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
            self.next_token_type_,
            self.next_token_,
            self.next_token_location_,
        )
        # Pull tokens until the next non-comment token (or end of input).
        while True:
            try:
                (
                    self.next_token_type_,
                    self.next_token_,
                    self.next_token_location_,
                ) = next(self.lexer_)
            except StopIteration:
                # End of input; next_token_location_ keeps its last value.
                self.next_token_type_, self.next_token_ = (None, None)
            if self.next_token_type_ != Lexer.COMMENT:
                break
            # Buffer the comment for a later comments=True call.
            self.cur_comments_.append((self.next_token_, self.next_token_location_))
@staticmethod
def reverse_string_(s):
    """Return *s* with its characters in reverse order: 'abc' --> 'cba'."""
    return s[::-1]
def make_cid_range_(self, location, start, limit):
    """Expand a CID range into canonical glyph names:
    (location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]."""
    if start > limit:
        raise FeatureLibError(
            "Bad range: start should be less than limit", location
        )
    return ["cid%05d" % cid for cid in range(start, limit + 1)]
def make_glyph_range_(self, location, start, limit):
    """Expand a glyph-name range:
    (location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"].

    Start and end must have the same length; the varying part must be a
    single A-Z or a-z letter, or a 1-3 digit number.
    """
    if len(start) != len(limit):
        raise FeatureLibError(
            'Bad range: "%s" and "%s" should have the same length' % (start, limit),
            location,
        )
    rev = self.reverse_string_
    # Split the names into common prefix, varying middle, common suffix.
    prefix = os.path.commonprefix([start, limit])
    suffix = rev(os.path.commonprefix([rev(start), rev(limit)]))
    if len(suffix) > 0:
        start_range = start[len(prefix):-len(suffix)]
        limit_range = limit[len(prefix):-len(suffix)]
    else:
        start_range = start[len(prefix):]
        limit_range = limit[len(prefix):]
    if start_range >= limit_range:
        raise FeatureLibError(
            "Start of range must be smaller than its end", location
        )
    # Single letters step through the alphabet (case kept consistent).
    for pattern in (r"^[A-Z]$", r"^[a-z]$"):
        if re.match(pattern, start_range) and re.match(pattern, limit_range):
            return [
                "%s%c%s" % (prefix, c, suffix)
                for c in range(ord(start_range), ord(limit_range) + 1)
            ]
    # Numeric ranges are zero-padded to the width of the start value.
    digits = r"^[0-9]{1,3}$"
    if re.match(digits, start_range) and re.match(digits, limit_range):
        width = len(start_range)
        return [
            "%s%s%s" % (prefix, ("000" + str(i))[-width:], suffix)
            for i in range(int(start_range, 10), int(limit_range, 10) + 1)
        ]
    raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location)
class SymbolTable(object):
    """A stack of nested name scopes mapping names to definitions."""

    def __init__(self):
        # Start with a single, outermost scope.
        self.scopes_ = [{}]

    def enter_scope(self):
        """Push a fresh innermost scope."""
        self.scopes_.append({})

    def exit_scope(self):
        """Discard the innermost scope and everything defined in it."""
        self.scopes_.pop()

    def define(self, name, item):
        """Bind *name* to *item* in the innermost scope."""
        self.scopes_[-1][name] = item

    def resolve(self, name):
        """Look *name* up from the innermost scope outwards.

        Returns the first truthy binding found, or None when the name is
        unbound in every scope.
        """
        for scope in reversed(self.scopes_):
            found = scope.get(name)
            if found:
                return found
        return None
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/feaLib/parser.py",
"copies": "3",
"size": "92184",
"license": "apache-2.0",
"hash": -4052249581342377500,
"line_mean": 40.5805142084,
"line_max": 160,
"alpha_frac": 0.5406361191,
"autogenerated": false,
"ratio": 4.266790094885443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6307426213985443,
"avg_score": null,
"num_lines": null
} |
from fontTools.feaLib import ast
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.builder import Builder
from fontTools.feaLib.lexer import IncludingLexer, Lexer
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.ast import asFea
import StringIO, itertools
def asFea(g):
    """Serialise *g* to feature-file syntax.

    Prefers the most specific as*Fea method on the object; a 2-tuple is
    rendered as a glyph range, and bare glyph names that collide with FEA
    keywords are backslash-escaped.
    """
    if hasattr(g, 'asClassFea'):
        return g.asClassFea()
    if hasattr(g, 'asFea'):
        return g.asFea()
    if isinstance(g, tuple) and len(g) == 2:
        return asFea(g[0]) + "-" + asFea(g[1])  # a range
    if g.lower() in ast.fea_keywords:
        return "\\" + g  # escape keyword-like glyph names
    return g

# Patch the module-level helper so all ast nodes serialise through this
# extended version.
ast.asFea = asFea
class ast_MarkClass(ast.MarkClass):
    """MarkClass whose inline form serialises as a bracketed glyph list.

    This is better fixed upstream in parser.parse_glyphclass_ to handle
    MarkClasses.
    """

    def asClassFea(self, indent=""):
        glyph_list = " ".join(map(asFea, self.glyphs))
        return "[" + glyph_list + "]"
class ast_BaseClass(ast_MarkClass):
    """Anchor base class; serialises as a named glyph-class definition."""

    def asFea(self, indent=""):
        glyph_names = " ".join(map(asFea, self.glyphs.keys()))
        return "@" + self.name + " = [" + glyph_names + "];"
class ast_BaseClassDefinition(ast.MarkClassDefinition):
    """A single ``baseClass`` definition.

    Plain FEA has no baseClass statement, so it is emitted as a comment.
    """

    def asFea(self, indent=""):
        # like base class asFea
        return "# {}baseClass {} {} @{};".format(
            indent, self.glyphs.asFea(), self.anchor.asFea(), self.markClass.name)
class ast_MarkBasePosStatement(ast.MarkBasePosStatement):
    """``pos base`` statement that also accepts a baseClass on the left
    side (feax extension)."""

    def asFea(self, indent=""):
        # handles members added by parse_position_base_ with feax syntax
        if isinstance(self.base, ast.MarkClassName):
            # flattens "pos @BASECLASS mark @MARKCLASS" into one rule per
            # base-class definition
            rules = []
            for defn in self.base.markClass.definitions:
                marks = "".join(" mark @{}".format(m.name) for m in self.marks)
                rules.append("pos base {} {}{};".format(
                    defn.glyphs.asFea(), defn.anchor.asFea(), marks))
            return ("\n" + indent).join(rules)
        # like the base class method
        anchored = "".join(
            " {} mark @{}".format(a.asFea(), m.name) for a, m in self.marks)
        return "pos base {}{};".format(self.base.asFea(), anchored)

    def build(self, builder):
        # TODO: do the right thing here (write to ttf?)
        pass
class ast_MarkMarkPosStatement(ast.MarkMarkPosStatement):
    """``pos mark`` statement that also accepts a markClass on the left
    side (feax extension)."""

    # super class __init__() for reference
    # def __init__(self, location, baseMarks, marks):
    #     Statement.__init__(self, location)
    #     self.baseMarks, self.marks = baseMarks, marks

    def asFea(self, indent=""):
        # handles members added by parse_position_base_ with feax syntax
        if isinstance(self.baseMarks, ast.MarkClassName):
            # flattens "pos @MARKCLASS mark @MARKCLASS" into one rule per
            # mark-class definition
            rules = []
            for defn in self.baseMarks.markClass.definitions:
                marks = "".join(" mark @{}".format(m.name) for m in self.marks)
                rules.append("pos mark {} {}{};".format(
                    defn.glyphs.asFea(), defn.anchor.asFea(), marks))
            return ("\n" + indent).join(rules)
        # like the base class method
        anchored = "".join(
            " {} mark @{}".format(a.asFea() if a else "<anchor NULL>", m.name)
            for a, m in self.marks)
        return "pos mark {}{};".format(self.baseMarks.asFea(), anchored)

    def build(self, builder):
        # builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks)
        # TODO: do the right thing
        pass
class ast_CursivePosStatement(ast.CursivePosStatement):
    """``pos cursive`` statement that also accepts two base classes
    (entry anchors from the first, exit anchors from the second)."""

    # super class __init__() for reference
    # def __init__(self, location, glyphclass, entryAnchor, exitAnchor):
    #     Statement.__init__(self, location)
    #     self.glyphclass = glyphclass
    #     self.entryAnchor, self.exitAnchor = entryAnchor, exitAnchor

    def asFea(self, indent=""):
        if not isinstance(self.exitAnchor, ast.MarkClass):
            return super(ast_CursivePosStatement, self).asFea(indent)
        # pos cursive @BASE1 @BASE2: one rule per glyph found in either
        # class; a glyph missing from one side gets <anchor NULL>.
        covered = set(self.glyphclass.glyphSet())
        covered.update(self.exitAnchor.glyphSet())
        rules = []
        for g in sorted(covered):
            entry = self.glyphclass.glyphs.get(g, None)
            exit = self.exitAnchor.glyphs.get(g, None)
            rules.append("pos cursive {} {} {};".format(
                g,
                entry.anchor.asFea() if entry else "<anchor NULL>",
                exit.anchor.asFea() if exit else "<anchor NULL>"))
        return ("\n" + indent).join(rules)

    def build(self, builder):
        # TODO: do the right thing here (write to ttf?)
        pass
#similar to ast.MultipleSubstStatement
#one-to-many substitution, one glyph class is on LHS, multiple glyph classes may be on RHS
# equivalent to generation of one stmt for each glyph in the LHS class
# that's matched to corresponding glyphs in the RHS classes
#prefix and suffix are for contextual lookups and do not need processing
#replacement could contain multiple slots
#TODO: below only supports one RHS class?
class ast_MultipleSubstStatement(ast.Statement):
    """One-to-many substitution where the LHS may be a glyph class and one
    RHS slot may be a matching glyph class; expands to one rule per glyph.

    ``multindex`` is the index of the first RHS slot that holds a glyph
    class; glyphs of the LHS class are paired element-wise with glyphs of
    that slot.
    """

    def __init__(self, location, prefix, glyph, suffix, replacement):
        ast.Statement.__init__(self, location)
        self.prefix, self.glyph, self.suffix = prefix, glyph, suffix
        self.replacement = replacement
        # Default to slot 0; the original code left multindex unset (an
        # AttributeError waiting in build/asFea) when the LHS is a class
        # but no RHS slot is a class.
        self.multindex = 0
        if len(self.glyph.glyphSet()) > 1:
            for i, r in enumerate(self.replacement):
                if len(r.glyphSet()) > 1:
                    self.multindex = i  # first RHS slot with a glyph class
                    break

    def build(self, builder):
        """Emit one add_multiple_subst per paired (LHS glyph, RHS glyph)."""
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        glyphs = self.glyph.glyphSet()
        replacements = self.replacement[self.multindex].glyphSet()
        for i in range(min(len(glyphs), len(replacements))):
            builder.add_multiple_subst(
                self.location, prefix, glyphs[i], suffix,
                self.replacement[0:self.multindex] + [replacements[i]] + self.replacement[self.multindex+1:])

    def asFea(self, indent=""):
        """Serialise as one ``sub ... by ...;`` rule per paired glyph."""
        res = ""
        pres = " ".join(map(asFea, self.prefix)) if len(self.prefix) else ""
        sufs = " ".join(map(asFea, self.suffix)) if len(self.suffix) else ""
        glyphs = self.glyph.glyphSet()
        replacements = self.replacement[self.multindex].glyphSet()
        for i in range(min(len(glyphs), len(replacements))):
            res += ("\n" + indent if i > 0 else "") + "sub "
            if len(self.prefix) > 0 or len(self.suffix) > 0:
                # Contextual form: mark the substituted glyph with '.
                if len(self.prefix):
                    res += pres + " "
                res += asFea(glyphs[i]) + "'"
                if len(self.suffix):
                    res += " " + sufs
            else:
                res += asFea(glyphs[i])
            res += " by "
            res += " ".join(map(asFea, self.replacement[0:self.multindex] + [replacements[i]] + self.replacement[self.multindex+1:]))
            res += ";"
        return res
# similar to ast.LigatureSubstStatement
# many-to-one substitution, one glyph class is on RHS, multiple glyph classes may be on LHS
# equivalent to generation of one stmt for each glyph in the RHS class
# that's matched to corresponding glyphs in the LHS classes
# it's unclear which LHS class should correspond to the RHS class
# prefix and suffix are for contextual lookups and do not need processing
# replacement could contain multiple slots
#TODO: below only supports one LHS class?
class ast_LigatureSubstStatement(ast.Statement):
    """Many-to-one substitution where one LHS slot may be a glyph class.

    Equivalent to generating one stock LigatureSubstStatement per glyph in
    the RHS class, matched against the corresponding glyph in the chosen
    LHS class slot.  prefix/suffix are for contextual lookups and pass
    through untouched.  Only one LHS slot may itself be a class (the first
    one found is used).
    """
    def __init__(self, location, prefix, glyphs, suffix, replacement,
                 forceChain):
        ast.Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix)
        self.replacement, self.forceChain = replacement, forceChain
        # BUGFIX: default the class slot index up front.  The original left
        # self.multindex unset when the RHS was a class but no LHS slot was,
        # causing AttributeError later in build()/asFea().
        self.multindex = 0
        if len(self.replacement.glyphSet()) > 1:
            for i, g in enumerate(self.glyphs):
                if len(g.glyphSet()) > 1:
                    self.multindex = i #first LHS slot with a glyph class
                    break

    def build(self, builder):
        """Emit one add_ligature_subst call per glyph of the RHS class."""
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        replacements = self.replacement.glyphSet()
        glyphs = self.glyphs[self.multindex].glyphSet()
        for i in range(min(len(glyphs), len(replacements))):
            # BUGFIX: glyphs[i] must be wrapped in a list; the original
            # concatenated a glyph name (str) onto a list, raising TypeError.
            builder.add_ligature_subst(
                self.location, prefix,
                self.glyphs[:self.multindex] + [glyphs[i]] + self.glyphs[self.multindex+1:],
                suffix, replacements[i], self.forceChain)

    def asFea(self, indent=""):
        """Render as one 'sub' rule per glyph of the class slot."""
        res = ""
        pres = " ".join(map(asFea, self.prefix)) if len(self.prefix) else ""
        sufs = " ".join(map(asFea, self.suffix)) if len(self.suffix) else ""
        glyphs = self.glyphs[self.multindex].glyphSet()
        replacements = self.replacement.glyphSet()
        for i in range(min(len(glyphs), len(replacements))) :
            res += ("\n" + indent if i > 0 else "") + "sub "
            if len(self.prefix) > 0 or len(self.suffix) > 0 :
                if len(self.prefix) :
                    res += pres + " "
                # contextual form marks each input glyph with a trailing '
                res += " ".join(asFea(g) + "'" for g in self.glyphs[:self.multindex] + [glyphs[i]] + self.glyphs[self.multindex+1:])
                if len(self.suffix) :
                    res += " " + sufs
            else :
                res += " ".join(map(asFea, self.glyphs[:self.multindex] + [glyphs[i]] + self.glyphs[self.multindex+1:]))
            res += " by "
            res += asFea(replacements[i])
            res += ";"
        return res
class feaplus_ast(object) :
    """Namespace of AST node types handed to the parser.

    The pysilfont overrides are listed explicitly; every other attribute
    is looked up on the stock fontTools.feaLib ast module.
    """
    # GPOS statement overrides
    MarkBasePosStatement = ast_MarkBasePosStatement
    MarkMarkPosStatement = ast_MarkMarkPosStatement
    CursivePosStatement = ast_CursivePosStatement
    # GSUB statement overrides
    MultipleSubstStatement = ast_MultipleSubstStatement
    LigatureSubstStatement = ast_LigatureSubstStatement
    # baseClass / markClass support
    BaseClass = ast_BaseClass
    MarkClass = ast_MarkClass
    BaseClassDefinition = ast_BaseClassDefinition

    def __getattr__(self, name):
        # fall back to the imported fontTools.feaLib ast module
        return getattr(ast, name)
class feaplus_parser(Parser) :
    """fontTools feaLib Parser extended for pysilfont.

    Adds a 'baseClass' statement (mirroring markClass), accepts class-based
    forms of mark-to-base, mark-to-mark and cursive attachment, and
    generalizes multiple/ligature substitution so one side may be a glyph
    class (see the ast_* statement classes above).
    """
    extensions = {
        'baseClass' : lambda s : s.parseBaseClass()
    }
    ast = feaplus_ast()

    def __init__(self, filename, glyphmap) :
        # A None filename builds an empty parser; parse() can then be
        # pointed at a real file.
        if filename is None :
            empty_file = StringIO.StringIO("")
            super(feaplus_parser, self).__init__(empty_file, glyphmap)
        else :
            super(feaplus_parser, self).__init__(filename, glyphmap)

    def parse(self, filename=None) :
        """Parse and return the feature-file AST; optionally re-target the
        lexer at a new file first."""
        if filename is not None :
            self.lexer_ = IncludingLexer(filename)
            self.advance_lexer_(comments=True)
        return super(feaplus_parser, self).parse()

    # methods to limit layer violations
    def define_glyphclass(self, ap_nm, gc) :
        self.glyphclasses_.define(ap_nm, gc)

    def resolve_glyphclass(self, ap_nm):
        return self.glyphclasses_.resolve(ap_nm)

    def add_statement(self, val) :
        self.doc_.statements.append(val)

    def set_baseclass(self, ap_nm) :
        """Create a BaseClass named ap_nm, register it on the document and
        in the glyph-class namespace, and return it."""
        gc = self.ast.BaseClass(ap_nm)
        if not hasattr(self.doc_, 'baseClasses') :
            self.doc_.baseClasses = {}
        self.doc_.baseClasses[ap_nm] = gc
        self.define_glyphclass(ap_nm, gc)
        return gc

    def set_markclass(self, ap_nm) :
        """Create a MarkClass named ap_nm, register it on the document and
        in the glyph-class namespace, and return it."""
        gc = self.ast.MarkClass(ap_nm)
        if not hasattr(self.doc_, 'markClasses') :
            self.doc_.markClasses = {}
        self.doc_.markClasses[ap_nm] = gc
        self.define_glyphclass(ap_nm, gc)
        return gc

    # like base class parse_position_base_ & overrides it
    def parse_position_base_(self, enumerated, vertical):
        """Parse 'pos base ...', also accepting the class-based form
        'pos base @BASECLASS mark @MARKCLASS;'."""
        location = self.cur_token_location_
        self.expect_keyword_("base")
        if enumerated:
            raise FeatureLibError(
                '"enumerate" is not allowed with '
                'mark-to-base attachment positioning',
                location)
        base = self.parse_glyphclass_(accept_glyphname=True)
        if self.next_token_ == "<": # handle pos base [glyphs] <anchor> mark @MARKCLASS
            marks = self.parse_anchor_marks_()
        else: # handle pos base @BASECLASS mark @MARKCLASS; like base class parse_anchor_marks_
            marks = []
            while self.next_token_ == "mark": #TODO: is more than one 'mark' meaningful?
                self.expect_keyword_("mark")
                m = self.expect_markClass_reference_()
                marks.append(m)
        self.expect_symbol_(";")
        return self.ast.MarkBasePosStatement(location, base, marks)

    # like base class parse_position_mark_ & overrides it
    def parse_position_mark_(self, enumerated, vertical):
        """Parse 'pos mark ...', also accepting the class-based form
        'pos mark @MARKCLASS mark @MARKCLASS;'."""
        location = self.cur_token_location_
        self.expect_keyword_("mark")
        if enumerated:
            raise FeatureLibError(
                '"enumerate" is not allowed with '
                'mark-to-mark attachment positioning',
                location)
        baseMarks = self.parse_glyphclass_(accept_glyphname=True)
        if self.next_token_ == "<": # handle pos mark [glyphs] <anchor> mark @MARKCLASS
            marks = self.parse_anchor_marks_()
        else: # handle pos mark @MARKCLASS mark @MARKCLASS; like base class parse_anchor_marks_
            marks = []
            while self.next_token_ == "mark": #TODO: is more than one 'mark' meaningful?
                self.expect_keyword_("mark")
                m = self.expect_markClass_reference_()
                marks.append(m)
        self.expect_symbol_(";")
        return self.ast.MarkMarkPosStatement(location, baseMarks, marks)

    def parse_position_cursive_(self, enumerated, vertical):
        """Parse 'pos cursive ...', also accepting the class-based form
        'pos cursive @baseClass @baseClass;'."""
        location = self.cur_token_location_
        self.expect_keyword_("cursive")
        if enumerated:
            raise FeatureLibError(
                '"enumerate" is not allowed with '
                'cursive attachment positioning',
                location)
        glyphclass = self.parse_glyphclass_(accept_glyphname=True)
        if self.next_token_ == "<": # handle pos cursive @glyphClass <anchor entry> <anchor exit>
            entryAnchor = self.parse_anchor_()
            exitAnchor = self.parse_anchor_()
            self.expect_symbol_(";")
            return self.ast.CursivePosStatement(
                location, glyphclass, entryAnchor, exitAnchor)
        else: # handle pos cursive @baseClass @baseClass;
            mc = self.expect_markClass_reference_()
            return self.ast.CursivePosStatement(location, glyphclass.markClass, None, mc)

    # like base class parseMarkClass
    # but uses BaseClass and BaseClassDefinition which subclass Mark counterparts
    def parseBaseClass(self):
        """Parse 'baseClass <glyphs> <anchor> @NAME;' (pysilfont extension)."""
        if not hasattr(self.doc_, 'baseClasses'):
            self.doc_.baseClasses = {}
        location = self.cur_token_location_
        glyphs = self.parse_glyphclass_(accept_glyphname=True)
        anchor = self.parse_anchor_()
        name = self.expect_class_name_()
        self.expect_symbol_(";")
        baseClass = self.doc_.baseClasses.get(name)
        if baseClass is None:
            baseClass = self.ast.BaseClass(name)
            self.doc_.baseClasses[name] = baseClass
            self.glyphclasses_.define(name, baseClass)
        bcdef = self.ast.BaseClassDefinition(location, baseClass, anchor, glyphs)
        baseClass.addDefinition(bcdef)
        return bcdef

    #similar to and overrides parser.parse_substitute_
    def parse_substitute_(self):
        """Parse a substitution statement and dispatch to the GSUB lookup
        type matching its shape (single/multiple/alternate/ligature/
        reverse-chaining/chaining-contextual)."""
        assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"}
        location = self.cur_token_location_
        reverse = self.cur_token_ in {"reversesub", "rsub"}
        old_prefix, old, lookups, values, old_suffix, hasMarks = \
            self.parse_glyph_pattern_(vertical=False)
        if any(values):
            raise FeatureLibError(
                "Substitution statements cannot contain values", location)
        new = []
        if self.next_token_ == "by":
            keyword = self.expect_keyword_("by")
            while self.next_token_ != ";":
                gc = self.parse_glyphclass_(accept_glyphname=True)
                new.append(gc)
        elif self.next_token_ == "from":
            keyword = self.expect_keyword_("from")
            new = [self.parse_glyphclass_(accept_glyphname=False)]
        else:
            keyword = None
        self.expect_symbol_(";")
        # BUGFIX: was 'len(new) is 0' -- identity comparison against an int
        # literal is implementation-dependent; use equality.
        if len(new) == 0 and not any(lookups):
            raise FeatureLibError(
                'Expected "by", "from" or explicit lookup references',
                self.cur_token_location_)
        # GSUB lookup type 3: Alternate substitution.
        # Format: "substitute a from [a.1 a.2 a.3];"
        if keyword == "from":
            if reverse:
                raise FeatureLibError(
                    'Reverse chaining substitutions do not support "from"',
                    location)
            if len(old) != 1 or len(old[0].glyphSet()) != 1:
                raise FeatureLibError(
                    'Expected a single glyph before "from"',
                    location)
            if len(new) != 1:
                raise FeatureLibError(
                    'Expected a single glyphclass after "from"',
                    location)
            return self.ast.AlternateSubstStatement(
                location, old_prefix, old[0], old_suffix, new[0])
        num_lookups = len([l for l in lookups if l is not None])
        # GSUB lookup type 1: Single substitution.
        # Format A: "substitute a by a.sc;"
        # Format B: "substitute [one.fitted one.oldstyle] by one;"
        # Format C: "substitute [a-d] by [A.sc-D.sc];"
        if (not reverse and len(old) == 1 and len(new) == 1 and
                num_lookups == 0):
            glyphs = list(old[0].glyphSet())
            replacements = list(new[0].glyphSet())
            if len(replacements) == 1:
                replacements = replacements * len(glyphs)
            if len(glyphs) != len(replacements):
                raise FeatureLibError(
                    'Expected a glyph class with %d elements after "by", '
                    'but found a glyph class with %d elements' %
                    (len(glyphs), len(replacements)), location)
            return self.ast.SingleSubstStatement(
                location, old, new,
                old_prefix, old_suffix,
                forceChain=hasMarks
            )
        # GSUB lookup type 2: Multiple substitution.
        # Format: "substitute f_f_i by f f i;"
        if (not reverse and
                len(old) == 1 and len(new) > 1 and num_lookups == 0):
            return self.ast.MultipleSubstStatement(location, old_prefix, old[0], old_suffix, new)
        # GSUB lookup type 4: Ligature substitution.
        # Format: "substitute f f i by f_f_i;"
        if (not reverse and
                len(old) > 1 and len(new) == 1 and num_lookups == 0):
            return self.ast.LigatureSubstStatement(location, old_prefix, old, old_suffix, new[0], forceChain=hasMarks)
        # GSUB lookup type 8: Reverse chaining substitution.
        if reverse:
            if len(old) != 1:
                raise FeatureLibError(
                    "In reverse chaining single substitutions, "
                    "only a single glyph or glyph class can be replaced",
                    location)
            if len(new) != 1:
                raise FeatureLibError(
                    'In reverse chaining single substitutions, '
                    'the replacement (after "by") must be a single glyph '
                    'or glyph class', location)
            if num_lookups != 0:
                raise FeatureLibError(
                    "Reverse chaining substitutions cannot call named lookups",
                    location)
            glyphs = sorted(list(old[0].glyphSet()))
            replacements = sorted(list(new[0].glyphSet()))
            if len(replacements) == 1:
                replacements = replacements * len(glyphs)
            if len(glyphs) != len(replacements):
                raise FeatureLibError(
                    'Expected a glyph class with %d elements after "by", '
                    'but found a glyph class with %d elements' %
                    (len(glyphs), len(replacements)), location)
            return self.ast.ReverseChainSingleSubstStatement(
                location, old_prefix, old_suffix, old, new)
        # GSUB lookup type 6: Chaining contextual substitution.
        assert len(new) == 0, new
        rule = self.ast.ChainContextSubstStatement(
            location, old_prefix, old, old_suffix, lookups)
        return rule

    def parse_glyphclass_(self, accept_glyphname):
        """Parse a glyph, glyph range or glyph-class reference, returning
        the appropriate AST node (MarkClassName for mark classes)."""
        if (accept_glyphname and
                self.next_token_type_ in (Lexer.NAME, Lexer.CID)):
            glyph = self.expect_glyph_()
            return self.ast.GlyphName(self.cur_token_location_, glyph)
        if self.next_token_type_ is Lexer.GLYPHCLASS:
            self.advance_lexer_()
            gc = self.glyphclasses_.resolve(self.cur_token_)
            if gc is None:
                raise FeatureLibError(
                    "Unknown glyph class @%s" % self.cur_token_,
                    self.cur_token_location_)
            if isinstance(gc, self.ast.MarkClass):
                return self.ast.MarkClassName(self.cur_token_location_, gc)
            else:
                return self.ast.GlyphClassName(self.cur_token_location_, gc)
        self.expect_symbol_("[")
        location = self.cur_token_location_
        glyphs = self.ast.GlyphClass(location)
        while self.next_token_ != "]":
            if self.next_token_type_ is Lexer.NAME:
                glyph = self.expect_glyph_()
                location = self.cur_token_location_
                if '-' in glyph and glyph not in self.glyphNames_:
                    # a dash inside an unknown name is a glyph range
                    start, limit = self.split_glyph_range_(glyph, location)
                    glyphs.add_range(
                        start, limit,
                        self.make_glyph_range_(location, start, limit))
                elif self.next_token_ == "-":
                    start = glyph
                    self.expect_symbol_("-")
                    limit = self.expect_glyph_()
                    glyphs.add_range(
                        start, limit,
                        self.make_glyph_range_(location, start, limit))
                else:
                    glyphs.append(glyph)
            elif self.next_token_type_ is Lexer.CID:
                glyph = self.expect_glyph_()
                if self.next_token_ == "-":
                    range_location = self.cur_token_location_
                    range_start = self.cur_token_
                    self.expect_symbol_("-")
                    range_end = self.expect_cid_()
                    glyphs.add_cid_range(range_start, range_end,
                                         self.make_cid_range_(range_location,
                                                              range_start, range_end))
                else:
                    glyphs.append("cid%05d" % self.cur_token_)
            elif self.next_token_type_ is Lexer.GLYPHCLASS:
                self.advance_lexer_()
                gc = self.glyphclasses_.resolve(self.cur_token_)
                if gc is None:
                    raise FeatureLibError(
                        "Unknown glyph class @%s" % self.cur_token_,
                        self.cur_token_location_)
                # fix bug don't output class definition, just the name.
                if isinstance(gc, self.ast.MarkClass):
                    gcn = self.ast.MarkClassName(self.cur_token_location_, gc)
                else:
                    gcn = self.ast.GlyphClassName(self.cur_token_location_, gc)
                glyphs.add_class(gcn)
            else:
                raise FeatureLibError(
                    "Expected glyph name, glyph range, "
                    "or glyph class reference",
                    self.next_token_location_)
        self.expect_symbol_("]")
        return glyphs
| {
"repo_name": "moyogo/pysilfont",
"path": "lib/silfont/feaplus.py",
"copies": "1",
"size": "24464",
"license": "mit",
"hash": 4379677378010336000,
"line_mean": 43.7239488117,
"line_max": 133,
"alpha_frac": 0.565647482,
"autogenerated": false,
"ratio": 4.0296491517048265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095296633704827,
"avg_score": null,
"num_lines": null
} |
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
# VariationStore
def buildVarRegionAxis(axisSupport):
    """Build an ot.VarRegionAxis from a (start, peak, end) support triple."""
    axis = ot.VarRegionAxis()
    start, peak, end = axisSupport
    axis.StartCoord = float(start)
    axis.PeakCoord = float(peak)
    axis.EndCoord = float(end)
    return axis
def buildVarRegion(support, axisTags):
    """Build an ot.VarRegion covering every axis in axisTags.

    Axes absent from `support` get a null (0, 0, 0) support.
    """
    assert all(tag in axisTags for tag in support), ("Unknown axis tag found.", support, axisTags)
    region = ot.VarRegion()
    region.VarRegionAxis = [
        buildVarRegionAxis(support.get(tag, (0, 0, 0))) for tag in axisTags
    ]
    return region
def buildVarRegionList(supports, axisTags):
    """Build an ot.VarRegionList from an iterable of support dicts."""
    regionList = ot.VarRegionList()
    regionList.RegionAxisCount = len(axisTags)
    regionList.Region = [buildVarRegion(s, axisTags) for s in supports]
    regionList.RegionCount = len(regionList.Region)
    return regionList
def _reorderItem(lst, narrows, zeroes):
    """Reorder one row: wide (non-narrow) columns first, then the
    narrow-but-nonzero columns; all-zero narrow columns are dropped."""
    kept = [value for index, value in enumerate(lst) if index not in narrows]
    kept.extend(value for index, value in enumerate(lst)
                if index in narrows and index not in zeroes)
    return kept
def VarData_calculateNumShorts(self, optimize=False):
    """Set self.NumShorts from the delta values in self.Item.

    With optimize=True, additionally reorder columns so every 16-bit
    column precedes the 8-bit ones and drop all-zero byte columns,
    rewriting VarRegionIndex and every row in place.
    """
    regionCount = self.VarRegionCount
    rows = self.Item
    byteCols = set(range(regionCount))   # columns where every delta fits a signed byte
    zeroCols = set(range(regionCount))   # columns where every delta is zero
    for row in rows:
        wide = [i for i in byteCols if not (-128 <= row[i] <= 127)]
        byteCols.difference_update(wide)
        nonzero = [i for i in zeroCols if row[i]]
        zeroCols.difference_update(nonzero)
        if not byteCols and not zeroCols:
            break
    if optimize:
        # Reorder columns such that all SHORT columns come before UINT8
        self.VarRegionIndex = _reorderItem(self.VarRegionIndex, byteCols, zeroCols)
        self.VarRegionCount = len(self.VarRegionIndex)
        for i in range(len(rows)):
            rows[i] = _reorderItem(rows[i], byteCols, zeroCols)
        self.NumShorts = regionCount - len(byteCols)
    else:
        wideCols = set(range(regionCount)) - byteCols
        self.NumShorts = 1 + max(wideCols) if wideCols else 0
    self.VarRegionCount = len(self.VarRegionIndex)
    return self
# Attach as a method so compiled VarData tables can recompute their packing.
ot.VarData.calculateNumShorts = VarData_calculateNumShorts
def VarData_CalculateNumShorts(self, optimize=True):
    """Deprecated name for VarData_calculateNumShorts() which
    defaults to optimize=True. Use varData.calculateNumShorts()
    or varData.optimize()."""
    return VarData_calculateNumShorts(self, optimize)
def VarData_optimize(self):
    """Optimize the column packing of this VarData in place."""
    return VarData_calculateNumShorts(self, optimize=True)
# Expose as ot.VarData.optimize for compiled tables.
ot.VarData.optimize = VarData_optimize
def buildVarData(varRegionIndices, items, optimize=True):
    """Build an ot.VarData from region indices and delta rows.

    Each row in `items` must have one delta per region index; rows are
    copied.  NumShorts is computed (and columns optimized when asked).
    """
    varData = ot.VarData()
    varData.VarRegionIndex = list(varRegionIndices)
    regionCount = varData.VarRegionCount = len(varData.VarRegionIndex)
    rows = varData.Item = []
    for item in (items or ()):
        assert len(item) == regionCount
        rows.append(list(item))
    varData.ItemCount = len(rows)
    varData.calculateNumShorts(optimize=optimize)
    return varData
def buildVarStore(varRegionList, varDataList):
    """Build a format-1 ot.VarStore from a region list and VarData tables."""
    store = ot.VarStore()
    store.Format = 1
    store.VarRegionList = varRegionList
    store.VarData = list(varDataList)
    store.VarDataCount = len(store.VarData)
    return store
# Variation helpers
def buildVarIdxMap(varIdxes, glyphOrder):
    """Build an ot.VarIdxMap mapping each glyph name to its variation index."""
    idxMap = ot.VarIdxMap()
    idxMap.mapping = dict(zip(glyphOrder, varIdxes))
    return idxMap
def buildVarDevTable(varIdx):
    """Build a Device table that carries a variation index (DeltaFormat 0x8000)."""
    dev = ot.Device()
    dev.DeltaFormat = 0x8000
    # the 32-bit variation index is split across the two 16-bit size fields
    dev.StartSize = varIdx >> 16
    dev.EndSize = varIdx & 0xFFFF
    return dev
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/varLib/builder.py",
"copies": "4",
"size": "3312",
"license": "apache-2.0",
"hash": -4239802829874663400,
"line_mean": 28.0526315789,
"line_max": 102,
"alpha_frac": 0.746678744,
"autogenerated": false,
"ratio": 2.9206349206349205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5667313664634921,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.arrayTools import pairwise
from fontTools.pens.filterPen import ContourFilterPen
# Public API of this module.
__all__ = ["reversedContour", "ReverseContourPen"]
class ReverseContourPen(ContourFilterPen):
    """Filter pen that forwards outline data to another pen while flipping
    the winding direction of every contour.

    Components pass through unchanged, and a reversed closed contour keeps
    its original starting point.
    """
    def filterContour(self, contour):
        flipped = reversedContour(contour)
        return flipped
def reversedContour(contour):
    """ Generator that takes a list of pen's (operator, operands) tuples,
    and yields them with the winding direction reversed.

    NOTE: the input list is consumed/mutated in place (pop, del, item
    assignment); callers must not reuse it afterwards.
    """
    if not contour:
        return  # nothing to do, stop iteration
    # valid contours must have at least a starting and ending command,
    # can't have one without the other
    assert len(contour) > 1, "invalid contour"
    # the type of the last command determines if the contour is closed
    contourType = contour.pop()[0]
    assert contourType in ("endPath", "closePath")
    closed = contourType == "closePath"
    firstType, firstPts = contour.pop(0)
    assert firstType in ("moveTo", "qCurveTo"), (
        "invalid initial segment type: %r" % firstType)
    # the last operand of a segment is its on-curve target point
    firstOnCurve = firstPts[-1]
    if firstType == "qCurveTo":
        # special case for TrueType paths containing only off-curve points
        assert firstOnCurve is None, (
            "off-curve only paths must end with 'None'")
        assert not contour, (
            "only one qCurveTo allowed per off-curve path")
        # keep the first off-curve, reverse the rest, keep the trailing None
        firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) +
                    (None,))
    if not contour:
        # contour contains only one segment, nothing to reverse
        if firstType == "moveTo":
            closed = False  # single-point paths can't be closed
        else:
            closed = True  # off-curve paths are closed by definition
        yield firstType, firstPts
    else:
        lastType, lastPts = contour[-1]
        lastOnCurve = lastPts[-1]
        if closed:
            # for closed paths, we keep the starting point
            yield firstType, firstPts
            if firstOnCurve != lastOnCurve:
                # emit an implied line between the last and first points
                yield "lineTo", (lastOnCurve,)
                # retarget the final segment at the original start point
                contour[-1] = (lastType,
                               tuple(lastPts[:-1]) + (firstOnCurve,))
            if len(contour) > 1:
                secondType, secondPts = contour[0]
            else:
                # contour has only two points, the second and last are the same
                secondType, secondPts = lastType, lastPts
            # if a lineTo follows the initial moveTo, after reversing it
            # will be implied by the closePath, so we don't emit one;
            # unless the lineTo and moveTo overlap, in which case we keep the
            # duplicate points
            if secondType == "lineTo" and firstPts != secondPts:
                del contour[0]
                if contour:
                    contour[-1] = (lastType,
                                   tuple(lastPts[:-1]) + secondPts)
        else:
            # for open paths, the last point will become the first
            yield firstType, (lastOnCurve,)
            contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,))
        # we iterate over all segment pairs in reverse order, and yield
        # each one with the off-curve points reversed (if any), and
        # with the on-curve point of the following segment
        # (pairwise here is fontTools.misc.arrayTools.pairwise — presumably
        # it also pairs the final element back to the first so every
        # remaining segment is emitted; verify against its docs)
        for (curType, curPts), (_, nextPts) in pairwise(
                contour, reverse=True):
            yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],)
    yield "closePath" if closed else "endPath", ()
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/reverseContourPen.py",
"copies": "5",
"size": "3849",
"license": "apache-2.0",
"hash": -2068318858528209700,
"line_mean": 39.5157894737,
"line_max": 79,
"alpha_frac": 0.6084697324,
"autogenerated": false,
"ratio": 4.257743362831858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7366213095231858,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect
from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds
from fontTools.pens.basePen import BasePen
# Public API of this module.
__all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
    """Pen that computes the "control bounds" of a shape: the bounding box
    of every point, off-curve control points included. This may be larger
    than the true bounding box when a curve's extremum lies between its
    control points.

    After drawing, the result is in the pen's 'bounds' attribute as a
    (xMin, yMin, xMax, yMax) tuple, or None if nothing contributed.

    If 'ignoreSinglePoints' is True, isolated moveTo points are ignored.
    """
    def __init__(self, glyphSet, ignoreSinglePoints=False):
        BasePen.__init__(self, glyphSet)
        self.ignoreSinglePoints = ignoreSinglePoints
        self.init()

    def init(self):
        # reset state so the same pen can be reused
        self.bounds = None
        self._start = None

    def _moveTo(self, pt):
        # remember the start point; it is only folded into the bounds once
        # we know it matters (immediately, unless single points are ignored)
        self._start = pt
        if not self.ignoreSinglePoints:
            self._addMoveTo()

    def _addMoveTo(self):
        start = self._start
        if start is None:
            return
        if self.bounds:
            self.bounds = updateBounds(self.bounds, start)
        else:
            # first point seen: bounds collapse to that single point
            x, y = start
            self.bounds = (x, y, x, y)
        self._start = None

    def _lineTo(self, pt):
        self._addMoveTo()
        self.bounds = updateBounds(self.bounds, pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        box = self.bounds
        for point in (bcp1, bcp2, pt):
            box = updateBounds(box, point)
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        box = self.bounds
        for point in (bcp, pt):
            box = updateBounds(box, point)
        self.bounds = box
class BoundsPen(ControlBoundsPen):
    """Pen that computes the exact bounding box of a shape, correctly
    handling curves whose extrema fall between control points. Somewhat
    slower to compute than the "control bounds".

    After drawing, the result is in the pen's 'bounds' attribute:
    (xMin, yMin, xMax, yMax).
    """
    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        box = updateBounds(self.bounds, pt)
        if not (pointInRect(bcp1, box) and pointInRect(bcp2, box)):
            # a control point outside the box means the curve may bulge
            # past it; fall back to exact cubic extrema
            curveBox = calcCubicBounds(
                self._getCurrentPoint(), bcp1, bcp2, pt)
            box = unionRect(box, curveBox)
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        box = updateBounds(self.bounds, pt)
        if not pointInRect(bcp, box):
            curveBox = calcQuadraticBounds(
                self._getCurrentPoint(), bcp, pt)
            box = unionRect(box, curveBox)
        self.bounds = box
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py",
"copies": "5",
"size": "2714",
"license": "apache-2.0",
"hash": 4611838374350142000,
"line_mean": 27.2708333333,
"line_max": 75,
"alpha_frac": 0.7151805453,
"autogenerated": false,
"ratio": 3.1159586681974742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6331139213497474,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
otRound,
)
from fontTools.misc.textTools import safeEval
import array
from collections import Counter, defaultdict
import io
import logging
import struct
import sys
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
# TupleVariationHeader flag bits
EMBEDDED_PEAK_TUPLE = 0x8000
INTERMEDIATE_REGION = 0x4000
PRIVATE_POINT_NUMBERS = 0x2000
# packed-delta run header flags / run-count mask
DELTAS_ARE_ZERO = 0x80
DELTAS_ARE_WORDS = 0x40
DELTA_RUN_COUNT_MASK = 0x3f
# packed-point run header flag / run-count mask
POINTS_ARE_WORDS = 0x80
POINT_RUN_COUNT_MASK = 0x7f
# GlyphVariationData / 'cvar' header flags / masks
TUPLES_SHARE_POINT_NUMBERS = 0x8000
TUPLE_COUNT_MASK = 0x0fff
TUPLE_INDEX_MASK = 0x0fff
log = logging.getLogger(__name__)
class TupleVariation(object):
    def __init__(self, axes, coordinates):
        # axes: mapping of axis tag -> support (copied defensively).
        # coordinates: per-point deltas; entries may be (dx, dy) tuples,
        # ints, or None for points this variation does not touch.
        self.axes = axes.copy()
        self.coordinates = list(coordinates)
def __repr__(self):
axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]))
return "<TupleVariation %s %s>" % (axes, self.coordinates)
def __eq__(self, other):
return self.coordinates == other.coordinates and self.axes == other.axes
def getUsedPoints(self):
# Empty set means "all points used".
if None not in self.coordinates:
return frozenset()
used = frozenset([i for i,p in enumerate(self.coordinates) if p is not None])
# Return None if no points used.
return used if used else None
def hasImpact(self):
"""Returns True if this TupleVariation has any visible impact.
If the result is False, the TupleVariation can be omitted from the font
without making any visible difference.
"""
return any(c is not None for c in self.coordinates)
    def toXML(self, writer, axisTags):
        """Write this variation as a <tuple> XML element.

        axisTags gives the axis order; axes absent from self.axes are
        skipped entirely.
        """
        writer.begintag("tuple")
        writer.newline()
        for axis in axisTags:
            value = self.axes.get(axis)
            if value is not None:
                minValue, value, maxValue = value
                defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
                defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
                if minValue == defaultMinValue and maxValue == defaultMaxValue:
                    # min/max are implied by the peak value; omit them
                    writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
                else:
                    attrs = [
                        ("axis", axis),
                        ("min", fl2str(minValue, 14)),
                        ("value", fl2str(value, 14)),
                        ("max", fl2str(maxValue, 14)),
                    ]
                    writer.simpletag("coord", attrs)
                writer.newline()
        wrote_any_deltas = False
        for i, delta in enumerate(self.coordinates):
            if type(delta) == tuple and len(delta) == 2:
                # (dx, dy) point delta
                writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
                writer.newline()
                wrote_any_deltas = True
            elif type(delta) == int:
                # scalar CVT delta
                writer.simpletag("delta", cvt=i, value=delta)
                writer.newline()
                wrote_any_deltas = True
            elif delta is not None:
                # anything else (besides None) is malformed; log and annotate
                log.error("bad delta format")
                writer.comment("bad delta #%d" % i)
                writer.newline()
                wrote_any_deltas = True
        if not wrote_any_deltas:
            writer.comment("no deltas")
            writer.newline()
        writer.endtag("tuple")
        writer.newline()
def fromXML(self, name, attrs, _content):
if name == "coord":
axis = attrs["axis"]
value = str2fl(attrs["value"], 14)
defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
minValue = str2fl(attrs.get("min", defaultMinValue), 14)
maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
self.axes[axis] = (minValue, value, maxValue)
elif name == "delta":
if "pt" in attrs:
point = safeEval(attrs["pt"])
x = safeEval(attrs["x"])
y = safeEval(attrs["y"])
self.coordinates[point] = (x, y)
elif "cvt" in attrs:
cvt = safeEval(attrs["cvt"])
value = safeEval(attrs["value"])
self.coordinates[cvt] = value
else:
log.warning("bad delta format: %s" %
", ".join(sorted(attrs.keys())))
def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
assert set(self.axes.keys()) <= set(axisTags), ("Unknown axis tag found.", self.axes.keys(), axisTags)
tupleData = []
auxData = []
if pointData is None:
usedPoints = self.getUsedPoints()
if usedPoints is None: # Nothing to encode
return b'', b''
pointData = self.compilePoints(usedPoints)
coord = self.compileCoord(axisTags)
flags = sharedCoordIndices.get(coord)
if flags is None:
flags = EMBEDDED_PEAK_TUPLE
tupleData.append(coord)
intermediateCoord = self.compileIntermediateCoord(axisTags)
if intermediateCoord is not None:
flags |= INTERMEDIATE_REGION
tupleData.append(intermediateCoord)
# pointData of b'' implies "use shared points".
if pointData:
flags |= PRIVATE_POINT_NUMBERS
auxData.append(pointData)
auxData.append(self.compileDeltas())
auxData = b''.join(auxData)
tupleData.insert(0, struct.pack('>HH', len(auxData), flags))
return b''.join(tupleData), auxData
def compileCoord(self, axisTags):
result = bytearray()
axes = self.axes
for axis in axisTags:
triple = axes.get(axis)
if triple is None:
result.extend(b'\0\0')
else:
result.extend(struct.pack(">h", fl2fi(triple[1], 14)))
return bytes(result)
def compileIntermediateCoord(self, axisTags):
needed = False
for axis in axisTags:
minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
needed = True
break
if not needed:
return None
minCoords = bytearray()
maxCoords = bytearray()
for axis in axisTags:
minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
minCoords.extend(struct.pack(">h", fl2fi(minValue, 14)))
maxCoords.extend(struct.pack(">h", fl2fi(maxValue, 14)))
return minCoords + maxCoords
@staticmethod
def decompileCoord_(axisTags, data, offset):
coord = {}
pos = offset
for axis in axisTags:
coord[axis] = fi2fl(struct.unpack(">h", data[pos:pos+2])[0], 14)
pos += 2
return coord, pos
    @staticmethod
    def compilePoints(points):
        """Pack a set of point numbers into the binary packed-point format.

        An empty set means "all points in the glyph" and compiles to a
        single zero byte.  Returns a bytearray.
        """
        # If the set consists of all points in the glyph, it gets encoded with
        # a special encoding: a single zero byte.
        #
        # To use this optimization, points passed in must be empty set.
        # The following two lines are not strictly necessary as the main code
        # below would emit the same. But this is most common and faster.
        if not points:
            return b'\0'
        # In the 'gvar' table, the packing of point numbers is a little surprising.
        # It consists of multiple runs, each being a delta-encoded list of integers.
        # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
        # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
        # There are two types of runs, with values being either 8 or 16 bit unsigned
        # integers.
        points = list(points)
        points.sort()
        numPoints = len(points)
        result = bytearray()
        # The binary representation starts with the total number of points in the set,
        # encoded into one or two bytes depending on the value.
        if numPoints < 0x80:
            result.append(numPoints)
        else:
            result.append((numPoints >> 8) | 0x80)
            result.append(numPoints & 0xff)
        MAX_RUN_LENGTH = 127
        pos = 0
        lastValue = 0
        while pos < numPoints:
            runLength = 0
            # reserve one byte for the run header; patched once the run ends
            headerPos = len(result)
            result.append(0)
            useByteEncoding = None
            while pos < numPoints and runLength <= MAX_RUN_LENGTH:
                curValue = points[pos]
                delta = curValue - lastValue
                # first delta of the run decides byte vs word encoding
                if useByteEncoding is None:
                    useByteEncoding = 0 <= delta <= 0xff
                if useByteEncoding and (delta > 0xff or delta < 0):
                    # we need to start a new run (which will not use byte encoding)
                    break
                # TODO This never switches back to a byte-encoding from a short-encoding.
                # That's suboptimal.
                if useByteEncoding:
                    result.append(delta)
                else:
                    result.append(delta >> 8)
                    result.append(delta & 0xff)
                lastValue = curValue
                pos += 1
                runLength += 1
            if useByteEncoding:
                result[headerPos] = runLength - 1
            else:
                result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
        return result
@staticmethod
def decompilePoints_(numPoints, data, offset, tableTag):
    """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)

    Parse a packed point-number list from *data* starting at *offset*.
    *tableTag* is only used for the assertion and the warning message.
    """
    assert tableTag in ('cvar', 'gvar')
    pos = offset
    numPointsInData = data[pos]
    pos += 1
    # A set MSB means the count is stored in two bytes.
    if (numPointsInData & POINTS_ARE_WORDS) != 0:
        numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
        pos += 1
    # A count of zero is the special "all points in the glyph" encoding.
    if numPointsInData == 0:
        return (range(numPoints), pos)
    result = []
    while len(result) < numPointsInData:
        runHeader = data[pos]
        pos += 1
        numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
        point = 0  # (unused; kept as in the original)
        # Runs are either 16-bit ("H") or 8-bit ("B") unsigned deltas.
        if (runHeader & POINTS_ARE_WORDS) != 0:
            points = array.array("H")
            pointsSize = numPointsInRun * 2
        else:
            points = array.array("B")
            pointsSize = numPointsInRun
        points.frombytes(data[pos:pos+pointsSize])
        # Data is big-endian; swap on little-endian hosts (a no-op for "B").
        if sys.byteorder != "big": points.byteswap()
        assert len(points) == numPointsInRun
        pos += pointsSize
        result.extend(points)
    # Convert relative to absolute
    absolute = []
    current = 0
    for delta in result:
        current += delta
        absolute.append(current)
    result = absolute
    del absolute
    # Out-of-range points are reported but kept; callers filter them.
    badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
    if badPoints:
        log.warning("point %s out of range in '%s' table" %
                    (",".join(sorted(badPoints)), tableTag))
    return (result, pos)
def compileDeltas(self):
    """Serialize this variation's deltas (skipping inferred ``None``
    entries) into the packed delta format and return a bytearray."""
    xs = []
    ys = []
    if self.getCoordWidth() == 2:
        # gvar-style (x, y) deltas: split into two streams.
        for pt in self.coordinates:
            if pt is None:
                continue
            xs.append(pt[0])
            ys.append(pt[1])
    else:
        # cvar-style scalar deltas: single stream, ys stays empty.
        xs = [d for d in self.coordinates if d is not None]
    out = bytearray()
    self.compileDeltaValues_(xs, out)
    self.compileDeltaValues_(ys, out)
    return out
@staticmethod
def compileDeltaValues_(deltas, bytearr=None):
    """[value1, value2, value3, ...] --> bytearray

    Emits a sequence of runs. Each run starts with a byte-sized header
    whose 6 least significant bits (header & 0x3F) give the number of
    values in the run minus one, so run lengths are in [1..64]. If the
    header has bit 0x80 set, the whole run is zeros and no data follows.
    Otherwise the header is followed by the values, as signed bytes when
    bit 0x40 is clear and as signed 16-bit integers when it is set.
    """  # Explaining the format because the 'gvar' spec is hard to understand.
    if bytearr is None:
        bytearr = bytearray()
    i = 0
    total = len(deltas)
    # Each encoder consumes one maximal run and returns the next index.
    while i < total:
        value = deltas[i]
        if value == 0:
            encoder = TupleVariation.encodeDeltaRunAsZeroes_
        elif -128 <= value <= 127:
            encoder = TupleVariation.encodeDeltaRunAsBytes_
        else:
            encoder = TupleVariation.encodeDeltaRunAsWords_
        i = encoder(deltas, i, bytearr)
    return bytearr
@staticmethod
def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
    """Emit a zero-run starting at *offset*; return the index just past it.

    A zero-run is header-only: DELTAS_ARE_ZERO plus (length - 1), split
    into chunks of at most 64 values.
    """
    end = offset
    total = len(deltas)
    while end < total and deltas[end] == 0:
        end += 1
    remaining = end - offset
    while remaining >= 64:
        bytearr.append(DELTAS_ARE_ZERO | 63)
        remaining -= 64
    if remaining:
        bytearr.append(DELTAS_ARE_ZERO | (remaining - 1))
    return end
@staticmethod
def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
    """Emit a byte-sized run starting at *offset*; return the index just
    past the last value consumed."""
    end = offset
    total = len(deltas)
    while end < total:
        value = deltas[end]
        if value < -128 or value > 127:
            break
        # Within a byte-encoded run of deltas, a single zero is best
        # stored literally as a 0x00 value. However, if there are two or
        # more zeroes in a sequence, it is better to start a new run.
        # For example, the sequence of deltas [15, 15, 0, 15, 15]
        # becomes 6 bytes (04 0F 0F 00 0F 0F) when storing the zero
        # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) when starting
        # a new run.
        if value == 0 and end + 1 < total and deltas[end + 1] == 0:
            break
        end += 1
    remaining = end - offset
    # Emit full 64-value chunks, then the tail.
    while remaining >= 64:
        bytearr.append(63)
        bytearr.extend(array.array('b', deltas[offset:offset + 64]))
        offset += 64
        remaining -= 64
    if remaining:
        bytearr.append(remaining - 1)
        bytearr.extend(array.array('b', deltas[offset:end]))
    return end
@staticmethod
def encodeDeltaRunAsWords_(deltas, offset, bytearr):
    """Emit a 16-bit run starting at *offset*; return the index just past
    the last value consumed."""
    end = offset
    total = len(deltas)
    while end < total:
        value = deltas[end]
        # Within a word-encoded run of deltas, it is easiest to start a
        # new run (with a different encoding) whenever we encounter a
        # zero value. For example, the sequence [0x6666, 0, 0x7777]
        # needs 7 bytes when storing the zero literally
        # (42 66 66 00 00 77 77), and equally 7 bytes when starting a
        # new run (40 66 66 80 40 77 77).
        if value == 0:
            break
        # A single value in the range (-128..127) is kept in the word
        # run because that is more compact; two or more such values in a
        # row are cheaper as a fresh byte-encoded run. For example,
        # [0x6666, 2, 0x7777] becomes 7 bytes when storing the value
        # literally (42 66 66 00 02 77 77), but 8 bytes when starting a
        # new run (40 66 66 00 02 40 77 77).
        if (-128 <= value <= 127) and end + 1 < total and (-128 <= deltas[end + 1] <= 127):
            break
        end += 1
    remaining = end - offset
    while remaining >= 64:
        bytearr.append(DELTAS_ARE_WORDS | 63)
        chunk = array.array('h', deltas[offset:offset + 64])
        if sys.byteorder != "big":
            chunk.byteswap()  # stream is big-endian
        bytearr.extend(chunk)
        offset += 64
        remaining -= 64
    if remaining:
        bytearr.append(DELTAS_ARE_WORDS | (remaining - 1))
        chunk = array.array('h', deltas[offset:end])
        if sys.byteorder != "big":
            chunk.byteswap()
        bytearr.extend(chunk)
    return end
@staticmethod
def decompileDeltas_(numDeltas, data, offset):
    """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)

    Parse *numDeltas* packed delta values from *data* at *offset*.
    """
    values = []
    pos = offset
    while len(values) < numDeltas:
        header = data[pos]
        pos += 1
        runLength = (header & DELTA_RUN_COUNT_MASK) + 1
        # Zero-runs carry no payload.
        if header & DELTAS_ARE_ZERO:
            values.extend([0] * runLength)
            continue
        # Payload is signed 16-bit ("h") or signed 8-bit ("b") values.
        if header & DELTAS_ARE_WORDS:
            typecode, itemSize = "h", 2
        else:
            typecode, itemSize = "b", 1
        run = array.array(typecode)
        runSize = runLength * itemSize
        run.frombytes(data[pos:pos + runSize])
        if sys.byteorder != "big":
            run.byteswap()  # stream is big-endian
        assert len(run) == runLength
        pos += runSize
        values.extend(run)
    assert len(values) == numDeltas
    return (values, pos)
@staticmethod
def getTupleSize_(flags, axisCount):
    """Return the byte size of a TupleVariationHeader with *flags*:
    4 fixed bytes, plus optional embedded peak (2 bytes/axis) and
    intermediate start+end tuples (4 bytes/axis)."""
    peakSize = axisCount * 2 if (flags & EMBEDDED_PEAK_TUPLE) != 0 else 0
    regionSize = axisCount * 4 if (flags & INTERMEDIATE_REGION) != 0 else 0
    return 4 + peakSize + regionSize
def getCoordWidth(self):
    """ Return 2 if coordinates are (x, y) as in gvar, 1 if single values
    as in cvar, or 0 if empty.
    """
    # Only the first non-None delta is inspected; it determines the width.
    for delta in self.coordinates:
        if delta is None:
            continue
        if type(delta) in (int, float):
            return 1
        if type(delta) is tuple and len(delta) == 2:
            return 2
        raise TypeError(
            "invalid type of delta; expected (int or float) number, or "
            "Tuple[number, number]: %r" % delta
        )
    return 0  # empty or has no impact
def scaleDeltas(self, scalar):
    """Multiply every delta by *scalar* in place; ``None`` entries
    (inferred deltas) are preserved."""
    if scalar == 1.0:
        return  # no change
    if self.getCoordWidth() == 1:
        scale = lambda d: None if d is None else d * scalar
    else:
        scale = lambda d: None if d is None else (d[0] * scalar, d[1] * scalar)
    self.coordinates = [scale(d) for d in self.coordinates]
def roundDeltas(self):
    """Round every delta to an integer with otRound, in place;
    ``None`` entries are preserved."""
    if self.getCoordWidth() == 1:
        rounder = lambda d: None if d is None else otRound(d)
    else:
        rounder = lambda d: None if d is None else (otRound(d[0]), otRound(d[1]))
    self.coordinates = [rounder(d) for d in self.coordinates]
def calcInferredDeltas(self, origCoords, endPts):
    """Replace inferred (``None``) deltas with explicit values computed
    by IUP interpolation against *origCoords*/*endPts* (gvar only)."""
    from fontTools.varLib.iup import iup_delta
    if self.getCoordWidth() == 1:
        raise TypeError(
            "Only 'gvar' TupleVariation can have inferred deltas"
        )
    if None not in self.coordinates:
        return  # nothing to infer
    if len(self.coordinates) != len(origCoords):
        raise ValueError(
            "Expected len(origCoords) == %d; found %d"
            % (len(self.coordinates), len(origCoords))
        )
    self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
    # Try to shrink this variation by replacing explicit deltas with
    # IUP-inferable gaps (None); keep the original if that is not smaller.
    from fontTools.varLib.iup import iup_delta_optimize
    if None in self.coordinates:
        return  # already optimized
    deltaOpt = iup_delta_optimize(
        self.coordinates, origCoords, endPts, tolerance=tolerance
    )
    # Only worth comparing sizes when the optimizer actually dropped deltas.
    if None in deltaOpt:
        if isComposite and all(d is None for d in deltaOpt):
            # Fix for macOS composites
            # https://github.com/fonttools/fonttools/issues/1381
            deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
        # Use "optimized" version only if smaller...
        varOpt = TupleVariation(self.axes, deltaOpt)
        # Shouldn't matter that this is different from fvar...?
        axisTags = sorted(self.axes.keys())
        tupleData, auxData = self.compile(axisTags)
        unoptimizedLength = len(tupleData) + len(auxData)
        tupleData, auxData = varOpt.compile(axisTags)
        optimizedLength = len(tupleData) + len(auxData)
        if optimizedLength < unoptimizedLength:
            self.coordinates = varOpt.coordinates
def __iadd__(self, other):
    """In-place addition of another TupleVariation's deltas to this one."""
    if not isinstance(other, TupleVariation):
        return NotImplemented
    deltas1 = self.coordinates
    length = len(deltas1)
    deltas2 = other.coordinates
    if len(deltas2) != length:
        raise ValueError(
            "cannot sum TupleVariation deltas with different lengths"
        )
    # 'None' values have different meanings in gvar vs cvar TupleVariations:
    # within the gvar, when deltas are not provided explicitly for some points,
    # they need to be inferred; whereas for the 'cvar' table, if deltas are not
    # provided for some CVT values, then no adjustments are made (i.e. None == 0).
    # Thus, we cannot sum deltas for gvar TupleVariations if they contain
    # inferred deltas (the latter need to be computed first using
    # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
    # deltas as if they are zeros.
    if self.getCoordWidth() == 2:
        for i, d2 in zip(range(length), deltas2):
            d1 = deltas1[i]
            try:
                deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
            except TypeError:
                # d1 or d2 is None, i.e. an inferred gvar delta.
                raise ValueError(
                    "cannot sum gvar deltas with inferred points"
                )
    else:
        for i, d2 in zip(range(length), deltas2):
            d1 = deltas1[i]
            if d1 is not None and d2 is not None:
                deltas1[i] = d1 + d2
            elif d1 is None and d2 is not None:
                deltas1[i] = d2
            # elif d2 is None do nothing
    return self
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
    """Parse *sharedTupleCount* shared coordinate tuples from *data*
    starting at *offset* and return them as a list."""
    tuples = []
    for _ in range(sharedTupleCount):
        coord, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
        tuples.append(coord)
    return tuples
def compileSharedTuples(axisTags, variations,
                        MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1):
    """Return the compiled coordinate tuples (most frequent first) that
    occur more than once across *variations* and are thus worth sharing."""
    frequency = Counter(var.compileCoord(axisTags) for var in variations)
    return [coord
            for coord, count in frequency.most_common(MAX_NUM_SHARED_COORDS)
            if count > 1]
def compileTupleVariationStore(variations, pointCount,
                               axisTags, sharedTupleIndices,
                               useSharedPoints=True):
    # Serialize *variations* into (tupleVariationCount, tuples, data) as
    # stored in the 'gvar'/'cvar' tuple variation store.
    newVariations = []
    pointDatas = []
    # Compile all points and figure out sharing if desired
    sharedPoints = None
    # Collect, count, and compile point-sets for all variation sets
    pointSetCount = defaultdict(int)
    for v in variations:
        points = v.getUsedPoints()
        if points is None:  # Empty variations
            continue
        pointSetCount[points] += 1
        newVariations.append(v)
        pointDatas.append(points)
    variations = newVariations
    del newVariations
    if not variations:
        return (0, b"", b"")
    n = len(variations[0].coordinates)
    assert all(len(v.coordinates) == n for v in variations), "Variation sets have different sizes"
    compiledPoints = {pointSet: TupleVariation.compilePoints(pointSet)
                      for pointSet in pointSetCount}
    tupleVariationCount = len(variations)
    tuples = []
    data = []
    if useSharedPoints:
        # Find point-set which saves most bytes.
        def key(pn):
            pointSet = pn[0]
            count = pn[1]
            # Bytes saved = size of one encoding times the copies avoided.
            return len(compiledPoints[pointSet]) * (count - 1)
        sharedPoints = max(pointSetCount.items(), key=key)[0]
        data.append(compiledPoints[sharedPoints])
        tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
    # b'' implies "use shared points"
    # (when sharing is off, sharedPoints is None so every set compiles)
    pointDatas = [compiledPoints[points] if points != sharedPoints else b''
                  for points in pointDatas]
    for v, p in zip(variations, pointDatas):
        thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
        tuples.append(thisTuple)
        data.append(thisData)
    tuples = b''.join(tuples)
    data = b''.join(data)
    return tupleVariationCount, tuples, data
def decompileTupleVariationStore(tableTag, axisTags,
                                 tupleVariationCount, pointCount, sharedTuples,
                                 data, pos, dataPos):
    # Parse a complete tuple variation store: *pos* walks the headers,
    # *dataPos* walks the serialized points/deltas stream.
    numAxes = len(axisTags)
    result = []
    if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
        sharedPoints, dataPos = TupleVariation.decompilePoints_(
            pointCount, data, dataPos, tableTag)
    else:
        sharedPoints = []
    for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
        dataSize, flags = struct.unpack(">HH", data[pos:pos+4])
        tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
        tupleData = data[pos : pos + tupleSize]
        pointDeltaData = data[dataPos : dataPos + dataSize]
        result.append(decompileTupleVariation_(
            pointCount, sharedTuples, sharedPoints,
            tableTag, axisTags, tupleData, pointDeltaData))
        pos += tupleSize
        dataPos += dataSize
    return result
def decompileTupleVariation_(pointCount, sharedTuples, sharedPoints,
                             tableTag, axisTags, data, tupleData):
    # *data* is this tuple's TupleVariationHeader bytes; *tupleData* is
    # its slice of the serialized points/deltas stream.
    assert tableTag in ("cvar", "gvar"), tableTag
    flags = struct.unpack(">H", data[2:4])[0]
    pos = 4
    # Peak tuple is either embedded in the header or taken from the
    # shared tuple list by index.
    if (flags & EMBEDDED_PEAK_TUPLE) == 0:
        peak = sharedTuples[flags & TUPLE_INDEX_MASK]
    else:
        peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    if (flags & INTERMEDIATE_REGION) != 0:
        start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
        end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    else:
        start, end = inferRegion_(peak)
    # Keep only axes with a non-trivial region.
    axes = {}
    for axis in axisTags:
        region = start[axis], peak[axis], end[axis]
        if region != (0.0, 0.0, 0.0):
            axes[axis] = region
    pos = 0  # restart: now reading from tupleData
    if (flags & PRIVATE_POINT_NUMBERS) != 0:
        points, pos = TupleVariation.decompilePoints_(
            pointCount, tupleData, pos, tableTag)
    else:
        points = sharedPoints
    deltas = [None] * pointCount
    if tableTag == "cvar":
        # One scalar delta stream.
        deltas_cvt, pos = TupleVariation.decompileDeltas_(
            len(points), tupleData, pos)
        for p, delta in zip(points, deltas_cvt):
            if 0 <= p < pointCount:
                deltas[p] = delta
    elif tableTag == "gvar":
        # Two streams: all x deltas, then all y deltas.
        deltas_x, pos = TupleVariation.decompileDeltas_(
            len(points), tupleData, pos)
        deltas_y, pos = TupleVariation.decompileDeltas_(
            len(points), tupleData, pos)
        for p, x, y in zip(points, deltas_x, deltas_y):
            if 0 <= p < pointCount:
                deltas[p] = (x, y)
    return TupleVariation(axes, deltas)
def inferRegion_(peak):
    """Infer start and end for a (non-intermediate) region.

    Computes the applicability region for variation tuples whose
    INTERMEDIATE_REGION flag is not set in the TupleVariationHeader
    structure. Variation tuples apply only to certain regions of the
    variation space; outside that region, the tuple has no effect. To
    make the binary encoding more compact, TupleVariationHeaders can
    omit the intermediateStartTuple and intermediateEndTuple fields.
    """
    # Per axis: start is the negative side of the peak (or 0), end the
    # positive side. E.g. peak -0.3 -> (-0.3, 0.0); peak 0.7 -> (0.0, 0.7).
    start = {axis: min(value, 0.0) for axis, value in peak.items()}
    end = {axis: max(value, 0.0) for axis, value in peak.items()}
    return (start, end)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/TupleVariation.py",
"copies": "3",
"size": "24095",
"license": "apache-2.0",
"hash": 4583951698497547300,
"line_mean": 30.4146023468,
"line_max": 104,
"alpha_frac": 0.6764058933,
"autogenerated": false,
"ratio": 3.063572790845518,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5239978684145519,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import bezierTools
from defcon.objects.base import BaseObject
from defcon.tools import bezierMath
class Contour(BaseObject):
    """
    This object represents a contour and it contains a list of points.

    **This object posts the following notifications:**

    ===============  ====
    Name             Note
    ===============  ====
    Contour.Changed  Posted when the *dirty* attribute is set.
    ===============  ====

    The Contour object has list like behavior. This behavior allows you to interact
    with point data directly. For example, to get a particular point::

        point = contour[0]

    To iterate over all points::

        for point in contour:

    To get the number of points::

        pointCount = len(contour)

    To interact with components or anchors in a similar way,
    use the ``components`` and ``anchors`` attributes.

    NOTE(review): this class uses Python 2 only syntax (tuple parameters
    in ``move``, ``pointInside`` and ``addPoint``, plus an implicit
    relative import of the ``point`` module); it will not compile under
    Python 3.
    """

    changeNotificationName = "Contour.Changed"

    def __init__(self, pointClass=None):
        super(Contour, self).__init__()
        # Internal point storage plus lazily computed caches; the caches
        # are invalidated whenever the point list changes.
        self._points = []
        self._boundsCache = None
        self._controlPointBoundsCache = None
        self._clockwiseCache = None
        if pointClass is None:
            # Python 2 implicit relative import of the sibling point module.
            from point import Point
            pointClass = Point
        self._pointClass = pointClass

    def _destroyBoundsCache(self):
        # Invalidate both bounds caches; they are recomputed on next access.
        self._boundsCache = None
        self._controlPointBoundsCache = None

    # ----------
    # Attributes
    # ----------

    def _get_bounds(self):
        from robofab.pens.boundsPen import BoundsPen
        if self._boundsCache is None:
            pen = BoundsPen(None)
            self.draw(pen)
            self._boundsCache = pen.bounds
        return self._boundsCache

    bounds = property(_get_bounds, doc="The bounds of the contour's outline expressed as a tuple of form (xMin, yMin, xMax, yMax).")

    def _get_controlPointBounds(self):
        from fontTools.pens.boundsPen import ControlBoundsPen
        if self._controlPointBoundsCache is None:
            pen = ControlBoundsPen(None)
            self.draw(pen)
            self._controlPointBoundsCache = pen.bounds
        return self._controlPointBoundsCache

    controlPointBounds = property(_get_controlPointBounds, doc="The control bounds of all points in the contour. This only measures the point positions, it does not measure curves. So, curves without points at the extrema will not be properly measured.")

    def _get_clockwise(self):
        from defcon.pens.clockwiseTestPointPen import ClockwiseTestPointPen
        if self._clockwiseCache is None:
            pen = ClockwiseTestPointPen()
            self.drawPoints(pen)
            self._clockwiseCache = pen.getIsClockwise()
        return self._clockwiseCache

    def _set_clockwise(self, value):
        # Reversing the contour flips its direction; only act on change.
        if self.clockwise != value:
            self.reverse()
            self._clockwiseCache = None

    clockwise = property(_get_clockwise, _set_clockwise, doc="A boolean representing if the contour has a clockwise direction. Setting this posts a *Contour.Changed* notification.")

    def _get_open(self):
        # A contour whose first point is a "move" is an open contour.
        if not self._points:
            return True
        return self._points[0].segmentType == 'move'

    open = property(_get_open, doc="A boolean indicating if the contour is open or not.")

    def _get_onCurvePoints(self):
        return [point for point in self._points if point.segmentType]

    onCurvePoints = property(_get_onCurvePoints, doc="A list of all on curve points in the contour.")

    def _get_segments(self):
        if not len(self._points):
            return []
        # Group points into segments; each on-curve point closes a segment.
        segments = [[]]
        lastWasOffCurve = False
        for point in self._points:
            segments[-1].append(point)
            if point.segmentType is not None:
                segments.append([])
            lastWasOffCurve = point.segmentType is None
        if len(segments[-1]) == 0:
            del segments[-1]
        if lastWasOffCurve:
            # Trailing off-curves wrap around to the first on-curve point.
            segment = segments.pop(-1)
            assert len(segments[0]) == 1
            segment.append(segments[0][0])
            del segments[0]
            segments.append(segment)
        elif segments[0][-1].segmentType != "move":
            # Closed contour: rotate so the starting on-curve ends the list.
            segment = segments.pop(0)
            segments.append(segment)
        return segments

    segments = property(_get_segments, doc="A list of all points in the contour organized into segments.")

    # -------
    # Methods
    # -------

    def __len__(self):
        return len(self._points)

    def __getitem__(self, index):
        # NOTE(review): this guard looks off by one (index == len falls
        # through), but the list lookup below raises IndexError for that
        # case anyway, so out-of-range access still fails.
        if index > len(self._points):
            raise IndexError
        return self._points[index]

    def __iter__(self):
        pointCount = len(self)
        index = 0
        while index < pointCount:
            point = self[index]
            yield point
            index += 1

    def clear(self):
        """
        Clear the contents of the contour.

        This posts a *Contour.Changed* notification.
        """
        self._clear()

    def _clear(self, postNotification=True):
        # clear the internal storage
        self._points = []
        # reset the clockwise cache
        self._clockwiseCache = None
        # post a dirty notification
        if postNotification:
            self.dirty = True

    def reverse(self):
        """
        Reverse the direction of the contour.

        This posts a *Contour.Changed* notification.
        """
        from robofab.pens.reverseContourPointPen import ReverseContourPointPen
        # put the current points in another contour
        otherContour = self.__class__(self._pointClass)
        # draw the points in this contour through
        # the reversing pen.
        reversePen = ReverseContourPointPen(otherContour)
        self.drawPoints(reversePen)
        # clear the points in this contour
        # and copy the points from the other
        # contour to this contour.
        self._clear(postNotification=False)
        self._points = list(otherContour._points)
        # post a notification
        self.dirty = True

    def move(self, (x, y)):
        """
        Move all points in the contour by **(x, y)**.

        This posts a *Contour.Changed* notification.
        """
        for point in self._points:
            point.move((x, y))
        # update the bounds cache by translation instead of recomputing
        if self._boundsCache:
            xMin, yMin, xMax, yMax = self._boundsCache
            xMin += x
            yMin += y
            xMax += x
            yMax += y
            self._boundsCache = (xMin, yMin, xMax, yMax)
        if self._controlPointBoundsCache:
            xMin, yMin, xMax, yMax = self._controlPointBoundsCache
            xMin += x
            yMin += y
            xMax += x
            yMax += y
            self._controlPointBoundsCache = (xMin, yMin, xMax, yMax)
        self.dirty = True

    def pointInside(self, (x, y), evenOdd=False):
        """
        Returns a boolean indicating if **(x, y)** is in the
        "black" area of the contour.
        """
        from fontTools.pens.pointInsidePen import PointInsidePen
        pen = PointInsidePen(glyphSet=None, testPoint=(x, y), evenOdd=evenOdd)
        self.draw(pen)
        return pen.getResult()

    def index(self, point):
        """
        Get the index for **point**.
        """
        return self._points.index(point)

    def setStartPoint(self, index):
        """
        Set the point at **index** as the first point in the contour.
        This point must be an on-curve point.

        This posts a *Contour.Changed* notification.
        """
        onCurvePoints = self.onCurvePoints
        if len(onCurvePoints) < 2:
            # rotating a one-segment contour would be a no-op
            return
        if self.open:
            # open contours may not be rotated
            return
        point = self._points[index]
        assert point.segmentType is not None, "index must represent an on curve point"
        # rotate the point list so that *index* comes first
        before = self._points[:index]
        self._points = self._points[index:] + before
        self.dirty = True

    def positionForProspectivePointInsertionAtSegmentAndT(self, segmentIndex, t):
        """
        Get the precise coordinates and a boolean indicating
        if the point will be smooth for the given **segmentIndex**
        and **t**.
        """
        return self._splitAndInsertAtSegmentAndT(segmentIndex, t, False)

    def splitAndInsertPointAtSegmentAndT(self, segmentIndex, t):
        """
        Insert a point into the contour for the given
        **segmentIndex** and **t**.

        This posts a *Contour.Changed* notification.
        """
        self._splitAndInsertAtSegmentAndT(segmentIndex, t, True)

    def _splitAndInsertAtSegmentAndT(self, segmentIndex, t, insert):
        # Shared implementation for the two public split methods; when
        # *insert* is False only the prospective position is computed.
        segments = self.segments
        segment = segments[segmentIndex]
        # prepend the previous segment's on-curve point as the start point
        segment.insert(0, segments[segmentIndex-1][-1])
        firstPoint = segment[0]
        lastPoint = segment[-1]
        segmentType = lastPoint.segmentType
        segment = [(point.x, point.y) for point in segment]
        if segmentType == "line":
            # linear interpolation along the line
            (x1, y1), (x2, y2) = segment
            x = x1 + (x2 - x1) * t
            y = y1 + (y2 - y1) * t
            pointsToInsert = [((x, y), "line", False)]
            insertionPoint = (x, y)
            pointWillBeSmooth = False
        elif segmentType == "curve":
            # split the cubic at t; the new on-curve point is smooth
            pt1, pt2, pt3, pt4 = segment
            (pt1, pt2, pt3, pt4), (pt5, pt6, pt7, pt8) = bezierTools.splitCubicAtT(pt1, pt2, pt3, pt4, t)
            pointsToInsert = [(pt2, None, False), (pt3, None, False), (pt4, "curve", True), (pt6, None, False), (pt7, None, False)]
            insertionPoint = tuple(pt4)
            pointWillBeSmooth = True
        else:
            # XXX could be a quad. in that case, we could handle it.
            raise NotImplementedError("unknown segment type: %s" % segmentType)
        if insert:
            firstPointIndex = self._points.index(firstPoint)
            lastPointIndex = self._points.index(lastPoint)
            firstPoints = self._points[:firstPointIndex + 1]
            if firstPointIndex == len(self._points) - 1:
                firstPoints = firstPoints[lastPointIndex:]
                lastPoints = []
            elif lastPointIndex == 0:
                lastPoints = []
            else:
                lastPoints = self._points[lastPointIndex:]
            newPoints = [self._pointClass(pos, segmentType=segmentType, smooth=smooth) for pos, segmentType, smooth in pointsToInsert]
            self._points = firstPoints + newPoints + lastPoints
            self.dirty = True
        return insertionPoint, pointWillBeSmooth

    def removeSegment(self, segmentIndex, preserveCurve=False):
        """
        Remove the segment at **segmentIndex**. If
        **preserveCurve** is True, the contour will
        try to preserve the overall curve shape.
        """
        segments = self.segments
        nextIndex = segmentIndex + 1
        if nextIndex == len(segments):
            nextIndex = 0
        previousIndex = segmentIndex - 1
        if previousIndex < 0:
            previousIndex = len(segments) + previousIndex
        nextSegment = segments[nextIndex]
        segment = segments[segmentIndex]
        previousSegment = segments[previousIndex]
        # if preserveCurve is off
        # or if all are lines, handle it
        if not preserveCurve or (previousSegment[-1].segmentType == "line"\
            and segment[-1].segmentType == "line"\
            and nextSegment[-1].segmentType == "line"):
            for point in segment:
                self._points.remove(point)
        # if have a curve, do the preservation
        else:
            # gather the needed points
            previousOnCurveX = previousSegment[-1].x
            previousOnCurveY = previousSegment[-1].y
            onCurveX = segment[-1].x
            onCurveY = segment[-1].y
            nextOnCurveX = nextSegment[-1].x
            nextOnCurveY = nextSegment[-1].y
            if segment[-1].segmentType == "curve":
                offCurve1X = segment[0].x
                offCurve1Y = segment[0].y
                offCurve2X = segment[-2].x
                offCurve2Y = segment[-2].y
            elif segment[-1].segmentType == "line":
                # treat the line as a degenerate curve
                offCurve1X = previousOnCurveX
                offCurve1Y = previousOnCurveY
                offCurve2X = onCurveX
                offCurve2Y = onCurveY
            else:
                # XXX could be a quad. in that case, we can't handle it.
                raise NotImplementedError("unknown segment type: %s" % segment[-1].segmentType)
            if nextSegment[-1].segmentType == "curve":
                nextOffCurve1X = nextSegment[0].x
                nextOffCurve1Y = nextSegment[0].y
                nextOffCurve2X = nextSegment[-2].x
                nextOffCurve2Y = nextSegment[-2].y
            elif nextSegment[-1].segmentType == "line":
                nextOffCurve1X = onCurveX
                nextOffCurve1Y = onCurveY
                nextOffCurve2X = nextOnCurveX
                nextOffCurve2Y = nextOnCurveY
            else:
                # XXX could be a quad. in that case, we can't handle it.
                raise NotImplementedError("unknown segment type: %s" % nextSegment[-1].segmentType)
            # now do the math
            result = bezierMath.joinSegments((previousOnCurveX, previousOnCurveY),
                (offCurve1X, offCurve1Y), (offCurve2X, offCurve2Y), (onCurveX, onCurveY),
                (nextOffCurve1X, nextOffCurve1Y), (nextOffCurve2X, nextOffCurve2Y), (nextOnCurveX, nextOnCurveY))
            # remove the segment
            for point in segment:
                self._points.remove(point)
            # if the next segment type isn't a curve, make it one
            if not nextSegment[-1].segmentType == "curve":
                nextSegment[-1].segmentType = "curve"
                pointIndex = self._points.index(nextSegment[-1])
                newPoints = [self._pointClass((result[0][0], result[0][1])), self._pointClass((result[1][0], result[1][1]))]
                if pointIndex == 0:
                    self._points.extend(newPoints)
                else:
                    self._points = self._points[:pointIndex] + newPoints + self._points[pointIndex:]
            # otherwise, set the point positions
            else:
                nextSegment[0].x = result[0][0]
                nextSegment[0].y = result[0][1]
                nextSegment[1].x = result[1][0]
                nextSegment[1].y = result[1][1]
        # mark the contour as dirty
        self._destroyBoundsCache()
        self.dirty = True

    # -----------
    # Pen methods
    # -----------

    def beginPath(self):
        """
        Standard point pen *beginPath* method.
        This should not be used externally.
        """
        pass

    def endPath(self):
        """
        Standard point pen *endPath* method.
        This should not be used externally.
        """
        pass

    def addPoint(self, (x, y), segmentType=None, smooth=False, name=None):
        """
        Standard point pen *addPoint* method.
        This should not be used externally.
        """
        point = self._pointClass((x, y), segmentType=segmentType, smooth=smooth, name=name)
        self._addPoint(point)

    def _addPoint(self, point):
        # Append *point* and invalidate caches that depend on the list.
        self._points.append(point)
        self._destroyBoundsCache()
        self._clockwiseCache = None
        self.dirty = True

    def draw(self, pen):
        """
        Draw the contour with **pen**.
        """
        from robofab.pens.adapterPens import PointToSegmentPen
        pointPen = PointToSegmentPen(pen)
        self.drawPoints(pointPen)

    def drawPoints(self, pointPen):
        """
        Draw the contour with **pointPen**.
        """
        pointPen.beginPath()
        for point in self._points:
            pointPen.addPoint((point.x, point.y), segmentType=point.segmentType, smooth=point.smooth, name=point.name)
        pointPen.endPath()

    # ----
    # Undo
    # ----

    def getDataToSerializeForUndo(self):
        # Serialize the point list into plain data for the undo manager.
        data = dict(
            points=[point.getDataToSerializeForUndo() for point in self._points]
        )
        return data

    def loadDeserializedDataFromUndo(self, data):
        # Rebuild the point list from getDataToSerializeForUndo output.
        for pointData in data["points"]:
            point = self._pointClass((0, 0))
            point.loadDeserializedDataFromUndo(pointData)
            self._addPoint(point)
# -----
# Tests
# -----
# Doctest: Contour.bounds computation.
def _testBounds():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.bounds
    (0, 0, 700, 700)
    """
# Doctest: Contour.controlPointBounds computation.
def _testControlPointBounds():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.controlPointBounds
    (0, 0, 700, 700)
    """
# Doctest: Contour.clockwise get/set behavior.
def _testClockwise():
    """
    # get
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.clockwise
    False
    >>> contour = font['A'][1]
    >>> contour.clockwise
    True
    >>> contour._clockwiseCache = None
    >>> contour.clockwise = False
    >>> contour.clockwise
    False
    >>> contour._clockwiseCache = None
    >>> contour.clockwise = True
    >>> contour.clockwise
    True

    # set
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.clockwise = False
    >>> contour.clockwise
    False
    >>> contour._clockwiseCache = None
    >>> contour.clockwise = True
    >>> contour.clockwise
    True
    """
# Doctest: Contour.open for open and closed contours.
def _testOpen():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath('TestOpenContour.ufo'))
    >>> glyph = font['A']
    >>> glyph[0].open
    True
    >>> glyph[1].open
    False
    >>> glyph[2].open
    True
    >>> glyph[3].open
    False
    """
# Doctest: Contour.onCurvePoints listing.
def _testOnCurvePoints():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> glyph = font['A']
    >>> contour = glyph[0]
    >>> len(contour.onCurvePoints)
    4
    >>> [(point.x, point.y) for point in contour.onCurvePoints]
    [(0, 0), (700, 0), (700, 700), (0, 700)]

    >>> glyph = font['B']
    >>> contour = glyph[0]
    >>> len(contour.onCurvePoints)
    4
    >>> [(point.x, point.y) for point in contour.onCurvePoints]
    [(0, 350), (350, 0), (700, 350), (350, 700)]
    """
# Doctest: Contour.segments grouping for line and curve contours.
def _testSegments():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> def simpleSegment(segment):
    ...     return [(i.x, i.y, i.segmentType) for i in segment]
    >>> font = Font(getTestFontPath())
    >>> glyph = font['A']
    >>> contour = glyph[0]
    >>> [simpleSegment(segment) for segment in contour.segments]
    [[(700, 0, 'line')], [(700, 700, 'line')], [(0, 700, 'line')], [(0, 0, 'line')]]
    >>> glyph = font['B']
    >>> contour = glyph[0]
    >>> [simpleSegment(segment) for segment in contour.segments]
    [[(0, 157, None), (157, 0, None), (350, 0, 'curve')], [(543, 0, None), (700, 157, None), (700, 350, 'curve')], [(700, 543, None), (543, 700, None), (350, 700, 'curve')], [(157, 700, None), (0, 543, None), (0, 350, 'curve')]]
    """
# Doctest: Contour.__len__.
def _testLen():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> len(contour)
    4

    >>> contour = font['B'][0]
    >>> len(contour)
    12
    """
# Doctest: Contour.__iter__ over points.
def _testIter():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> [(point.x, point.y) for point in contour]
    [(0, 0), (700, 0), (700, 700), (0, 700)]
    """
# Doctest: Contour.reverse is its own inverse.
def _testReverse():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.reverse()
    >>> [(point.x, point.y) for point in contour._points]
    [(0, 0), (0, 700), (700, 700), (700, 0)]
    >>> contour.reverse()
    >>> [(point.x, point.y) for point in contour._points]
    [(0, 0), (700, 0), (700, 700), (0, 700)]
    """
# Doctest: Contour.move translates points and the bounds cache.
def _testMove():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.move((100, 100))
    >>> contour.bounds
    (100, 100, 800, 800)
    >>> contour.dirty = True
    """
def _testPointInside():
    # Doctest container for Contour.pointInside hit-testing; run via
    # doctest.testmod() in __main__.
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.pointInside((100, 100))
    True
    >>> contour.pointInside((0, 0))
    False
    >>> contour.pointInside((-100, -100))
    False
    """
def _testIndex():
    # Doctest container for Contour.index (position of a point object);
    # run via doctest.testmod() in __main__.
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['B'][0]
    >>> 2 == contour.index(contour[2])
    True
    """
def _testSetStartPoint():
    # Doctest container for Contour.setStartPoint: rotating by offsets
    # that sum to the point count must restore the original order.
    # Run via doctest.testmod() in __main__.
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['B'][0]
    >>> start = [(point.segmentType, point.x, point.y) for point in contour]
    >>> contour.setStartPoint(6)
    >>> contour.dirty
    True
    >>> contour.setStartPoint(6)
    >>> end = [(point.segmentType, point.x, point.y) for point in contour]
    >>> start == end
    True
    >>> contour = font['A'][0]
    >>> start = [(point.segmentType, point.x, point.y) for point in contour]
    >>> contour.setStartPoint(2)
    >>> contour.setStartPoint(2)
    >>> end = [(point.segmentType, point.x, point.y) for point in contour]
    >>> start == end
    True
    >>> contour = font['B'][0]
    >>> start = [(point.segmentType, point.x, point.y) for point in contour]
    >>> contour.setStartPoint(3)
    >>> contour.setStartPoint(9)
    >>> end = [(point.segmentType, point.x, point.y) for point in contour]
    >>> start == end
    True
    """
def _testPositionForProspectivePointInsertionAtSegmentAndT():
    # Doctest container: the method returns ((x, y), isCurve) for a split
    # at parameter t on a segment. Run via doctest.testmod() in __main__.
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.positionForProspectivePointInsertionAtSegmentAndT(0, .5)
    ((350.0, 0.0), False)
    >>> contour = font['B'][0]
    >>> contour.positionForProspectivePointInsertionAtSegmentAndT(0, .5)
    ((102.625, 102.625), True)
    >>> contour = font['B'][1]
    >>> contour.positionForProspectivePointInsertionAtSegmentAndT(0, .5)
    ((226.125, 473.5), True)
    """
def _testSplitAndInsertPointAtSegmentAndT():
    # Doctest container for in-place segment splitting; run via
    # doctest.testmod() in __main__.
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath())
    >>> contour = font['A'][0]
    >>> contour.splitAndInsertPointAtSegmentAndT(0, .5)
    >>> [(point.x, point.y, point.segmentType) for point in contour]
    [(0, 0, 'line'), (350.0, 0.0, 'line'), (700, 0, 'line'), (700, 700, 'line'), (0, 700, 'line')]
    >>> contour = font['B'][0]
    >>> contour.splitAndInsertPointAtSegmentAndT(0, .5)
    >>> [(point.x, point.y, point.segmentType) for point in contour]
    [(0, 350, 'curve'), (0.0, 253.5, None), (39.25, 166.0, None), (102.625, 102.625, 'curve'), (166.0, 39.25, None), (253.5, 0.0, None), (350, 0, 'curve'), (543, 0, None), (700, 157, None), (700, 350, 'curve'), (700, 543, None), (543, 700, None), (350, 700, 'curve'), (157, 700, None), (0, 543, None)]
    """
def _testRemoveSegment():
"""
>>> print "need removeSegment tests!"
"""
if __name__ == "__main__":
    # Run every doctest defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| {
"repo_name": "typemytype/defcon",
"path": "Lib/defcon/objects/contour.py",
"copies": "1",
"size": "24339",
"license": "mit",
"hash": -3128776706674234400,
"line_mean": 33.4257425743,
"line_max": 301,
"alpha_frac": 0.5789062821,
"autogenerated": false,
"ratio": 3.8596574690770695,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49385637511770697,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
from fontTools.varLib.varStore import VarStoreInstancer
def _add_method(*clazzes):
"""Returns a decorator function that adds a new method to one or
more classes."""
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done: continue # Support multiple names of a clazz
done.append(clazz)
assert clazz.__name__ != 'DefaultTable', \
'Oops, table class not found.'
assert not hasattr(clazz, method.__name__), \
"Oops, class '%s' has method '%s'." % (clazz.__name__,
method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper
def _uniq_sort(l):
return sorted(set(l))
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that records, into a caller-supplied set, any glyphs
    referenced through seac-style 'endchar' accent composition."""

    def __init__(self, components, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
        self.components = components

    def op_endchar(self, index):
        operands = self.popall()
        if len(operands) < 4:
            return
        from fontTools.encodings.StandardEncoding import StandardEncoding
        # endchar can do seac accent building; the T2 spec says it's
        # deprecated, but recent software that shall remain nameless
        # does output it. Operands: adx ady bchar achar.
        adx, ady, bchar, achar = operands[-4:]
        self.components.add(StandardEncoding[bchar])
        self.components.add(StandardEncoding[achar])
@_add_method(ttLib.getTableClass('CFF '))
def closure_glyphs(self, s):
    """Grow s.glyphs (the subsetter's glyph set) with any glyphs reached
    transitively through seac-style accent composition in the CFF
    charstrings. Iterates until no new component glyphs are found."""
    cff = self.cff
    assert len(cff) == 1
    font = cff[cff.keys()[0]]
    glyphSet = font.CharStrings
    decompose = s.glyphs
    while decompose:
        components = set()
        for g in decompose:
            if g not in glyphSet:
                continue
            gl = glyphSet[g]
            subrs = getattr(gl.private, "Subrs", [])
            # The decompiler adds seac base/accent glyph names to
            # `components` as it executes the charstring.
            decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs)
            decompiler.execute(gl)
        # Only glyphs not already in the set need decomposing next round.
        components -= s.glyphs
        s.glyphs.update(components)
        decompose = components
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
    """Replace glyphName's charstring in `font` with an empty outline,
    keeping the advance width encoded unless isCFF2 or ignoreWidth."""
    c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
    if isCFF2 or ignoreWidth:
        # CFF2 charstrings have no widths nor 'endchar' operators
        c.decompile()
        c.program = [] if isCFF2 else ['endchar']
    else:
        if hasattr(font, 'FDArray') and font.FDArray is not None:
            private = font.FDArray[fdSelectIndex].Private
        else:
            private = font.Private
        dfltWdX = private.defaultWidthX
        nmnlWdX = private.nominalWidthX
        pen = NullPen()
        c.draw(pen) # this will set the charstring's width
        if c.width != dfltWdX:
            # Non-default widths are encoded relative to nominalWidthX as
            # the leading operand of the charstring.
            c.program = [c.width - nmnlWdX, 'endchar']
        else:
            c.program = ['endchar']
@_add_method(ttLib.getTableClass('CFF '))
def prune_pre_subset(self, font, options):
    """Subsetter hook run before glyph subsetting: keep a single font in
    the CFF, optionally empty .notdef's outline, and reset the Encoding.
    Returns True to keep the table."""
    cff = self.cff
    # CFF table must have one font only
    cff.fontNames = cff.fontNames[:1]
    if options.notdef_glyph and not options.notdef_outline:
        isCFF2 = cff.major > 1
        for fontname in cff.keys():
            font = cff[fontname]
            _empty_charstring(font, ".notdef", isCFF2=isCFF2)
    # Clear useless Encoding
    for fontname in cff.keys():
        font = cff[fontname]
        # https://github.com/fonttools/fonttools/issues/620
        font.Encoding = "StandardEncoding"
    return True # bool(cff.fontNames)
@_add_method(ttLib.getTableClass('CFF '))
def subset_glyphs(self, s):
    """Restrict the CFF to s.glyphs plus s.glyphs_emptied: prune the
    charstrings index, charset, and (for CID fonts) the FDSelect mapping.
    With retain_gids, glyphs in s.glyphs_emptied keep their slot but get
    empty outlines. Returns True to keep the table."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        glyphs = s.glyphs.union(s.glyphs_emptied)
        # Load all glyphs
        for g in font.charset:
            if g not in glyphs: continue
            c, _ = cs.getItemAndSelector(g)
        if cs.charStringsAreIndexed:
            indices = [i for i,g in enumerate(font.charset) if g in glyphs]
            csi = cs.charStringsIndex
            csi.items = [csi.items[i] for i in indices]
            # Drop the lazy-loading state; items are now in memory.
            del csi.file, csi.offsets
            if hasattr(font, "FDSelect"):
                sel = font.FDSelect
                # XXX We want to set sel.format to None, such that the
                # most compact format is selected. However, OTS was
                # broken and couldn't parse a FDSelect format 0 that
                # happened before CharStrings. As such, always force
                # format 3 until we fix cffLib to always generate
                # FDSelect after CharStrings.
                # https://github.com/khaledhosny/ots/pull/31
                #sel.format = None
                sel.format = 3
                sel.gidArray = [sel.gidArray[i] for i in indices]
            cs.charStrings = {g:indices.index(v)
                              for g,v in cs.charStrings.items()
                              if g in glyphs}
        else:
            cs.charStrings = {g:v
                              for g,v in cs.charStrings.items()
                              if g in glyphs}
        font.charset = [g for g in font.charset if g in glyphs]
        font.numGlyphs = len(font.charset)
        if s.options.retain_gids:
            isCFF2 = cff.major > 1
            for g in s.glyphs_emptied:
                _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)
    return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
    """Rewrite callsubr/callgsubr operands in this charstring's program so
    they index into the pruned, renumbered subroutine indexes (using the
    _used/_old_bias/_new_bias bookkeeping set up by the caller)."""
    program = self.program
    for position, token in enumerate(program):
        if position == 0:
            continue
        if token == 'callsubr':
            pool = subrs
        elif token == 'callgsubr':
            pool = gsubrs
        else:
            continue
        operand = program[position - 1]
        assert isinstance(operand, int)
        program[position - 1] = pool._used.index(operand + pool._old_bias) - pool._new_bias
@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
    """Strip hinting from this charstring using the analysis left on it
    as `_hints` by _DehintingT2Decompiler: delete calls to hint-only
    subroutines, cut the program before the last hint, re-insert the
    width operand if needed, and remove hintmask/cntrmask operations."""
    hints = self._hints
    if hints.deletions:
        p = self.program
        # Delete back-to-front so earlier indices stay valid.
        for idx in reversed(hints.deletions):
            del p[idx-2:idx]
    if hints.has_hint:
        assert not hints.deletions or hints.last_hint <= hints.deletions[0]
        self.program = self.program[hints.last_hint:]
        if not self.program:
            # TODO CFF2 no need for endchar.
            self.program.append('endchar')
        if hasattr(self, 'width'):
            # Insert width back if needed
            if self.width != self.private.defaultWidthX:
                # For CFF2 charstrings, this should never happen
                assert self.private.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value"
                self.program.insert(0, self.width - self.private.nominalWidthX)
    if hints.has_hintmask:
        i = 0
        p = self.program
        while i < len(p):
            if p[i] in ['hintmask', 'cntrmask']:
                assert i + 1 <= len(p)
                # Remove the operator and its mask-bytes argument.
                del p[i:i+2]
                continue
            i += 1
    assert len(self.program)
    del self._hints
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that marks which local and global subroutines are
    actually reached, recording their (biased) indices in each
    subroutine index's ``_used`` set."""

    def __init__(self, localSubrs, globalSubrs, private):
        psCharStrings.SimpleT2Decompiler.__init__(self,
                                                  localSubrs,
                                                  globalSubrs,
                                                  private)
        for pool in (localSubrs, globalSubrs):
            if pool and not hasattr(pool, "_used"):
                pool._used = set()

    def op_callsubr(self, index):
        # Record before delegating: the parent op consumes the operand.
        subr_number = self.operandStack[-1] + self.localBias
        self.localSubrs._used.add(subr_number)
        psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)

    def op_callgsubr(self, index):
        subr_number = self.operandStack[-1] + self.globalBias
        self.globalSubrs._used.add(subr_number)
        psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor):
    """T2 decompiler that analyzes charstrings (recursing into the
    subroutines they call) to determine where hint-related code ends, so
    drop_hints() can strip it. Every charstring that contains hints or
    hintmasks is collected into the `css` set passed to the constructor;
    each analyzed charstring gets a `Hints` record attached as `_hints`."""

    class Hints(object):
        # Per-charstring analysis record stored as `charstring._hints`.
        def __init__(self):
            # Whether calling this charstring produces any hint stems
            # Note that if a charstring starts with hintmask, it will
            # have has_hint set to True, because it *might* produce an
            # implicit vstem if called under certain conditions.
            self.has_hint = False
            # Index to start at to drop all hints
            self.last_hint = 0
            # Index up to which we know more hints are possible.
            # Only relevant if status is 0 or 1.
            self.last_checked = 0
            # The status means:
            # 0: after dropping hints, this charstring is empty
            # 1: after dropping hints, there may be more hints
            #    continuing after this, or there might be
            #    other things. Not clear yet.
            # 2: no more hints possible after this charstring
            self.status = 0
            # Has hintmask instructions; not recursive
            self.has_hintmask = False
            # List of indices of calls to empty subroutines to remove.
            self.deletions = []
        pass

    def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None):
        # css: set collecting charstrings that contain hint-related code.
        self._css = css
        psCharStrings.T2WidthExtractor.__init__(
            self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX)
        self.private = private

    def execute(self, charString):
        """Run the charstring, attach a fresh Hints analysis to it, and
        add it to the css set if it has any hints or hintmasks."""
        old_hints = charString._hints if hasattr(charString, '_hints') else None
        charString._hints = self.Hints()
        psCharStrings.T2WidthExtractor.execute(self, charString)
        hints = charString._hints
        if hints.has_hint or hints.has_hintmask:
            self._css.add(charString)
        if hints.status != 2:
            # Check from last_check, make sure we didn't have any operators.
            for i in range(hints.last_checked, len(charString.program) - 1):
                if isinstance(charString.program[i], str):
                    hints.status = 2
                    break
            else:
                hints.status = 1 # There's *something* here
            hints.last_checked = len(charString.program)
        if old_hints:
            # Re-analysis of a shared subroutine must agree with the
            # previous analysis.
            assert hints.__dict__ == old_hints.__dict__

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        psCharStrings.T2WidthExtractor.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        psCharStrings.T2WidthExtractor.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def op_hstem(self, index):
        psCharStrings.T2WidthExtractor.op_hstem(self, index)
        self.processHint(index)
    def op_vstem(self, index):
        psCharStrings.T2WidthExtractor.op_vstem(self, index)
        self.processHint(index)
    def op_hstemhm(self, index):
        psCharStrings.T2WidthExtractor.op_hstemhm(self, index)
        self.processHint(index)
    def op_vstemhm(self, index):
        psCharStrings.T2WidthExtractor.op_vstemhm(self, index)
        self.processHint(index)
    def op_hintmask(self, index):
        rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index)
        self.processHintmask(index)
        return rv
    def op_cntrmask(self, index):
        rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index)
        self.processHintmask(index)
        return rv

    def processHintmask(self, index):
        # A hintmask immediately preceded only by numbers acts as an
        # implicit vstemhm, so it may itself count as a hint.
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hintmask = True
        if hints.status != 2:
            # Check from last_check, see if we may be an implicit vstem
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            else:
                # We are an implicit vstem
                hints.has_hint = True
                hints.last_hint = index + 1
                hints.status = 0
            hints.last_checked = index + 1

    def processHint(self, index):
        # An explicit stem operator: everything up to here is hint code.
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hint = True
        hints.last_hint = index
        hints.last_checked = index

    def processSubr(self, index, subr):
        # Merge the called subroutine's analysis into the caller's.
        cs = self.callingStack[-1]
        hints = cs._hints
        subr_hints = subr._hints
        # Check from last_check, make sure we didn't have
        # any operators.
        if hints.status != 2:
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            hints.last_checked = index
        if hints.status != 2:
            if subr_hints.has_hint:
                hints.has_hint = True
                # Decide where to chop off from
                if subr_hints.status == 0:
                    hints.last_hint = index
                else:
                    hints.last_hint = index - 2 # Leave the subr call in
            elif subr_hints.status == 0:
                hints.deletions.append(index)
        hints.status = max(hints.status, subr_hints.status)
class StopHintCountEvent(Exception):
    """Raised by _DesubroutinizingT2Decompiler.stop_hint_count to abort
    re-execution of an already-desubroutinized charstring early, once
    hint counting is no longer needed."""
    pass
class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that computes a fully inlined ('desubroutinized')
    program for each charstring it executes, cached on the charstring
    as `_desubroutinized`. Subroutine calls are recorded as
    (index, expansion) patches and spliced in after execution."""

    # Operators after which the hint count can no longer change; used to
    # stop re-executing already-processed charstrings early.
    stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto",
            "op_vmoveto")

    def __init__(self, localSubrs, globalSubrs, private=None):
        psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs,
                                                  private)

    def execute(self, charString):
        self.need_hintcount = True # until proven otherwise
        for op_name in self.stop_hintcount_ops:
            setattr(self, op_name, self.stop_hint_count)
        if hasattr(charString, '_desubroutinized'):
            # If a charstring has already been desubroutinized, we will still
            # need to execute it if we need to count hints in order to
            # compute the byte length for mask arguments, and haven't finished
            # counting hints pairs.
            if self.need_hintcount and self.callingStack:
                try:
                    psCharStrings.SimpleT2Decompiler.execute(self, charString)
                except StopHintCountEvent:
                    del self.callingStack[-1]
            return
        charString._patches = []
        psCharStrings.SimpleT2Decompiler.execute(self, charString)
        desubroutinized = charString.program[:]
        # Apply patches back-to-front so earlier indices stay valid; each
        # patch replaces the operand + call operator pair.
        for idx, expansion in reversed(charString._patches):
            assert idx >= 2
            assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
            assert type(desubroutinized[idx - 2]) == int
            if expansion[-1] == 'return':
                expansion = expansion[:-1]
            desubroutinized[idx-2:idx] = expansion
        if not self.private.in_cff2:
            if 'endchar' in desubroutinized:
                # Cut off after first endchar
                desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
            else:
                if not len(desubroutinized) or desubroutinized[-1] != 'return':
                    desubroutinized.append('return')
        charString._desubroutinized = desubroutinized
        del charString._patches

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1]+self.localBias]
        psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
        psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def stop_hint_count(self, *args):
        self.need_hintcount = False
        for op_name in self.stop_hintcount_ops:
            setattr(self, op_name, None)
        cs = self.callingStack[-1]
        if hasattr(cs, '_desubroutinized'):
            raise StopHintCountEvent()

    def op_hintmask(self, index):
        psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
        if self.need_hintcount:
            self.stop_hint_count()

    def processSubr(self, index, subr):
        # Remember where to splice the callee's inlined program.
        cs = self.callingStack[-1]
        if not hasattr(cs, '_desubroutinized'):
            cs._patches.append((index, subr._desubroutinized))
@_add_method(ttLib.getTableClass('CFF '))
def prune_post_subset(self, ttfFont, options):
    """Subsetter hook run after glyph subsetting: drop unused CID font
    dictionaries, then desubroutinize and/or strip hints per `options`,
    and finally remove now-unused subroutines. Returns True to keep the
    table."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # Drop unused FontDictionaries
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            indices = _uniq_sort(sel.gidArray)
            sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
            arr = font.FDArray
            arr.items = [arr[i] for i in indices]
            del arr.file, arr.offsets
    # Desubroutinize if asked for
    if options.desubroutinize:
        self.desubroutinize()
    # Drop hints if not needed
    if not options.hinting:
        self.remove_hints()
    elif not options.desubroutinize:
        self.remove_unused_subroutines()
    return True
def _delete_empty_subrs(private_dict):
if hasattr(private_dict, 'Subrs') and not private_dict.Subrs:
if 'Subrs' in private_dict.rawDict:
del private_dict.rawDict['Subrs']
del private_dict.Subrs
@_add_method(ttLib.getTableClass('CFF '))
def desubroutinize(self):
    """Inline all local and global subroutines into every charstring,
    then delete the local Subrs indexes and clear the global ones."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)
            c.program = c._desubroutinized
            del c._desubroutinized
        # Delete all the local subrs
        if hasattr(font, 'FDArray'):
            for fd in font.FDArray:
                pd = fd.Private
                if hasattr(pd, 'Subrs'):
                    del pd.Subrs
                if 'Subrs' in pd.rawDict:
                    del pd.rawDict['Subrs']
        else:
            pd = font.Private
            if hasattr(pd, 'Subrs'):
                del pd.Subrs
            if 'Subrs' in pd.rawDict:
                del pd.rawDict['Subrs']
    # as well as the global subrs
    cff.GlobalSubrs.clear()
@_add_method(ttLib.getTableClass('CFF '))
def remove_hints(self):
    """Strip all hinting from the CFF: stem hints and hintmasks in every
    charstring (via _DehintingT2Decompiler + drop_hints), plus the
    font-wide hint values in the Private dict(s); then drop subroutines
    that became unused."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # This can be tricky, but doesn't have to. What we do is:
        #
        # - Run all used glyph charstrings and recurse into subroutines,
        # - For each charstring (including subroutines), if it has any
        #   of the hint stem operators, we mark it as such.
        #   Upon returning, for each charstring we note all the
        #   subroutine calls it makes that (recursively) contain a stem,
        # - Dropping hinting then consists of the following two ops:
        #   * Drop the piece of the program in each charstring before the
        #     last call to a stem op or a stem-calling subroutine,
        #   * Drop all hintmask operations.
        # - It's trickier... A hintmask right after hints and a few numbers
        #   will act as an implicit vstemhm. As such, we track whether
        #   we have seen any non-hint operators so far and do the right
        #   thing, recursively... Good luck understanding that :(
        css = set()
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs,
                                                c.private.nominalWidthX,
                                                c.private.defaultWidthX,
                                                c.private)
            decompiler.execute(c)
            c.width = decompiler.width
        for charstring in css:
            charstring.drop_hints()
        del css
        # Drop font-wide hinting values
        all_privs = []
        if hasattr(font, 'FDArray'):
            all_privs.extend(fd.Private for fd in font.FDArray)
        else:
            all_privs.append(font.Private)
        for priv in all_privs:
            for k in ['BlueValues', 'OtherBlues',
                      'FamilyBlues', 'FamilyOtherBlues',
                      'BlueScale', 'BlueShift', 'BlueFuzz',
                      'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW',
                      'ForceBold', 'LanguageGroup', 'ExpansionFactor']:
                if hasattr(priv, k):
                    setattr(priv, k, None)
    self.remove_unused_subroutines()
@_add_method(ttLib.getTableClass('CFF '))
def remove_unused_subroutines(self):
    """Delete subroutines that are never called and renumber the calls in
    every charstring (and in the surviving subroutines) accordingly."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # Renumber subroutines to remove unused ones
        # Mark all used subroutines
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)
        all_subrs = [font.GlobalSubrs]
        if hasattr(font, 'FDArray'):
            all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
        elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
            all_subrs.append(font.Private.Subrs)
        # NOTE(review): this line appears to be dead code -- `subrs` is
        # immediately rebound by the `for subrs in all_subrs` loop below,
        # and it would raise NameError if font.charset were empty. Verify
        # before removing.
        subrs = set(subrs) # Remove duplicates
        # Prepare
        for subrs in all_subrs:
            if not hasattr(subrs, '_used'):
                subrs._used = set()
            # _used becomes a sorted list of surviving (biased) indices;
            # _old_bias/_new_bias feed subset_subroutines' renumbering.
            subrs._used = _uniq_sort(subrs._used)
            subrs._old_bias = psCharStrings.calcSubrBias(subrs)
            subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)
        # Renumber glyph charstrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            subrs = getattr(c.private, "Subrs", [])
            c.subset_subroutines (subrs, font.GlobalSubrs)
        # Renumber subroutines themselves
        for subrs in all_subrs:
            if subrs == font.GlobalSubrs:
                if not hasattr(font, 'FDArray') and hasattr(font.Private, 'Subrs'):
                    local_subrs = font.Private.Subrs
                else:
                    local_subrs = []
            else:
                local_subrs = subrs
            subrs.items = [subrs.items[i] for i in subrs._used]
            if hasattr(subrs, 'file'):
                del subrs.file
            if hasattr(subrs, 'offsets'):
                del subrs.offsets
            for subr in subrs.items:
                subr.subset_subroutines (local_subrs, font.GlobalSubrs)
        # Delete local SubrsIndex if empty
        if hasattr(font, 'FDArray'):
            for fd in font.FDArray:
                _delete_empty_subrs(fd.Private)
        else:
            _delete_empty_subrs(font.Private)
        # Cleanup
        for subrs in all_subrs:
            del subrs._used, subrs._old_bias, subrs._new_bias
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/subset/cff.py",
"copies": "5",
"size": "19833",
"license": "apache-2.0",
"hash": -7980794738665431000,
"line_mean": 30.4310618067,
"line_max": 113,
"alpha_frac": 0.6996420108,
"autogenerated": false,
"ratio": 2.9067858713176022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6106427882117602,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
from . import DefaultTable
from . import grUtils
import struct
Feat_hdr_format='''
>
version: 16.16F
'''
class table_F__e_a_t(DefaultTable.DefaultTable):
    """Support for the Graphite 'Feat' table.

    self.features maps 4-character feature tags to Feature objects
    carrying flags, label, default setting, declaration index, and a
    settings dict {setting value: label id}.
    """
    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.features = {}

    def decompile(self, data, ttFont):
        """Parse the binary 'Feat' table (version 1.x or 2.x layouts)."""
        (_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        numFeats, = struct.unpack('>H', data[:2])
        # Skip numFeatures (2) plus 6 reserved bytes.
        data = data[8:]
        allfeats = []
        maxsetting = 0
        for i in range(numFeats):
            if self.version >= 2.0:
                # v2 record: 32-bit feature id, 16-byte stride.
                (fid, nums, _, offset, flags, lid) = struct.unpack(">LHHLHH",
                                                        data[16*i:16*(i+1)])
                # Convert byte offset (from table start) to an index into
                # the settings array parsed below.
                offset = int((offset - 12 - 16 * numFeats) / 4)
            else:
                # v1 record: 16-bit feature id, 12-byte stride.
                (fid, nums, offset, flags, lid) = struct.unpack(">HHLHH",
                                                        data[12*i:12*(i+1)])
                offset = int((offset - 12 - 12 * numFeats) / 4)
            allfeats.append((fid, nums, offset, flags, lid))
            maxsetting = max(maxsetting, offset + nums)
        # NOTE(review): v1 records are 12 bytes each, yet this slice always
        # skips 16*numFeats -- verify against a version 1.0 Feat table.
        data = data[16*numFeats:]
        allsettings = []
        for i in range(maxsetting):
            if len(data) >= 4 * (i + 1):
                (val, lid) = struct.unpack(">HH", data[4*i:4*(i+1)])
                allsettings.append((val, lid))
        for i,f in enumerate(allfeats):
            (fid, nums, offset, flags, lid) = f
            fobj = Feature()
            fobj.flags = flags
            fobj.label = lid
            self.features[grUtils.num2tag(fid)] = fobj
            fobj.settings = {}
            fobj.default = None
            fobj.index = i
            for i in range(offset, offset + nums):
                if i >= len(allsettings): continue
                (vid, vlid) = allsettings[i]
                fobj.settings[vid] = vlid
                # First listed setting becomes the default.
                if fobj.default is None:
                    fobj.default = vid

    def compile(self, ttFont):
        """Serialize the table; inverse of decompile()."""
        fdat = b""
        vdat = b""
        offset = 0
        for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
            fnum = grUtils.tag2num(f)
            if self.version >= 2.0:
                fdat += struct.pack(">LHHLHH", grUtils.tag2num(f), len(v.settings),
                    0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label)
            elif fnum > 65535: # self healing for alphabetic ids
                # v1 feature ids are 16-bit; upgrade to v2 and restart.
                self.version = 2.0
                return self.compile(ttFont)
            else:
                fdat += struct.pack(">HHLHH", grUtils.tag2num(f), len(v.settings),
                    offset * 4 + 12 + 12 * len(self.features), v.flags, v.label)
            # Emit the default setting first, the rest in natural order.
            for s, l in sorted(v.settings.items(), key=lambda x:(-1, x[1]) if x[0] == v.default else x):
                vdat += struct.pack(">HH", s, l)
            offset += len(v.settings)
        hdr = sstruct.pack(Feat_hdr_format, self)
        return hdr + struct.pack('>HHL', len(self.features), 0, 0) + fdat + vdat

    def toXML(self, writer, ttFont):
        """Dump the table as TTX XML."""
        writer.simpletag('version', version=self.version)
        writer.newline()
        for f, v in sorted(self.features.items(), key=lambda x:x[1].index):
            writer.begintag('feature', fid=f, label=v.label, flags=v.flags,
                            default=(v.default if v.default else 0))
            writer.newline()
            for s, l in sorted(v.settings.items()):
                writer.simpletag('setting', value=s, label=l)
                writer.newline()
            writer.endtag('feature')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX XML; inverse of toXML()."""
        if name == 'version':
            self.version = float(safeEval(attrs['version']))
        elif name == 'feature':
            fid = attrs['fid']
            fobj = Feature()
            fobj.flags = int(safeEval(attrs['flags']))
            fobj.label = int(safeEval(attrs['label']))
            fobj.default = int(safeEval(attrs.get('default','0')))
            fobj.index = len(self.features)
            self.features[fid] = fobj
            fobj.settings = {}
            for element in content:
                if not isinstance(element, tuple): continue
                tag, a, c = element
                if tag == 'setting':
                    fobj.settings[int(safeEval(a['value']))] = int(safeEval(a['label']))
class Feature(object):
    """Plain record for one 'Feat' feature; the table code attaches
    flags, label, default, index, and a settings dict."""
    pass
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/F__e_a_t.py",
"copies": "5",
"size": "4678",
"license": "apache-2.0",
"hash": 3039370187206589400,
"line_mean": 40.3982300885,
"line_max": 104,
"alpha_frac": 0.5147498931,
"autogenerated": false,
"ratio": 3.763475462590507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6778225355690506,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
# from itertools import *
from functools import partial
from . import DefaultTable
from . import grUtils
import struct
Glat_format_0 = """
> # big endian
version: 16.16F
"""
Glat_format_3 = """
>
version: 16.16F
compression:L # compression scheme or reserved
"""
Glat_format_1_entry = """
>
attNum: B # Attribute number of first attribute
num: B # Number of attributes in this run
"""
Glat_format_23_entry = """
>
attNum: H # Attribute number of first attribute
num: H # Number of attributes in this run
"""
Glat_format_3_octabox_metrics = """
>
subboxBitmap: H # Which subboxes exist on 4x4 grid
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
Glat_format_3_subbox_entry = """
>
left: B # xi
right: B # xa
bottom: B # yi
top: B # ya
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
"""
class _Object() :
pass
class _Dict(dict) :
pass
class table_G__l_a_t(DefaultTable.DefaultTable):
'''
Support Graphite Glat tables
'''
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.scheme = 0
def decompile(self, data, ttFont):
sstruct.unpack2(Glat_format_0, data, self)
self.version = float(floatToFixedToStr(self.version, precisionBits=16))
if self.version <= 1.9:
decoder = partial(self.decompileAttributes12,fmt=Glat_format_1_entry)
elif self.version <= 2.9:
decoder = partial(self.decompileAttributes12,fmt=Glat_format_23_entry)
elif self.version >= 3.0:
(data, self.scheme) = grUtils.decompress(data)
sstruct.unpack2(Glat_format_3, data, self)
self.hasOctaboxes = (self.compression & 1) == 1
decoder = self.decompileAttributes3
gloc = ttFont['Gloc']
self.attributes = {}
count = 0
for s,e in zip(gloc,gloc[1:]):
self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
count += 1
def decompileAttributes12(self, data, fmt):
attributes = _Dict()
while len(data) > 3:
e, data = sstruct.unpack2(fmt, data, _Object())
keys = range(e.attNum, e.attNum+e.num)
if len(data) >= 2 * e.num :
vals = struct.unpack_from(('>%dh' % e.num), data)
attributes.update(zip(keys,vals))
data = data[2*e.num:]
return attributes
def decompileAttributes3(self, data):
if self.hasOctaboxes:
o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
numsub = bin(o.subboxBitmap).count("1")
o.subboxes = []
for b in range(numsub):
if len(data) >= 8 :
subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry,
data, _Object())
o.subboxes.append(subbox)
attrs = self.decompileAttributes12(data, Glat_format_23_entry)
if self.hasOctaboxes:
attrs.octabox = o
return attrs
def compile(self, ttFont):
data = sstruct.pack(Glat_format_0, self)
if self.version <= 1.9:
encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
elif self.version <= 2.9:
encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
elif self.version >= 3.0:
self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
data = sstruct.pack(Glat_format_3, self)
encoder = self.compileAttributes3
glocs = []
for n in range(len(self.attributes)):
glocs.append(len(data))
data += encoder(self.attributes[ttFont.getGlyphName(n)])
glocs.append(len(data))
ttFont['Gloc'].set(glocs)
if self.version >= 3.0:
data = grUtils.compress(self.scheme, data)
return data
def compileAttributes12(self, attrs, fmt):
data = b""
for e in grUtils.entries(attrs):
data += sstruct.pack(fmt, {'attNum' : e[0], 'num' : e[1]}) + \
struct.pack(('>%dh' % len(e[2])), *e[2])
return data
def compileAttributes3(self, attrs):
if self.hasOctaboxes:
o = attrs.octabox
data = sstruct.pack(Glat_format_3_octabox_metrics, o)
numsub = bin(o.subboxBitmap).count("1")
for b in range(numsub) :
data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
else:
data = ""
return data + self.compileAttributes12(attrs, Glat_format_23_entry)
def toXML(self, writer, ttFont):
writer.simpletag('version', version=self.version, compressionScheme=self.scheme)
writer.newline()
for n, a in sorted(self.attributes.items(), key=lambda x:ttFont.getGlyphID(x[0])):
writer.begintag('glyph', name=n)
writer.newline()
if hasattr(a, 'octabox'):
o = a.octabox
formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics)
vals = {}
for k in names:
if k == 'subboxBitmap': continue
vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 255)
vals['bitmap'] = "{:0X}".format(o.subboxBitmap)
writer.begintag('octaboxes', **vals)
writer.newline()
formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry)
for s in o.subboxes:
vals = {}
for k in names:
vals[k] = "{:.3f}%".format(getattr(s, k) * 100. / 255)
writer.simpletag('octabox', **vals)
writer.newline()
writer.endtag('octaboxes')
writer.newline()
for k, v in sorted(a.items()):
writer.simpletag('attribute', index=k, value=v)
writer.newline()
writer.endtag('glyph')
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
    """Rebuild table state from one top-level TTX element.

    'version' sets table-wide fields; any element other than 'glyph' is
    then ignored.  A 'glyph' element's children populate a fresh attribute
    dict (and optional octabox object) for that glyph name.
    """
    if name == 'version' :
        self.version = float(safeEval(attrs['version']))
        self.scheme = int(safeEval(attrs['compressionScheme']))
    if name != 'glyph' : return
    if not hasattr(self, 'attributes'):
        self.attributes = {}
    gname = attrs['name']
    attributes = _Dict()
    for element in content:
        if not isinstance(element, tuple): continue
        # NOTE: this unpacking shadows the method's 'attrs' parameter from
        # here on; the original top-level attrs are no longer accessible.
        tag, attrs, subcontent = element
        if tag == 'attribute' :
            k = int(safeEval(attrs['index']))
            v = int(safeEval(attrs['value']))
            attributes[k]=v
        elif tag == 'octaboxes':
            self.hasOctaboxes = True
            o = _Object()
            o.subboxBitmap = int(attrs['bitmap'], 16)
            o.subboxes = []
            del attrs['bitmap']
            for k, v in attrs.items():
                # "12.345%" strings back to rounded 0..255 byte values
                setattr(o, k, int(float(v[:-1]) * 255. / 100. + 0.5))
            for element in subcontent:
                if not isinstance(element, tuple): continue
                # inner unpacking reuses tag/attrs/subcontent again
                (tag, attrs, subcontent) = element
                so = _Object()
                for k, v in attrs.items():
                    setattr(so, k, int(float(v[:-1]) * 255. / 100. + 0.5))
                o.subboxes.append(so)
            attributes.octabox = o
    self.attributes[gname] = attributes
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G__l_a_t.py",
"copies": "5",
"size": "8400",
"license": "apache-2.0",
"hash": -4228511836011761000,
"line_mean": 37.5321100917,
"line_max": 93,
"alpha_frac": 0.5422619048,
"autogenerated": false,
"ratio": 3.7316748111950244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6773936715995025,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
log = logging.getLogger(__name__)
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
    """The font header ('head') table: version, bounding box, timestamps,
    style flags, and the loca index format indicator."""

    # Tables whose compiled output feeds back into 'head' values
    # (bounding box for TT outlines, indexToLocFormat for 'loca').
    dependencies = ['maxp', 'loca', 'CFF ', 'CFF2']

    def decompile(self, data, ttFont):
        """Unpack binary 'head' data onto self, sanitizing the timestamps."""
        dummy, rest = sstruct.unpack2(headFormat, data, self)
        if rest:
            # this is quite illegal, but there seem to be fonts out there that do this
            log.warning("extra bytes at the end of 'head' table")
            assert rest == b"\0\0"
        # For timestamp fields, ignore the top four bytes. Some fonts have
        # bogus values there. Since till 2038 those bytes only can be zero,
        # ignore them.
        #
        # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
        for stamp in 'created', 'modified':
            value = getattr(self, stamp)
            if value > 0xFFFFFFFF:
                log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
                value &= 0xFFFFFFFF
                setattr(self, stamp, value)
            if value < 0x7C259DC0: # January 1, 1970 00:00:00
                log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp)
                # shift from the unix epoch to the 1904 Mac epoch the format uses
                value += 0x7C259DC0
                setattr(self, stamp, value)

    def compile(self, ttFont):
        """Pack self back to binary, optionally recalculating bbox/timestamp."""
        if ttFont.recalcBBoxes:
            # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
            if 'CFF ' in ttFont:
                topDict = ttFont['CFF '].cff.topDictIndex[0]
                self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
            elif 'CFF2' in ttFont:
                # CFF2 has no FontBBox; union the bounds of every charstring.
                topDict = ttFont['CFF2'].cff.topDictIndex[0]
                charStrings = topDict.CharStrings
                fontBBox = None
                for charString in charStrings.values():
                    bounds = charString.calcBounds(charStrings)
                    if bounds is not None:
                        if fontBBox is not None:
                            fontBBox = unionRect(fontBBox, bounds)
                        else:
                            fontBBox = bounds
                if fontBBox is not None:
                    self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
        if ttFont.recalcTimestamp:
            self.modified = timestampNow()
        data = sstruct.pack(headFormat, self)
        return data

    def toXML(self, writer, ttFont):
        """Dump every header field as one <name value=...> TTX tag, with
        per-field formatting (fixed-point, timestamps, hex, binary flags)."""
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        _, names, fixes = sstruct.getformat(headFormat)
        for name in names:
            value = getattr(self, name)
            if name in fixes:
                # 16.16 fixed-point fields rendered as shortest round-trip string
                value = floatToFixedToStr(value, precisionBits=fixes[name])
            elif name in ("created", "modified"):
                value = timestampToString(value)
            elif name in ("magicNumber", "checkSumAdjustment"):
                if value < 0:
                    # normalize to unsigned before hex display
                    value = value + 0x100000000
                value = hex(value)
                if value[-1:] == "L":
                    # strip the long suffix (Python 2 legacy)
                    value = value[:-1]
            elif name in ("macStyle", "flags"):
                value = num2binary(value, 16)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Set one header field from its TTX tag, inverting toXML's formatting."""
        value = attrs["value"]
        fixes = sstruct.getformat(headFormat)[2]
        if name in fixes:
            value = strToFixedToFloat(value, precisionBits=fixes[name])
        elif name in ("created", "modified"):
            value = timestampFromString(value)
        elif name in ("macStyle", "flags"):
            value = binary2num(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_e_a_d.py",
"copies": "5",
"size": "3965",
"license": "apache-2.0",
"hash": 4532035536491309600,
"line_mean": 32.8888888889,
"line_max": 89,
"alpha_frac": 0.6754098361,
"autogenerated": false,
"ratio": 3.136867088607595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030756471828288175,
"num_lines": 117
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import readHex, safeEval
import struct
sbixGlyphHeaderFormat = """
>
originOffsetX: h # The x-value of the point in the glyph relative to its
# lower-left corner which corresponds to the origin of
# the glyph on the screen, that is the point on the
# baseline at the left edge of the glyph.
originOffsetY: h # The y-value of the point in the glyph relative to its
# lower-left corner which corresponds to the origin of
# the glyph on the screen, that is the point on the
# baseline at the left edge of the glyph.
graphicType: 4s # e.g. "png "
"""
sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat)
class Glyph(object):
    """One glyph-data record in an 'sbix' strike.

    Holds the per-glyph header (origin offsets and 4-character graphic type)
    plus the raw image payload.  A graphicType of "dupe" means the record is
    only a reference to another glyph's image data.
    """

    def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0):
        self.gid = gid
        self.glyphName = glyphName
        self.referenceGlyphName = referenceGlyphName
        self.originOffsetX = originOffsetX
        self.originOffsetY = originOffsetY
        self.rawdata = rawdata
        self.graphicType = graphicType
        self.imageData = imageData

        # fix self.graphicType if it is null terminated or too short
        if self.graphicType is not None:
            if self.graphicType[-1] == "\0":
                self.graphicType = self.graphicType[:-1]
            if len(self.graphicType) > 4:
                from fontTools import ttLib
                raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.")
            elif len(self.graphicType) < 4:
                # FIX: pad with spaces to exactly 4 characters.  The old
                # slice-based padding could under-pad very short tags when
                # the pad literal was too short; ljust always yields 4 chars.
                self.graphicType = self.graphicType.ljust(4)

    def decompile(self, ttFont):
        """Parse self.rawdata into header fields and image payload (or a
        reference glyph name when graphicType is "dupe")."""
        self.glyphName = ttFont.getGlyphName(self.gid)
        if self.rawdata is None:
            from fontTools import ttLib
            raise ttLib.TTLibError("No table data to decompile")
        if len(self.rawdata) > 0:
            if len(self.rawdata) < sbixGlyphHeaderFormatSize:
                from fontTools import ttLib
                raise ttLib.TTLibError("Glyph header too short.")

            sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self)

            if self.graphicType == "dupe":
                # this glyph is a reference to another glyph's image data;
                # the payload is just that glyph's 16-bit id
                gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:])
                self.referenceGlyphName = ttFont.getGlyphName(gid)
            else:
                self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:]
                self.referenceGlyphName = None
        # clean up: raw byte view is no longer authoritative once parsed
        del self.rawdata
        del self.gid

    def compile(self, ttFont):
        """Serialize this record back to self.rawdata (header + image bytes);
        an empty record (graphicType None) compiles to zero bytes."""
        if self.glyphName is None:
            from fontTools import ttLib
            raise ttLib.TTLibError("Can't compile Glyph without glyph name")
            # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
            # (needed if you just want to compile the sbix table on its own)
        self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
        if self.graphicType is None:
            self.rawdata = b""
        else:
            self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData

    def toXML(self, xmlWriter, ttFont):
        """Write this record as TTX: empty glyphs as a bare tag, "dupe"
        records as a <ref>, everything else as hex-dumped image data."""
        if self.graphicType is None:  # FIX: identity comparison, not == None
            # TODO: ignore empty glyphs?
            # a glyph data entry is required for each glyph,
            # but empty ones can be calculated at compile time
            xmlWriter.simpletag("glyph", name=self.glyphName)
            xmlWriter.newline()
            return
        xmlWriter.begintag("glyph",
            graphicType=self.graphicType,
            name=self.glyphName,
            originOffsetX=self.originOffsetX,
            originOffsetY=self.originOffsetY,
        )
        xmlWriter.newline()
        if self.graphicType == "dupe":
            # graphicType == "dupe" is a reference to another glyph id.
            xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
        else:
            xmlWriter.begintag("hexdata")
            xmlWriter.newline()
            xmlWriter.dumphex(self.imageData)
            xmlWriter.endtag("hexdata")
        xmlWriter.newline()
        xmlWriter.endtag("glyph")
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Restore image data (or a dupe reference) from one TTX child element."""
        if name == "ref":
            # glyph is a "dupe", i.e. a reference to another glyph's image data.
            # in this case imageData contains the glyph id of the reference glyph
            # get glyph id from glyphname
            self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''")))
        elif name == "hexdata":
            self.imageData = readHex(content)
        else:
            from fontTools import ttLib
            raise ttLib.TTLibError("can't handle '%s' element" % name)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/sbixGlyph.py",
"copies": "5",
"size": "4426",
"license": "apache-2.0",
"hash": -5178636415555251000,
"line_mean": 36.8290598291,
"line_max": 150,
"alpha_frac": 0.7202892002,
"autogenerated": false,
"ratio": 3.288261515601783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6508550715801782,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.