id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
175,362 | from collections import namedtuple, OrderedDict
import os
from fontTools.misc.fixedTools import fixedToFloat
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
OTTableWriter,
CountReference,
)
from fontTools.ttLib.tables import otBase
from fontTools.feaLib.ast import STATNameStatement
from fontTools.otlLib.optimize.gpos import (
_compression_level_from_env,
compact_lookup,
)
from fontTools.otlLib.error import OpenTypeLibError
from functools import reduce
import logging
import copy
class ValueRecord(object):
    """GPOS value record: a bag of placement/advance fields and their
    device tables, with XML (de)serialization.

    NOTE(review): this definition shadows the ``ValueRecord`` imported
    from ``fontTools.ttLib.tables.otBase`` above — presumably
    intentional; confirm.
    """

    # see ValueRecordFactory
    def __init__(self, valueFormat=None, src=None):
        # valueFormat: optional ValueFormat bitmask selecting which fields
        #   exist; device fields start as None, simple fields as 0.
        # src: optional record to copy values from.  Without a valueFormat,
        #   *all* of src's fields are copied wholesale.
        if valueFormat is not None:
            # `valueRecordFormat` is not imported in the visible header;
            # assumed to be otBase's (mask, name, isDevice, signed) table —
            # TODO confirm.
            for mask, name, isDevice, signed in valueRecordFormat:
                if valueFormat & mask:
                    setattr(self, name, None if isDevice else 0)
            if src is not None:
                for key, val in src.__dict__.items():
                    if not hasattr(self, key):
                        continue  # only copy fields selected by valueFormat
                    setattr(self, key, val)
        elif src is not None:
            self.__dict__ = src.__dict__.copy()

    def getFormat(self):
        """Return the ValueFormat bitmask covering every field present."""
        format = 0
        for name in self.__dict__.keys():
            format = format | valueRecordFormatDict[name][0]
        return format

    def getEffectiveFormat(self):
        """Return the bitmask covering only fields with truthy values."""
        format = 0
        for name, value in self.__dict__.items():
            if value:
                format = format | valueRecordFormatDict[name][0]
        return format

    def toXML(self, xmlWriter, font, valueName, attrs=None):
        """Write this record as XML: simple values become attributes of a
        single tag; device tables, if present, become child elements."""
        if attrs is None:
            simpleItems = []
        else:
            simpleItems = list(attrs)
        for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
            if hasattr(self, name):
                simpleItems.append((name, getattr(self, name)))
        deviceItems = []
        for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
            if hasattr(self, name):
                device = getattr(self, name)
                if device is not None:
                    deviceItems.append((name, device))
        if deviceItems:
            xmlWriter.begintag(valueName, simpleItems)
            xmlWriter.newline()
            for name, deviceRecord in deviceItems:
                if deviceRecord is not None:
                    deviceRecord.toXML(xmlWriter, font, name=name)
            xmlWriter.endtag(valueName)
            xmlWriter.newline()
        else:
            xmlWriter.simpletag(valueName, simpleItems)
            xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        """Populate this record from parsed XML: attributes are simple
        integer fields, child elements are device subtables."""
        from . import otTables

        for k, v in attrs.items():
            setattr(self, k, int(v))
        for element in content:
            if not isinstance(element, tuple):
                continue  # skip text/whitespace nodes
            # NOTE: deliberately rebinds the `name`/`attrs`/`content`
            # parameters with the child element's data; the child's tag
            # names both the otTables class and the device field to set.
            name, attrs, content = element
            value = getattr(otTables, name)()
            for elem2 in content:
                if not isinstance(elem2, tuple):
                    continue
                name2, attrs2, content2 = elem2
                value.fromXML(name2, attrs2, content2, font)
            setattr(self, name, value)

    def __ne__(self, other):
        # Defined in terms of __eq__, propagating NotImplemented.
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __eq__(self, other):
        # Records compare equal iff same concrete type and same fields.
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__
The provided code snippet includes necessary dependencies for implementing the `buildValue` function. Write a Python function `def buildValue(value)` to solve the following problem:
Builds a positioning value record. Value records are used to specify coordinates and adjustments for positioning and attaching glyphs. Many of the positioning functions in this library take ``otTables.ValueRecord`` objects as arguments. This function builds value records from dictionaries. Args: value (dict): A dictionary with zero or more of the following keys: - ``xPlacement`` - ``yPlacement`` - ``xAdvance`` - ``yAdvance`` - ``xPlaDevice`` - ``yPlaDevice`` - ``xAdvDevice`` - ``yAdvDevice`` Returns: An ``otTables.ValueRecord`` object.
Here is the function:
def buildValue(value):
    """Build a positioning value record from a dictionary.

    Positioning lookups use value records to express placement and advance
    adjustments (and their device tables).  Recognized keys are
    ``xPlacement``, ``yPlacement``, ``xAdvance``, ``yAdvance``,
    ``xPlaDevice``, ``yPlaDevice``, ``xAdvDevice`` and ``yAdvDevice``.

    Args:
        value (dict): mapping of value-record field names to values.

    Returns:
        An ``otTables.ValueRecord`` object.
    """
    record = ValueRecord()
    for field, amount in value.items():
        setattr(record, field, amount)
    return record
175,363 | from collections import namedtuple, OrderedDict
import os
from fontTools.misc.fixedTools import fixedToFloat
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
OTTableWriter,
CountReference,
)
from fontTools.ttLib.tables import otBase
from fontTools.feaLib.ast import STATNameStatement
from fontTools.otlLib.optimize.gpos import (
_compression_level_from_env,
compact_lookup,
)
from fontTools.otlLib.error import OpenTypeLibError
from functools import reduce
import logging
import copy
def buildCoverage(glyphs, glyphMap):
    """Build a Coverage table from a sequence of glyph names.

    Coverage tables (see the OpenType spec, chapter 2) list the glyphs a
    layout subtable applies to, letting shaping engines quickly decide
    whether the subtable is relevant to a glyph stream.

    Args:
        glyphs: a sequence of glyph names.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.Coverage`` object, or ``None`` when no glyphs are
        supplied.
    """
    if not glyphs:
        return None
    coverage = ot.Coverage()
    # Deduplicate and order by glyph ID, as the format requires.
    coverage.glyphs = sorted(set(glyphs), key=lambda g: glyphMap[g])
    return coverage
def buildAttachPoint(points):
    # Convert a list of contour indices, e.g. [4, 23, 41], into an
    # otTables.AttachPoint; returns None for an empty/missing list.
    # Only used by buildAttachList below.
    if not points:
        return None
    attach = ot.AttachPoint()
    indices = sorted(set(points))
    attach.PointIndex = indices
    attach.PointCount = len(indices)
    return attach
The provided code snippet includes necessary dependencies for implementing the `buildAttachList` function. Write a Python function `def buildAttachList(attachPoints, glyphMap)` to solve the following problem:
Builds an AttachList subtable. A GDEF table may contain an Attachment Point List table (AttachList) which stores the contour indices of attachment points for glyphs with attachment points. This routine builds AttachList subtables. Args: attachPoints (dict): A mapping between glyph names and a list of contour indices. Returns: An ``otTables.AttachList`` object if attachment points are supplied, or ``None`` otherwise.
Here is the function:
def buildAttachList(attachPoints, glyphMap):
    """Build a GDEF AttachList subtable.

    The Attachment Point List stores, per covered glyph, the contour
    indices of that glyph's attachment points.

    Args:
        attachPoints (dict): mapping from glyph name to a list of contour
            indices.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.AttachList`` object, or ``None`` when no attachment
        points are supplied.
    """
    if not attachPoints:
        return None
    attach_list = ot.AttachList()
    coverage = buildCoverage(attachPoints.keys(), glyphMap)
    attach_list.Coverage = coverage
    # AttachPoint records must parallel the coverage order.
    attach_list.AttachPoint = [
        buildAttachPoint(attachPoints[glyph]) for glyph in coverage.glyphs
    ]
    attach_list.GlyphCount = len(attach_list.AttachPoint)
    return attach_list
175,364 | from collections import namedtuple, OrderedDict
import os
from fontTools.misc.fixedTools import fixedToFloat
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
OTTableWriter,
CountReference,
)
from fontTools.ttLib.tables import otBase
from fontTools.feaLib.ast import STATNameStatement
from fontTools.otlLib.optimize.gpos import (
_compression_level_from_env,
compact_lookup,
)
from fontTools.otlLib.error import OpenTypeLibError
from functools import reduce
import logging
import copy
def buildCoverage(glyphs, glyphMap):
    """Build a Coverage table.

    A Coverage table (OpenType spec, chapter 2) defines which glyphs a
    layout subtable is concerned with, so shaping engines can quickly
    skip irrelevant subtables.

    Args:
        glyphs: a sequence of glyph names.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.Coverage`` object, or ``None`` for empty input.
    """
    if not glyphs:
        return None
    table = ot.Coverage()
    unique = set(glyphs)
    # Coverage glyphs are stored in glyph-ID order.
    table.glyphs = sorted(unique, key=glyphMap.__getitem__)
    return table
def buildLigGlyph(coords, points):
    # ([500], [4]) --> otTables.LigGlyph; returns None when both inputs
    # are empty/None.  Coordinate carets come first, then point carets.
    caret_values = []
    for coord in sorted(coords) if coords else ():
        caret_values.append(buildCaretValueForCoord(coord))
    for point in sorted(points) if points else ():
        caret_values.append(buildCaretValueForPoint(point))
    if not caret_values:
        return None
    lig = ot.LigGlyph()
    lig.CaretValue = caret_values
    lig.CaretCount = len(caret_values)
    return lig
The provided code snippet includes necessary dependencies for implementing the `buildLigCaretList` function. Write a Python function `def buildLigCaretList(coords, points, glyphMap)` to solve the following problem:
Builds a ligature caret list table. Ligatures appear as a single glyph representing multiple characters; however when, for example, editing text containing a ``f_i`` ligature, the user may want to place the cursor between the ``f`` and the ``i``. The ligature caret list in the GDEF table specifies the position to display the "caret" (the character insertion indicator, typically a flashing vertical bar) "inside" the ligature to represent an insertion point. The insertion positions may be specified either by coordinate or by contour point. Example:: coords = { "f_f_i": [300, 600] # f|fi cursor at 300 units, ff|i cursor at 600. } points = { "c_t": [28] # c|t cursor appears at coordinate of contour point 28. } ligcaretlist = buildLigCaretList(coords, points, font.getReverseGlyphMap()) Args: coords: A mapping between glyph names and a list of coordinates for the insertion point of each ligature component after the first one. points: A mapping between glyph names and a list of contour points for the insertion point of each ligature component after the first one. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns: A ``otTables.LigCaretList`` object if any carets are present, or ``None`` otherwise.
Here is the function:
def buildLigCaretList(coords, points, glyphMap):
    """Builds a ligature caret list table.

    Ligatures appear as a single glyph representing multiple characters;
    however when, for example, editing text containing a ``f_i`` ligature,
    the user may want to place the cursor between the ``f`` and the ``i``.
    The ligature caret list in the GDEF table specifies the position to
    display the "caret" (the character insertion indicator, typically a
    flashing vertical bar) "inside" the ligature to represent an insertion
    point. The insertion positions may be specified either by coordinate or
    by contour point.

    Example::

        coords = {
            "f_f_i": [300, 600] # f|fi cursor at 300 units, ff|i cursor at 600.
        }
        points = {
            "c_t": [28] # c|t cursor appears at coordinate of contour point 28.
        }
        ligcaretlist = buildLigCaretList(coords, points, font.getReverseGlyphMap())

    Args:
        coords: A mapping between glyph names and a list of coordinates for
            the insertion point of each ligature component after the first
            one.  May be ``None`` or empty.
        points: A mapping between glyph names and a list of contour points
            for the insertion point of each ligature component after the
            first one.  May be ``None`` or empty.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.LigCaretList`` object if any carets are present, or
        ``None`` otherwise.
    """
    # Normalize so passing None for either mapping is safe.  Previously the
    # glyph set was guarded for truthiness but the `.get` calls were not,
    # so a glyph present only in `points` crashed on `coords.get` whenever
    # coords was None (and vice versa).
    coords = coords or {}
    points = points or {}
    glyphs = set(coords) | set(points)
    carets = {g: buildLigGlyph(coords.get(g), points.get(g)) for g in glyphs}
    # Drop glyphs whose caret lists turned out empty.
    carets = {g: c for g, c in carets.items() if c is not None}
    if not carets:
        return None
    self = ot.LigCaretList()
    self.Coverage = buildCoverage(carets.keys(), glyphMap)
    # LigGlyph records must parallel the coverage order.
    self.LigGlyph = [carets[g] for g in self.Coverage.glyphs]
    self.LigGlyphCount = len(self.LigGlyph)
    return self
175,365 | from collections import namedtuple, OrderedDict
import os
from fontTools.misc.fixedTools import fixedToFloat
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
OTTableWriter,
CountReference,
)
from fontTools.ttLib.tables import otBase
from fontTools.feaLib.ast import STATNameStatement
from fontTools.otlLib.optimize.gpos import (
_compression_level_from_env,
compact_lookup,
)
from fontTools.otlLib.error import OpenTypeLibError
from functools import reduce
import logging
import copy
def buildCoverage(glyphs, glyphMap):
    """Build a Coverage table for the given glyph names.

    Coverage tables (OpenType spec, chapter 2) enumerate the glyphs a
    layout subtable participates in; shaping engines consult them to
    decide whether a subtable applies at all.

    Args:
        glyphs: a sequence of glyph names.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.Coverage`` object or ``None`` if there are no
        glyphs supplied.
    """
    if not glyphs:
        return None
    cov = ot.Coverage()
    # Store unique glyphs sorted by glyph ID.
    cov.glyphs = sorted({g for g in glyphs}, key=glyphMap.__getitem__)
    return cov
The provided code snippet includes necessary dependencies for implementing the `buildMarkGlyphSetsDef` function. Write a Python function `def buildMarkGlyphSetsDef(markSets, glyphMap)` to solve the following problem:
Builds a mark glyph sets definition table. OpenType Layout lookups may choose to use mark filtering sets to consider or ignore particular combinations of marks. These sets are specified by setting a flag on the lookup, but the mark filtering sets are defined in the ``GDEF`` table. This routine builds the subtable containing the mark glyph set definitions. Example:: set0 = set("acute", "grave") set1 = set("caron", "grave") markglyphsets = buildMarkGlyphSetsDef([set0, set1], font.getReverseGlyphMap()) Args: markSets: A list of sets of glyphnames. glyphMap: a glyph name to ID map, typically returned from ``font.getReverseGlyphMap()``. Returns An ``otTables.MarkGlyphSetsDef`` object.
Here is the function:
def buildMarkGlyphSetsDef(markSets, glyphMap):
    """Build a GDEF mark glyph sets definition subtable.

    Lookups may use mark filtering sets (selected by a flag on the
    lookup) to consider or ignore particular combinations of marks; the
    sets themselves live in the GDEF table.  Each input set becomes one
    coverage table.

    Example::

        set0 = set("acute", "grave")
        set1 = set("caron", "grave")
        markglyphsets = buildMarkGlyphSetsDef([set0, set1], font.getReverseGlyphMap())

    Args:
        markSets: A list of sets of glyph names.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.MarkGlyphSetsDef`` object, or ``None`` when no sets
        are supplied.
    """
    if not markSets:
        return None
    definition = ot.MarkGlyphSetsDef()
    definition.MarkSetTableFormat = 1
    coverages = []
    for mark_set in markSets:
        coverages.append(buildCoverage(mark_set, glyphMap))
    definition.Coverage = coverages
    definition.MarkSetCount = len(coverages)
    return definition
175,366 | import logging
import os
from collections import defaultdict, namedtuple
from functools import reduce
from itertools import chain
from math import log2
from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple
from fontTools.config import OPTIONS
from fontTools.misc.intTools import bit_count, bit_indices
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otBase, otTables
def compact_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None:
def compact_ext_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None:
def compact(font: TTFont, level: int) -> TTFont:
    """Compact the GPOS pair-positioning lookups of *font* in place.

    Ideal plan:
    1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable
       https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable
    2. Extract glyph-glyph kerning and class-kerning from all present subtables
    3. Regroup into different subtable arrangements
    4. Put back into the lookup

    Actual implementation:
    2. Only class kerning is optimized currently
    3. If the input kerning is already in several subtables, the subtables
       are not grouped together first; instead each subtable is treated
       independently, so currently this step is:
       Split existing subtables into more smaller subtables
    """
    lookups = font["GPOS"].table.LookupList.Lookup
    for lookup in lookups:
        if lookup.LookupType == 2:
            compact_lookup(font, level, lookup)
        elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2:
            # A type-2 lookup wrapped in an Extension (type 9) lookup.
            compact_ext_lookup(font, level, lookup)
    return font
175,367 | import logging
import os
from collections import defaultdict, namedtuple
from functools import reduce
from itertools import chain
from math import log2
from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple
from fontTools.config import OPTIONS
from fontTools.misc.intTools import bit_count, bit_indices
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otBase, otTables
List = _Alias()
class Tuple(BaseTypingInstance):
    """A value representing a ``typing.Tuple[...]`` annotation.

    Type parameters are delegated to ``self._generics_manager``.
    NOTE(review): the base class and manager objects are defined
    elsewhere; comments below describe only what this code shows.
    """

    def _is_homogenous(self):
        # To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
        # is used.
        return self._generics_manager.is_homogenous_tuple()

    def py__simple_getitem__(self, index):
        # Indexing: a homogeneous tuple yields its single element type for
        # any index; otherwise only concrete int indices resolve.
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        else:
            if isinstance(index, int):
                return self._generics_manager.get_index_and_execute(index)
            debug.dbg('The getitem type on Tuple was %s' % index)
            return NO_VALUES

    def py__iter__(self, contextualized_node=None):
        # Iteration yields lazy values: the repeated type for Tuple[T, ...],
        # otherwise each declared type parameter in order.
        if self._is_homogenous():
            yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
        else:
            for v in self._generics_manager.to_tuple():
                yield LazyKnownValues(v.execute_annotation())

    def py__getitem__(self, index_value_set, contextualized_node):
        # Unknown/slice index: fall back to the union of all element types.
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        return ValueSet.from_sets(
            self._generics_manager.to_tuple()
        ).execute_annotation()

    def _get_wrapped_value(self):
        # The builtin ``tuple`` class value that this annotation wraps.
        tuple_, = self.inference_state.builtins_module \
            .py__getattribute__('tuple').execute_annotation()
        return tuple_

    def name(self):
        return self._wrapped_value.name

    def infer_type_vars(self, value_set):
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts

        # Only values that actually look like tuples participate.
        value_set = value_set.filter(
            lambda x: x.py__name__().lower() == 'tuple',
        )
        if self._is_homogenous():
            # The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like a iterable sequence
            # rather than a positional container of elements.
            return self._class_value.get_generics()[0].infer_type_vars(
                value_set.merge_types_of_iterate(),
            )
        else:
            # The parameter annotation has only explicit type parameters
            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
            # treat the incoming values as needing to match the annotation
            # exactly, just as we would for non-tuple annotations.
            type_var_dict = {}
            for element in value_set:
                try:
                    method = element.get_annotated_class_object
                except AttributeError:
                    # This might still happen, because the tuple name matching
                    # above is not 100% correct, so just catch the remaining
                    # cases here.
                    continue
                py_class = method()
                merge_type_var_dicts(
                    type_var_dict,
                    merge_pairwise_generics(self._class_value, py_class),
                )
            return type_var_dict
def _classDef_bytes(
class_data: List[Tuple[List[Tuple[int, int]], int, int]],
class_ids: List[int],
coverage=False,
):
if not class_ids:
return 0
first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]]
range_count = len(first_ranges)
for i in class_ids[1:]:
data = class_data[i]
range_count += len(data[0])
min_glyph_id = min(min_glyph_id, data[1])
max_glyph_id = max(max_glyph_id, data[2])
glyphCount = max_glyph_id - min_glyph_id + 1
# https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1
format1_bytes = 6 + glyphCount * 2
# https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2
format2_bytes = 4 + range_count * 6
return min(format1_bytes, format2_bytes) | null |
175,368 | from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
# Registry mapping cmap subtable format number -> subtable class used to
# build that format.  NOTE(review): the cmap_format_* classes are not
# defined in this view; presumably they are the ones provided by
# fontTools.ttLib.tables._c_m_a_p (whose ``cmap_classes`` this assignment
# shadows) — confirm.
cmap_classes = {
    0: cmap_format_0,
    2: cmap_format_2,
    4: cmap_format_4,
    6: cmap_format_6,
    12: cmap_format_12,
    13: cmap_format_13,
    14: cmap_format_14,
}
def buildCmapSubTable(cmapping, format, platformID, platEncID):
    # Instantiate the subtable class registered for *format*, attach the
    # codepoint->glyph mapping and the platform identifiers; the language
    # field is always 0.
    subtable_class = cmap_classes[format]
    subtable = subtable_class(format)
    subtable.cmap = cmapping
    subtable.platformID = platformID
    subtable.platEncID = platEncID
    subtable.language = 0
    return subtable
175,369 | from .ttLib import TTFont, newTable
from .ttLib.tables._c_m_a_p import cmap_classes
from .misc.timeTools import timestampNow
import struct
from collections import OrderedDict
class Axis(object):
    """One variation axis record of an 'fvar' table (tag, value range,
    flags and name ID), with binary and TTX XML (de)serialization."""

    def __init__(self):
        # Field defaults; real values come from decompile() or the caller.
        self.axisTag = None
        self.axisNameID = 0
        self.flags = 0
        self.minValue = -1.0
        self.defaultValue = 0.0
        self.maxValue = 1.0

    def compile(self):
        """Serialize this axis to its binary record (FVAR_AXIS_FORMAT)."""
        return sstruct.pack(FVAR_AXIS_FORMAT, self)

    def decompile(self, data):
        """Populate this axis from binary *data* (FVAR_AXIS_FORMAT)."""
        sstruct.unpack2(FVAR_AXIS_FORMAT, data, self)

    def toXML(self, writer, ttFont):
        """Write this axis as TTX XML; the human-readable axis name is
        emitted as a comment when a 'name' table is available."""
        name = (
            ttFont["name"].getDebugName(self.axisNameID) if "name" in ttFont else None
        )
        if name is not None:
            writer.newline()
            writer.comment(name)
            writer.newline()
        writer.begintag("Axis")
        writer.newline()
        # *Value fields are written as 16.16 fixed-point strings.
        for tag, value in [
            ("AxisTag", self.axisTag),
            ("Flags", "0x%X" % self.flags),
            ("MinValue", fl2str(self.minValue, 16)),
            ("DefaultValue", fl2str(self.defaultValue, 16)),
            ("MaxValue", fl2str(self.maxValue, 16)),
            ("AxisNameID", str(self.axisNameID)),
        ]:
            writer.begintag(tag)
            writer.write(value)
            writer.endtag(tag)
            writer.newline()
        writer.endtag("Axis")
        writer.newline()

    def fromXML(self, name, _attrs, content, ttFont):
        """Populate this axis from TTX XML content."""
        assert name == "Axis"
        for tag, _, value in filter(lambda t: type(t) is tuple, content):
            value = "".join(value)
            if tag == "AxisTag":
                self.axisTag = Tag(value)
            elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}:
                # "MinValue" -> attribute "minValue"; *Value fields parse as
                # 16.16 fixed-point, the others as safe-eval'd literals.
                setattr(
                    self,
                    tag[0].lower() + tag[1:],
                    str2fl(value, 16) if tag.endswith("Value") else safeEval(value),
                )
class NamedInstance(object):
    """A named instance record of an 'fvar' table: a style name ID plus
    one coordinate per axis, with an optional PostScript name ID."""

    def __init__(self):
        self.subfamilyNameID = 0
        # 0xFFFF is the sentinel for "no PostScript name".
        self.postscriptNameID = 0xFFFF
        self.flags = 0
        self.coordinates = {}

    def compile(self, axisTags, includePostScriptName):
        """Serialize this instance: header, then one 16.16 fixed long per
        axis in *axisTags* order, optionally followed by the PS name ID."""
        result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)]
        for axis in axisTags:
            fixedCoord = fl2fi(self.coordinates[axis], 16)
            result.append(struct.pack(">l", fixedCoord))
        if includePostScriptName:
            result.append(struct.pack(">H", self.postscriptNameID))
        return bytesjoin(result)

    def decompile(self, data, axisTags):
        """Populate this instance from binary *data*; the trailing
        PostScript name ID is optional in the record."""
        sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self)
        pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT)
        for axis in axisTags:
            value = struct.unpack(">l", data[pos : pos + 4])[0]
            self.coordinates[axis] = fi2fl(value, 16)
            pos += 4
        if pos + 2 <= len(data):
            self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
        else:
            self.postscriptNameID = 0xFFFF

    def toXML(self, writer, ttFont):
        """Write this instance as TTX XML; style/PostScript debug names are
        emitted as comments when a 'name' table is available."""
        name = (
            ttFont["name"].getDebugName(self.subfamilyNameID)
            if "name" in ttFont
            else None
        )
        if name is not None:
            writer.newline()
            writer.comment(name)
            writer.newline()
        psname = (
            ttFont["name"].getDebugName(self.postscriptNameID)
            if "name" in ttFont
            else None
        )
        if psname is not None:
            writer.comment("PostScript: " + psname)
            writer.newline()
        # Omit the postscriptNameID attribute when it is the sentinel.
        if self.postscriptNameID == 0xFFFF:
            writer.begintag(
                "NamedInstance",
                flags=("0x%X" % self.flags),
                subfamilyNameID=self.subfamilyNameID,
            )
        else:
            writer.begintag(
                "NamedInstance",
                flags=("0x%X" % self.flags),
                subfamilyNameID=self.subfamilyNameID,
                postscriptNameID=self.postscriptNameID,
            )
        writer.newline()
        # One <coord> element per axis, in the font's axis order.
        for axis in ttFont["fvar"].axes:
            writer.simpletag(
                "coord",
                axis=axis.axisTag,
                value=fl2str(self.coordinates[axis.axisTag], 16),
            )
            writer.newline()
        writer.endtag("NamedInstance")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Populate this instance from TTX XML content."""
        assert name == "NamedInstance"
        self.subfamilyNameID = safeEval(attrs["subfamilyNameID"])
        self.flags = safeEval(attrs.get("flags", "0"))
        if "postscriptNameID" in attrs:
            self.postscriptNameID = safeEval(attrs["postscriptNameID"])
        else:
            self.postscriptNameID = 0xFFFF
        for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content):
            if tag == "coord":
                value = str2fl(elementAttrs["value"], 16)
                self.coordinates[elementAttrs["axis"]] = value
def addFvar(font, axes, instances):
    """Build an 'fvar' table from axis and instance definitions and add
    it to *font*.

    Args:
        font: a TTFont; its 'name' table receives the axis/instance names.
        axes: a non-empty sequence of either 5-tuples
            ``(tag, min, default, max, name)`` or objects exposing
            ``tag``/``minimum``/``default``/``maximum``/``name``.
        instances: a sequence of dicts (``location``, ``stylename``,
            optional ``postscriptfontname``) or objects exposing
            ``location``, ``styleName``/``localisedStyleName`` and
            ``postScriptFontName``.
    """
    from .ttLib.tables._f_v_a_r import Axis, NamedInstance

    assert axes
    fvar = newTable("fvar")
    nameTable = font["name"]
    for axis_def in axes:
        axis = Axis()
        # Accept both the tuple form and the designspace-object form.
        if isinstance(axis_def, tuple):
            tag, minimum, default, maximum, name = axis_def
        else:
            tag = axis_def.tag
            minimum = axis_def.minimum
            default = axis_def.default
            maximum = axis_def.maximum
            name = axis_def.name
        axis.axisTag = tag
        axis.minValue = minimum
        axis.defaultValue = default
        axis.maxValue = maximum
        if isinstance(name, str):
            name = dict(en=name)  # bare strings become English-only names
        axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font)
        fvar.axes.append(axis)
    for instance in instances:
        if isinstance(instance, dict):
            coordinates = instance["location"]
            name = instance["stylename"]
            psname = instance.get("postscriptfontname")
        else:
            coordinates = instance.location
            name = instance.localisedStyleName or instance.styleName
            psname = instance.postScriptFontName
        if isinstance(name, str):
            name = dict(en=name)
        inst = NamedInstance()
        inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font)
        if psname is not None:
            inst.postscriptNameID = nameTable.addName(psname)
        inst.coordinates = coordinates
        fvar.instances.append(inst)
    font["fvar"] = fvar
175,370 |
The provided code snippet includes necessary dependencies for implementing the `_add_method` function. Write a Python function `def _add_method(*clazzes)` to solve the following problem:
Returns a decorator function that adds a new method to one or more classes.
Here is the function:
def _add_method(*clazzes):
"""Returns a decorator function that adds a new method to one or
more classes."""
def wrapper(method):
done = []
for clazz in clazzes:
if clazz in done:
continue # Support multiple names of a clazz
done.append(clazz)
assert clazz.__name__ != "DefaultTable", "Oops, table class not found."
assert not hasattr(
clazz, method.__name__
), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
setattr(clazz, method.__name__, method)
return None
return wrapper | Returns a decorator function that adds a new method to one or more classes. |
175,371 | from __future__ import annotations
import re
from functools import lru_cache
from itertools import chain, count
from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple
from fontTools import ttLib
from fontTools.subset.util import _add_method
from fontTools.ttLib.tables.S_V_G_ import SVGDocument
def group_elements_by_id(tree: etree.Element) -> Dict[str, etree.Element]:
    # Collect every svg element carrying an 'id' attribute, wherever it
    # sits in the document — the root element included:
    # https://github.com/fonttools/fonttools/issues/2548
    by_id = {}
    for element in xpath("//svg:*[@id]")(tree):
        by_id[element.attrib["id"]] = element
    return by_id
def closure_element_ids(
    elements: Dict[str, etree.Element], element_ids: Set[str]
) -> None:
    # Transitively expand element_ids (mutated in place) with every id
    # reachable via references from the current set.
    pending = element_ids
    while pending:
        newly_referenced: Set[str] = set()
        for el_id in pending:
            element = elements.get(el_id)
            if element is None:
                # dangling reference; not our job to validate svg
                continue
            newly_referenced.update(iter_referenced_ids(element))
        newly_referenced -= element_ids
        element_ids.update(newly_referenced)
        pending = newly_referenced
def subset_elements(el: etree.Element, retained_ids: Set[str]) -> bool:
    """Prune the tree bottom-up: keep any element whose id (or a
    descendant's id) is in retained_ids; detach the rest.  Returns True if
    el itself survives."""
    if el.attrib.get("id") in retained_ids:
        # id is retained: keep the whole subtree without recursing
        return True
    # Recurse into every child first.  A list comprehension (not a bare
    # generator) is deliberate: any() must not short-circuit, because the
    # recursion has the side effect of dropping empty elements.
    kept_children = [subset_elements(child, retained_ids) for child in el]
    if any(kept_children):
        return True
    assert len(el) == 0
    parent = el.getparent()
    if parent is not None:
        parent.remove(el)
    return False
def remap_glyph_ids(
    svg: etree.Element, glyph_index_map: Dict[int, int]
) -> Dict[str, str]:
    """Rename elements whose id matches id="glyph{gid}" according to the
    {old_gid: new_gid} map; return the {old_id: new_id} mapping applied."""
    elements = group_elements_by_id(svg)
    id_map: Dict[str, str] = {}
    for el_id, el in elements.items():
        m = GID_RE.match(el_id)
        if m is None:
            continue
        old_index = int(m.group(1))
        if old_index in glyph_index_map:
            new_index = glyph_index_map[old_index]
            if new_index == old_index:
                continue
            new_id = f"glyph{new_index}"
        else:
            # The old index is missing: the element corresponds to a glyph
            # that was excluded from the font's subset.  Rename it to avoid
            # clashes with the new GIDs or other element ids.
            new_id = f".{el_id}"
            suffix = count(1)
            while new_id in elements:
                new_id = f"{new_id}.{next(suffix)}"
        id_map[el_id] = new_id
        el.attrib["id"] = new_id
    return id_map
def update_glyph_href_links(svg: etree.Element, id_map: Dict[str, str]) -> None:
    """Rewrite every xlink:href="#glyph..." attribute to target the renamed
    glyph ids from id_map."""
    for el in xpath(".//svg:*[starts-with(@xlink:href, '#glyph')]")(svg):
        old_id = href_local_target(el)
        assert old_id is not None
        new_id = id_map.get(old_id)
        if new_id is not None:
            el.attrib[XLINK_HREF] = f"#{new_id}"
def ranges(ints: Iterable[int]) -> Iterator[Tuple[int, int]]:
    """Yield sorted, non-overlapping (min, max) ranges of consecutive
    integers; duplicates in the input are ignored."""
    ordered = sorted(set(ints))
    if not ordered:
        return
    start = end = ordered[0]
    for value in ordered[1:]:
        if value == end + 1:
            # extends the current run
            end = value
        else:
            yield (start, end)
            start = end = value
    yield (start, end)
# NOTE(review): dataset artifact — `_Alias` is not defined anywhere in this
# chunk; these look like typing-stub aliases for List/Dict.  TODO: confirm
# against the original source before relying on them.
List = _Alias()
Dict = _Alias()
class SVGDocument(Sequence):
    """A single OT-SVG document record: the SVG source text plus the glyph
    ID range it covers.

    NOTE(review): `astuple` below suggests this is a dataclass whose
    @dataclass decorator is not visible in this chunk — confirm.
    """

    data: str  # the SVG document source, as text
    startGlyphID: int  # first glyph ID covered, inclusive
    endGlyphID: int  # last glyph ID covered, inclusive
    compressed: bool = False  # compression flag; exact semantics not shown here
    # Previously, the SVG table's docList attribute contained lists of 3 items:
    # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute.
    # For backward compatibility with code that depends on them being sequences of
    # fixed length=3, we subclass the Sequence abstract base class and pretend only
    # the first three items are present. 'compressed' is only accessible via named
    # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]`
    def __getitem__(self, index):
        # Expose only (data, startGlyphID, endGlyphID) through indexing.
        return astuple(self)[:3][index]

    def __len__(self):
        return 3
def subset_glyphs(self, s) -> bool:
    """Subset the SVG table's document list down to s.glyphs.

    Each surviving document is re-parsed, pruned to the retained glyph
    elements (plus everything they reference), and re-serialized, one
    record per contiguous run of new glyph IDs.  Returns True if any
    document survives.  Requires lxml.
    """
    if etree is None:
        raise ImportError("No module named 'lxml', required to subset SVG")

    # glyph names (before subsetting)
    glyph_order: List[str] = s.orig_glyph_order
    # map from glyph names to original glyph indices
    rev_orig_glyph_map: Dict[str, int] = s.reverseOrigGlyphMap
    # map from original to new glyph indices (after subsetting)
    glyph_index_map: Dict[int, int] = s.glyph_index_map

    new_docs: List[SVGDocument] = []
    for doc in self.docList:
        glyphs = {
            glyph_order[i] for i in range(doc.startGlyphID, doc.endGlyphID + 1)
        }.intersection(s.glyphs)
        if not glyphs:
            # no intersection: we can drop the whole record
            continue

        svg = etree.fromstring(
            # encode because fromstring dislikes xml encoding decl if input is str.
            # SVG xml encoding must be utf-8 as per OT spec.
            doc.data.encode("utf-8"),
            parser=etree.XMLParser(
                # Disable libxml2 security restrictions to support very deep trees.
                # Without this we would get an error like this:
                # `lxml.etree.XMLSyntaxError: internal error: Huge input lookup`
                # when parsing big fonts e.g. noto-emoji-picosvg.ttf.
                huge_tree=True,
                # ignore blank text as it's not meaningful in OT-SVG; it also prevents
                # dangling tail text after removing an element when pretty_print=True
                remove_blank_text=True,
            ),
        )

        elements = group_elements_by_id(svg)
        gids = {rev_orig_glyph_map[g] for g in glyphs}
        element_ids = {f"glyph{i}" for i in gids}
        # Keep everything the retained glyph elements transitively reference.
        closure_element_ids(elements, element_ids)

        if not subset_elements(svg, element_ids):
            continue

        if not s.options.retain_gids:
            id_map = remap_glyph_ids(svg, glyph_index_map)
            update_glyph_href_links(svg, id_map)

        new_doc = etree.tostring(svg, pretty_print=s.options.pretty_svg).decode("utf-8")
        # Emit one SVGDocument record per contiguous run of new glyph IDs.
        new_gids = (glyph_index_map[i] for i in gids)
        for start, end in ranges(new_gids):
            new_docs.append(SVGDocument(new_doc, start, end, doc.compressed))
    self.docList = new_docs
    return bool(self.docList)
175,372 | from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.subset.util import _add_method, _uniq_sort
class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler):
    """T2 decompiler that records glyphs referenced via seac-style endchar
    into the given `components` set."""

    def __init__(self, components, localSubrs, globalSubrs):
        psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
        self.components = components

    def op_endchar(self, index):
        args = self.popall()
        if len(args) < 4:
            return
        from fontTools.encodings.StandardEncoding import StandardEncoding

        # endchar can do seac accent building; The T2 spec says it's deprecated,
        # but recent software that shall remain nameless does output it.
        adx, ady, bchar, achar = args[-4:]
        self.components.add(StandardEncoding[bchar])
        self.components.add(StandardEncoding[achar])
def closure_glyphs(self, s):
    """Expand s.glyphs in place with glyphs reachable through seac
    components in the CFF charstrings."""
    cff = self.cff
    assert len(cff) == 1
    font = cff[cff.keys()[0]]
    glyphSet = font.CharStrings

    # Worklist loop: decompile each newly discovered glyph, collecting any
    # component glyphs it pulls in, until nothing new turns up.
    pending = s.glyphs
    while pending:
        found = set()
        for glyphName in pending:
            if glyphName not in glyphSet:
                continue
            charString = glyphSet[glyphName]
            subrs = getattr(charString.private, "Subrs", [])
            _ClosureGlyphsT2Decompiler(
                found, subrs, charString.globalSubrs
            ).execute(charString)
        found -= s.glyphs
        s.glyphs.update(found)
        pending = found
175,373 | from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.subset.util import _add_method, _uniq_sort
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
    """Replace glyphName's charstring with an empty outline, preserving its
    advance width unless ignoreWidth (CFF2 stores no widths at all)."""
    c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName)
    if isCFF2 or ignoreWidth:
        # CFF2 charstrings have no widths nor 'endchar' operators
        c.setProgram([] if isCFF2 else ["endchar"])
        return
    # Pick the Private dict this glyph's charstring resolves widths against.
    if hasattr(font, "FDArray") and font.FDArray is not None:
        private = font.FDArray[fdSelectIndex].Private
    else:
        private = font.Private
    dfltWdX = private.defaultWidthX
    nmnlWdX = private.nominalWidthX
    c.draw(NullPen())  # this will set the charstring's width
    if c.width == dfltWdX:
        c.program = ["endchar"]
    else:
        c.program = [c.width - nmnlWdX, "endchar"]
def prune_pre_subset(self, font, options):
    """Trim the CFF table before subsetting: keep a single font, blank out
    .notdef's outline if requested, and drop custom encodings."""
    cff = self.cff
    # CFF table must have one font only
    cff.fontNames = cff.fontNames[:1]

    if options.notdef_glyph and not options.notdef_outline:
        isCFF2 = cff.major > 1
        for fontname in cff.keys():
            _empty_charstring(cff[fontname], ".notdef", isCFF2=isCFF2)

    # Clear useless Encoding
    # https://github.com/fonttools/fonttools/issues/620
    for fontname in cff.keys():
        cff[fontname].Encoding = "StandardEncoding"

    return True  # bool(cff.fontNames)
175,374 | from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.subset.util import _add_method, _uniq_sort
def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False):
    """Strip glyphName's outline, keeping the advance width encoded in the
    charstring unless ignoreWidth (CFF2 charstrings carry no width)."""
    charString, fdIndex = font.CharStrings.getItemAndSelector(glyphName)
    if isCFF2 or ignoreWidth:
        # CFF2 charstrings have no widths nor 'endchar' operators
        charString.setProgram([] if isCFF2 else ["endchar"])
    else:
        # CID-keyed fonts resolve widths through the glyph's FontDict.
        hasFDArray = hasattr(font, "FDArray") and font.FDArray is not None
        private = font.FDArray[fdIndex].Private if hasFDArray else font.Private
        dfltWdX = private.defaultWidthX
        nmnlWdX = private.nominalWidthX
        pen = NullPen()
        charString.draw(pen)  # this will set the charstring's width
        if charString.width != dfltWdX:
            charString.program = [charString.width - nmnlWdX, "endchar"]
        else:
            charString.program = ["endchar"]
def subset_glyphs(self, s):
    """Subset the CFF table's charstrings, charset and FDSelect down to
    s.glyphs (plus s.glyphs_emptied, which are kept but blanked when
    retain_gids is on)."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings

        # Glyphs to keep: the real subset plus the placeholders to empty.
        glyphs = s.glyphs.union(s.glyphs_emptied)

        # Load all glyphs
        for g in font.charset:
            if g not in glyphs:
                continue
            c, _ = cs.getItemAndSelector(g)

        if cs.charStringsAreIndexed:
            indices = [i for i, g in enumerate(font.charset) if g in glyphs]
            csi = cs.charStringsIndex
            csi.items = [csi.items[i] for i in indices]
            # drop the lazy-loading state; items is now authoritative
            del csi.file, csi.offsets
            if hasattr(font, "FDSelect"):
                sel = font.FDSelect
                sel.format = None
                sel.gidArray = [sel.gidArray[i] for i in indices]
            # Remap glyph name -> charstring index onto the compacted index.
            newCharStrings = {}
            for indicesIdx, charsetIdx in enumerate(indices):
                g = font.charset[charsetIdx]
                if g in cs.charStrings:
                    newCharStrings[g] = indicesIdx
            cs.charStrings = newCharStrings
        else:
            cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs}
        font.charset = [g for g in font.charset if g in glyphs]
        font.numGlyphs = len(font.charset)

        if s.options.retain_gids:
            isCFF2 = cff.major > 1
            for g in s.glyphs_emptied:
                _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True)

    return True  # any(cff[fontname].numGlyphs for fontname in cff.keys())
175,375 | from fontTools.misc import psCharStrings
from fontTools import ttLib
from fontTools.pens.basePen import NullPen
from fontTools.misc.roundTools import otRound
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.subset.util import _add_method, _uniq_sort
# NOTE(review): dataset artifact — the following four lines are bodiless
# signature stubs listing dependencies of prune_post_subset below; they are
# not valid Python as written.  TODO: confirm against the original source.
def desubroutinize(self):
def remove_hints(self):
def remove_unused_subroutines(self):
def _uniq_sort(l):
def prune_post_subset(self, ttfFont, options):
    """Clean up the CFF table after subsetting: compact FDArray/FDSelect to
    the FontDicts actually used and, per options, desubroutinize, drop
    hints, or remove unused subroutines."""
    cff = self.cff
    for fontname in cff.keys():
        font = cff[fontname]
        # NOTE(review): cs is unused but accessing CharStrings presumably
        # forces their lazy load before pruning — confirm before removing.
        cs = font.CharStrings

        # Drop unused FontDictionaries
        if hasattr(font, "FDSelect"):
            sel = font.FDSelect
            # indices: sorted unique FontDict indices still referenced.
            indices = _uniq_sort(sel.gidArray)
            # Rewrite each gid's selector onto the compacted FDArray.
            sel.gidArray = [indices.index(ss) for ss in sel.gidArray]
            arr = font.FDArray
            arr.items = [arr[i] for i in indices]
            del arr.file, arr.offsets

    # Desubroutinize if asked for
    if options.desubroutinize:
        cff.desubroutinize()

    # Drop hints if not needed
    if not options.hinting:
        self.remove_hints()
    elif not options.desubroutinize:
        # remove_hints already prunes subroutines, so only needed here.
        self.remove_unused_subroutines()
    return True
175,376 | from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
    """Parse a whitespace-separated charstring (or a pre-split token list)
    into a program list: numeric tokens become int/float, operators stay
    strings."""
    if isinstance(string, str):
        string = string.split()
    program = []
    for token in string:
        converted = token
        # Prefer int over float, exactly like the charstring grammar does.
        for cast in (int, float):
            try:
                converted = cast(token)
            except ValueError:
                continue
            break
        program.append(converted)
    return program
175,377 | from fontTools.cffLib import maxStackLimit
def programToString(program):
    """Render a program list back into a single space-separated string."""
    return " ".join(map(str, program))
175,378 | from fontTools.cffLib import maxStackLimit
The provided code snippet includes necessary dependencies for implementing the `_everyN` function. Write a Python function `def _everyN(el, n)` to solve the following problem:
Group the list el into groups of size n
Here is the function:
def _everyN(el, n):
"""Group the list el into groups of size n"""
if len(el) % n != 0:
raise ValueError(el)
for i in range(0, len(el), n):
yield el[i : i + n] | Group the list el into groups of size n |
175,379 | from fontTools.cffLib import maxStackLimit
def programToCommands(program, getNumRegions=None):
    """Takes a T2CharString program list and returns list of commands.
    Each command is a two-tuple of commandname,arg-list.  The commandname might
    be empty string if no commandname shall be emitted (used for glyph width,
    hintmask/cntrmask argument, as well as stray arguments at the end of the
    program (¯\_(ツ)_/¯).
    'getNumRegions' may be None, or a callable object. It must return the
    number of regions. 'getNumRegions' takes a single argument, vsindex. If
    the vsindex argument is None, getNumRegions returns the default number
    of regions for the charstring, else it returns the numRegions for
    the vsindex.
    The Charstring may or may not start with a width value. If the first
    non-blend operator has an odd number of arguments, then the first argument is
    a width, and is popped off. This is complicated with blend operators, as
    there may be more than one before the first hint or moveto operator, and each
    one reduces several arguments to just one list argument. We have to sum the
    number of arguments that are not part of the blend arguments, and all the
    'numBlends' values. We could instead have said that by definition, if there
    is a blend operator, there is no width value, since CFF2 Charstrings don't
    have width values. I discussed this with Behdad, and we are allowing for an
    initial width value in this case because developers may assemble a CFF2
    charstring from CFF Charstrings, which could have width values.
    """
    seenWidthOp = False
    vsIndex = None
    # lenBlendStack / lastBlendIndex track how many "logical" arguments the
    # collapsed blend lists on the stack represent, for the width test below.
    lenBlendStack = 0
    lastBlendIndex = 0
    commands = []
    stack = []
    it = iter(program)
    for token in it:
        if not isinstance(token, str):
            stack.append(token)
            continue
        if token == "blend":
            assert getNumRegions is not None
            numSourceFonts = 1 + getNumRegions(vsIndex)
            # replace the blend op args on the stack with a single list
            # containing all the blend op args.
            numBlends = stack[-1]
            numBlendArgs = numBlends * numSourceFonts + 1
            # replace first blend op by a list of the blend ops.
            stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
            lenBlendStack += numBlends + len(stack) - 1
            lastBlendIndex = len(stack)
            # if a blend op exists, this is or will be a CFF2 charstring.
            continue
        elif token == "vsindex":
            vsIndex = stack[-1]
            assert type(vsIndex) is int
        elif (not seenWidthOp) and token in {
            "hstem",
            "hstemhm",
            "vstem",
            "vstemhm",
            "cntrmask",
            "hintmask",
            "hmoveto",
            "vmoveto",
            "rmoveto",
            "endchar",
        }:
            seenWidthOp = True
            parity = token in {"hmoveto", "vmoveto"}
            if lenBlendStack:
                # lenBlendStack has the number of args represented by the last blend
                # arg and all the preceding args. We need to now add the number of
                # args following the last blend arg.
                numArgs = lenBlendStack + len(stack[lastBlendIndex:])
            else:
                numArgs = len(stack)
            # A leading width is present iff the effective argument count has
            # the "wrong" parity for this first operator.
            if numArgs and (numArgs % 2) ^ parity:
                width = stack.pop(0)
                commands.append(("", [width]))
        if token in {"hintmask", "cntrmask"}:
            if stack:
                commands.append(("", stack))
            commands.append((token, []))
            # The mask bytes follow the operator as a single program token.
            commands.append(("", [next(it)]))
        else:
            commands.append((token, stack))
        stack = []
    if stack:
        commands.append(("", stack))
    return commands
def commandsToProgram(commands):
    """Takes a commands list as returned by programToCommands() and converts
    it back to a T2CharString program list."""
    program = []
    for op, args in commands:
        has_blend_lists = any(isinstance(arg, list) for arg in args)
        if has_blend_lists:
            args = _flattenBlendArgs(args)
        program.extend(args)
        # An empty op name means "args only" (width, mask bytes, strays).
        if op:
            program.append(op)
    return program
def generalizeCommands(commands, ignoreErrors=False):
    """Rewrite specialized commands into their general form (one segment per
    command, generic rlineto/rrcurveto-style operators), expanding blend
    lists back into per-region deltas.  With ignoreErrors, malformed
    commands are passed through as raw data instead of raising."""
    result = []
    mapping = _GeneralizerDecombinerCommandsMap
    for op, args in commands:
        # First, generalize any blend args in the arg list.
        if any([isinstance(arg, list) for arg in args]):
            try:
                args = [
                    n
                    for arg in args
                    for n in (
                        _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
                    )
                ]
            except ValueError:
                if ignoreErrors:
                    # Store op as data, such that consumers of commands do not have to
                    # deal with incorrect number of arguments.
                    result.append(("", args))
                    result.append(("", [op]))
                else:
                    raise

        # Decombine the operator itself, if a decombiner exists for it.
        func = getattr(mapping, op, None)
        if not func:
            result.append((op, args))
            continue
        try:
            for command in func(args):
                result.append(command)
        except ValueError:
            if ignoreErrors:
                # Store op as data, such that consumers of commands do not have to
                # deal with incorrect number of arguments.
                result.append(("", args))
                result.append(("", [op]))
            else:
                raise
    return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
    """Convenience wrapper: program -> commands -> generalized -> program."""
    commands = programToCommands(program, getNumRegions)
    generalized = generalizeCommands(commands, **kwargs)
    return commandsToProgram(generalized)
175,380 | from fontTools.cffLib import maxStackLimit
def programToCommands(program, getNumRegions=None):
    """Takes a T2CharString program list and returns list of commands.
    Each command is a two-tuple of commandname,arg-list.  The commandname might
    be empty string if no commandname shall be emitted (used for glyph width,
    hintmask/cntrmask argument, as well as stray arguments at the end of the
    program (¯\_(ツ)_/¯).
    'getNumRegions' may be None, or a callable object. It must return the
    number of regions. 'getNumRegions' takes a single argument, vsindex. If
    the vsindex argument is None, getNumRegions returns the default number
    of regions for the charstring, else it returns the numRegions for
    the vsindex.
    The Charstring may or may not start with a width value. If the first
    non-blend operator has an odd number of arguments, then the first argument is
    a width, and is popped off. This is complicated with blend operators, as
    there may be more than one before the first hint or moveto operator, and each
    one reduces several arguments to just one list argument. We have to sum the
    number of arguments that are not part of the blend arguments, and all the
    'numBlends' values. We could instead have said that by definition, if there
    is a blend operator, there is no width value, since CFF2 Charstrings don't
    have width values. I discussed this with Behdad, and we are allowing for an
    initial width value in this case because developers may assemble a CFF2
    charstring from CFF Charstrings, which could have width values.
    """
    seenWidthOp = False
    vsIndex = None
    # lenBlendStack / lastBlendIndex track how many "logical" arguments the
    # collapsed blend lists on the stack represent, for the width test below.
    lenBlendStack = 0
    lastBlendIndex = 0
    commands = []
    stack = []
    it = iter(program)
    for token in it:
        if not isinstance(token, str):
            stack.append(token)
            continue
        if token == "blend":
            assert getNumRegions is not None
            numSourceFonts = 1 + getNumRegions(vsIndex)
            # replace the blend op args on the stack with a single list
            # containing all the blend op args.
            numBlends = stack[-1]
            numBlendArgs = numBlends * numSourceFonts + 1
            # replace first blend op by a list of the blend ops.
            stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
            lenBlendStack += numBlends + len(stack) - 1
            lastBlendIndex = len(stack)
            # if a blend op exists, this is or will be a CFF2 charstring.
            continue
        elif token == "vsindex":
            vsIndex = stack[-1]
            assert type(vsIndex) is int
        elif (not seenWidthOp) and token in {
            "hstem",
            "hstemhm",
            "vstem",
            "vstemhm",
            "cntrmask",
            "hintmask",
            "hmoveto",
            "vmoveto",
            "rmoveto",
            "endchar",
        }:
            seenWidthOp = True
            parity = token in {"hmoveto", "vmoveto"}
            if lenBlendStack:
                # lenBlendStack has the number of args represented by the last blend
                # arg and all the preceding args. We need to now add the number of
                # args following the last blend arg.
                numArgs = lenBlendStack + len(stack[lastBlendIndex:])
            else:
                numArgs = len(stack)
            # A leading width is present iff the effective argument count has
            # the "wrong" parity for this first operator.
            if numArgs and (numArgs % 2) ^ parity:
                width = stack.pop(0)
                commands.append(("", [width]))
        if token in {"hintmask", "cntrmask"}:
            if stack:
                commands.append(("", stack))
            commands.append((token, []))
            # The mask bytes follow the operator as a single program token.
            commands.append(("", [next(it)]))
        else:
            commands.append((token, stack))
        stack = []
    if stack:
        commands.append(("", stack))
    return commands
def commandsToProgram(commands):
    """Takes a commands list as returned by programToCommands() and converts
    it back to a T2CharString program list."""
    program = []
    for op, args in commands:
        if any(isinstance(arg, list) for arg in args):
            # Re-expand collapsed blend lists into raw program tokens.
            args = _flattenBlendArgs(args)
        program += list(args)
        # Empty op names carry bare data (width, mask bytes, strays).
        if op:
            program += [op]
    return program
def specializeCommands(
    commands,
    ignoreErrors=False,
    generalizeFirst=True,
    preserveTopology=False,
    maxstack=48,
):
    """Re-encode a general command list using the most compact specialized
    CFF operators (h/v variants, merged multi-segment operators); the
    inverse direction of generalizeCommands()."""
    # We perform several rounds of optimizations.  They are carefully ordered and are:
    #
    # 0. Generalize commands.
    #    This ensures that they are in our expected simple form, with each line/curve only
    #    having arguments for one segment, and using the generic form (rlineto/rrcurveto).
    #    If caller is sure the input is in this form, they can turn off generalization to
    #    save time.
    #
    # 1. Combine successive rmoveto operations.
    #
    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #    We specialize into some, made-up, variants as well, which simplifies following
    #    passes.
    #
    # 3. Merge or delete redundant operations, to the extent requested.
    #    OpenType spec declares point numbers in CFF undefined.  As such, we happily
    #    change topology.  If client relies on point numbers (in GPOS anchors, or for
    #    hinting purposes(what?)) they can turn this off.
    #
    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    #
    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    #
    # 6. Resolve any remaining made-up operators into real operators.
    #
    #    I have convinced myself that this produces optimal bytecode (except for, possibly
    #    one byte each time maxstack size prohibits combining.)  YMMV, but you'd be wrong. :-)
    #    A dynamic-programming approach can do the same but would be significantly slower.
    #
    # 7. For any args which are blend lists, convert them to a blend command.

    # 0. Generalize commands.
    if generalizeFirst:
        commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
    else:
        commands = list(commands)  # Make copy since we modify in-place later.

    # 1. Combine successive rmoveto operations.
    for i in range(len(commands) - 1, 0, -1):
        if "rmoveto" == commands[i][0] == commands[i - 1][0]:
            v1, v2 = commands[i - 1][1], commands[i][1]
            commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
            del commands[i]

    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #
    # We, in fact, specialize into more, made-up, variants that special-case when both
    # X and Y components are zero.  This simplifies the following optimization passes.
    # This case is rare, but OCD does not let me skip it.
    #
    # After this round, we will have four variants that use the following mnemonics:
    #
    #  - 'r' for relative,   ie. non-zero X and non-zero Y,
    #  - 'h' for horizontal, ie. zero X and non-zero Y,
    #  - 'v' for vertical,   ie. non-zero X and zero Y,
    #  - '0' for zeros,      ie. zero X and zero Y.
    #
    # The '0' pseudo-operators are not part of the spec, but help simplify the following
    # optimization rounds.  We resolve them at the end.  So, after this, we will have four
    # moveto and four lineto variants:
    #
    #  - 0moveto, 0lineto
    #  - hmoveto, hlineto
    #  - vmoveto, vlineto
    #  - rmoveto, rlineto
    #
    # and sixteen curveto variants.  For example, a '0hcurveto' operator means a curve
    # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3.
    # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
    #
    # There are nine different variants of curves without the '0'.  Those nine map exactly
    # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
    # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
    # arguments and one without.  Eg. an hhcurveto with an extra argument (odd number of
    # arguments) is in fact an rhcurveto.  The operators in the spec are designed such that
    # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
    #
    # Of the curve types with '0', the 00curveto is equivalent to a lineto variant.  The rest
    # of the curve types with a 0 need to be encoded as a h or v variant.  Ie. a '0' can be
    # thought of a "don't care" and can be used as either an 'h' or a 'v'.  As such, we always
    # encode a number 0 as argument when we use a '0' variant.  Later on, we can just substitute
    # the '0' with either 'h' or 'v' and it works.
    #
    # When we get to curve splines however, things become more complicated...  XXX finish this.
    # There's one more complexity with splines.  If one side of the spline is not horizontal or
    # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
    # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
    # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
    # This limits our merge opportunities later.
    #
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"rmoveto", "rlineto"}:
            c, args = _categorizeVector(args)
            commands[i] = c + op[1:], args
            continue

        if op == "rrcurveto":
            c1, args1 = _categorizeVector(args[:2])
            c2, args2 = _categorizeVector(args[-2:])
            commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
            continue

    # 3. Merge or delete redundant operations, to the extent requested.
    #
    # TODO
    # A 0moveto that comes before all other path operations can be removed.
    # though I find conflicting evidence for this.
    #
    # TODO
    # "If hstem and vstem hints are both declared at the beginning of a
    # CharString, and this sequence is followed directly by the hintmask or
    # cntrmask operators, then the vstem hint operator (or, if applicable,
    # the vstemhm operator) need not be included."
    #
    # "The sequence and form of a CFF2 CharString program may be represented as:
    # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
    #
    # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
    #
    # For Type2 CharStrings the sequence is:
    # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"

    # Some other redundancies change topology (point numbers).
    if not preserveTopology:
        for i in range(len(commands) - 1, -1, -1):
            op, args = commands[i]

            # A 00curveto is demoted to a (specialized) lineto.
            if op == "00curveto":
                assert len(args) == 4
                c, args = _categorizeVector(args[1:3])
                op = c + "lineto"
                commands[i] = op, args
                # and then...

            # A 0lineto can be deleted.
            if op == "0lineto":
                del commands[i]
                continue

            # Merge adjacent hlineto's and vlineto's.
            # In CFF2 charstrings from variable fonts, each
            # arg item may be a list of blendable values, one from
            # each source font.
            if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
                _, other_args = commands[i - 1]
                assert len(args) == 1 and len(other_args) == 1
                try:
                    new_args = [_addArgs(args[0], other_args[0])]
                except ValueError:
                    continue
                commands[i - 1] = (op, new_args)
                del commands[i]
                continue

    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    for i in range(1, len(commands) - 1):
        op, args = commands[i]
        prv, nxt = commands[i - 1][0], commands[i + 1][0]

        if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
            assert len(args) == 1
            args = [0, args[0]] if op[0] == "v" else [args[0], 0]
            commands[i] = ("rlineto", args)
            continue

        if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
            assert (op[0] == "r") ^ (op[1] == "r")
            if op[0] == "v":
                pos = 0
            elif op[0] != "r":
                pos = 1
            elif op[1] == "v":
                pos = 4
            else:
                pos = 5
            # Insert, while maintaining the type of args (can be tuple or list).
            args = args[:pos] + type(args)((0,)) + args[pos:]
            commands[i] = ("rrcurveto", args)
            continue

    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    for i in range(len(commands) - 1, 0, -1):
        op1, args1 = commands[i - 1]
        op2, args2 = commands[i]
        new_op = None

        # Merge logic...
        if {op1, op2} <= {"rlineto", "rrcurveto"}:
            if op1 == op2:
                new_op = op1
            else:
                if op2 == "rrcurveto" and len(args2) == 6:
                    new_op = "rlinecurve"
                elif len(args2) == 2:
                    new_op = "rcurveline"

        elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
            new_op = op2

        elif {op1, op2} == {"vlineto", "hlineto"}:
            new_op = op1

        elif "curveto" == op1[2:] == op2[2:]:
            d0, d1 = op1[:2]
            d2, d3 = op2[:2]

            if d1 == "r" or d2 == "r" or d0 == d3 == "r":
                continue

            d = _mergeCategories(d1, d2)
            if d is None:
                continue
            if d0 == "r":
                d = _mergeCategories(d, d3)
                if d is None:
                    continue
                new_op = "r" + d + "curveto"
            elif d3 == "r":
                d0 = _mergeCategories(d0, _negateCategory(d))
                if d0 is None:
                    continue
                new_op = d0 + "r" + "curveto"
            else:
                d0 = _mergeCategories(d0, d3)
                if d0 is None:
                    continue
                new_op = d0 + d + "curveto"

        # Make sure the stack depth does not exceed (maxstack - 1), so
        # that subroutinizer can insert subroutine calls at any point.
        if new_op and len(args1) + len(args2) < maxstack:
            commands[i - 1] = (new_op, args1 + args2)
            del commands[i]

    # 6. Resolve any remaining made-up operators into real operators.
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"0moveto", "0lineto"}:
            commands[i] = "h" + op[1:], args
            continue

        if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
            op0, op1 = op[:2]
            if (op0 == "r") ^ (op1 == "r"):
                assert len(args) % 2 == 1
            if op0 == "0":
                op0 = "h"
            if op1 == "0":
                op1 = "h"
            if op0 == "r":
                op0 = op1
            if op1 == "r":
                op1 = _negateCategory(op0)
            assert {op0, op1} <= {"h", "v"}, (op0, op1)

            if len(args) % 2:
                if op0 != op1:  # vhcurveto / hvcurveto
                    if (op0 == "h") ^ (len(args) % 8 == 1):
                        # Swap last two args order
                        args = args[:-2] + args[-1:] + args[-2:-1]
                else:  # hhcurveto / vvcurveto
                    if op0 == "h":  # hhcurveto
                        # Swap first two args order
                        args = args[1:2] + args[:1] + args[2:]

            commands[i] = op0 + op1 + "curveto", args
            continue

    # 7. For any series of args which are blend lists, convert the series to a single blend arg.
    for i in range(len(commands)):
        op, args = commands[i]
        if any(isinstance(arg, list) for arg in args):
            commands[i] = op, _convertToBlendCmds(args)

    return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
    """Convenience wrapper: program -> commands -> specialized -> program."""
    commands = programToCommands(program, getNumRegions)
    specialized = specializeCommands(commands, **kwargs)
    return commandsToProgram(specialized)
175,381 | from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
def byteCost(widths, default, nominal):
    """Return the total byte cost of encoding the given widths with the
    chosen defaultWidthX (free) and nominalWidthX (cost depends on the
    delta's magnitude)."""
    if not hasattr(widths, "items"):
        # Accept a plain iterable: fold it into a width -> frequency map.
        counts = defaultdict(int)
        for w in widths:
            counts[w] += 1
        widths = counts

    total = 0
    for width, freq in widths.items():
        if width == default:
            continue
        delta = abs(width - nominal)
        # thresholds mirror CFF number encoding sizes: 1, 2 or 5 bytes
        if delta <= 107:
            total += freq
        elif delta <= 1131:
            total += 2 * freq
        else:
            total += 5 * freq
    return total
# NOTE(review): dataset artifact — this is a typeshed-style stub for
# collections.defaultdict with its @overload decorators stripped, so the
# repeated __init__ definitions are not valid as written.  TODO: confirm
# against the original stub source.
class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]):
    default_factory: Callable[[], _VT]
    def __init__(self, **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ...
    def __init__(
        self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    def __missing__(self, key: _KT) -> _VT: ...
    def copy(self: _S) -> _S: ...
The provided code snippet includes necessary dependencies for implementing the `optimizeWidthsBruteforce` function. Write a Python function `def optimizeWidthsBruteforce(widths)` to solve the following problem:
Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for the smallest of fonts.
Here is the function:
def optimizeWidthsBruteforce(widths):
    """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallest of fonts.

    Returns the (defaultWidthX, nominalWidthX) pair over the widths' full
    range that minimizes the total byte cost (as computed by byteCost).
    """
    d = defaultdict(int)
    for w in widths:
        d[w] += 1

    # Maximum number of bytes using default can possibly save
    maxDefaultAdvantage = 5 * max(d.values())

    minw, maxw = min(widths), max(widths)
    domain = list(range(minw, maxw + 1))

    # Note: a previous revision precomputed min(byteCost(widths, None, n))
    # over the whole domain here, but never used it; that dead extra pass
    # has been removed.
    bestCost = len(widths) * 5 + 1
    for nominal in domain:
        # Cheap lower bound: without a default, no choice of default for
        # this nominal can save more than maxDefaultAdvantage bytes, so
        # skip the inner loop when it cannot possibly beat bestCost.
        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
            continue
        for default in domain:
            cost = byteCost(widths, default, nominal)
            if cost < bestCost:
                bestCost = cost
                bestDefault = default
                bestNominal = nominal

    return bestDefault, bestNominal
175,382 | from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
class missingdict(dict):
    """Dictionary that answers lookups of absent keys via a callback.

    Unlike ``collections.defaultdict``, the callback receives the missing
    key itself, and the computed value is *not* stored in the dict.
    """

    def __init__(self, missing_func):
        # Callback invoked with the key on every failed lookup.
        self.missing_func = missing_func

    def __missing__(self, key):
        # Deliberately no insertion/caching — recomputed on each miss.
        return self.missing_func(key)
def cumSum(f, op=add, start=0, decreasing=False):
    """Cumulative fold of the integer-keyed frequency map *f*.

    Returns a ``missingdict`` mapping each x in [min(f), max(f)] to the
    running ``op``-fold of f over keys up to x (down to x if *decreasing*).
    Lookups outside the populated range yield *start* on the "before" side
    of the scan and the grand total on the "after" side.
    """
    ks = sorted(f)
    lo, hi = ks[0], ks[-1]
    grand_total = reduce(op, f.values(), start)
    if decreasing:
        scan = range(hi, lo - 1, -1)
        fallback = lambda x: start if x > hi else grand_total
    else:
        scan = range(lo, hi + 1)
        fallback = lambda x: start if x < lo else grand_total
    acc = missingdict(fallback)
    running = start
    for x in scan:
        running = op(running, f[x])
        acc[x] = running
    return acc
def byteCost(widths, default, nominal):
    """Total bytes needed to encode *widths* given the chosen CFF
    defaultWidthX (*default*) and nominalWidthX (*nominal*).

    *widths* may be a list of widths or a {width: count} mapping.  Widths
    equal to *default* cost nothing; otherwise each glyph costs 1, 2 or 5
    bytes depending on its distance from *nominal*.
    """
    if not hasattr(widths, "items"):
        freq = defaultdict(int)
        for width in widths:
            freq[width] += 1
        widths = freq
    total = 0
    for width, count in widths.items():
        if width == default:
            continue
        delta = abs(width - nominal)
        if delta <= 107:
            total += count  # one-byte operand
        elif delta <= 1131:
            total += count * 2  # two-byte operand
        else:
            total += count * 5  # full five-byte operand
    return total
# Typeshed-style stub for collections.defaultdict, reproduced as context.
# NOTE(review): the repeated ``__init__`` definitions would carry
# ``@overload`` in a real .pyi stub; executed as plain Python each one
# replaces the previous — presumably this excerpt is never executed.
class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]):
    # Factory invoked by __missing__ to produce values for absent keys.
    default_factory: Callable[[], _VT]
    def __init__(self, **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
    def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ...
    def __init__(
        self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
    ) -> None: ...
    def __missing__(self, key: _KT) -> _VT: ...
    def copy(self: _S) -> _S: ...
def add(__a: Any, __b: Any) -> Any: ...
The provided code snippet includes necessary dependencies for implementing the `optimizeWidths` function. Write a Python function `def optimizeWidths(widths)` to solve the following problem:
Given a list of glyph widths, or dictionary mapping glyph width to number of glyphs having that, returns a tuple of best CFF default and nominal glyph widths. This algorithm is linear in UPEM+numGlyphs.
Here is the function:
def optimizeWidths(widths):
    """Given a list of glyph widths, or dictionary mapping glyph width to number of
    glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
    This algorithm is linear in UPEM+numGlyphs."""
    # Normalize input to a {width: frequency} mapping.
    if not hasattr(widths, "items"):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d
    keys = sorted(widths.keys())
    minw, maxw = keys[0], keys[-1]
    domain = list(range(minw, maxw + 1))
    # Cumulative sum/max forward/backward.
    # "U"/"D" suffixes = scanning the width domain upward / downward
    # (the downward tables use cumSum(..., decreasing=True)).
    cumFrqU = cumSum(widths, op=add)
    cumMaxU = cumSum(widths, op=max)
    cumFrqD = cumSum(widths, op=add, decreasing=True)
    cumMaxD = cumSum(widths, op=max, decreasing=True)
    # Cost per nominal choice, without default consideration.
    # Mirrors byteCost()'s 1/2/5-byte brackets incrementally: each width below
    # x costs 1 byte, those more than 107 away (<= x-108) add 1 more, and
    # those more than 1131 away (<= x-1132) add another 3, totalling 5.
    nomnCostU = missingdict(
        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
    )
    nomnCostD = missingdict(
        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
    )
    # Widths equal to the nominal are counted by both scans; subtract once.
    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
    # Cost-saving per nominal choice, by best default choice.
    # cumMax tracks the highest frequency seen; the best default erases that
    # width's per-glyph cost entirely (hence the 1x/2x/5x multipliers).
    dfltCostU = missingdict(
        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
    )
    dfltCostD = missingdict(
        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
    )
    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
    # Combined cost per nominal choice.
    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
    # Best nominal.
    nominal = min(domain, key=lambda x: bestCost[x])
    # Work back the best default.
    bestC = bestCost[nominal]  # NOTE(review): bestC is computed but unused.
    dfltC = nomnCost[nominal] - bestCost[nominal]
    ends = []
    if dfltC == dfltCostU[nominal]:
        # Winning default lies at or below the nominal: from each cost-bracket
        # boundary, walk left while the running max frequency is unchanged.
        starts = [nominal, nominal - 108, nominal - 1132]
        for start in starts:
            while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
                start -= 1
            ends.append(start)
    else:
        # Symmetric case: winning default lies at or above the nominal.
        starts = [nominal, nominal + 108, nominal + 1132]
        for start in starts:
            while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
                start += 1
            ends.append(start)
    # Evaluate the few candidate defaults exactly and keep the cheapest.
    default = min(ends, key=lambda default: byteCost(widths, default, nominal))
return default, nominal | Given a list of glyph widths, or dictionary mapping glyph width to number of glyphs having that, returns a tuple of best CFF default and nominal glyph widths. This algorithm is linear in UPEM+numGlyphs. |
175,383 | import codecs
import encodings
class ExtendCodec(codecs.Codec):
    """Codec that wraps *base_encoding* and extends it with an extra
    byte <-> character *mapping*.

    The instance registers itself as a codecs *error handler* under its own
    name, so running the base codec with ``errors=self.name`` transparently
    falls back to the extension mapping for inputs the base encoding cannot
    handle.
    """

    def __init__(self, name, base_encoding, mapping):
        self.name = name
        self.base_encoding = base_encoding
        # mapping: byte sequence -> character; reverse: character -> bytes.
        self.mapping = mapping
        self.reverse = {v: k for k, v in mapping.items()}
        # Longest replacement string; bounds how far the encode error
        # handler scans forward from the failure position.
        self.max_len = max(len(v) for v in mapping.values())
        self.info = codecs.CodecInfo(
            name=self.name, encode=self.encode, decode=self.decode
        )
        # Register error() as a codecs error handler under our codec name.
        codecs.register_error(name, self.error)

    def _map(self, mapper, output_type, exc_type, input, errors):
        # Shared driver for encode()/decode(): *mapper* is codecs.encode or
        # codecs.decode, *output_type* is bytes or str accordingly.
        base_error_handler = codecs.lookup_error(errors)
        length = len(input)
        out = output_type()
        while input:
            # first try to use self.error as the error handler
            try:
                part = mapper(input, self.base_encoding, errors=self.name)
                out += part
                break  # All converted
            except exc_type as e:
                # else convert the correct part, handle error as requested and continue
                out += mapper(input[: e.start], self.base_encoding, self.name)
                replacement, pos = base_error_handler(e)
                out += replacement
                input = input[pos:]
        return out, length

    def encode(self, input, errors="strict"):
        return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)

    def decode(self, input, errors="strict"):
        return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)

    def error(self, e):
        # Error-handler callback (registered under self.name): try to resolve
        # the offending span via the extension mapping, growing the span one
        # unit at a time; re-raise with our codec name if nothing matches.
        if isinstance(e, UnicodeDecodeError):
            for end in range(e.start + 1, e.end + 1):
                s = e.object[e.start : end]
                if s in self.mapping:
                    return self.mapping[s], end
        elif isinstance(e, UnicodeEncodeError):
            for end in range(e.start + 1, e.start + self.max_len + 1):
                s = e.object[e.start : end]
                if s in self.reverse:
                    return self.reverse[s], end
        e.encoding = self.name
        raise e
# Extension tables for the "*_ttx" Mac encodings: codec name ->
# (base_encoding, {byte: replacement_character}).  Used by search_function()
# to build an ExtendCodec on first lookup.
_extended_encodings = {
    "x_mac_japanese_ttx": (
        "shift_jis",
        {
            b"\xFC": chr(0x007C),
            b"\x7E": chr(0x007E),
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_trad_chinese_ttx": (
        "big5",
        {
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_korean_ttx": (
        "euc_kr",
        {
            b"\x80": chr(0x00A0),
            b"\x81": chr(0x20A9),
            b"\x82": chr(0x2014),
            b"\x83": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_simp_chinese_ttx": (
        "gb2312",
        {
            b"\x80": chr(0x00FC),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
}
# Lazily-populated cache of ExtendCodec instances, keyed by codec name.
_cache = {}
# Register the search function so the x_mac_*_ttx codec names resolve.
# NOTE(review): registration appears twice with an interleaved
# ``import codecs`` — presumably an artifact of concatenating two module
# excerpts.  Registering the same search function twice is redundant, and
# ``search_function`` is defined below this point, so executing these lines
# top-to-bottom as-is would raise NameError.
codecs.register(search_function)
import codecs
codecs.register(search_function)
def search_function(name):
    """Codec search hook for the x_mac_*_ttx extended encodings.

    Returns the ``codecs.CodecInfo`` for a known extended encoding, building
    and caching its ExtendCodec on first use, or None so the codec machinery
    keeps searching.
    """
    name = encodings.normalize_encoding(name)  # Rather undocumented...
    if name not in _extended_encodings:
        return None
    if name not in _cache:
        base_encoding, mapping = _extended_encodings[name]
        assert name[-4:] == "_ttx"
        # Python 2 didn't have any of the encodings that we are implementing
        # in this file. Python 3 added aliases for the East Asian ones, mapping
        # them "temporarily" to the same base encoding as us, with a comment
        # suggesting that full implementation will appear some time later.
        # As such, try the Python version of the x_mac_... first, if that is found,
        # use *that* as our base encoding. This would make our encoding upgrade
        # to the full encoding when and if Python finally implements that.
        # http://bugs.python.org/issue24041
        for candidate in (name[:-4], base_encoding):
            try:
                codecs.lookup(candidate)
            except LookupError:
                continue
            _cache[name] = ExtendCodec(name, candidate, mapping)
            break
    return _cache[name].info
175,384 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
# WOFF2 "known table tags": a WOFF2 table-directory entry stores the index of
# its tag within this sequence instead of the full 4-byte tag (see
# getKnownTagIndex).  Order is significant — indices are part of the format.
woff2KnownTags = (
    "cmap",
    "head",
    "hhea",
    "hmtx",
    "maxp",
    "name",
    "OS/2",
    "post",
    "cvt ",
    "fpgm",
    "glyf",
    "loca",
    "prep",
    "CFF ",
    "VORG",
    "EBDT",
    "EBLC",
    "gasp",
    "hdmx",
    "kern",
    "LTSH",
    "PCLT",
    "VDMX",
    "vhea",
    "vmtx",
    "BASE",
    "GDEF",
    "GPOS",
    "GSUB",
    "EBSC",
    "JSTF",
    "MATH",
    "CBDT",
    "CBLC",
    "COLR",
    "CPAL",
    "SVG ",
    "sbix",
    "acnt",
    "avar",
    "bdat",
    "bloc",
    "bsln",
    "cvar",
    "fdsc",
    "feat",
    "fmtx",
    "fvar",
    "gvar",
    "hsty",
    "just",
    "lcar",
    "mort",
    "morx",
    "opbd",
    "prop",
    "trak",
    "Zapf",
    "Silf",
    "Glat",
    "Gloc",
    "Feat",
    "Sill",
)
# Sentinel index (63) meaning "tag not in woff2KnownTags; spelled out in full".
woff2UnknownTagIndex = 0x3F
The provided code snippet includes necessary dependencies for implementing the `getKnownTagIndex` function. Write a Python function `def getKnownTagIndex(tag)` to solve the following problem:
Return index of 'tag' in woff2KnownTags list. Return 63 if not found.
Here is the function:
def getKnownTagIndex(tag):
    """Return index of 'tag' in woff2KnownTags list. Return 63 if not found."""
    # tuple.index performs the same linear scan in C; ValueError simply
    # means "not a known tag", mapped to the unknown-tag sentinel (0x3F).
    try:
        return woff2KnownTags.index(tag)
    except ValueError:
        return woff2UnknownTagIndex
175,385 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
woff2Base128MaxSize = 5
def byteord(c):
    """Return *c* unchanged if it is already an int; otherwise its ordinal
    (accepts a 1-character str or 1-byte bytes)."""
    if isinstance(c, int):
        return c
    return ord(c)
class TTLibError(Exception):
    """Generic error raised by the ttLib font I/O code."""
    pass
The provided code snippet includes necessary dependencies for implementing the `unpackBase128` function. Write a Python function `def unpackBase128(data)` to solve the following problem:
r"""Read one to five bytes from UIntBase128-encoded input string, and return a tuple containing the decoded integer plus any leftover data. >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00") True >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295 True >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "<stdin>", line 1, in ? TTLibError: UIntBase128 value must not start with leading zeros >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "<stdin>", line 1, in ? TTLibError: UIntBase128-encoded sequence is longer than 5 bytes >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): File "<stdin>", line 1, in ? TTLibError: UIntBase128 value exceeds 2**32-1
Here is the function:
def unpackBase128(data):
    r"""Decode a UIntBase128 number from the head of *data*.

    Reads between one and five bytes and returns ``(value, rest)``, where
    *rest* is the unconsumed remainder of *data*.

    >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
    True
    >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
    True
    >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    TTLibError: UIntBase128 value must not start with leading zeros
    >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
    >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      File "<stdin>", line 1, in ?
    TTLibError: UIntBase128 value exceeds 2**32-1
    """
    if not data:
        raise TTLibError("not enough data to unpack UIntBase128")
    if byteord(data[0]) == 0x80:
        # Spec: reject encodings that begin with a zero-payload byte.
        raise TTLibError("UIntBase128 value must not start with leading zeros")
    value = 0
    for _ in range(woff2Base128MaxSize):
        if not data:
            raise TTLibError("not enough data to unpack UIntBase128")
        byte = byteord(data[0])
        data = data[1:]
        if value & 0xFE000000:
            # Shifting in 7 more bits would overflow 32 bits.
            raise TTLibError("UIntBase128 value exceeds 2**32-1")
        value = (value << 7) | (byte & 0x7F)
        if not (byte & 0x80):
            # High bit clear marks the final byte of the sequence.
            return value, data
    raise TTLibError("UIntBase128-encoded sequence is longer than 5 bytes")
175,386 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
def base128Size(n):
    """Return the length in bytes of a UIntBase128-encoded sequence with value n.
    >>> base128Size(0)
    1
    >>> base128Size(24567)
    3
    >>> base128Size(2**32-1)
    5
    """
    assert n >= 0
    # Seven payload bits per byte, rounded up; zero still occupies one byte.
    return max(1, -(-n.bit_length() // 7))
class TTLibError(Exception):
    """Generic error raised by the ttLib font I/O code."""
    pass
The provided code snippet includes necessary dependencies for implementing the `packBase128` function. Write a Python function `def packBase128(n)` to solve the following problem:
r"""Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of bytes using UIntBase128 variable-length encoding. Produce the shortest possible encoding. >>> packBase128(63) == b"\x3f" True >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f' True
Here is the function:
def packBase128(n):
    r"""Encode an unsigned integer in the range 0 to 2**32-1 (inclusive) as a
    UIntBase128 byte string, using the shortest possible encoding.

    >>> packBase128(63) == b"\x3f"
    True
    >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
    True
    """
    if n < 0 or n >= 2**32:
        raise TTLibError("UIntBase128 format requires 0 <= integer <= 2**32-1")
    # Collect 7-bit groups least-significant first; every byte except the
    # final (least-significant) one carries the 0x80 continuation bit.
    out = bytearray([n & 0x7F])
    n >>= 7
    while n:
        out.append(0x80 | (n & 0x7F))
        n >>= 7
    return bytes(reversed(out))
175,387 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
def byteord(c):
    """Return *c* itself if it is an int, otherwise ``ord(c)`` (accepts a
    1-character str or 1-byte bytes)."""
    return c if isinstance(c, int) else ord(c)
class TTLibError(Exception):
    """Generic error raised by the ttLib font I/O code."""
    pass
The provided code snippet includes necessary dependencies for implementing the `unpack255UShort` function. Write a Python function `def unpack255UShort(data)` to solve the following problem:
Read one to three bytes from 255UInt16-encoded input string, and return a tuple containing the decoded integer plus any leftover data. >>> unpack255UShort(bytechr(252))[0] 252 Note that some numbers (e.g. 506) can have multiple encodings: >>> unpack255UShort(struct.pack("BB", 254, 0))[0] 506 >>> unpack255UShort(struct.pack("BB", 255, 253))[0] 506 >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0] 506
Here is the function:
def unpack255UShort(data):
    """Decode a 255UInt16 number from the head of *data*.

    Consumes one to three bytes and returns ``(value, rest)``.
    Note that some numbers (e.g. 506) can have multiple encodings:
    >>> unpack255UShort(bytechr(252))[0]
    252
    >>> unpack255UShort(struct.pack("BB", 254, 0))[0]
    506
    >>> unpack255UShort(struct.pack("BB", 255, 253))[0]
    506
    >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
    506
    """
    code = byteord(data[:1])
    data = data[1:]
    if code < 253:
        # One-byte form: the code byte is the value itself.
        return code, data
    if code == 253:
        # Escape code: a big-endian unsigned short follows.
        if len(data) < 2:
            raise TTLibError("not enough data to unpack 255UInt16")
        (result,) = struct.unpack(">H", data[:2])
        return result, data[2:]
    if code == 254:
        # One extra byte, biased by 2*253.
        if not data:
            raise TTLibError("not enough data to unpack 255UInt16")
        return byteord(data[:1]) + 506, data[1:]
    # code == 255: one extra byte, biased by 253.
    if not data:
        raise TTLibError("not enough data to unpack 255UInt16")
    return byteord(data[:1]) + 253, data[1:]
175,388 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
class TTLibError(Exception):
    """Generic error raised by the ttLib font I/O code."""
    pass
The provided code snippet includes necessary dependencies for implementing the `pack255UShort` function. Write a Python function `def pack255UShort(value)` to solve the following problem:
r"""Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring using 255UInt16 variable-length encoding. >>> pack255UShort(252) == b'\xfc' True >>> pack255UShort(506) == b'\xfe\x00' True >>> pack255UShort(762) == b'\xfd\x02\xfa' True
Here is the function:
def pack255UShort(value):
    r"""Encode an unsigned integer in the range 0 to 65535 (inclusive) using
    the 255UInt16 variable-length encoding, choosing the shortest form.

    >>> pack255UShort(252) == b'\xfc'
    True
    >>> pack255UShort(506) == b'\xfe\x00'
    True
    >>> pack255UShort(762) == b'\xfd\x02\xfa'
    True
    """
    if value < 0 or value > 0xFFFF:
        raise TTLibError("255UInt16 format requires 0 <= integer <= 65535")
    if value < 253:
        # Small values fit directly in the code byte.
        return bytes((value,))
    if value < 506:
        # 255-prefixed: one extra byte biased by 253.
        return bytes((255, value - 253))
    if value < 762:
        # 254-prefixed: one extra byte biased by 506.
        return bytes((254, value - 506))
    # 253-prefixed: full big-endian unsigned short.
    return struct.pack(">BH", 253, value)
175,389 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
log = logging.getLogger("fontTools.ttLib.woff2")
class WOFF2FlavorData(WOFFFlavorData):
    Flavor = "woff2"

    def __init__(self, reader=None, data=None, transformedTables=None):
        """Data class that holds the WOFF2 header major/minor version, any
        metadata or private data (as bytes strings), and the set of
        table tags that have transformations applied (if reader is not None),
        or will have once the WOFF2 font is compiled.
        Args:
            reader: an SFNTReader (or subclass) object to read flavor data from.
            data: another WOFFFlavorData object to initialise data from.
            transformedTables: set of strings containing table tags to be transformed.
        Raises:
            ImportError if the brotli module is not installed.
        NOTE: The 'reader' argument, on the one hand, and the 'data' and
        'transformedTables' arguments, on the other hand, are mutually exclusive.
        """
        if not haveBrotli:
            raise ImportError("No module named brotli")
        if reader is not None:
            if data is not None:
                raise TypeError("'reader' and 'data' arguments are mutually exclusive")
            if transformedTables is not None:
                raise TypeError(
                    "'reader' and 'transformedTables' arguments are mutually exclusive"
                )
        # 'glyf' and 'loca' are interdependent; transforming one without the
        # other would produce an inconsistent font.
        if transformedTables is not None and (
            "glyf" in transformedTables
            and "loca" not in transformedTables
            or "loca" in transformedTables
            and "glyf" not in transformedTables
        ):
            raise ValueError("'glyf' and 'loca' must be transformed (or not) together")
        super(WOFF2FlavorData, self).__init__(reader=reader)
        if reader:
            transformedTables = [
                tag for tag, entry in reader.tables.items() if entry.transformed
            ]
        elif data:
            self.majorVersion = data.majorVersion
            # BUGFIX: was ``self.majorVersion = data.minorVersion``, which
            # overwrote majorVersion and left minorVersion never copied.
            self.minorVersion = data.minorVersion
            self.metaData = data.metaData
            self.privData = data.privData
            if transformedTables is None and hasattr(data, "transformedTables"):
                transformedTables = data.transformedTables
        if transformedTables is None:
            transformedTables = woff2TransformedTableTags
        self.transformedTables = set(transformedTables)

    def _decompress(self, rawData):
        # WOFF2 mandates brotli for the compressed data stream.
        return brotli.decompress(rawData)
The provided code snippet includes necessary dependencies for implementing the `compress` function. Write a Python function `def compress(input_file, output_file, transform_tables=None)` to solve the following problem:
Compress OpenType font to WOFF2. Args: input_file: a file path, file or file-like object (open in binary mode) containing an OpenType font (either CFF- or TrueType-flavored). output_file: a file path, file or file-like object where to save the compressed WOFF2 font. transform_tables: Optional[Iterable[str]]: a set of table tags for which to enable preprocessing transformations. By default, only 'glyf' and 'loca' tables are transformed. An empty set means disable all transformations.
Here is the function:
def compress(input_file, output_file, transform_tables=None):
    """Compress OpenType font to WOFF2.
    Args:
        input_file: a file path, file or file-like object (open in binary mode)
            containing an OpenType font (either CFF- or TrueType-flavored).
        output_file: a file path, file or file-like object where to save the
            compressed WOFF2 font.
        transform_tables: Optional[Iterable[str]]: a set of table tags for which
            to enable preprocessing transformations. By default, only 'glyf'
            and 'loca' tables are transformed. An empty set means disable all
            transformations.
    """
    # Lazy %-style logging args: the message is only formatted when INFO
    # is actually enabled (was eagerly formatted with the % operator).
    log.info("Processing %s => %s", input_file, output_file)
    font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
    font.flavor = "woff2"
    if transform_tables is not None:
        font.flavorData = WOFF2FlavorData(
            data=font.flavorData, transformedTables=transform_tables
        )
    # Keep the original table order; WOFF2 handles ordering itself.
    font.save(output_file, reorderTables=False)
175,390 | from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad
from fontTools.ttLib import (
TTFont,
TTLibError,
getTableModule,
getTableClass,
getSearchRange,
)
from fontTools.ttLib.sfnt import (
SFNTReader,
SFNTWriter,
DirectoryEntry,
WOFFFlavorData,
sfntDirectoryFormat,
sfntDirectorySize,
SFNTDirectoryEntry,
sfntDirectoryEntrySize,
calcChecksum,
)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
log = logging.getLogger("fontTools.ttLib.woff2")
The provided code snippet includes necessary dependencies for implementing the `decompress` function. Write a Python function `def decompress(input_file, output_file)` to solve the following problem:
Decompress WOFF2 font to OpenType font. Args: input_file: a file path, file or file-like object (open in binary mode) containing a compressed WOFF2 font. output_file: a file path, file or file-like object where to save the decompressed OpenType font.
Here is the function:
def decompress(input_file, output_file):
    """Decompress WOFF2 font to OpenType font.
    Args:
        input_file: a file path, file or file-like object (open in binary mode)
            containing a compressed WOFF2 font.
        output_file: a file path, file or file-like object where to save the
            decompressed OpenType font.
    """
    # Lazy %-style logging args: only formatted when INFO is enabled
    # (was eagerly formatted with the % operator).
    log.info("Processing %s => %s", input_file, output_file)
    font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
    # Clearing flavor/flavorData makes save() emit a plain SFNT font.
    font.flavor = None
    font.flavorData = None
    font.save(output_file, reorderTables=True)
175,391 | import itertools
import logging
from typing import Callable, Iterable, Optional, Mapping
from fontTools.misc.roundTools import otRound
from fontTools.ttLib import ttFont
from fontTools.ttLib.tables import _g_l_y_f
from fontTools.ttLib.tables import _h_m_t_x
from fontTools.pens.ttGlyphPen import TTGlyphPen
import pathops
class RemoveOverlapsError(Exception):
    """Raised when overlap removal fails for a glyph."""
    pass
log = logging.getLogger("fontTools.ttLib.removeOverlaps")
def removeTTGlyphOverlaps(
    glyphName: str,
    glyphSet: _TTGlyphMapping,
    glyfTable: _g_l_y_f.table__g_l_y_f,
    hmtxTable: _h_m_t_x.table__h_m_t_x,
    removeHinting: bool = True,
) -> bool:
    """Remove overlapping contours from a single 'glyf' glyph, in place.

    Returns True if the glyph's outline was replaced, False otherwise.
    When the glyph is left unmodified and *removeHinting* is true, its
    hinting program is stripped anyway.
    """
    glyph = glyfTable[glyphName]
    # decompose composite glyphs only if components overlap each other
    # ('and' binds tighter than 'or': simple glyphs with contours always
    # qualify; composites only when their components actually overlap)
    if (
        glyph.numberOfContours > 0
        or glyph.isComposite()
        and componentsOverlap(glyph, glyphSet)
    ):
        path = skPathFromGlyph(glyphName, glyphSet)
        # remove overlaps
        path2 = _simplify(path, glyphName)
        # replace TTGlyph if simplified path is different (ignoring contour order)
        if {tuple(c) for c in path.contours} != {tuple(c) for c in path2.contours}:
            glyfTable[glyphName] = glyph = ttfGlyphFromSkPath(path2)
            # simplified glyph is always unhinted
            assert not glyph.program
            # also ensure hmtx LSB == glyph.xMin so glyph origin is at x=0
            width, lsb = hmtxTable[glyphName]
            if lsb != glyph.xMin:
                hmtxTable[glyphName] = (width, glyph.xMin)
            return True
    if removeHinting:
        glyph.removeHinting()
    return False
# Stub residue from the typing module: the Optional special form and a
# minimal structural Iterable protocol; bodies elided.
Optional: _SpecialForm = ...
class Iterable(Protocol[_T_co]):
    def __iter__(self) -> Iterator[_T_co]: ...
The provided code snippet includes necessary dependencies for implementing the `removeOverlaps` function. Write a Python function `def removeOverlaps( font: ttFont.TTFont, glyphNames: Optional[Iterable[str]] = None, removeHinting: bool = True, ignoreErrors=False, ) -> None` to solve the following problem:
Simplify glyphs in TTFont by merging overlapping contours. Overlapping components are first decomposed to simple contours, then merged. Currently this only works with TrueType fonts with 'glyf' table. Raises NotImplementedError if 'glyf' table is absent. Note that removing overlaps invalidates the hinting. By default we drop hinting from all glyphs whether or not overlaps are removed from a given one, as it would look weird if only some glyphs are left (un)hinted. Args: font: input TTFont object, modified in place. glyphNames: optional iterable of glyph names (str) to remove overlaps from. By default, all glyphs in the font are processed. removeHinting (bool): set to False to keep hinting for unmodified glyphs. ignoreErrors (bool): set to True to ignore errors while removing overlaps, thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363).
Here is the function:
def removeOverlaps(
    font: ttFont.TTFont,
    glyphNames: Optional[Iterable[str]] = None,
    removeHinting: bool = True,
    ignoreErrors=False,
) -> None:
    """Simplify glyphs in TTFont by merging overlapping contours.
    Overlapping components are first decomposed to simple contours, then merged.
    Currently this only works with TrueType fonts with 'glyf' table.
    Raises NotImplementedError if 'glyf' table is absent.
    Note that removing overlaps invalidates the hinting. By default we drop hinting
    from all glyphs whether or not overlaps are removed from a given one, as it would
    look weird if only some glyphs are left (un)hinted.
    Args:
        font: input TTFont object, modified in place.
        glyphNames: optional iterable of glyph names (str) to remove overlaps from.
            By default, all glyphs in the font are processed.
        removeHinting (bool): set to False to keep hinting for unmodified glyphs.
        ignoreErrors (bool): set to True to ignore errors while removing overlaps,
            thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363).
    """
    try:
        glyfTable = font["glyf"]
    except KeyError:
        raise NotImplementedError("removeOverlaps currently only works with TTFs")
    hmtxTable = font["hmtx"]
    # wraps the underlying glyf Glyphs, takes care of interfacing with drawing pens
    glyphSet = font.getGlyphSet()
    if glyphNames is None:
        glyphNames = font.getGlyphOrder()
    # process all simple glyphs first, then composites with increasing component depth,
    # so that by the time we test for component intersections the respective base glyphs
    # have already been simplified
    glyphNames = sorted(
        glyphNames,
        key=lambda name: (
            glyfTable[name].getCompositeMaxpValues(glyfTable).maxComponentDepth
            if glyfTable[name].isComposite()
            else 0,
            name,
        ),
    )
    modified = set()
    for glyphName in glyphNames:
        try:
            if removeTTGlyphOverlaps(
                glyphName, glyphSet, glyfTable, hmtxTable, removeHinting
            ):
                modified.add(glyphName)
        except RemoveOverlapsError:
            # Best-effort mode: leave the problematic glyph untouched.
            if not ignoreErrors:
                raise
            log.error("Failed to remove overlaps for '%s'", glyphName)
log.debug("Removed overlaps for %s glyphs:\n%s", len(modified), " ".join(modified)) | Simplify glyphs in TTFont by merging overlapping contours. Overlapping components are first decomposed to simple contours, then merged. Currently this only works with TrueType fonts with 'glyf' table. Raises NotImplementedError if 'glyf' table is absent. Note that removing overlaps invalidates the hinting. By default we drop hinting from all glyphs whether or not overlaps are removed from a given one, as it would look weird if only some glyphs are left (un)hinted. Args: font: input TTFont object, modified in place. glyphNames: optional iterable of glyph names (str) to remove overlaps from. By default, all glyphs in the font are processed. removeHinting (bool): set to False to keep hinting for unmodified glyphs. ignoreErrors (bool): set to True to ignore errors while removing overlaps, thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363). |
175,392 | from abc import ABC, abstractmethod
from collections.abc import Mapping
from contextlib import contextmanager
from copy import copy
from types import SimpleNamespace
from fontTools.misc.fixedTools import otRound
from fontTools.misc.loggingTools import deprecateFunction
from fontTools.misc.transform import Transform
from fontTools.pens.transformPen import TransformPen, TransformPointPen
def copy(x: _T) -> _T:
def _setCoordinates(glyph, coord, glyfTable):
    """Install new coordinates (including the 4 trailing phantom points) on a glyph.

    `coord` holds the glyph's point coordinates (or component offsets for
    composites) followed by the four phantom points giving the (left, right,
    top, bottom) side positions.  The phantom points are stripped from `coord`
    IN PLACE (the caller's list is mutated), the glyph's bounds are
    recalculated, and the recomputed metrics are returned as
    (horizontalAdvanceWidth, leftSideBearing, verticalAdvanceWidth,
    topSideBearing).
    """
    # Handle phantom points for (left, right, top, bottom) positions.
    assert len(coord) >= 4
    leftSideX = coord[-4][0]
    rightSideX = coord[-3][0]
    topSideY = coord[-2][1]
    bottomSideY = coord[-1][1]
    # Strip the 4 phantom points in place — deliberately mutates the caller's list.
    for _ in range(4):
        del coord[-1]
    if glyph.isComposite():
        # One (x, y) offset per component.
        assert len(coord) == len(glyph.components)
        glyph.components = [copy(comp) for comp in glyph.components]  # Shallow copy
        for p, comp in zip(coord, glyph.components):
            if hasattr(comp, "x"):
                comp.x, comp.y = p
    elif glyph.isVarComposite():
        glyph.components = [copy(comp) for comp in glyph.components]  # Shallow copy
        # Each var-composite component consumes its own slice of `coord`.
        for comp in glyph.components:
            coord = comp.setCoordinates(coord)
        assert not coord
    elif glyph.numberOfContours == 0:
        assert len(coord) == 0
    else:
        assert len(coord) == len(glyph.coordinates)
        glyph.coordinates = coord
    glyph.recalcBounds(glyfTable)
    horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
    verticalAdvanceWidth = otRound(topSideY - bottomSideY)
    leftSideBearing = otRound(glyph.xMin - leftSideX)
    topSideBearing = otRound(topSideY - glyph.yMax)
    return (
        horizontalAdvanceWidth,
        leftSideBearing,
        verticalAdvanceWidth,
        topSideBearing,
    )
175,393 | from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
otRound,
)
from fontTools.misc.textTools import safeEval
import array
from collections import Counter, defaultdict
import io
import logging
import struct
import sys
class TupleVariation(object):
def __init__(self, axes, coordinates):
    # axes: mapping of axis tag -> (min, peak, max) triple (see fromXML /
    # compileCoord); copied so later caller-side mutation does not affect us.
    self.axes = axes.copy()
    # coordinates: per-point (x, y) deltas for gvar, or per-CVT scalar deltas
    # for cvar; a None entry means "no explicit delta" for that point/CVT.
    self.coordinates = list(coordinates)

def __repr__(self):
    axes = ",".join(
        sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
    )
    return "<TupleVariation %s %s>" % (axes, self.coordinates)

def __eq__(self, other):
    # NOTE(review): assumes `other` is a TupleVariation; comparing against an
    # unrelated type raises AttributeError instead of returning NotImplemented.
    return self.coordinates == other.coordinates and self.axes == other.axes

def getUsedPoints(self):
    """Return the set of point indices that have explicit deltas.

    An empty frozenset means "all points used"; None means no point is used.
    """
    # Empty set means "all points used".
    if None not in self.coordinates:
        return frozenset()
    used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
    # Return None if no points used.
    return used if used else None

def hasImpact(self):
    """Returns True if this TupleVariation has any visible impact.

    If the result is False, the TupleVariation can be omitted from the font
    without making any visible difference.
    """
    return any(c is not None for c in self.coordinates)
def toXML(self, writer, axisTags):
    """Write this variation as a <tuple> element: axis coords, then deltas."""
    writer.begintag("tuple")
    writer.newline()
    for axis in axisTags:
        value = self.axes.get(axis)
        if value is not None:
            minValue, value, maxValue = value
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            # Only emit min/max attributes when they differ from the defaults
            # implied by the peak value.
            if minValue == defaultMinValue and maxValue == defaultMaxValue:
                writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
            else:
                attrs = [
                    ("axis", axis),
                    ("min", fl2str(minValue, 14)),
                    ("value", fl2str(value, 14)),
                    ("max", fl2str(maxValue, 14)),
                ]
                writer.simpletag("coord", attrs)
            writer.newline()
    wrote_any_deltas = False
    for i, delta in enumerate(self.coordinates):
        if type(delta) == tuple and len(delta) == 2:
            # gvar-style point delta: (x, y)
            writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
            writer.newline()
            wrote_any_deltas = True
        elif type(delta) == int:
            # cvar-style CVT delta: single integer
            writer.simpletag("delta", cvt=i, value=delta)
            writer.newline()
            wrote_any_deltas = True
        elif delta is not None:
            log.error("bad delta format")
            writer.comment("bad delta #%d" % i)
            writer.newline()
            wrote_any_deltas = True
    if not wrote_any_deltas:
        writer.comment("no deltas")
        writer.newline()
    writer.endtag("tuple")
    writer.newline()

def fromXML(self, name, attrs, _content):
    """Populate self.axes / self.coordinates from a parsed <coord>/<delta> element."""
    if name == "coord":
        axis = attrs["axis"]
        value = str2fl(attrs["value"], 14)
        defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
        defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
        # Missing min/max fall back to the defaults implied by the peak.
        minValue = str2fl(attrs.get("min", defaultMinValue), 14)
        maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
        self.axes[axis] = (minValue, value, maxValue)
    elif name == "delta":
        if "pt" in attrs:
            # gvar point delta
            point = safeEval(attrs["pt"])
            x = safeEval(attrs["x"])
            y = safeEval(attrs["y"])
            self.coordinates[point] = (x, y)
        elif "cvt" in attrs:
            # cvar CVT delta
            cvt = safeEval(attrs["cvt"])
            value = safeEval(attrs["value"])
            self.coordinates[cvt] = value
        else:
            log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))
def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
    """Compile this variation to a (tupleData, auxData) pair of byte strings.

    tupleData is the tuple-variation header (data size, flags, optional
    embedded peak and intermediate coordinates); auxData is the serialized
    private point numbers (when present) followed by the packed deltas.
    """
    # NOTE: the mutable default is safe here — sharedCoordIndices is only read.
    assert set(self.axes.keys()) <= set(axisTags), (
        "Unknown axis tag found.",
        self.axes.keys(),
        axisTags,
    )
    tupleData = []
    auxData = []
    if pointData is None:
        usedPoints = self.getUsedPoints()
        if usedPoints is None:  # Nothing to encode
            return b"", b""
        pointData = self.compilePoints(usedPoints)
    coord = self.compileCoord(axisTags)
    # If the peak tuple is shared, its index doubles as the low flag bits.
    flags = sharedCoordIndices.get(coord)
    if flags is None:
        flags = EMBEDDED_PEAK_TUPLE
        tupleData.append(coord)
    intermediateCoord = self.compileIntermediateCoord(axisTags)
    if intermediateCoord is not None:
        flags |= INTERMEDIATE_REGION
        tupleData.append(intermediateCoord)
    # pointData of b'' implies "use shared points".
    if pointData:
        flags |= PRIVATE_POINT_NUMBERS
        auxData.append(pointData)
    auxData.append(self.compileDeltas())
    auxData = b"".join(auxData)
    tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
    return b"".join(tupleData), auxData

def compileCoord(self, axisTags):
    """Pack the peak coordinate: one big-endian F2Dot14 per axis in axisTags."""
    result = bytearray()
    axes = self.axes
    for axis in axisTags:
        triple = axes.get(axis)
        if triple is None:
            # Axis does not participate: peak of 0.
            result.extend(b"\0\0")
        else:
            result.extend(struct.pack(">h", fl2fi(triple[1], 14)))
    return bytes(result)

def compileIntermediateCoord(self, axisTags):
    """Pack the start/end (intermediate) coords, or None when defaults suffice."""
    needed = False
    for axis in axisTags:
        minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
        defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
        defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
        if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
            needed = True
            break
    if not needed:
        return None
    # All min values first, then all max values (per axisTags order).
    minCoords = bytearray()
    maxCoords = bytearray()
    for axis in axisTags:
        minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
        minCoords.extend(struct.pack(">h", fl2fi(minValue, 14)))
        maxCoords.extend(struct.pack(">h", fl2fi(maxValue, 14)))
    return minCoords + maxCoords

def decompileCoord_(axisTags, data, offset):
    """Helper (no self): read one F2Dot14 per axis; return ({axis: value}, newOffset)."""
    coord = {}
    pos = offset
    for axis in axisTags:
        coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14)
        pos += 2
    return coord, pos
def compilePoints(points):
    """Pack a collection of point numbers into the gvar run-length encoding.

    Returns b"\\0" for the empty set (meaning "all points"), otherwise a
    bytearray of the count followed by delta-encoded runs.
    """
    # If the set consists of all points in the glyph, it gets encoded with
    # a special encoding: a single zero byte.
    #
    # To use this optimization, points passed in must be empty set.
    # The following two lines are not strictly necessary as the main code
    # below would emit the same. But this is most common and faster.
    if not points:
        return b"\0"
    # In the 'gvar' table, the packing of point numbers is a little surprising.
    # It consists of multiple runs, each being a delta-encoded list of integers.
    # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
    # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
    # There are two types of runs, with values being either 8 or 16 bit unsigned
    # integers.
    points = list(points)
    points.sort()
    numPoints = len(points)
    result = bytearray()
    # The binary representation starts with the total number of points in the set,
    # encoded into one or two bytes depending on the value.
    if numPoints < 0x80:
        result.append(numPoints)
    else:
        result.append((numPoints >> 8) | 0x80)
        result.append(numPoints & 0xFF)
    MAX_RUN_LENGTH = 127
    pos = 0
    lastValue = 0
    while pos < numPoints:
        runLength = 0
        # Reserve the run header byte; patched once the run length is known.
        headerPos = len(result)
        result.append(0)
        useByteEncoding = None
        while pos < numPoints and runLength <= MAX_RUN_LENGTH:
            curValue = points[pos]
            delta = curValue - lastValue
            if useByteEncoding is None:
                # First delta of the run decides byte vs word encoding.
                useByteEncoding = 0 <= delta <= 0xFF
            if useByteEncoding and (delta > 0xFF or delta < 0):
                # we need to start a new run (which will not use byte encoding)
                break
            # TODO This never switches back to a byte-encoding from a short-encoding.
            # That's suboptimal.
            if useByteEncoding:
                result.append(delta)
            else:
                result.append(delta >> 8)
                result.append(delta & 0xFF)
            lastValue = curValue
            pos += 1
            runLength += 1
        if useByteEncoding:
            result[headerPos] = runLength - 1
        else:
            result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS
    return result
def decompilePoints_(numPoints, data, offset, tableTag):
    """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
    assert tableTag in ("cvar", "gvar")
    pos = offset
    # First 1 or 2 bytes: total number of points in the packed set.
    numPointsInData = data[pos]
    pos += 1
    if (numPointsInData & POINTS_ARE_WORDS) != 0:
        numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
        pos += 1
    if numPointsInData == 0:
        # Special encoding: zero count means "all points in the glyph".
        return (range(numPoints), pos)
    result = []
    while len(result) < numPointsInData:
        runHeader = data[pos]
        pos += 1
        numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
        point = 0
        # Run values are either 16-bit or 8-bit unsigned deltas.
        if (runHeader & POINTS_ARE_WORDS) != 0:
            points = array.array("H")
            pointsSize = numPointsInRun * 2
        else:
            points = array.array("B")
            pointsSize = numPointsInRun
        points.frombytes(data[pos : pos + pointsSize])
        if sys.byteorder != "big":
            points.byteswap()
        assert len(points) == numPointsInRun
        pos += pointsSize
        result.extend(points)
    # Convert relative to absolute
    absolute = []
    current = 0
    for delta in result:
        current += delta
        absolute.append(current)
    result = absolute
    del absolute
    badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
    if badPoints:
        log.warning(
            "point %s out of range in '%s' table"
            % (",".join(sorted(badPoints)), tableTag)
        )
    return (result, pos)
def compileDeltas(self):
    """Pack the non-None deltas; for gvar, all x deltas come before all y deltas."""
    deltaX = []
    deltaY = []
    if self.getCoordWidth() == 2:
        for c in self.coordinates:
            if c is None:
                continue
            deltaX.append(c[0])
            deltaY.append(c[1])
    else:
        # cvar: single scalar per entry; deltaY stays empty and packs to nothing.
        for c in self.coordinates:
            if c is None:
                continue
            deltaX.append(c)
    bytearr = bytearray()
    self.compileDeltaValues_(deltaX, bytearr)
    self.compileDeltaValues_(deltaY, bytearr)
    return bytearr

def compileDeltaValues_(deltas, bytearr=None):
    """[value1, value2, value3, ...] --> bytearray

    Emits a sequence of runs. Each run starts with a
    byte-sized header whose 6 least significant bits
    (header & 0x3F) indicate how many values are encoded
    in this run. The stored length is the actual length
    minus one; run lengths are thus in the range [1..64].
    If the header byte has its most significant bit (0x80)
    set, all values in this run are zero, and no data
    follows. Otherwise, the header byte is followed by
    ((header & 0x3F) + 1) signed values. If (header &
    0x40) is clear, the delta values are stored as signed
    bytes; if (header & 0x40) is set, the delta values are
    signed 16-bit integers.
    """  # Explaining the format because the 'gvar' spec is hard to understand.
    if bytearr is None:
        bytearr = bytearray()
    pos = 0
    numDeltas = len(deltas)
    # Dispatch each position to the cheapest run encoder; each encoder
    # consumes as many values as fit its encoding and returns the new pos.
    while pos < numDeltas:
        value = deltas[pos]
        if value == 0:
            pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
        elif -128 <= value <= 127:
            pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
        else:
            pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
    return bytearr

def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
    """Emit header-only "all zero" runs for the zeros starting at `offset`."""
    pos = offset
    numDeltas = len(deltas)
    while pos < numDeltas and deltas[pos] == 0:
        pos += 1
    runLength = pos - offset
    # A run header can describe at most 64 values.
    while runLength >= 64:
        bytearr.append(DELTAS_ARE_ZERO | 63)
        runLength -= 64
    if runLength:
        bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
    return pos
def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
    """Emit byte-sized runs for the small values starting at `offset`."""
    pos = offset
    numDeltas = len(deltas)
    while pos < numDeltas:
        value = deltas[pos]
        if not (-128 <= value <= 127):
            break
        # Within a byte-encoded run of deltas, a single zero
        # is best stored literally as 0x00 value. However,
        # if are two or more zeroes in a sequence, it is
        # better to start a new run. For example, the sequence
        # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
        # (04 0F 0F 00 0F 0F) when storing the zero value
        # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
        # when starting a new run.
        if value == 0 and pos + 1 < numDeltas and deltas[pos + 1] == 0:
            break
        pos += 1
    runLength = pos - offset
    # A run header can describe at most 64 values; split longer runs.
    while runLength >= 64:
        bytearr.append(63)
        bytearr.extend(array.array("b", deltas[offset : offset + 64]))
        offset += 64
        runLength -= 64
    if runLength:
        bytearr.append(runLength - 1)
        bytearr.extend(array.array("b", deltas[offset:pos]))
    return pos

def encodeDeltaRunAsWords_(deltas, offset, bytearr):
    """Emit 16-bit runs for the large values starting at `offset`."""
    pos = offset
    numDeltas = len(deltas)
    while pos < numDeltas:
        value = deltas[pos]
        # Within a word-encoded run of deltas, it is easiest
        # to start a new run (with a different encoding)
        # whenever we encounter a zero value. For example,
        # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
        # storing the zero literally (42 66 66 00 00 77 77),
        # and equally 7 bytes when starting a new run
        # (40 66 66 80 40 77 77).
        if value == 0:
            break
        # Within a word-encoded run of deltas, a single value
        # in the range (-128..127) should be encoded literally
        # because it is more compact. For example, the sequence
        # [0x6666, 2, 0x7777] becomes 7 bytes when storing
        # the value literally (42 66 66 00 02 77 77), but 8 bytes
        # when starting a new run (40 66 66 00 02 40 77 77).
        if (
            (-128 <= value <= 127)
            and pos + 1 < numDeltas
            and (-128 <= deltas[pos + 1] <= 127)
        ):
            break
        pos += 1
    runLength = pos - offset
    # A run header can describe at most 64 values; split longer runs.
    while runLength >= 64:
        bytearr.append(DELTAS_ARE_WORDS | 63)
        a = array.array("h", deltas[offset : offset + 64])
        if sys.byteorder != "big":
            a.byteswap()
        bytearr.extend(a)
        offset += 64
        runLength -= 64
    if runLength:
        bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
        a = array.array("h", deltas[offset:pos])
        if sys.byteorder != "big":
            a.byteswap()
        bytearr.extend(a)
    return pos
def decompileDeltas_(numDeltas, data, offset):
    """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
    result = []
    pos = offset
    while len(result) < numDeltas:
        runHeader = data[pos]
        pos += 1
        numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
        if (runHeader & DELTAS_ARE_ZERO) != 0:
            # Header-only run: the values are implicit zeros.
            result.extend([0] * numDeltasInRun)
        else:
            # Values follow the header, as signed words or signed bytes.
            if (runHeader & DELTAS_ARE_WORDS) != 0:
                deltas = array.array("h")
                deltasSize = numDeltasInRun * 2
            else:
                deltas = array.array("b")
                deltasSize = numDeltasInRun
            deltas.frombytes(data[pos : pos + deltasSize])
            if sys.byteorder != "big":
                deltas.byteswap()
            assert len(deltas) == numDeltasInRun
            pos += deltasSize
            result.extend(deltas)
    assert len(result) == numDeltas
    return (result, pos)

def getTupleSize_(flags, axisCount):
    """Return the byte size of a tuple-variation header for the given flags."""
    size = 4
    if (flags & EMBEDDED_PEAK_TUPLE) != 0:
        size += axisCount * 2
    if (flags & INTERMEDIATE_REGION) != 0:
        size += axisCount * 4
    return size
def getCoordWidth(self):
    """Return 2 if coordinates are (x, y) as in gvar, 1 if single values
    as in cvar, or 0 if empty.
    """
    firstDelta = next((c for c in self.coordinates if c is not None), None)
    if firstDelta is None:
        return 0  # empty or has no impact
    if type(firstDelta) in (int, float):
        return 1
    if type(firstDelta) is tuple and len(firstDelta) == 2:
        return 2
    raise TypeError(
        "invalid type of delta; expected (int or float) number, or "
        "Tuple[number, number]: %r" % firstDelta
    )

def scaleDeltas(self, scalar):
    """Multiply every delta by `scalar` in place; no-op for scalar == 1.0."""
    if scalar == 1.0:
        return  # no change
    coordWidth = self.getCoordWidth()
    # None entries stay None; scalars and (x, y) pairs scale component-wise.
    self.coordinates = [
        None
        if d is None
        else d * scalar
        if coordWidth == 1
        else (d[0] * scalar, d[1] * scalar)
        for d in self.coordinates
    ]

def roundDeltas(self):
    """Round every delta to an integer in place, using OT rounding."""
    coordWidth = self.getCoordWidth()
    self.coordinates = [
        None
        if d is None
        else otRound(d)
        if coordWidth == 1
        else (otRound(d[0]), otRound(d[1]))
        for d in self.coordinates
    ]

def calcInferredDeltas(self, origCoords, endPts):
    """Replace None deltas with IUP-interpolated values (gvar only)."""
    from fontTools.varLib.iup import iup_delta

    if self.getCoordWidth() == 1:
        raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
    if None in self.coordinates:
        if len(self.coordinates) != len(origCoords):
            raise ValueError(
                "Expected len(origCoords) == %d; found %d"
                % (len(self.coordinates), len(origCoords))
            )
        self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
    """Drop deltas that IUP can reconstruct, keeping the result only if smaller."""
    from fontTools.varLib.iup import iup_delta_optimize

    if None in self.coordinates:
        return  # already optimized
    deltaOpt = iup_delta_optimize(
        self.coordinates, origCoords, endPts, tolerance=tolerance
    )
    if None in deltaOpt:
        if isComposite and all(d is None for d in deltaOpt):
            # Fix for macOS composites
            # https://github.com/fonttools/fonttools/issues/1381
            deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
        # Use "optimized" version only if smaller...
        varOpt = TupleVariation(self.axes, deltaOpt)
        # Shouldn't matter that this is different from fvar...?
        axisTags = sorted(self.axes.keys())
        tupleData, auxData = self.compile(axisTags)
        unoptimizedLength = len(tupleData) + len(auxData)
        tupleData, auxData = varOpt.compile(axisTags)
        optimizedLength = len(tupleData) + len(auxData)
        if optimizedLength < unoptimizedLength:
            self.coordinates = varOpt.coordinates

def __imul__(self, scalar):
    # In-place scaling: `var *= 0.5` scales all deltas.
    self.scaleDeltas(scalar)
    return self

def __iadd__(self, other):
    """Add another TupleVariation's deltas to ours, in place."""
    if not isinstance(other, TupleVariation):
        return NotImplemented
    deltas1 = self.coordinates
    length = len(deltas1)
    deltas2 = other.coordinates
    if len(deltas2) != length:
        raise ValueError("cannot sum TupleVariation deltas with different lengths")
    # 'None' values have different meanings in gvar vs cvar TupleVariations:
    # within the gvar, when deltas are not provided explicitly for some points,
    # they need to be inferred; whereas for the 'cvar' table, if deltas are not
    # provided for some CVT values, then no adjustments are made (i.e. None == 0).
    # Thus, we cannot sum deltas for gvar TupleVariations if they contain
    # inferred deltas (the latter need to be computed first using
    # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
    # deltas as if they are zeros.
    if self.getCoordWidth() == 2:
        for i, d2 in zip(range(length), deltas2):
            d1 = deltas1[i]
            try:
                deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
            except TypeError:
                raise ValueError("cannot sum gvar deltas with inferred points")
    else:
        for i, d2 in zip(range(length), deltas2):
            d1 = deltas1[i]
            if d1 is not None and d2 is not None:
                deltas1[i] = d1 + d2
            elif d1 is None and d2 is not None:
                deltas1[i] = d2
            # elif d2 is None do nothing
    return self
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
    """Read `sharedTupleCount` shared peak coords from `data` starting at `offset`."""
    result = []
    for _ in range(sharedTupleCount):
        t, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
        result.append(t)
    return result
175,394 | from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
otRound,
)
from fontTools.misc.textTools import safeEval
import array
from collections import Counter, defaultdict
import io
import logging
import struct
import sys
TUPLE_INDEX_MASK = 0x0FFF
class Counter(Dict[_T, int], Generic[_T]):
def __init__(self, **kwargs: int) -> None: ...
def __init__(self, mapping: Mapping[_T, int]) -> None: ...
def __init__(self, iterable: Iterable[_T]) -> None: ...
def copy(self: _S) -> _S: ...
def elements(self) -> Iterator[_T]: ...
def most_common(self, n: Optional[int] = ...) -> List[Tuple[_T, int]]: ...
def subtract(self, __mapping: Mapping[_T, int]) -> None: ...
def subtract(self, iterable: Iterable[_T]) -> None: ...
# The Iterable[Tuple[...]] argument type is not actually desirable
# (the tuples will be added as keys, breaking type safety) but
# it's included so that the signature is compatible with
# Dict.update. Not sure if we should use '# type: ignore' instead
# and omit the type from the union.
def update(self, __m: Mapping[_T, int], **kwargs: int) -> None: ...
def update(self, __m: Union[Iterable[_T], Iterable[Tuple[_T, int]]], **kwargs: int) -> None: ...
def update(self, **kwargs: int) -> None: ...
def __add__(self, other: Counter[_T]) -> Counter[_T]: ...
def __sub__(self, other: Counter[_T]) -> Counter[_T]: ...
def __and__(self, other: Counter[_T]) -> Counter[_T]: ...
def __or__(self, other: Counter[_T]) -> Counter[_T]: ...
def __iadd__(self, other: Counter[_T]) -> Counter[_T]: ...
def __isub__(self, other: Counter[_T]) -> Counter[_T]: ...
def __iand__(self, other: Counter[_T]) -> Counter[_T]: ...
def __ior__(self, other: Counter[_T]) -> Counter[_T]: ...
def compileSharedTuples(
    axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
):
    """Return the compiled peak tuples that are worth sharing across variations.

    Only tuples used by more than one variation are returned, ordered by
    descending frequency (ties broken by the compiled bytes) so the smallest
    shared indices go to the most common tuples.
    """
    frequencies = Counter(var.compileCoord(axisTags) for var in variations)
    # In python < 3.7, most_common() ordering is non-deterministic for equal
    # counts, so sort explicitly to keep the output stable.
    ranked = sorted(
        frequencies.most_common(MAX_NUM_SHARED_COORDS),
        key=lambda item: (-item[1], item[0]),
    )
    return [coord for coord, count in ranked if count > 1]
175,395 | from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import logging
def _make_map(font, chars, gids):
assert len(chars) == len(gids)
glyphNames = font.getGlyphNameMany(gids)
cmap = {}
for char, gid, name in zip(chars, gids, glyphNames):
if gid == 0:
continue
cmap[char] = name
return cmap | null |
175,396 | from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import logging
def splitRange(startCode, endCode, cmap):
    """Split [startCode, endCode] into cmap4 sub-segments.

    Tries to carve out subranges whose glyph IDs are consecutive so the
    format-4 subtable stores them compactly.  Not provably optimal, but in
    practice the result is never bigger and often smaller.  Returns parallel
    (start, end) lists; the first start code is implied by `startCode`.
    """
    if startCode == endCode:
        return [], [endCode]

    # Pass 1: find the maximal runs of consecutive glyph IDs.
    runs = []
    runBegin = None  # begin code of the current consecutive run, or None
    prevGID = cmap[startCode]
    prevCode = startCode
    for code in range(startCode + 1, endCode + 1):
        gid = cmap[code]
        if gid == prevGID + 1:
            if runBegin is None:
                runBegin = prevCode
        elif runBegin is not None:
            runs.append((runBegin, prevCode))
            runBegin = None
        prevGID = gid
        prevCode = code
    if runBegin is not None:
        runs.append((runBegin, prevCode))
    assert prevCode == endCode

    # Pass 2: keep only runs whose extraction pays for itself.  A new segment
    # costs 8 bytes; staying inside the parent segment costs 2 bytes per char.
    kept = []
    for begin, stop in runs:
        if begin == startCode and stop == endCode:
            break  # the whole range is one run, we're fine
        # An edge-touching run adds one extra segment; an interior run two.
        threshold = 4 if (begin == startCode or stop == endCode) else 8
        if stop - begin + 1 > threshold:
            kept.append((begin, stop))
    runs = kept
    if not runs:
        return [], [endCode]

    # Pass 3: pad out to the full range, then fill the "holes" between runs —
    # the stretches where glyph IDs are _not_ consecutive.
    if runs[0][0] != startCode:
        runs.insert(0, (startCode, runs[0][0] - 1))
    if runs[-1][1] != endCode:
        runs.append((runs[-1][1] + 1, endCode))
    index = 1
    while index < len(runs):
        if runs[index - 1][1] + 1 != runs[index][0]:
            runs.insert(index, (runs[index - 1][1] + 1, runs[index][0] - 1))
            index += 1
        index += 1

    # Convert to parallel startCode/endCode lists; drop the implied first start.
    start = [b for b, _ in runs[1:]]
    end = [e for _, e in runs]
    assert len(start) + 1 == len(end)
    return start, end
175,397 | from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import logging
def cvtToUVS(threeByteString):
    """Unpack a 3-byte big-endian Unicode value into an int."""
    # Left-pad to 4 bytes so the value can be read as one big-endian uint32.
    padded = b"\0" + threeByteString
    (value,) = struct.unpack(">L", padded)
    return value
175,398 | from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import logging
def cvtFromUVS(val):
    """Pack an int below 2**24 into its 3-byte big-endian representation."""
    assert 0 <= val < 0x1000000
    # Pack as a 4-byte big-endian uint32 and drop the always-zero high byte.
    return struct.pack(">L", val)[1:]
175,399 | from fontTools.misc import sstruct
from fontTools.misc.roundTools import otRound
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.ttLib.tables import DefaultTable
import bisect
import logging
OS2_UNICODE_RANGES = (
(("Basic Latin", (0x0000, 0x007F)),),
(("Latin-1 Supplement", (0x0080, 0x00FF)),),
(("Latin Extended-A", (0x0100, 0x017F)),),
(("Latin Extended-B", (0x0180, 0x024F)),),
(
("IPA Extensions", (0x0250, 0x02AF)),
("Phonetic Extensions", (0x1D00, 0x1D7F)),
("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)),
),
(
("Spacing Modifier Letters", (0x02B0, 0x02FF)),
("Modifier Tone Letters", (0xA700, 0xA71F)),
),
(
("Combining Diacritical Marks", (0x0300, 0x036F)),
("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)),
),
(("Greek and Coptic", (0x0370, 0x03FF)),),
(("Coptic", (0x2C80, 0x2CFF)),),
(
("Cyrillic", (0x0400, 0x04FF)),
("Cyrillic Supplement", (0x0500, 0x052F)),
("Cyrillic Extended-A", (0x2DE0, 0x2DFF)),
("Cyrillic Extended-B", (0xA640, 0xA69F)),
),
(("Armenian", (0x0530, 0x058F)),),
(("Hebrew", (0x0590, 0x05FF)),),
(("Vai", (0xA500, 0xA63F)),),
(("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))),
(("NKo", (0x07C0, 0x07FF)),),
(("Devanagari", (0x0900, 0x097F)),),
(("Bengali", (0x0980, 0x09FF)),),
(("Gurmukhi", (0x0A00, 0x0A7F)),),
(("Gujarati", (0x0A80, 0x0AFF)),),
(("Oriya", (0x0B00, 0x0B7F)),),
(("Tamil", (0x0B80, 0x0BFF)),),
(("Telugu", (0x0C00, 0x0C7F)),),
(("Kannada", (0x0C80, 0x0CFF)),),
(("Malayalam", (0x0D00, 0x0D7F)),),
(("Thai", (0x0E00, 0x0E7F)),),
(("Lao", (0x0E80, 0x0EFF)),),
(("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))),
(("Balinese", (0x1B00, 0x1B7F)),),
(("Hangul Jamo", (0x1100, 0x11FF)),),
(
("Latin Extended Additional", (0x1E00, 0x1EFF)),
("Latin Extended-C", (0x2C60, 0x2C7F)),
("Latin Extended-D", (0xA720, 0xA7FF)),
),
(("Greek Extended", (0x1F00, 0x1FFF)),),
(
("General Punctuation", (0x2000, 0x206F)),
("Supplemental Punctuation", (0x2E00, 0x2E7F)),
),
(("Superscripts And Subscripts", (0x2070, 0x209F)),),
(("Currency Symbols", (0x20A0, 0x20CF)),),
(("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),),
(("Letterlike Symbols", (0x2100, 0x214F)),),
(("Number Forms", (0x2150, 0x218F)),),
(
("Arrows", (0x2190, 0x21FF)),
("Supplemental Arrows-A", (0x27F0, 0x27FF)),
("Supplemental Arrows-B", (0x2900, 0x297F)),
("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)),
),
(
("Mathematical Operators", (0x2200, 0x22FF)),
("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)),
("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)),
("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)),
),
(("Miscellaneous Technical", (0x2300, 0x23FF)),),
(("Control Pictures", (0x2400, 0x243F)),),
(("Optical Character Recognition", (0x2440, 0x245F)),),
(("Enclosed Alphanumerics", (0x2460, 0x24FF)),),
(("Box Drawing", (0x2500, 0x257F)),),
(("Block Elements", (0x2580, 0x259F)),),
(("Geometric Shapes", (0x25A0, 0x25FF)),),
(("Miscellaneous Symbols", (0x2600, 0x26FF)),),
(("Dingbats", (0x2700, 0x27BF)),),
(("CJK Symbols And Punctuation", (0x3000, 0x303F)),),
(("Hiragana", (0x3040, 0x309F)),),
(
("Katakana", (0x30A0, 0x30FF)),
("Katakana Phonetic Extensions", (0x31F0, 0x31FF)),
),
(("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))),
(("Hangul Compatibility Jamo", (0x3130, 0x318F)),),
(("Phags-pa", (0xA840, 0xA87F)),),
(("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),),
(("CJK Compatibility", (0x3300, 0x33FF)),),
(("Hangul Syllables", (0xAC00, 0xD7AF)),),
(("Non-Plane 0 *", (0xD800, 0xDFFF)),),
(("Phoenician", (0x10900, 0x1091F)),),
(
("CJK Unified Ideographs", (0x4E00, 0x9FFF)),
("CJK Radicals Supplement", (0x2E80, 0x2EFF)),
("Kangxi Radicals", (0x2F00, 0x2FDF)),
("Ideographic Description Characters", (0x2FF0, 0x2FFF)),
("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)),
("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)),
("Kanbun", (0x3190, 0x319F)),
),
(("Private Use Area (plane 0)", (0xE000, 0xF8FF)),),
(
("CJK Strokes", (0x31C0, 0x31EF)),
("CJK Compatibility Ideographs", (0xF900, 0xFAFF)),
("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)),
),
(("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),),
(("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),),
(("Combining Half Marks", (0xFE20, 0xFE2F)),),
(
("Vertical Forms", (0xFE10, 0xFE1F)),
("CJK Compatibility Forms", (0xFE30, 0xFE4F)),
),
(("Small Form Variants", (0xFE50, 0xFE6F)),),
(("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),),
(("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),),
(("Specials", (0xFFF0, 0xFFFF)),),
(("Tibetan", (0x0F00, 0x0FFF)),),
(("Syriac", (0x0700, 0x074F)),),
(("Thaana", (0x0780, 0x07BF)),),
(("Sinhala", (0x0D80, 0x0DFF)),),
(("Myanmar", (0x1000, 0x109F)),),
(
("Ethiopic", (0x1200, 0x137F)),
("Ethiopic Supplement", (0x1380, 0x139F)),
("Ethiopic Extended", (0x2D80, 0x2DDF)),
),
(("Cherokee", (0x13A0, 0x13FF)),),
(("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),),
(("Ogham", (0x1680, 0x169F)),),
(("Runic", (0x16A0, 0x16FF)),),
(("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))),
(("Mongolian", (0x1800, 0x18AF)),),
(("Braille Patterns", (0x2800, 0x28FF)),),
(("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))),
(
("Tagalog", (0x1700, 0x171F)),
("Hanunoo", (0x1720, 0x173F)),
("Buhid", (0x1740, 0x175F)),
("Tagbanwa", (0x1760, 0x177F)),
),
(("Old Italic", (0x10300, 0x1032F)),),
(("Gothic", (0x10330, 0x1034F)),),
(("Deseret", (0x10400, 0x1044F)),),
(
("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)),
("Musical Symbols", (0x1D100, 0x1D1FF)),
("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)),
),
(("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),),
(
("Private Use (plane 15)", (0xF0000, 0xFFFFD)),
("Private Use (plane 16)", (0x100000, 0x10FFFD)),
),
(
("Variation Selectors", (0xFE00, 0xFE0F)),
("Variation Selectors Supplement", (0xE0100, 0xE01EF)),
),
(("Tags", (0xE0000, 0xE007F)),),
(("Limbu", (0x1900, 0x194F)),),
(("Tai Le", (0x1950, 0x197F)),),
(("New Tai Lue", (0x1980, 0x19DF)),),
(("Buginese", (0x1A00, 0x1A1F)),),
(("Glagolitic", (0x2C00, 0x2C5F)),),
(("Tifinagh", (0x2D30, 0x2D7F)),),
(("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),),
(("Syloti Nagri", (0xA800, 0xA82F)),),
(
("Linear B Syllabary", (0x10000, 0x1007F)),
("Linear B Ideograms", (0x10080, 0x100FF)),
("Aegean Numbers", (0x10100, 0x1013F)),
),
(("Ancient Greek Numbers", (0x10140, 0x1018F)),),
(("Ugaritic", (0x10380, 0x1039F)),),
(("Old Persian", (0x103A0, 0x103DF)),),
(("Shavian", (0x10450, 0x1047F)),),
(("Osmanya", (0x10480, 0x104AF)),),
(("Cypriot Syllabary", (0x10800, 0x1083F)),),
(("Kharoshthi", (0x10A00, 0x10A5F)),),
(("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),),
(
("Cuneiform", (0x12000, 0x123FF)),
("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)),
),
(("Counting Rod Numerals", (0x1D360, 0x1D37F)),),
(("Sundanese", (0x1B80, 0x1BBF)),),
(("Lepcha", (0x1C00, 0x1C4F)),),
(("Ol Chiki", (0x1C50, 0x1C7F)),),
(("Saurashtra", (0xA880, 0xA8DF)),),
(("Kayah Li", (0xA900, 0xA92F)),),
(("Rejang", (0xA930, 0xA95F)),),
(("Cham", (0xAA00, 0xAA5F)),),
(("Ancient Symbols", (0x10190, 0x101CF)),),
(("Phaistos Disc", (0x101D0, 0x101FF)),),
(
("Carian", (0x102A0, 0x102DF)),
("Lycian", (0x10280, 0x1029F)),
("Lydian", (0x10920, 0x1093F)),
),
(("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))),
)
def _getUnicodeRanges():
    """Return parallel sorted lists of range starts and (stop, bit) pairs.

    Built lazily from OS2_UNICODE_RANGES on first call; the result is cached
    in (and appended to) the module-level _unicodeStarts/_unicodeValues lists.
    """
    if not _unicodeStarts:
        entries = sorted(
            (start, (stop, bit))
            for bit, blocks in enumerate(OS2_UNICODE_RANGES)
            for _, (start, stop) in blocks
        )
        for start, value in entries:
            _unicodeStarts.append(start)
            _unicodeValues.append(value)
    return _unicodeStarts, _unicodeValues
The provided code snippet includes the necessary dependencies for implementing the `intersectUnicodeRanges` function. Write a Python function `def intersectUnicodeRanges(unicodes, inverse=False)` to solve the following problem:
Intersect a sequence of (int) Unicode codepoints with the Unicode block ranges defined in the OpenType specification v1.7, and return the set of 'ulUnicodeRanges' bits for which there is at least ONE intersection. If 'inverse' is True, return the bits for which there is NO intersection. >>> intersectUnicodeRanges([0x0410]) == {9} True >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122} True >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == ( ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122}) True
Here is the function:
def intersectUnicodeRanges(unicodes, inverse=False):
    """Intersect a sequence of (int) Unicode codepoints with the Unicode block
    ranges defined in the OpenType specification v1.7, and return the set of
    'ulUnicodeRanges' bits for which there is at least ONE intersection.
    If 'inverse' is True, return the bits for which there is NO intersection.

    >>> intersectUnicodeRanges([0x0410]) == {9}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
    ...     set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
    True
    """
    unicodes = set(unicodes)
    unicodestarts, unicodevalues = _getUnicodeRanges()
    bits = set()
    for code in unicodes:
        # bisect returns the insertion point, i.e. the index AFTER the last
        # range whose start <= code, so the candidate range is at index - 1.
        # Without the -1 the *next* range is consulted (wrong bit for codes
        # in the first block or a gap) and codepoints at or above the highest
        # range start raise IndexError.  Index -1 is always valid because
        # codepoints are >= 0 and the first range starts at 0x0000.
        stop, bit = unicodevalues[bisect.bisect(unicodestarts, code) - 1]
        if code <= stop:
            bits.add(bit)
    # The spec says that bit 57 ("Non Plane 0") implies that there's
    # at least one codepoint beyond the BMP; so I also include all
    # the non-BMP codepoints here
    if any(0x10000 <= code < 0x110000 for code in unicodes):
        bits.add(57)
    return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
175,400 | from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval
from fontTools.misc import sstruct
from . import DefaultTable
import base64
def tostr(s, encoding="ascii", errors="strict"):
    """Return *s* as a str, decoding bytes-like input with *encoding*.

    str input is returned unchanged.
    """
    if isinstance(s, str):
        return s
    return s.decode(encoding, errors)
def strjoin(iterable, joiner=""):
    # Join an iterable of strings; tostr() lets the joiner be given as bytes.
    return tostr(joiner).join(iterable)
def b64encode(b):
    """Base64-encode *b* and return a str wrapped at 76 characters.

    Every emitted line, including the last, is newline-terminated; empty
    input yields an empty string.
    """
    encoded = base64.b64encode(b)
    # Line-break at 76 chars.
    pieces = []
    for pos in range(0, len(encoded), 76):
        pieces.append(tostr(encoded[pos : pos + 76]))
        pieces.append("\n")
    return strjoin(pieces)
175,401 | from fontTools.config import OPTIONS
from fontTools.misc.textTools import Tag, bytesjoin
from .DefaultTable import DefaultTable
from enum import IntEnum
import sys
import array
import struct
import logging
from functools import lru_cache
from typing import Iterator, NamedTuple, Optional, Tuple
def packUInt8(value):
    """Serialize *value* as one unsigned byte."""
    packed = struct.pack(">B", value)
    return packed
175,402 | from fontTools.config import OPTIONS
from fontTools.misc.textTools import Tag, bytesjoin
from .DefaultTable import DefaultTable
from enum import IntEnum
import sys
import array
import struct
import logging
from functools import lru_cache
from typing import Iterator, NamedTuple, Optional, Tuple
def packUShort(value):
    """Serialize *value* as a big-endian unsigned 16-bit integer."""
    packed = struct.pack(">H", value)
    return packed
175,403 | from fontTools.config import OPTIONS
from fontTools.misc.textTools import Tag, bytesjoin
from .DefaultTable import DefaultTable
from enum import IntEnum
import sys
import array
import struct
import logging
from functools import lru_cache
from typing import Iterator, NamedTuple, Optional, Tuple
def packULong(value):
    """Serialize *value* as a big-endian unsigned 32-bit integer.

    Asserts that *value* fits in 32 bits.
    """
    assert 0 <= value < 0x100000000, value
    packed = struct.pack(">I", value)
    return packed
175,404 | from fontTools.config import OPTIONS
from fontTools.misc.textTools import Tag, bytesjoin
from .DefaultTable import DefaultTable
from enum import IntEnum
import sys
import array
import struct
import logging
from functools import lru_cache
from typing import Iterator, NamedTuple, Optional, Tuple
def packUInt24(value):
    """Serialize *value* as a big-endian unsigned 24-bit integer (3 bytes).

    Asserts that *value* fits in 24 bits.
    """
    assert 0 <= value < 0x1000000, value
    # Pack as 4 bytes and drop the (zero) high byte.
    fourBytes = struct.pack(">I", value)
    return fourBytes[1:]
175,405 | from fontTools.config import OPTIONS
from fontTools.misc.textTools import Tag, bytesjoin
from .DefaultTable import DefaultTable
from enum import IntEnum
import sys
import array
import struct
import logging
from functools import lru_cache
from typing import Iterator, NamedTuple, Optional, Tuple
# GPOS ValueRecord field layout: one (mask, name, isDevice, signed) row per
# ValueFormat bit, in bit order.  Bits 0x0100-0x8000 are reserved by the
# OpenType spec and carried here only so every bit has an entry.
valueRecordFormat = [
    # Mask Name isDevice signed
    (0x0001, "XPlacement", 0, 1),
    (0x0002, "YPlacement", 0, 1),
    (0x0004, "XAdvance", 0, 1),
    (0x0008, "YAdvance", 0, 1),
    (0x0010, "XPlaDevice", 1, 0),
    (0x0020, "YPlaDevice", 1, 0),
    (0x0040, "XAdvDevice", 1, 0),
    (0x0080, "YAdvDevice", 1, 0),
    # reserved:
    (0x0100, "Reserved1", 0, 0),
    (0x0200, "Reserved2", 0, 0),
    (0x0400, "Reserved3", 0, 0),
    (0x0800, "Reserved4", 0, 0),
    (0x1000, "Reserved5", 0, 0),
    (0x2000, "Reserved6", 0, 0),
    (0x4000, "Reserved7", 0, 0),
    (0x8000, "Reserved8", 0, 0),
]
def _buildDict():
    """Map each value-record field name to its (mask, isDevice, signed) triple."""
    return {
        name: (mask, isDevice, signed)
        for mask, name, isDevice, signed in valueRecordFormat
    }
175,406 | from fontTools.misc import sstruct
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
import pdb
import struct
# META table label IDs mapped to their human-readable names (consumed by
# getLabelString); IDs missing from this dict are reported as unknown.
METALabelDict = {
    0: "MojikumiX4051",  # An integer in the range 1-20
    1: "UNIUnifiedBaseChars",
    2: "BaseFontName",
    3: "Language",
    4: "CreationDate",
    5: "FoundryName",
    6: "FoundryCopyright",
    7: "OwnerURI",
    8: "WritingScript",
    10: "StrokeCount",
    11: "IndexingRadical",
}
def getLabelString(labelID):
    """Return the human-readable META label name for *labelID*.

    IDs not present in METALabelDict map to "Unknown label".
    """
    label = METALabelDict.get(labelID, "Unknown label")
    return str(label)
175,407 | from fontTools.misc import sstruct
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
import pdb
import struct
def byteord(c):
    """Return *c* unchanged if it is already an int, else its ordinal via ord()."""
    if isinstance(c, int):
        return c
    return ord(c)
def mapXMLToUTF8(string):
    """Expand "&#x..;" (and XML-escaped "&amp;#x..;") hex character references
    in *string* and return the result encoded as UTF-8 bytes.
    """
    uString = str()
    strLen = len(string)
    i = 0
    while i < strLen:
        prefixLen = 0
        if string[i : i + 3] == "&#x":
            prefixLen = 3
        elif string[i : i + 7] == "&amp;#x":
            # XML-escaped ampersand form.  This branch previously compared a
            # 7-char slice against the 3-char literal "&#x" and could never
            # match.
            prefixLen = 7
        if prefixLen:
            i = i + prefixLen
            j = i
            while string[i] != ";":
                i = i + 1
            valStr = string[j:i]
            # int(valStr, 16) replaces eval("0x" + valStr): identical for
            # valid hex digits, without evaluating arbitrary expressions.
            uString = uString + chr(int(valStr, 16))
        else:
            uString = uString + chr(byteord(string[i]))
        i = i + 1
    return uString.encode("utf_8")
175,408 | from fontTools.misc import sstruct
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
import pdb
import struct
def mapUTF8toXML(string):
    """Decode UTF-8 *string* and escape every control or non-ASCII character
    as a "&#x..;" hex character reference; printable ASCII passes through.
    """
    pieces = []
    for uChar in string.decode("utf_8"):
        code = ord(uChar)
        if 0x1F < code < 0x80:
            pieces.append(uChar)
        else:
            pieces.append("&#x" + hex(code)[2:] + ";")
    return "".join(pieces)
175,409 | from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import bytesjoin, safeEval
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
import struct
import itertools
from collections import deque
import logging
class EblcIndexSubTable(object):
    """Base class for EBLC index subtables.

    Stores the raw subtable bytes plus the owning font and decompiles them
    lazily: the first access to a missing attribute triggers decompile()
    (implemented by subclasses).  `indexSubHeaderFormat` is defined elsewhere
    in this module.
    """

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        # if not ttFont.lazy:
        # self.decompile()
        # del self.data, self.ttFont

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Force the lazy decompile if the raw data is still attached.
        if hasattr(self, "data"):
            self.decompile()

    # This method just takes care of the indexSubHeader. Implementing subclasses
    # should call it to compile the indexSubHeader and then continue compiling
    # the remainder of their unique format.
    def compile(self, ttFont):
        return struct.pack(
            indexSubHeaderFormat,
            self.indexFormat,
            self.imageFormat,
            self.imageDataOffset,
        )

    # Creates the XML for bitmap glyphs. Each index sub table basically makes
    # the same XML except for specific metric information that is written
    # out via a method call that a subclass implements optionally.
    def toXML(self, writer, ttFont):
        writer.begintag(
            self.__class__.__name__,
            [
                ("imageFormat", self.imageFormat),
                ("firstGlyphIndex", self.firstGlyphIndex),
                ("lastGlyphIndex", self.lastGlyphIndex),
            ],
        )
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Write out the names as thats all thats needed to rebuild etc.
        # For font debugging of consecutive formats the ids are also written.
        # The ids are not read when moving from the XML format.
        glyphIds = map(ttFont.getGlyphID, self.names)
        for glyphName, glyphId in zip(self.names, glyphIds):
            writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Read all the attributes. Even though the glyph indices are
        # recalculated, they are still read in case there needs to
        # be an immediate export of the data.
        self.imageFormat = safeEval(attrs["imageFormat"])
        self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
        self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])
        self.readMetrics(name, attrs, content, ttFont)
        self.names = []
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "glyphLoc":
                self.names.append(attrs["name"])

    # A helper method that writes the metrics for the index sub table. It also
    # is responsible for writing the image size for fixed size data since fixed
    # size is not recalculated on compile. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # A helper method that is the inverse of writeMetrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    # This method is for fixed glyph data sizes. There are formats where
    # the glyph data is fixed but are actually composite glyphs. To handle
    # this the font spec in indexSubTable makes the data the size of the
    # fixed size by padding the component arrays. This function abstracts
    # out this padding process. Input is data unpadded. Output is data
    # padded only in fixed formats. Default behavior is to return the data.
    def padBitmapData(self, data):
        return data

    # Remove any of the glyph locations and names that are flagged as skipped.
    # This only occurs in formats {1,3}.
    def removeSkipGlyphs(self):
        # Determines if a name, location pair is a valid data location.
        # Skip glyphs are marked when the size is equal to zero.
        def isValidLocation(args):
            (name, (startByte, endByte)) = args
            return startByte < endByte

        # Remove all skip glyphs.
        dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
        self.names, self.locations = list(map(list, zip(*dataPairs)))
def bytesjoin(iterable, joiner=b""):
    # Join as bytes; tobytes() (defined elsewhere in this module) coerces the
    # joiner and every item so str and bytes may be mixed.
    return tobytes(joiner).join(tobytes(item) for item in iterable)
# NOTE(review): this is a typeshed-style typing stub for collections.deque
# (all bodies are ``...``), not a runnable implementation.  It references
# _T/_S/Sized/Iterable/Reversible/Generic, which are not defined in this
# file, and ``maxlen`` looks like it should carry @property — confirm
# against the upstream stub.
class deque(Sized, Iterable[_T], Reversible[_T], Generic[_T]):
    def __init__(self, iterable: Iterable[_T] = ..., maxlen: int = ...) -> None: ...
    def maxlen(self) -> Optional[int]: ...
    def append(self, x: _T) -> None: ...
    def appendleft(self, x: _T) -> None: ...
    def clear(self) -> None: ...
    def count(self, x: _T) -> int: ...
    def extend(self, iterable: Iterable[_T]) -> None: ...
    def extendleft(self, iterable: Iterable[_T]) -> None: ...
    def pop(self) -> _T: ...
    def popleft(self) -> _T: ...
    def remove(self, value: _T) -> None: ...
    def reverse(self) -> None: ...
    def rotate(self, n: int = ...) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[_T]: ...
    def __str__(self) -> str: ...
    def __hash__(self) -> int: ...
    def __getitem__(self, i: int) -> _T: ...
    def __setitem__(self, i: int, x: _T) -> None: ...
    def __contains__(self, o: _T) -> bool: ...
    def __reversed__(self) -> Iterator[_T]: ...
    def __iadd__(self: _S, iterable: Iterable[_T]) -> _S: ...
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
    """Build a mixin class for EBLC index subtable formats 1 and 3.

    *formatStringForDataType* is the struct code of one offset entry ("L" for
    format 1, "H" for format 3).  The mixin supplies decompile()/compile()
    that translate between the on-disk offset array and the per-glyph
    (start, end) ``locations`` list.
    """
    # Prep the data size for the offset array data format.
    dataFormat = ">" + formatStringForDataType
    offsetDataSize = struct.calcsize(dataFormat)

    class OffsetArrayIndexSubTableMixin(object):
        def decompile(self):
            numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
            indexingOffsets = [
                glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
            ]
            indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
            offsetArray = [
                struct.unpack(dataFormat, self.data[slice(*loc)])[0]
                for loc in indexingLocations
            ]

            glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
            self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))

            self.names = list(map(self.ttFont.getGlyphName, glyphIds))
            self.removeSkipGlyphs()
            del self.data, self.ttFont

        def compile(self, ttFont):
            # First make sure that all the data lines up properly. Formats 1 and 3
            # must have all its data lined up consecutively. If not this will fail.
            for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
                assert (
                    curLoc[1] == nxtLoc[0]
                ), "Data must be consecutive in indexSubTable offset formats"

            glyphIds = list(map(ttFont.getGlyphID, self.names))
            # Make sure that all ids are sorted strictly increasing.
            assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))

            # Run a simple algorithm to add skip glyphs to the data locations at
            # the places where an id is not present.
            idQueue = deque(glyphIds)
            locQueue = deque(self.locations)
            allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            allLocations = []
            for curId in allGlyphIds:
                if curId != idQueue[0]:
                    # Zero-length (start == start) location marks a skip glyph.
                    allLocations.append((locQueue[0][0], locQueue[0][0]))
                else:
                    idQueue.popleft()
                    allLocations.append(locQueue.popleft())

            # Now that all the locations are collected, pack them appropriately into
            # offsets. This is the form where offset[i] is the location and
            # offset[i+1]-offset[i] is the size of the data location.
            offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
            # Image data offset must be less than or equal to the minimum of locations.
            # This offset may change the value for round tripping but is safer and
            # allows imageDataOffset to not be required to be in the XML version.
            self.imageDataOffset = min(offsets)
            offsetArray = [offset - self.imageDataOffset for offset in offsets]

            dataList = [EblcIndexSubTable.compile(self, ttFont)]
            dataList += [
                struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
            ]
            # Take care of any padding issues. Only occurs in format 3.
            if offsetDataSize * len(offsetArray) % 4 != 0:
                dataList.append(struct.pack(dataFormat, 0))
            return bytesjoin(dataList)

    return OffsetArrayIndexSubTableMixin
175,410 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
def _memoize(f):
class memodict(dict):
def __missing__(self, key):
ret = f(key)
if len(key) == 1:
self[key] = ret
return ret
return memodict().__getitem__ | null |
175,411 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
def bytechr(n):
    """Return a length-1 bytes object whose single byte has ordinal *n* (0-255)."""
    return bytes([n])
def byteord(c):
    """Return *c* unchanged if already an int, else its ordinal via ord()."""
    return c if isinstance(c, int) else ord(c)
def bytesjoin(iterable, joiner=b""):
    # Join as bytes; tobytes() (defined elsewhere in this module) coerces the
    # joiner and every item so str and bytes may be mixed.
    return tobytes(joiner).join(tobytes(item) for item in iterable)
def _reverseBytes(data):
    """Return *data* with the bit order of every byte reversed."""
    if len(data) != 1:
        if isinstance(data, (bytes, bytearray)):
            # Iterating bytes in Python 3 yields ints, whose len() is
            # undefined and made the recursive map() below raise TypeError
            # for any multi-byte input; recurse on 1-byte slices instead.
            chunks = (data[i : i + 1] for i in range(len(data)))
        else:
            chunks = data
        return bytesjoin(map(_reverseBytes, chunks))
    byte = byteord(data)
    result = 0
    for i in range(8):
        result = result << 1
        result |= byte & 1
        byte = byte >> 1
    return bytechr(result)
175,412 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Dump a glyph's bitmap bytes as a <rawimagedata> hex-blob XML element."""
    writer.begintag("rawimagedata")
    writer.newline()
    writer.dumphex(bitmapObject.imageData)
    writer.endtag("rawimagedata")
    writer.newline()
175,413 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
# NOTE(review): ``readHex`` below is a signature-only stub (its body was
# truncated in extraction), so this region is not valid Python as written —
# the real implementation lives in fontTools.misc.textTools.
def readHex(content):
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
    # Inverse of _writeRawImageData: parse the hex dump back into raw bytes.
    bitmapObject.imageData = readHex(content)
175,414 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
# NOTE(review): ``hexStr`` below is a signature-only stub (body truncated in
# extraction); the real implementation lives in fontTools.misc.textTools.
def hexStr(data):
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Dump a bitmap glyph as one <row> hex element per scanline."""
    # exportMetrics/exportBitDepth are one-shot attributes stashed on the
    # bitmap object by the caller; consume and remove them.
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    writer.begintag(
        "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height
    )
    writer.newline()
    for curRow in range(metrics.height):
        rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
        writer.simpletag("row", value=hexStr(rowData))
        writer.newline()
    writer.endtag("rowimagedata")
    writer.newline()
175,415 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
safeEval = ast.literal_eval
def deHexStr(hexdata):
    """Convert a hex string to binary data."""
    # Strip all whitespace, then pad to an even digit count.
    hexdata = strjoin(hexdata.split())
    if len(hexdata) % 2:
        hexdata = hexdata + "0"
    pieces = [
        bytechr(int(hexdata[pos : pos + 2], 16))
        for pos in range(0, len(hexdata), 2)
    ]
    return bytesjoin(pieces)
class SmallGlyphMetrics(BitmapGlyphMetrics):
    # Small metrics variant: binary layout given by smallGlyphMetricsFormat
    # (both defined elsewhere in this module).
    binaryFormat = smallGlyphMetricsFormat
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild a bitmap glyph from the per-row hex dump written by
    _writeRowImageData.
    """
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        childName, childAttrs, _ = element
        if childName == "row":
            dataRows.append(deHexStr(childAttrs["value"]))
    bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
175,416 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
def _data2binary(data, numBits):
    """Render the low *numBits* bits of *data* as a '0'/'1' string,
    least-significant bit first within each byte.
    """
    digits = []
    for curByte in data:
        value = byteord(curByte)
        take = min(8, numBits)
        for _ in range(take):
            digits.append("1" if value & 0x1 else "0")
            value = value >> 1
        numBits -= take
    return strjoin(digits)
def strjoin(iterable, joiner=""):
    # str counterpart of bytesjoin; tostr() (defined elsewhere) coerces the joiner.
    return tostr(joiner).join(iterable)
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Dump a bitmap glyph as ASCII-art rows ('.' = 0, '@' = 1) in XML."""
    # exportMetrics/exportBitDepth are one-shot attributes stashed on the
    # bitmap object by the caller; consume and remove them.
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    # A dict for mapping binary to more readable/artistic ASCII characters.
    binaryConv = {"0": ".", "1": "@"}

    writer.begintag(
        "bitwiseimagedata",
        bitDepth=bitDepth,
        width=metrics.width,
        height=metrics.height,
    )
    writer.newline()
    for curRow in range(metrics.height):
        rowData = bitmapObject.getRow(
            curRow, bitDepth=1, metrics=metrics, reverseBytes=True
        )
        rowData = _data2binary(rowData, metrics.width)
        # Make the output a readable ASCII art form.
        rowData = strjoin(map(binaryConv.get, rowData))
        writer.simpletag("row", value=rowData)
        writer.newline()
    writer.endtag("bitwiseimagedata")
    writer.newline()
175,417 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
# NOTE(review): the four definitions below are signature-only stubs (bodies
# truncated in extraction); as written ``class SmallGlyphMetrics`` has an
# empty suite, so this region is not valid Python.  They stand in for real
# helpers defined elsewhere in this module / in fontTools.misc.textTools.
def _binary2data(binary):
safeEval = ast.literal_eval
def strjoin(iterable, joiner=""):
class SmallGlyphMetrics(BitmapGlyphMetrics):
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild a bitmap glyph from the ASCII-art rows written by
    _writeBitwiseImageData.
    """
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    # A dict for mapping from ASCII to binary. All characters are considered
    # a '1' except space, period and '0' which maps to '0'.
    binaryConv = {" ": "0", ".": "0", "0": "0"}

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        name, attr, content = element
        if name == "row":
            # starmap(dict.get, (char, "1")) maps unknown chars to "1".
            mapParams = zip(attr["value"], itertools.repeat("1"))
            rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
            dataRows.append(_binary2data(rowData))

    bitmapObject.setRows(
        dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True
    )
175,418 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write a glyph's bitmap bytes to an external file under
    bitmaps/strike<N>/ next to the XML output, and record its path in XML.
    """
    try:
        folder = os.path.dirname(writer.file.name)
    except AttributeError:
        # fall back to current directory if output file's directory isn't found
        folder = "."
    folder = os.path.join(folder, "bitmaps")
    filename = glyphName + bitmapObject.fileExtension
    if not os.path.isdir(folder):
        os.makedirs(folder)
    folder = os.path.join(folder, "strike%d" % strikeIndex)
    if not os.path.isdir(folder):
        os.makedirs(folder)

    fullPath = os.path.join(folder, filename)
    writer.simpletag("extfileimagedata", value=fullPath)
    writer.newline()

    with open(fullPath, "wb") as file:
        file.write(bitmapObject.imageData)
175,419 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
fullPath = attrs["value"]
with open(fullPath, "rb") as file:
bitmapObject.imageData = file.read() | null |
175,420 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
safeEval,
readHex,
hexStr,
deHexStr,
)
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from . import DefaultTable
import itertools
import os
import struct
import logging
# Module-level logger for the EBDT helpers below.
log = logging.getLogger(__name__)

class BigGlyphMetrics(BitmapGlyphMetrics):
    # Big metrics variant: binary layout given by bigGlyphMetricsFormat.
    binaryFormat = bigGlyphMetricsFormat

class SmallGlyphMetrics(BitmapGlyphMetrics):
    # Small metrics variant: compact layout given by smallGlyphMetricsFormat.
    binaryFormat = smallGlyphMetricsFormat
def _createBitmapPlusMetricsMixin(metricsClass):
    """Build a mixin that (de)serializes the given metrics class
    (BigGlyphMetrics or SmallGlyphMetrics) alongside bitmap data, warning
    when XML contains the opposite metrics type.
    """
    # Both metrics names are listed here to make meaningful error messages.
    metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
    curMetricsName = metricsClass.__name__
    # Find which metrics this is for and determine the opposite name.
    metricsId = metricStrings.index(curMetricsName)
    oppositeMetricsName = metricStrings[1 - metricsId]

    class BitmapPlusMetricsMixin(object):
        def writeMetrics(self, writer, ttFont):
            self.metrics.toXML(writer, ttFont)

        def readMetrics(self, name, attrs, content, ttFont):
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == curMetricsName:
                    self.metrics = metricsClass()
                    self.metrics.fromXML(name, attrs, content, ttFont)
                elif name == oppositeMetricsName:
                    log.warning(
                        "Warning: %s being ignored in format %d.",
                        oppositeMetricsName,
                        self.getFormat(),
                    )

    return BitmapPlusMetricsMixin
175,421 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
class LookupList(BaseTable):
    @property
    def table(self):
        """Tag of the font table this LookupList belongs to: "GSUB" or "GPOS".

        Must be a property: toXML2 below reads ``self.table`` without
        calling it, so without the decorator the bound method object (not
        the tag string) would be used as the Debg data key.
        """
        for l in self.Lookup:
            for st in l.SubTable:
                if type(st).__name__.endswith("Subst"):
                    return "GSUB"
                if type(st).__name__.endswith("Pos"):
                    return "GPOS"
        raise ValueError

    def toXML2(self, xmlWriter, font):
        """Serialize to XML, annotating each lookup with feaLib debug info
        (name, source location, feature/script/language) when the font
        carries a Debg table; otherwise defer to the base implementation.
        """
        if (
            not font
            or "Debg" not in font
            or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data
        ):
            return super().toXML2(xmlWriter, font)
        debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table]
        for conv in self.getConverters():
            if conv.repeat:
                value = getattr(self, conv.name, [])
                for lookupIndex, item in enumerate(value):
                    if str(lookupIndex) in debugData:
                        info = LookupDebugInfo(*debugData[str(lookupIndex)])
                        tag = info.location
                        if info.name:
                            tag = f"{info.name}: {tag}"
                        if info.feature:
                            script, language, feature = info.feature
                            tag = f"{tag} in {feature} ({script}/{language})"
                        xmlWriter.comment(tag)
                        xmlWriter.newline()
                    conv.xmlWrite(
                        xmlWriter, font, item, conv.name, [("index", lookupIndex)]
                    )
            else:
                if conv.aux and not eval(conv.aux, None, vars(self)):
                    continue
                value = getattr(
                    self, conv.name, None
                )  # TODO Handle defaults instead of defaulting to None!
                conv.xmlWrite(xmlWriter, font, value, conv.name, [])
The provided code snippet includes the necessary dependencies for implementing the `fixLookupOverFlows` function. Write a Python function `def fixLookupOverFlows(ttf, overflowRecord)` to solve the following problem:
Either the offset from the LookupList to a lookup overflowed, or an offset from a lookup to a subtable overflowed. The table layout is: GPOS/GSUB Script List Feature List LookupList Lookup[0] and contents SubTable offset list SubTable[0] and contents ... SubTable[n] and contents ... Lookup[n] and contents SubTable offset list SubTable[0] and contents ... SubTable[n] and contents If the offset to a lookup overflowed (SubTableIndex is None) we must promote the *previous* lookup to an Extension type. If the offset from a lookup to a subtable overflowed, then we must promote it to an Extension Lookup type.
Here is the function:
def fixLookupOverFlows(ttf, overflowRecord):
    """Either the offset from the LookupList to a lookup overflowed, or
    an offset from a lookup to a subtable overflowed.
    The table layout is:
    GPOS/GSUB
            Script List
            Feature List
            LookupList
                    Lookup[0] and contents
                            SubTable offset list
                                    SubTable[0] and contents
                                    ...
                                    SubTable[n] and contents
                    ...
                    Lookup[n] and contents
                            SubTable offset list
                                    SubTable[0] and contents
                                    ...
                                    SubTable[n] and contents
    If the offset to a lookup overflowed (SubTableIndex is None)
    we must promote the *previous* lookup to an Extension type.
    If the offset from a lookup to subtable overflowed, then we must promote it
    to an Extension Lookup type.

    Returns 1 on success, 0 if nothing could be promoted.
    """
    ok = 0
    lookupIndex = overflowRecord.LookupListIndex
    if overflowRecord.SubTableIndex is None:
        lookupIndex = lookupIndex - 1
    if lookupIndex < 0:
        return ok
    # Extension lookup types: 7 for GSUB, 9 for GPOS.  NOTE(review): any
    # other tableType would leave extType unbound (NameError below).
    if overflowRecord.tableType == "GSUB":
        extType = 7
    elif overflowRecord.tableType == "GPOS":
        extType = 9

    lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup
    lookup = lookups[lookupIndex]
    # If the previous lookup is an extType, look further back. Very unlikely, but possible.
    while lookup.SubTable[0].__class__.LookupType == extType:
        lookupIndex = lookupIndex - 1
        if lookupIndex < 0:
            return ok
        lookup = lookups[lookupIndex]

    # Promote this lookup and every later one, wrapping each subtable in an
    # Extension subtable (lookupTypes is defined elsewhere in this module).
    for lookupIndex in range(lookupIndex, len(lookups)):
        lookup = lookups[lookupIndex]
        if lookup.LookupType != extType:
            lookup.LookupType = extType
            for si in range(len(lookup.SubTable)):
                subTable = lookup.SubTable[si]
                extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
                extSubTable = extSubTableClass()
                extSubTable.Format = 1
                extSubTable.ExtSubTable = subTable
                lookup.SubTable[si] = extSubTable
    ok = 1
    return ok
175,422 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
def splitMultipleSubst(oldSubTable, newSubTable, overflowRecord):
    """Split a GSUB MultipleSubst subtable to resolve an offset overflow.

    Moves the tail of ``oldSubTable.mapping`` (sorted by input glyph)
    into the freshly created ``newSubTable`` so that each half's internal
    offsets fit in 16 bits again.  Returns 1 on success.

    Fixes: the original return line carried dataset-extraction residue
    (``return ok | null``), which made the function raise NameError.
    """
    ok = 1
    oldMapping = sorted(oldSubTable.mapping.items())
    oldLen = len(oldMapping)

    if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
        # Coverage table is written last.  Overflow is to or within the
        # Coverage table, so simply cut the subtable in half.
        newLen = oldLen // 2
    elif overflowRecord.itemName == "Sequence":
        # Back up from the overflowed Sequence index so the offset to the
        # Coverage table no longer overflows.
        newLen = overflowRecord.itemIndex - 1

    newSubTable.mapping = {}
    for i in range(newLen, oldLen):
        item = oldMapping[i]
        key = item[0]
        newSubTable.mapping[key] = item[1]
        del oldSubTable.mapping[key]
    return ok
175,423 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord):
    """Split a GSUB AlternateSubst subtable to resolve an offset overflow.

    Moves the tail of ``oldSubTable.alternates`` (sorted by input glyph)
    into ``newSubTable``; the ``sortCoverageLast`` flag, if present, is
    carried over.  Returns 1 on success.

    Fixes: the original return line carried dataset-extraction residue
    (``return ok | null``), which made the function raise NameError.
    """
    ok = 1
    if hasattr(oldSubTable, "sortCoverageLast"):
        newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast

    oldAlts = sorted(oldSubTable.alternates.items())
    oldLen = len(oldAlts)

    if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
        # Coverage table is written last.  Overflow is to or within the
        # Coverage table, so simply cut the subtable in half.
        newLen = oldLen // 2
    elif overflowRecord.itemName == "AlternateSet":
        # Back up from the overflowed AlternateSet index so the offset to
        # the Coverage table no longer overflows.
        newLen = overflowRecord.itemIndex - 1

    newSubTable.alternates = {}
    for i in range(newLen, oldLen):
        item = oldAlts[i]
        key = item[0]
        newSubTable.alternates[key] = item[1]
        del oldSubTable.alternates[key]
    return ok
175,424 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord):
    """Split a GSUB LigatureSubst subtable to resolve an offset overflow.

    Moves the tail of ``oldSubTable.ligatures`` (sorted by first glyph)
    into ``newSubTable``.  Returns 1 on success.

    Fixes: the original return line carried dataset-extraction residue
    (``return ok | null``); also corrected a copy-pasted comment that
    referred to "AlternateSet" instead of "LigatureSet".
    """
    ok = 1
    oldLigs = sorted(oldSubTable.ligatures.items())
    oldLen = len(oldLigs)

    if overflowRecord.itemName in ["Coverage", "RangeRecord"]:
        # Coverage table is written last.  Overflow is to or within the
        # Coverage table, so simply cut the subtable in half.
        newLen = oldLen // 2
    elif overflowRecord.itemName == "LigatureSet":
        # Back up from the overflowed LigatureSet index so the offset to
        # the Coverage table no longer overflows.
        newLen = overflowRecord.itemIndex - 1

    newSubTable.ligatures = {}
    for i in range(newLen, oldLen):
        item = oldLigs[i]
        key = item[0]
        newSubTable.ligatures[key] = item[1]
        del oldSubTable.ligatures[key]
    return ok
175,425 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
class Coverage(FormatSwitchingBaseTable):
def populateDefaults(self, propagator=None):
def postRead(self, rawTable, font):
def preWrite(self, font):
def toXML2(self, xmlWriter, font):
def fromXML(self, name, attrs, content, font):
def splitPairPos(oldSubTable, newSubTable, overflowRecord):
    """Split a GPOS PairPos subtable in two to resolve an offset overflow.

    Format 1: the Coverage glyphs and PairSet records are cut in half.
    Format 2: the Class1 records (and the ClassDef1 entries / coverage
    glyphs that map to them) are cut in half; ClassDef2 stays shared.
    Returns True when a split was performed, False otherwise.

    Fixes: removed the unused local ``st`` and dataset-extraction residue
    (``return ok | null``) fused onto the return line.
    """
    ok = False
    newSubTable.Format = oldSubTable.Format
    if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1:
        for name in "ValueFormat1", "ValueFormat2":
            setattr(newSubTable, name, getattr(oldSubTable, name))

        # Move top half of coverage to new subtable
        newSubTable.Coverage = oldSubTable.Coverage.__class__()

        coverage = oldSubTable.Coverage.glyphs
        records = oldSubTable.PairSet

        oldCount = len(oldSubTable.PairSet) // 2

        oldSubTable.Coverage.glyphs = coverage[:oldCount]
        oldSubTable.PairSet = records[:oldCount]

        newSubTable.Coverage.glyphs = coverage[oldCount:]
        newSubTable.PairSet = records[oldCount:]

        oldSubTable.PairSetCount = len(oldSubTable.PairSet)
        newSubTable.PairSetCount = len(newSubTable.PairSet)

        ok = True

    elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1:
        if not hasattr(oldSubTable, "Class2Count"):
            oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record)

        for name in "Class2Count", "ClassDef2", "ValueFormat1", "ValueFormat2":
            setattr(newSubTable, name, getattr(oldSubTable, name))

        # The two subtables will still have the same ClassDef2 and the table
        # sharing will still cause the sharing to overflow.  As such, disable
        # sharing on the one that is serialized second (that's oldSubTable).
        oldSubTable.DontShare = True

        # Move top half of class numbers to new subtable
        newSubTable.Coverage = oldSubTable.Coverage.__class__()
        newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__()

        coverage = oldSubTable.Coverage.glyphs
        classDefs = oldSubTable.ClassDef1.classDefs
        records = oldSubTable.Class1Record

        oldCount = len(oldSubTable.Class1Record) // 2
        newGlyphs = set(k for k, v in classDefs.items() if v >= oldCount)

        oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs]
        oldSubTable.ClassDef1.classDefs = {
            k: v for k, v in classDefs.items() if v < oldCount
        }
        oldSubTable.Class1Record = records[:oldCount]

        # Glyphs whose old class is exactly oldCount are intentionally left
        # out of the new ClassDef1: their new class would be 0, which is the
        # implicit default class in a ClassDef.
        newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs]
        newSubTable.ClassDef1.classDefs = {
            k: (v - oldCount) for k, v in classDefs.items() if v > oldCount
        }
        newSubTable.Class1Record = records[oldCount:]

        oldSubTable.Class1Count = len(oldSubTable.Class1Record)
        newSubTable.Class1Count = len(newSubTable.Class1Record)

        ok = True

    return ok
175,426 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
def splitMarkBasePos(oldSubTable, newSubTable, overflowRecord):
    """Split a GPOS MarkBasePos subtable by mark class.

    The lower half of the mark classes stays in ``oldSubTable``; the upper
    half (renumbered from zero) moves to ``newSubTable`` along with the
    corresponding mark records and base-anchor columns.  The BaseCoverage
    table is shared between both halves.  Returns False when there is only
    one mark class left to split.

    Fixes: the original return line carried dataset-extraction residue
    (``return True | null``), which made the function raise NameError.
    """
    # split half of the mark classes to the new subtable
    classCount = oldSubTable.ClassCount
    if classCount < 2:
        # oh well, not much left to split...
        return False

    oldClassCount = classCount // 2
    newClassCount = classCount - oldClassCount

    oldMarkCoverage, oldMarkRecords = [], []
    newMarkCoverage, newMarkRecords = [], []
    for glyphName, markRecord in zip(
        oldSubTable.MarkCoverage.glyphs, oldSubTable.MarkArray.MarkRecord
    ):
        if markRecord.Class < oldClassCount:
            oldMarkCoverage.append(glyphName)
            oldMarkRecords.append(markRecord)
        else:
            # renumber upper classes so they start at 0 in the new subtable
            markRecord.Class -= oldClassCount
            newMarkCoverage.append(glyphName)
            newMarkRecords.append(markRecord)

    oldBaseRecords, newBaseRecords = [], []
    for rec in oldSubTable.BaseArray.BaseRecord:
        oldBaseRecord, newBaseRecord = rec.__class__(), rec.__class__()
        oldBaseRecord.BaseAnchor = rec.BaseAnchor[:oldClassCount]
        newBaseRecord.BaseAnchor = rec.BaseAnchor[oldClassCount:]
        oldBaseRecords.append(oldBaseRecord)
        newBaseRecords.append(newBaseRecord)

    newSubTable.Format = oldSubTable.Format

    oldSubTable.MarkCoverage.glyphs = oldMarkCoverage
    newSubTable.MarkCoverage = oldSubTable.MarkCoverage.__class__()
    newSubTable.MarkCoverage.glyphs = newMarkCoverage

    # share the same BaseCoverage in both halves
    newSubTable.BaseCoverage = oldSubTable.BaseCoverage

    oldSubTable.ClassCount = oldClassCount
    newSubTable.ClassCount = newClassCount

    oldSubTable.MarkArray.MarkRecord = oldMarkRecords
    newSubTable.MarkArray = oldSubTable.MarkArray.__class__()
    newSubTable.MarkArray.MarkRecord = newMarkRecords

    oldSubTable.MarkArray.MarkCount = len(oldMarkRecords)
    newSubTable.MarkArray.MarkCount = len(newMarkRecords)

    oldSubTable.BaseArray.BaseRecord = oldBaseRecords
    newSubTable.BaseArray = oldSubTable.BaseArray.__class__()
    newSubTable.BaseArray.BaseRecord = newBaseRecords

    oldSubTable.BaseArray.BaseCount = len(oldBaseRecords)
    newSubTable.BaseArray.BaseCount = len(newBaseRecords)

    return True
175,427 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
log = logging.getLogger(__name__)
class LookupList(BaseTable):
    # Concrete LookupList shared by GSUB and GPOS.

    def table(self):
        # Infer which top-level table ("GSUB" or "GPOS") this LookupList
        # belongs to by inspecting the class names of its lookup subtables.
        # NOTE(review): in upstream fontTools this is a @property; the
        # decorator appears to have been lost in this extract, so the
        # `self.table` subscript in toXML2 below would use a bound method
        # as a dict key — confirm against the original file.
        for l in self.Lookup:
            for st in l.SubTable:
                if type(st).__name__.endswith("Subst"):
                    return "GSUB"
                if type(st).__name__.endswith("Pos"):
                    return "GPOS"
        raise ValueError

    def toXML2(self, xmlWriter, font):
        # Emit each lookup preceded by an XML comment naming the feature
        # and source location that produced it, when the font carries a
        # Debg table with feaLib lookup-debug info; otherwise fall back to
        # the generic BaseTable serialization.
        if (
            not font
            or "Debg" not in font
            or LOOKUP_DEBUG_INFO_KEY not in font["Debg"].data
        ):
            return super().toXML2(xmlWriter, font)
        debugData = font["Debg"].data[LOOKUP_DEBUG_INFO_KEY][self.table]
        for conv in self.getConverters():
            if conv.repeat:
                value = getattr(self, conv.name, [])
                for lookupIndex, item in enumerate(value):
                    if str(lookupIndex) in debugData:
                        info = LookupDebugInfo(*debugData[str(lookupIndex)])
                        tag = info.location
                        if info.name:
                            tag = f"{info.name}: {tag}"
                        if info.feature:
                            script, language, feature = info.feature
                            tag = f"{tag} in {feature} ({script}/{language})"
                        xmlWriter.comment(tag)
                        xmlWriter.newline()
                    conv.xmlWrite(
                        xmlWriter, font, item, conv.name, [("index", lookupIndex)]
                    )
            else:
                if conv.aux and not eval(conv.aux, None, vars(self)):
                    continue
                value = getattr(
                    self, conv.name, None
                )  # TODO Handle defaults instead of defaulting to None!
                conv.xmlWrite(xmlWriter, font, value, conv.name, [])
# Dispatch table: top-level table tag -> lookup type -> split function.
# Used by fixSubTableOverFlows to shrink an overflowing subtable by moving
# part of its contents into a freshly created sibling subtable.  The
# commented-out entries are lookup types with no splitter implemented yet.
splitTable = {
    "GSUB": {
        # 1: splitSingleSubst,
        2: splitMultipleSubst,
        3: splitAlternateSubst,
        4: splitLigatureSubst,
        # 5: splitContextSubst,
        # 6: splitChainContextSubst,
        # 7: splitExtensionSubst,
        # 8: splitReverseChainSingleSubst,
    },
    "GPOS": {
        # 1: splitSinglePos,
        2: splitPairPos,
        # 3: splitCursivePos,
        4: splitMarkBasePos,
        # 5: splitMarkLigPos,
        # 6: splitMarkMarkPos,
        # 7: splitContextPos,
        # 8: splitChainContextPos,
        # 9: splitExtensionPos,
    },
}
The code above provides the dependencies needed to implement the `fixSubTableOverFlows` function. Write a Python function `def fixSubTableOverFlows(ttf, overflowRecord)` that solves the following problem: an offset has overflowed within a sub-table, so the subtable must be divided into smaller parts.
Here is the function:
def fixSubTableOverFlows(ttf, overflowRecord):
    """
    An offset has overflowed within a sub-table. We need to divide this
    subtable into smaller parts.

    Strategy, in order:
      1. Try disabling subtable sharing for the offending subtable (cheap
         and often sufficient).
      2. Otherwise split the subtable in two with the type-specific
         splitter from ``splitTable``; for Extension lookups the wrapped
         subtable is split and the new half re-wrapped in a fresh
         Extension subtable.

    Returns a truthy value on success, False when the lookup type has no
    registered splitter.

    Fixes: the original return line carried dataset-extraction residue
    (the docstring column fused onto ``return ok``).
    """
    table = ttf[overflowRecord.tableType].table
    lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
    subIndex = overflowRecord.SubTableIndex
    subtable = lookup.SubTable[subIndex]

    # First, try not sharing anything for this subtable...
    if not hasattr(subtable, "DontShare"):
        subtable.DontShare = True
        return True

    if hasattr(subtable, "ExtSubTable"):
        # We split the subtable of the Extension table, and add a new Extension table
        # to contain the new subtable.
        subTableType = subtable.ExtSubTable.__class__.LookupType
        extSubTable = subtable
        subtable = extSubTable.ExtSubTable
        newExtSubTableClass = lookupTypes[overflowRecord.tableType][
            extSubTable.__class__.LookupType
        ]
        newExtSubTable = newExtSubTableClass()
        newExtSubTable.Format = extSubTable.Format
        toInsert = newExtSubTable
        newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
        newSubTable = newSubTableClass()
        newExtSubTable.ExtSubTable = newSubTable
    else:
        subTableType = subtable.__class__.LookupType
        newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
        newSubTable = newSubTableClass()
        toInsert = newSubTable

    if hasattr(lookup, "SubTableCount"):  # may not be defined yet.
        lookup.SubTableCount = lookup.SubTableCount + 1

    try:
        splitFunc = splitTable[overflowRecord.tableType][subTableType]
    except KeyError:
        log.error(
            "Don't know how to split %s lookup type %s",
            overflowRecord.tableType,
            subTableType,
        )
        return False

    ok = splitFunc(subtable, newSubTable, overflowRecord)
    if ok:
        # Insert the new half right after the one that overflowed.
        lookup.SubTable.insert(subIndex + 1, toInsert)
    return ok
175,428 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
class FeatureParamsSize(FeatureParams):
    """FeatureParams variant selected for the 'size' feature."""

    pass
class FeatureParamsStylisticSet(FeatureParams):
    """FeatureParams variant selected for stylistic-set ('ssXX') features."""

    pass
class FeatureParamsCharacterVariants(FeatureParams):
    """FeatureParams variant selected for character-variant ('cvXX') features."""

    pass
class SingleSubst(FormatSwitchingBaseTable):
    """GSUB LookupType 1: single substitution (one glyph -> one glyph).

    After postRead the table is exposed as ``self.mapping``:
    ``{input glyph name: output glyph name}``.
    """

    def populateDefaults(self, propagator=None):
        if not hasattr(self, "mapping"):
            self.mapping = {}

    def postRead(self, rawTable, font):
        # Convert the binary-level representation (Coverage plus either a
        # glyph-ID delta or an explicit substitute array) into ``mapping``.
        mapping = {}
        input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
        if self.Format == 1:
            delta = rawTable["DeltaGlyphID"]
            inputGIDS = font.getGlyphIDMany(input)
            # glyph-ID arithmetic is modulo 65536 per the spec
            outGIDS = [(glyphID + delta) % 65536 for glyphID in inputGIDS]
            outNames = font.getGlyphNameMany(outGIDS)
            for inp, out in zip(input, outNames):
                mapping[inp] = out
        elif self.Format == 2:
            assert (
                len(input) == rawTable["GlyphCount"]
            ), "invalid SingleSubstFormat2 table"
            subst = rawTable["Substitute"]
            for inp, sub in zip(input, subst):
                mapping[inp] = sub
        else:
            assert 0, "unknown format: %s" % self.Format
        self.mapping = mapping
        del self.Format  # Don't need this anymore

    def preWrite(self, font):
        # Build the raw table dict, choosing Format 1 (single shared
        # glyph-ID delta) when every pair has the same delta, else Format 2.
        mapping = getattr(self, "mapping", None)
        if mapping is None:
            mapping = self.mapping = {}
        items = list(mapping.items())
        getGlyphID = font.getGlyphID
        gidItems = [(getGlyphID(a), getGlyphID(b)) for a, b in items]
        sortableItems = sorted(zip(gidItems, items))

        # figure out format
        format = 2
        delta = None
        for inID, outID in gidItems:
            if delta is None:
                delta = (outID - inID) % 65536
            if (inID + delta) % 65536 != outID:
                break
        else:
            # loop completed without break: all deltas agree
            if delta is None:
                # the mapping is empty, better use format 2
                format = 2
            else:
                format = 1

        rawTable = {}
        self.Format = format
        cov = Coverage()
        input = [item[1][0] for item in sortableItems]
        subst = [item[1][1] for item in sortableItems]
        cov.glyphs = input
        rawTable["Coverage"] = cov
        if format == 1:
            assert delta is not None
            rawTable["DeltaGlyphID"] = delta
        else:
            rawTable["Substitute"] = subst
        return rawTable

    def toXML2(self, xmlWriter, font):
        # One <Substitution in=... out=.../> element per mapping entry.
        items = sorted(self.mapping.items())
        for inGlyph, outGlyph in items:
            xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", outGlyph)])
            xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        mapping = getattr(self, "mapping", None)
        if mapping is None:
            mapping = {}
            self.mapping = mapping
        mapping[attrs["in"]] = attrs["out"]
class MultipleSubst(FormatSwitchingBaseTable):
    """GSUB LookupType 2: multiple substitution (one glyph -> sequence).

    After postRead the table is exposed as ``self.mapping``:
    ``{input glyph name: [output glyph names]}``.
    """

    def populateDefaults(self, propagator=None):
        if not hasattr(self, "mapping"):
            self.mapping = {}

    def postRead(self, rawTable, font):
        mapping = {}
        if self.Format == 1:
            glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"])
            subst = [s.Substitute for s in rawTable["Sequence"]]
            mapping = dict(zip(glyphs, subst))
        else:
            assert 0, "unknown format: %s" % self.Format
        self.mapping = mapping
        del self.Format  # Don't need this anymore

    def preWrite(self, font):
        # NOTE(review): `self.makeSequence_` resolves only if makeSequence_
        # is a @staticmethod on this class; in this extract the def appears
        # after the class body — confirm against the original file.
        mapping = getattr(self, "mapping", None)
        if mapping is None:
            mapping = self.mapping = {}
        cov = Coverage()
        cov.glyphs = sorted(list(mapping.keys()), key=font.getGlyphID)
        self.Format = 1
        rawTable = {
            "Coverage": cov,
            "Sequence": [self.makeSequence_(mapping[glyph]) for glyph in cov.glyphs],
        }
        return rawTable

    def toXML2(self, xmlWriter, font):
        items = sorted(self.mapping.items())
        for inGlyph, outGlyphs in items:
            out = ",".join(outGlyphs)
            xmlWriter.simpletag("Substitution", [("in", inGlyph), ("out", out)])
            xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        # Supports both the legacy TTX v3.0 Coverage/Sequence layout and
        # the newer v3.1+ <Substitution in=... out=.../> layout.
        mapping = getattr(self, "mapping", None)
        if mapping is None:
            mapping = {}
            self.mapping = mapping

        # TTX v3.0 and earlier.
        if name == "Coverage":
            self.old_coverage_ = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                element_name, element_attrs, _ = element
                if element_name == "Glyph":
                    self.old_coverage_.append(element_attrs["value"])
            return
        if name == "Sequence":
            index = int(attrs.get("index", len(mapping)))
            glyph = self.old_coverage_[index]
            glyph_mapping = mapping[glyph] = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                element_name, element_attrs, _ = element
                if element_name == "Substitute":
                    glyph_mapping.append(element_attrs["value"])
            return

        # TTX v3.1 and later.
        outGlyphs = attrs["out"].split(",") if attrs["out"] else []
        mapping[attrs["in"]] = [g.strip() for g in outGlyphs]
# NOTE(review): upstream fontTools defines this as a @staticmethod inside
# MultipleSubst (it is called as ``self.makeSequence_`` in preWrite); the
# decorator / class nesting appears lost in this extract — confirm.
def makeSequence_(g):
    # Wrap a list of output glyph names in a Sequence record.
    seq = Sequence()
    seq.Substitute = g
    return seq
class AlternateSubst(FormatSwitchingBaseTable):
    """GSUB LookupType 3: alternate substitution (one glyph -> choices).

    After postRead the table is exposed as ``self.alternates``:
    ``{input glyph name: [alternate glyph names]}``.
    """

    def populateDefaults(self, propagator=None):
        if not hasattr(self, "alternates"):
            self.alternates = {}

    def postRead(self, rawTable, font):
        alternates = {}
        if self.Format == 1:
            input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
            alts = rawTable["AlternateSet"]
            assert len(input) == len(alts)
            for inp, alt in zip(input, alts):
                alternates[inp] = alt.Alternate
        else:
            assert 0, "unknown format: %s" % self.Format
        self.alternates = alternates
        del self.Format  # Don't need this anymore

    def preWrite(self, font):
        self.Format = 1
        alternates = getattr(self, "alternates", None)
        if alternates is None:
            alternates = self.alternates = {}
        # sort entries by glyph ID (decorate with the ID, then sort)
        items = list(alternates.items())
        for i in range(len(items)):
            glyphName, set = items[i]
            items[i] = font.getGlyphID(glyphName), glyphName, set
        items.sort()
        cov = Coverage()
        cov.glyphs = [item[1] for item in items]
        alternates = []
        setList = [item[-1] for item in items]
        for set in setList:
            alts = AlternateSet()
            alts.Alternate = set
            alternates.append(alts)
        # a special case to deal with the fact that several hundred Adobe Japan1-5
        # CJK fonts will overflow an offset if the coverage table isn't pushed to the end.
        # Also useful in that when splitting a sub-table because of an offset overflow
        # I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
        # Allows packing more rules in subtable.
        self.sortCoverageLast = 1
        return {"Coverage": cov, "AlternateSet": alternates}

    def toXML2(self, xmlWriter, font):
        items = sorted(self.alternates.items())
        for glyphName, alternates in items:
            xmlWriter.begintag("AlternateSet", glyph=glyphName)
            xmlWriter.newline()
            for alt in alternates:
                xmlWriter.simpletag("Alternate", glyph=alt)
                xmlWriter.newline()
            xmlWriter.endtag("AlternateSet")
            xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        alternates = getattr(self, "alternates", None)
        if alternates is None:
            alternates = {}
            self.alternates = alternates
        glyphName = attrs["glyph"]
        set = []
        alternates[glyphName] = set
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            set.append(attrs["glyph"])
class LigatureSubst(FormatSwitchingBaseTable):
    """GSUB LookupType 4: ligature substitution (glyph sequence -> glyph).

    After postRead the table is exposed as ``self.ligatures``:
    ``{first glyph name: [Ligature records]}``.
    """

    def populateDefaults(self, propagator=None):
        if not hasattr(self, "ligatures"):
            self.ligatures = {}

    def postRead(self, rawTable, font):
        ligatures = {}
        if self.Format == 1:
            input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
            ligSets = rawTable["LigatureSet"]
            assert len(input) == len(ligSets)
            for i in range(len(input)):
                ligatures[input[i]] = ligSets[i].Ligature
        else:
            assert 0, "unknown format: %s" % self.Format
        self.ligatures = ligatures
        del self.Format  # Don't need this anymore

    def preWrite(self, font):
        self.Format = 1
        ligatures = getattr(self, "ligatures", None)
        if ligatures is None:
            ligatures = self.ligatures = {}

        if ligatures and isinstance(next(iter(ligatures)), tuple):
            # New high-level API in v3.1 and later. Note that we just support compiling this
            # for now. We don't load to this API, and don't do XML with it.

            # ligatures is map from components-sequence to lig-glyph
            newLigatures = dict()
            # longest component sequences first so they match before prefixes
            for comps, lig in sorted(
                ligatures.items(), key=lambda item: (-len(item[0]), item[0])
            ):
                ligature = Ligature()
                ligature.Component = comps[1:]
                ligature.CompCount = len(comps)
                ligature.LigGlyph = lig
                newLigatures.setdefault(comps[0], []).append(ligature)
            ligatures = newLigatures

        # sort entries by first-glyph ID (decorate with the ID, then sort)
        items = list(ligatures.items())
        for i in range(len(items)):
            glyphName, set = items[i]
            items[i] = font.getGlyphID(glyphName), glyphName, set
        items.sort()
        cov = Coverage()
        cov.glyphs = [item[1] for item in items]

        ligSets = []
        setList = [item[-1] for item in items]
        for set in setList:
            ligSet = LigatureSet()
            ligs = ligSet.Ligature = []
            for lig in set:
                ligs.append(lig)
            ligSets.append(ligSet)
        # Useful in that when splitting a sub-table because of an offset overflow
        # I don't need to calculate the change in subtabl offset due to the coverage table size.
        # Allows packing more rules in subtable.
        self.sortCoverageLast = 1
        return {"Coverage": cov, "LigatureSet": ligSets}

    def toXML2(self, xmlWriter, font):
        items = sorted(self.ligatures.items())
        for glyphName, ligSets in items:
            xmlWriter.begintag("LigatureSet", glyph=glyphName)
            xmlWriter.newline()
            for lig in ligSets:
                xmlWriter.simpletag(
                    "Ligature", glyph=lig.LigGlyph, components=",".join(lig.Component)
                )
                xmlWriter.newline()
            xmlWriter.endtag("LigatureSet")
            xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        ligatures = getattr(self, "ligatures", None)
        if ligatures is None:
            ligatures = {}
            self.ligatures = ligatures
        glyphName = attrs["glyph"]
        ligs = []
        ligatures[glyphName] = ligs
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            lig = Ligature()
            lig.LigGlyph = attrs["glyph"]
            components = attrs["components"]
            lig.Component = components.split(",") if components else []
            lig.CompCount = len(lig.Component)
            ligs.append(lig)
# Map of canonical table-class names to the alternative field names that
# use the same underlying table class (e.g. every "*Coverage" field holds
# a Coverage table).  Presumably consumed when resolving converters for
# equivalent table types elsewhere in this module — the consumer is not
# visible in this extract.
_equivalents = {
    "MarkArray": ("Mark1Array",),
    "LangSys": ("DefaultLangSys",),
    "Coverage": (
        "MarkCoverage",
        "BaseCoverage",
        "LigatureCoverage",
        "Mark1Coverage",
        "Mark2Coverage",
        "BacktrackCoverage",
        "InputCoverage",
        "LookAheadCoverage",
        "VertGlyphCoverage",
        "HorizGlyphCoverage",
        "TopAccentCoverage",
        "ExtendedShapeCoverage",
        "MathKernCoverage",
    ),
    "ClassDef": (
        "ClassDef1",
        "ClassDef2",
        "BacktrackClassDef",
        "InputClassDef",
        "LookAheadClassDef",
        "GlyphClassDef",
        "MarkAttachClassDef",
    ),
    "Anchor": (
        "EntryAnchor",
        "ExitAnchor",
        "BaseAnchor",
        "LigatureAnchor",
        "Mark2Anchor",
        "MarkAnchor",
    ),
    "Device": (
        "XPlaDevice",
        "YPlaDevice",
        "XAdvDevice",
        "YAdvDevice",
        "XDeviceTable",
        "YDeviceTable",
        "DeviceTable",
    ),
    "Axis": (
        "HorizAxis",
        "VertAxis",
    ),
    "MinMax": ("DefaultMinMax",),
    "BaseCoord": (
        "MinCoord",
        "MaxCoord",
    ),
    "JstfLangSys": ("DefJstfLangSys",),
    "JstfGSUBModList": (
        "ShrinkageEnableGSUB",
        "ShrinkageDisableGSUB",
        "ExtensionEnableGSUB",
        "ExtensionDisableGSUB",
    ),
    "JstfGPOSModList": (
        "ShrinkageEnableGPOS",
        "ShrinkageDisableGPOS",
        "ExtensionEnableGPOS",
        "ExtensionDisableGPOS",
    ),
    "JstfMax": (
        "ShrinkageJstfMax",
        "ExtensionJstfMax",
    ),
    "MathKern": (
        "TopRightMathKern",
        "TopLeftMathKern",
        "BottomRightMathKern",
        "BottomLeftMathKern",
    ),
    "MathGlyphConstruction": ("VertGlyphConstruction", "HorizGlyphConstruction"),
}
class BaseTable(object):
"""Generic base class for all OpenType (sub)tables."""
def __getattr__(self, attr):
reader = self.__dict__.get("reader")
if reader:
del self.reader
font = self.font
del self.font
self.decompile(reader, font)
return getattr(self, attr)
raise AttributeError(attr)
def ensureDecompiled(self, recurse=False):
reader = self.__dict__.get("reader")
if reader:
del self.reader
font = self.font
del self.font
self.decompile(reader, font)
if recurse:
for subtable in self.iterSubTables():
subtable.value.ensureDecompiled(recurse)
def __getstate__(self):
# before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader
# https://github.com/fonttools/fonttools/issues/2965
if "reader" in self.__dict__:
state = self.__dict__.copy()
state["reader"] = self.__dict__["reader"].copy()
return state
return self.__dict__
def getRecordSize(cls, reader):
totalSize = 0
for conv in cls.converters:
size = conv.getRecordSize(reader)
if size is NotImplemented:
return NotImplemented
countValue = 1
if conv.repeat:
if conv.repeat in reader:
countValue = reader[conv.repeat] + conv.aux
else:
return NotImplemented
totalSize += size * countValue
return totalSize
def getConverters(self):
return self.converters
def getConverterByName(self, name):
return self.convertersByName[name]
def populateDefaults(self, propagator=None):
for conv in self.getConverters():
if conv.repeat:
if not hasattr(self, conv.name):
setattr(self, conv.name, [])
countValue = len(getattr(self, conv.name)) - conv.aux
try:
count_conv = self.getConverterByName(conv.repeat)
setattr(self, conv.repeat, countValue)
except KeyError:
# conv.repeat is a propagated count
if propagator and conv.repeat in propagator:
propagator[conv.repeat].setValue(countValue)
else:
if conv.aux and not eval(conv.aux, None, self.__dict__):
continue
if hasattr(self, conv.name):
continue # Warn if it should NOT be present?!
if hasattr(conv, "writeNullOffset"):
setattr(self, conv.name, None) # Warn?
# elif not conv.isCount:
# # Warn?
# pass
if hasattr(conv, "DEFAULT"):
# OptionalValue converters (e.g. VarIndex)
setattr(self, conv.name, conv.DEFAULT)
def decompile(self, reader, font):
self.readFormat(reader)
table = {}
self.__rawTable = table # for debugging
for conv in self.getConverters():
if conv.name == "SubTable":
conv = conv.getConverter(reader.tableTag, table["LookupType"])
if conv.name == "ExtSubTable":
conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"])
if conv.name == "FeatureParams":
conv = conv.getConverter(reader["FeatureTag"])
if conv.name == "SubStruct":
conv = conv.getConverter(reader.tableTag, table["MorphType"])
try:
if conv.repeat:
if isinstance(conv.repeat, int):
countValue = conv.repeat
elif conv.repeat in table:
countValue = table[conv.repeat]
else:
# conv.repeat is a propagated count
countValue = reader[conv.repeat]
countValue += conv.aux
table[conv.name] = conv.readArray(reader, font, table, countValue)
else:
if conv.aux and not eval(conv.aux, None, table):
continue
table[conv.name] = conv.read(reader, font, table)
if conv.isPropagated:
reader[conv.name] = table[conv.name]
except Exception as e:
name = conv.name
e.args = e.args + (name,)
raise
if hasattr(self, "postRead"):
self.postRead(table, font)
else:
self.__dict__.update(table)
del self.__rawTable # succeeded, get rid of debugging info
def compile(self, writer, font):
self.ensureDecompiled()
# TODO Following hack to be removed by rewriting how FormatSwitching tables
# are handled.
# https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
if hasattr(self, "preWrite"):
deleteFormat = not hasattr(self, "Format")
table = self.preWrite(font)
deleteFormat = deleteFormat and hasattr(self, "Format")
else:
deleteFormat = False
table = self.__dict__.copy()
# some count references may have been initialized in a custom preWrite; we set
# these in the writer's state beforehand (instead of sequentially) so they will
# be propagated to all nested subtables even if the count appears in the current
# table only *after* the offset to the subtable that it is counting.
for conv in self.getConverters():
if conv.isCount and conv.isPropagated:
value = table.get(conv.name)
if isinstance(value, CountReference):
writer[conv.name] = value
if hasattr(self, "sortCoverageLast"):
writer.sortCoverageLast = 1
if hasattr(self, "DontShare"):
writer.DontShare = True
if hasattr(self.__class__, "LookupType"):
writer["LookupType"].setValue(self.__class__.LookupType)
self.writeFormat(writer)
for conv in self.getConverters():
value = table.get(
conv.name
) # TODO Handle defaults instead of defaulting to None!
if conv.repeat:
if value is None:
value = []
countValue = len(value) - conv.aux
if isinstance(conv.repeat, int):
assert len(value) == conv.repeat, "expected %d values, got %d" % (
conv.repeat,
len(value),
)
elif conv.repeat in table:
CountReference(table, conv.repeat, value=countValue)
else:
# conv.repeat is a propagated count
writer[conv.repeat].setValue(countValue)
try:
conv.writeArray(writer, font, table, value)
except Exception as e:
e.args = e.args + (conv.name + "[]",)
raise
elif conv.isCount:
# Special-case Count values.
# Assumption: a Count field will *always* precede
# the actual array(s).
# We need a default value, as it may be set later by a nested
# table. We will later store it here.
# We add a reference: by the time the data is assembled
# the Count value will be filled in.
# We ignore the current count value since it will be recomputed,
# unless it's a CountReference that was already initialized in a custom preWrite.
if isinstance(value, CountReference):
ref = value
ref.size = conv.staticSize
writer.writeData(ref)
table[conv.name] = ref.getValue()
else:
ref = writer.writeCountReference(table, conv.name, conv.staticSize)
table[conv.name] = None
if conv.isPropagated:
writer[conv.name] = ref
elif conv.isLookupType:
# We make sure that subtables have the same lookup type,
# and that the type is the same as the one set on the
# Lookup object, if any is set.
if conv.name not in table:
table[conv.name] = None
ref = writer.writeCountReference(
table, conv.name, conv.staticSize, table[conv.name]
)
writer["LookupType"] = ref
else:
if conv.aux and not eval(conv.aux, None, table):
continue
try:
conv.write(writer, font, table, value)
except Exception as e:
name = value.__class__.__name__ if value is not None else conv.name
e.args = e.args + (name,)
raise
if conv.isPropagated:
writer[conv.name] = value
if deleteFormat:
del self.Format
    def readFormat(self, reader):
        # No-op hook: plain tables carry no leading format field to consume.
        # (Presumably overridden by format-switching table classes — see
        # getFormatSwitchingBaseTableClass below.)
        pass
    def writeFormat(self, writer):
        # No-op hook: plain tables emit no leading format field.
        # (Presumably overridden by format-switching table classes.)
        pass
def toXML(self, xmlWriter, font, attrs=None, name=None):
tableName = name if name else self.__class__.__name__
if attrs is None:
attrs = []
if hasattr(self, "Format"):
attrs = attrs + [("Format", self.Format)]
xmlWriter.begintag(tableName, attrs)
xmlWriter.newline()
self.toXML2(xmlWriter, font)
xmlWriter.endtag(tableName)
xmlWriter.newline()
def toXML2(self, xmlWriter, font):
# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
# This is because in TTX our parent writes our main tag, and in otBase.py we
# do it ourselves. I think I'm getting schizophrenic...
for conv in self.getConverters():
if conv.repeat:
value = getattr(self, conv.name, [])
for i in range(len(value)):
item = value[i]
conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)])
else:
if conv.aux and not eval(conv.aux, None, vars(self)):
continue
value = getattr(
self, conv.name, None
) # TODO Handle defaults instead of defaulting to None!
conv.xmlWrite(xmlWriter, font, value, conv.name, [])
def fromXML(self, name, attrs, content, font):
try:
conv = self.getConverterByName(name)
except KeyError:
raise # XXX on KeyError, raise nice error
value = conv.xmlRead(attrs, content, font)
if conv.repeat:
seq = getattr(self, conv.name, None)
if seq is None:
seq = []
setattr(self, conv.name, seq)
seq.append(value)
else:
setattr(self, conv.name, value)
def __ne__(self, other):
result = self.__eq__(other)
return result if result is NotImplemented else not result
def __eq__(self, other):
if type(self) != type(other):
return NotImplemented
self.ensureDecompiled()
other.ensureDecompiled()
return self.__dict__ == other.__dict__
    class SubTableEntry(NamedTuple):
        """See BaseTable.iterSubTables()"""

        # attribute name on the parent table that holds the subtable
        name: str
        # the subtable itself
        value: "BaseTable"
        index: Optional[int] = None  # index into given array, None for single values
def iterSubTables(self) -> Iterator[SubTableEntry]:
"""Yield (name, value, index) namedtuples for all subtables of current table.
A sub-table is an instance of BaseTable (or subclass thereof) that is a child
of self, the current parent table.
The tuples also contain the attribute name (str) of the of parent table to get
a subtable, and optionally, for lists of subtables (i.e. attributes associated
with a converter that has a 'repeat'), an index into the list containing the
given subtable value.
This method can be useful to traverse trees of otTables.
"""
for conv in self.getConverters():
name = conv.name
value = getattr(self, name, None)
if value is None:
continue
if isinstance(value, BaseTable):
yield self.SubTableEntry(name, value)
elif isinstance(value, list):
yield from (
self.SubTableEntry(name, v, index=i)
for i, v in enumerate(value)
if isinstance(v, BaseTable)
)
# instance (not @class)method for consistency with FormatSwitchingBaseTable
def getVariableAttrs(self):
return getVariableAttrs(self.__class__)
def getFormatSwitchingBaseTableClass(formatType):
    """Return the table class registered for *formatType*.

    Raises:
        TypeError: if no class is registered for the given format type.
    """
    if formatType not in formatSwitchingBaseTables:
        raise TypeError(f"Unsupported format type: {formatType!r}")
    return formatSwitchingBaseTables[formatType]
otData = [
#
# common
#
("LookupOrder", []),
(
"ScriptList",
[
("uint16", "ScriptCount", None, None, "Number of ScriptRecords"),
(
"struct",
"ScriptRecord",
"ScriptCount",
0,
"Array of ScriptRecords -listed alphabetically by ScriptTag",
),
],
),
(
"ScriptRecord",
[
("Tag", "ScriptTag", None, None, "4-byte ScriptTag identifier"),
(
"Offset",
"Script",
None,
None,
"Offset to Script table-from beginning of ScriptList",
),
],
),
(
"Script",
[
(
"Offset",
"DefaultLangSys",
None,
None,
"Offset to DefaultLangSys table-from beginning of Script table-may be NULL",
),
(
"uint16",
"LangSysCount",
None,
None,
"Number of LangSysRecords for this script-excluding the DefaultLangSys",
),
(
"struct",
"LangSysRecord",
"LangSysCount",
0,
"Array of LangSysRecords-listed alphabetically by LangSysTag",
),
],
),
(
"LangSysRecord",
[
("Tag", "LangSysTag", None, None, "4-byte LangSysTag identifier"),
(
"Offset",
"LangSys",
None,
None,
"Offset to LangSys table-from beginning of Script table",
),
],
),
(
"LangSys",
[
(
"Offset",
"LookupOrder",
None,
None,
"= NULL (reserved for an offset to a reordering table)",
),
(
"uint16",
"ReqFeatureIndex",
None,
None,
"Index of a feature required for this language system- if no required features = 0xFFFF",
),
(
"uint16",
"FeatureCount",
None,
None,
"Number of FeatureIndex values for this language system-excludes the required feature",
),
(
"uint16",
"FeatureIndex",
"FeatureCount",
0,
"Array of indices into the FeatureList-in arbitrary order",
),
],
),
(
"FeatureList",
[
(
"uint16",
"FeatureCount",
None,
None,
"Number of FeatureRecords in this table",
),
(
"struct",
"FeatureRecord",
"FeatureCount",
0,
"Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag",
),
],
),
(
"FeatureRecord",
[
("Tag", "FeatureTag", None, None, "4-byte feature identification tag"),
(
"Offset",
"Feature",
None,
None,
"Offset to Feature table-from beginning of FeatureList",
),
],
),
(
"Feature",
[
(
"Offset",
"FeatureParams",
None,
None,
"= NULL (reserved for offset to FeatureParams)",
),
(
"uint16",
"LookupCount",
None,
None,
"Number of LookupList indices for this feature",
),
(
"uint16",
"LookupListIndex",
"LookupCount",
0,
"Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)",
),
],
),
("FeatureParams", []),
(
"FeatureParamsSize",
[
(
"DeciPoints",
"DesignSize",
None,
None,
"The design size in 720/inch units (decipoints).",
),
(
"uint16",
"SubfamilyID",
None,
None,
"Serves as an identifier that associates fonts in a subfamily.",
),
("NameID", "SubfamilyNameID", None, None, "Subfamily NameID."),
(
"DeciPoints",
"RangeStart",
None,
None,
"Small end of recommended usage range (exclusive) in 720/inch units.",
),
(
"DeciPoints",
"RangeEnd",
None,
None,
"Large end of recommended usage range (inclusive) in 720/inch units.",
),
],
),
(
"FeatureParamsStylisticSet",
[
("uint16", "Version", None, None, "Set to 0."),
("NameID", "UINameID", None, None, "UI NameID."),
],
),
(
"FeatureParamsCharacterVariants",
[
("uint16", "Format", None, None, "Set to 0."),
("NameID", "FeatUILabelNameID", None, None, "Feature UI label NameID."),
(
"NameID",
"FeatUITooltipTextNameID",
None,
None,
"Feature UI tooltip text NameID.",
),
("NameID", "SampleTextNameID", None, None, "Sample text NameID."),
("uint16", "NumNamedParameters", None, None, "Number of named parameters."),
(
"NameID",
"FirstParamUILabelNameID",
None,
None,
"First NameID of UI feature parameters.",
),
(
"uint16",
"CharCount",
None,
None,
"Count of characters this feature provides glyph variants for.",
),
(
"uint24",
"Character",
"CharCount",
0,
"Unicode characters for which this feature provides glyph variants.",
),
],
),
(
"LookupList",
[
("uint16", "LookupCount", None, None, "Number of lookups in this table"),
(
"Offset",
"Lookup",
"LookupCount",
0,
"Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)",
),
],
),
(
"Lookup",
[
(
"uint16",
"LookupType",
None,
None,
"Different enumerations for GSUB and GPOS",
),
("LookupFlag", "LookupFlag", None, None, "Lookup qualifiers"),
(
"uint16",
"SubTableCount",
None,
None,
"Number of SubTables for this lookup",
),
(
"Offset",
"SubTable",
"SubTableCount",
0,
"Array of offsets to SubTables-from beginning of Lookup table",
),
(
"uint16",
"MarkFilteringSet",
None,
"LookupFlag & 0x0010",
"If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.",
),
],
),
(
"CoverageFormat1",
[
("uint16", "CoverageFormat", None, None, "Format identifier-format = 1"),
("uint16", "GlyphCount", None, None, "Number of glyphs in the GlyphArray"),
(
"GlyphID",
"GlyphArray",
"GlyphCount",
0,
"Array of GlyphIDs-in numerical order",
),
],
),
(
"CoverageFormat2",
[
("uint16", "CoverageFormat", None, None, "Format identifier-format = 2"),
("uint16", "RangeCount", None, None, "Number of RangeRecords"),
(
"struct",
"RangeRecord",
"RangeCount",
0,
"Array of glyph ranges-ordered by Start GlyphID",
),
],
),
(
"RangeRecord",
[
("GlyphID", "Start", None, None, "First GlyphID in the range"),
("GlyphID", "End", None, None, "Last GlyphID in the range"),
(
"uint16",
"StartCoverageIndex",
None,
None,
"Coverage Index of first GlyphID in range",
),
],
),
(
"ClassDefFormat1",
[
("uint16", "ClassFormat", None, None, "Format identifier-format = 1"),
(
"GlyphID",
"StartGlyph",
None,
None,
"First GlyphID of the ClassValueArray",
),
("uint16", "GlyphCount", None, None, "Size of the ClassValueArray"),
(
"uint16",
"ClassValueArray",
"GlyphCount",
0,
"Array of Class Values-one per GlyphID",
),
],
),
(
"ClassDefFormat2",
[
("uint16", "ClassFormat", None, None, "Format identifier-format = 2"),
("uint16", "ClassRangeCount", None, None, "Number of ClassRangeRecords"),
(
"struct",
"ClassRangeRecord",
"ClassRangeCount",
0,
"Array of ClassRangeRecords-ordered by Start GlyphID",
),
],
),
(
"ClassRangeRecord",
[
("GlyphID", "Start", None, None, "First GlyphID in the range"),
("GlyphID", "End", None, None, "Last GlyphID in the range"),
("uint16", "Class", None, None, "Applied to all glyphs in the range"),
],
),
(
"Device",
[
("uint16", "StartSize", None, None, "Smallest size to correct-in ppem"),
("uint16", "EndSize", None, None, "Largest size to correct-in ppem"),
(
"uint16",
"DeltaFormat",
None,
None,
"Format of DeltaValue array data: 1, 2, or 3",
),
(
"DeltaValue",
"DeltaValue",
"",
"DeltaFormat in (1,2,3)",
"Array of compressed data",
),
],
),
#
# gpos
#
(
"GPOS",
[
(
"Version",
"Version",
None,
None,
"Version of the GPOS table- 0x00010000 or 0x00010001",
),
(
"Offset",
"ScriptList",
None,
None,
"Offset to ScriptList table-from beginning of GPOS table",
),
(
"Offset",
"FeatureList",
None,
None,
"Offset to FeatureList table-from beginning of GPOS table",
),
(
"Offset",
"LookupList",
None,
None,
"Offset to LookupList table-from beginning of GPOS table",
),
(
"LOffset",
"FeatureVariations",
None,
"Version >= 0x00010001",
"Offset to FeatureVariations table-from beginning of GPOS table",
),
],
),
(
"SinglePosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of SinglePos subtable",
),
(
"uint16",
"ValueFormat",
None,
None,
"Defines the types of data in the ValueRecord",
),
(
"ValueRecord",
"Value",
None,
None,
"Defines positioning value(s)-applied to all glyphs in the Coverage table",
),
],
),
(
"SinglePosFormat2",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of SinglePos subtable",
),
(
"uint16",
"ValueFormat",
None,
None,
"Defines the types of data in the ValueRecord",
),
("uint16", "ValueCount", None, None, "Number of ValueRecords"),
(
"ValueRecord",
"Value",
"ValueCount",
0,
"Array of ValueRecords-positioning values applied to glyphs",
),
],
),
(
"PairPosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair",
),
(
"uint16",
"ValueFormat1",
None,
None,
"Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)",
),
(
"uint16",
"ValueFormat2",
None,
None,
"Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)",
),
("uint16", "PairSetCount", None, None, "Number of PairSet tables"),
(
"Offset",
"PairSet",
"PairSetCount",
0,
"Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index",
),
],
),
(
"PairSet",
[
("uint16", "PairValueCount", None, None, "Number of PairValueRecords"),
(
"struct",
"PairValueRecord",
"PairValueCount",
0,
"Array of PairValueRecords-ordered by GlyphID of the second glyph",
),
],
),
(
"PairValueRecord",
[
(
"GlyphID",
"SecondGlyph",
None,
None,
"GlyphID of second glyph in the pair-first glyph is listed in the Coverage table",
),
(
"ValueRecord",
"Value1",
None,
None,
"Positioning data for the first glyph in the pair",
),
(
"ValueRecord",
"Value2",
None,
None,
"Positioning data for the second glyph in the pair",
),
],
),
(
"PairPosFormat2",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair",
),
(
"uint16",
"ValueFormat1",
None,
None,
"ValueRecord definition-for the first glyph of the pair-may be zero (0)",
),
(
"uint16",
"ValueFormat2",
None,
None,
"ValueRecord definition-for the second glyph of the pair-may be zero (0)",
),
(
"Offset",
"ClassDef1",
None,
None,
"Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair",
),
(
"Offset",
"ClassDef2",
None,
None,
"Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair",
),
(
"uint16",
"Class1Count",
None,
None,
"Number of classes in ClassDef1 table-includes Class0",
),
(
"uint16",
"Class2Count",
None,
None,
"Number of classes in ClassDef2 table-includes Class0",
),
(
"struct",
"Class1Record",
"Class1Count",
0,
"Array of Class1 records-ordered by Class1",
),
],
),
(
"Class1Record",
[
(
"struct",
"Class2Record",
"Class2Count",
0,
"Array of Class2 records-ordered by Class2",
),
],
),
(
"Class2Record",
[
(
"ValueRecord",
"Value1",
None,
None,
"Positioning for first glyph-empty if ValueFormat1 = 0",
),
(
"ValueRecord",
"Value2",
None,
None,
"Positioning for second glyph-empty if ValueFormat2 = 0",
),
],
),
(
"CursivePosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of CursivePos subtable",
),
("uint16", "EntryExitCount", None, None, "Number of EntryExit records"),
(
"struct",
"EntryExitRecord",
"EntryExitCount",
0,
"Array of EntryExit records-in Coverage Index order",
),
],
),
(
"EntryExitRecord",
[
(
"Offset",
"EntryAnchor",
None,
None,
"Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL",
),
(
"Offset",
"ExitAnchor",
None,
None,
"Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL",
),
],
),
(
"MarkBasePosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"MarkCoverage",
None,
None,
"Offset to MarkCoverage table-from beginning of MarkBasePos subtable",
),
(
"Offset",
"BaseCoverage",
None,
None,
"Offset to BaseCoverage table-from beginning of MarkBasePos subtable",
),
("uint16", "ClassCount", None, None, "Number of classes defined for marks"),
(
"Offset",
"MarkArray",
None,
None,
"Offset to MarkArray table-from beginning of MarkBasePos subtable",
),
(
"Offset",
"BaseArray",
None,
None,
"Offset to BaseArray table-from beginning of MarkBasePos subtable",
),
],
),
(
"BaseArray",
[
("uint16", "BaseCount", None, None, "Number of BaseRecords"),
(
"struct",
"BaseRecord",
"BaseCount",
0,
"Array of BaseRecords-in order of BaseCoverage Index",
),
],
),
(
"BaseRecord",
[
(
"Offset",
"BaseAnchor",
"ClassCount",
0,
"Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based",
),
],
),
(
"MarkLigPosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"MarkCoverage",
None,
None,
"Offset to Mark Coverage table-from beginning of MarkLigPos subtable",
),
(
"Offset",
"LigatureCoverage",
None,
None,
"Offset to Ligature Coverage table-from beginning of MarkLigPos subtable",
),
("uint16", "ClassCount", None, None, "Number of defined mark classes"),
(
"Offset",
"MarkArray",
None,
None,
"Offset to MarkArray table-from beginning of MarkLigPos subtable",
),
(
"Offset",
"LigatureArray",
None,
None,
"Offset to LigatureArray table-from beginning of MarkLigPos subtable",
),
],
),
(
"LigatureArray",
[
(
"uint16",
"LigatureCount",
None,
None,
"Number of LigatureAttach table offsets",
),
(
"Offset",
"LigatureAttach",
"LigatureCount",
0,
"Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index",
),
],
),
(
"LigatureAttach",
[
(
"uint16",
"ComponentCount",
None,
None,
"Number of ComponentRecords in this ligature",
),
(
"struct",
"ComponentRecord",
"ComponentCount",
0,
"Array of Component records-ordered in writing direction",
),
],
),
(
"ComponentRecord",
[
(
"Offset",
"LigatureAnchor",
"ClassCount",
0,
"Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array",
),
],
),
(
"MarkMarkPosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Mark1Coverage",
None,
None,
"Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable",
),
(
"Offset",
"Mark2Coverage",
None,
None,
"Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable",
),
(
"uint16",
"ClassCount",
None,
None,
"Number of Combining Mark classes defined",
),
(
"Offset",
"Mark1Array",
None,
None,
"Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable",
),
(
"Offset",
"Mark2Array",
None,
None,
"Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable",
),
],
),
(
"Mark2Array",
[
("uint16", "Mark2Count", None, None, "Number of Mark2 records"),
(
"struct",
"Mark2Record",
"Mark2Count",
0,
"Array of Mark2 records-in Coverage order",
),
],
),
(
"Mark2Record",
[
(
"Offset",
"Mark2Anchor",
"ClassCount",
0,
"Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array",
),
],
),
(
"PosLookupRecord",
[
(
"uint16",
"SequenceIndex",
None,
None,
"Index to input glyph sequence-first glyph = 0",
),
(
"uint16",
"LookupListIndex",
None,
None,
"Lookup to apply to that position-zero-based",
),
],
),
(
"ContextPosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of ContextPos subtable",
),
("uint16", "PosRuleSetCount", None, None, "Number of PosRuleSet tables"),
(
"Offset",
"PosRuleSet",
"PosRuleSetCount",
0,
"Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index",
),
],
),
(
"PosRuleSet",
[
("uint16", "PosRuleCount", None, None, "Number of PosRule tables"),
(
"Offset",
"PosRule",
"PosRuleCount",
0,
"Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference",
),
],
),
(
"PosRule",
[
(
"uint16",
"GlyphCount",
None,
None,
"Number of glyphs in the Input glyph sequence",
),
("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
(
"GlyphID",
"Input",
"GlyphCount",
-1,
"Array of input GlyphIDs-starting with the second glyph",
),
(
"struct",
"PosLookupRecord",
"PosCount",
0,
"Array of positioning lookups-in design order",
),
],
),
(
"ContextPosFormat2",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of ContextPos subtable",
),
(
"Offset",
"ClassDef",
None,
None,
"Offset to ClassDef table-from beginning of ContextPos subtable",
),
("uint16", "PosClassSetCount", None, None, "Number of PosClassSet tables"),
(
"Offset",
"PosClassSet",
"PosClassSetCount",
0,
"Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL",
),
],
),
(
"PosClassSet",
[
(
"uint16",
"PosClassRuleCount",
None,
None,
"Number of PosClassRule tables",
),
(
"Offset",
"PosClassRule",
"PosClassRuleCount",
0,
"Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference",
),
],
),
(
"PosClassRule",
[
("uint16", "GlyphCount", None, None, "Number of glyphs to be matched"),
("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
(
"uint16",
"Class",
"GlyphCount",
-1,
"Array of classes-beginning with the second class-to be matched to the input glyph sequence",
),
(
"struct",
"PosLookupRecord",
"PosCount",
0,
"Array of positioning lookups-in design order",
),
],
),
(
"ContextPosFormat3",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 3"),
(
"uint16",
"GlyphCount",
None,
None,
"Number of glyphs in the input sequence",
),
("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
(
"Offset",
"Coverage",
"GlyphCount",
0,
"Array of offsets to Coverage tables-from beginning of ContextPos subtable",
),
(
"struct",
"PosLookupRecord",
"PosCount",
0,
"Array of positioning lookups-in design order",
),
],
),
(
"ChainContextPosFormat1",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of ContextPos subtable",
),
(
"uint16",
"ChainPosRuleSetCount",
None,
None,
"Number of ChainPosRuleSet tables",
),
(
"Offset",
"ChainPosRuleSet",
"ChainPosRuleSetCount",
0,
"Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index",
),
],
),
(
"ChainPosRuleSet",
[
(
"uint16",
"ChainPosRuleCount",
None,
None,
"Number of ChainPosRule tables",
),
(
"Offset",
"ChainPosRule",
"ChainPosRuleCount",
0,
"Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference",
),
],
),
(
"ChainPosRule",
[
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
),
(
"GlyphID",
"Backtrack",
"BacktrackGlyphCount",
0,
"Array of backtracking GlyphID's (to be matched before the input sequence)",
),
(
"uint16",
"InputGlyphCount",
None,
None,
"Total number of glyphs in the input sequence (includes the first glyph)",
),
(
"GlyphID",
"Input",
"InputGlyphCount",
-1,
"Array of input GlyphIDs (start with second glyph)",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)",
),
(
"GlyphID",
"LookAhead",
"LookAheadGlyphCount",
0,
"Array of lookahead GlyphID's (to be matched after the input sequence)",
),
("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
(
"struct",
"PosLookupRecord",
"PosCount",
0,
"Array of PosLookupRecords (in design order)",
),
],
),
(
"ChainContextPosFormat2",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of ChainContextPos subtable",
),
(
"Offset",
"BacktrackClassDef",
None,
None,
"Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable",
),
(
"Offset",
"InputClassDef",
None,
None,
"Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable",
),
(
"Offset",
"LookAheadClassDef",
None,
None,
"Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable",
),
(
"uint16",
"ChainPosClassSetCount",
None,
None,
"Number of ChainPosClassSet tables",
),
(
"Offset",
"ChainPosClassSet",
"ChainPosClassSetCount",
0,
"Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL",
),
],
),
(
"ChainPosClassSet",
[
(
"uint16",
"ChainPosClassRuleCount",
None,
None,
"Number of ChainPosClassRule tables",
),
(
"Offset",
"ChainPosClassRule",
"ChainPosClassRuleCount",
0,
"Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference",
),
],
),
(
"ChainPosClassRule",
[
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
),
(
"uint16",
"Backtrack",
"BacktrackGlyphCount",
0,
"Array of backtracking classes(to be matched before the input sequence)",
),
(
"uint16",
"InputGlyphCount",
None,
None,
"Total number of classes in the input sequence (includes the first class)",
),
(
"uint16",
"Input",
"InputGlyphCount",
-1,
"Array of input classes(start with second class; to be matched with the input glyph sequence)",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)",
),
(
"uint16",
"LookAhead",
"LookAheadGlyphCount",
0,
"Array of lookahead classes(to be matched after the input sequence)",
),
("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
(
"struct",
"PosLookupRecord",
"PosCount",
0,
"Array of PosLookupRecords (in design order)",
),
],
),
(
"ChainContextPosFormat3",
[
("uint16", "PosFormat", None, None, "Format identifier-format = 3"),
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Number of glyphs in the backtracking sequence",
),
(
"Offset",
"BacktrackCoverage",
"BacktrackGlyphCount",
0,
"Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
),
(
"uint16",
"InputGlyphCount",
None,
None,
"Number of glyphs in input sequence",
),
(
"Offset",
"InputCoverage",
"InputGlyphCount",
0,
"Array of offsets to coverage tables in input sequence, in glyph sequence order",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Number of glyphs in lookahead sequence",
),
(
"Offset",
"LookAheadCoverage",
"LookAheadGlyphCount",
0,
"Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
),
("uint16", "PosCount", None, None, "Number of PosLookupRecords"),
(
"struct",
"PosLookupRecord",
"PosCount",
0,
"Array of PosLookupRecords,in design order",
),
],
),
(
"ExtensionPosFormat1",
[
("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."),
(
"uint16",
"ExtensionLookupType",
None,
None,
"Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).",
),
("LOffset", "ExtSubTable", None, None, "Offset to SubTable"),
],
),
# ('ValueRecord', [
# ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'),
# ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'),
# ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'),
# ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'),
# ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'),
# ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'),
# ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'),
# ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'),
# ]),
(
"AnchorFormat1",
[
("uint16", "AnchorFormat", None, None, "Format identifier-format = 1"),
("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
("int16", "YCoordinate", None, None, "Vertical value-in design units"),
],
),
(
"AnchorFormat2",
[
("uint16", "AnchorFormat", None, None, "Format identifier-format = 2"),
("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
("int16", "YCoordinate", None, None, "Vertical value-in design units"),
("uint16", "AnchorPoint", None, None, "Index to glyph contour point"),
],
),
(
"AnchorFormat3",
[
("uint16", "AnchorFormat", None, None, "Format identifier-format = 3"),
("int16", "XCoordinate", None, None, "Horizontal value-in design units"),
("int16", "YCoordinate", None, None, "Vertical value-in design units"),
(
"Offset",
"XDeviceTable",
None,
None,
"Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)",
),
(
"Offset",
"YDeviceTable",
None,
None,
"Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)",
),
],
),
(
"MarkArray",
[
("uint16", "MarkCount", None, None, "Number of MarkRecords"),
(
"struct",
"MarkRecord",
"MarkCount",
0,
"Array of MarkRecords-in Coverage order",
),
],
),
(
"MarkRecord",
[
("uint16", "Class", None, None, "Class defined for this mark"),
(
"Offset",
"MarkAnchor",
None,
None,
"Offset to Anchor table-from beginning of MarkArray table",
),
],
),
#
# gsub
#
(
"GSUB",
[
(
"Version",
"Version",
None,
None,
"Version of the GSUB table- 0x00010000 or 0x00010001",
),
(
"Offset",
"ScriptList",
None,
None,
"Offset to ScriptList table-from beginning of GSUB table",
),
(
"Offset",
"FeatureList",
None,
None,
"Offset to FeatureList table-from beginning of GSUB table",
),
(
"Offset",
"LookupList",
None,
None,
"Offset to LookupList table-from beginning of GSUB table",
),
(
"LOffset",
"FeatureVariations",
None,
"Version >= 0x00010001",
"Offset to FeatureVariations table-from beginning of GSUB table",
),
],
),
(
"SingleSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"uint16",
"DeltaGlyphID",
None,
None,
"Add to original GlyphID modulo 65536 to get substitute GlyphID",
),
],
),
(
"SingleSubstFormat2",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"uint16",
"GlyphCount",
None,
None,
"Number of GlyphIDs in the Substitute array",
),
(
"GlyphID",
"Substitute",
"GlyphCount",
0,
"Array of substitute GlyphIDs-ordered by Coverage Index",
),
],
),
(
"MultipleSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"uint16",
"SequenceCount",
None,
None,
"Number of Sequence table offsets in the Sequence array",
),
(
"Offset",
"Sequence",
"SequenceCount",
0,
"Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index",
),
],
),
(
"Sequence",
[
(
"uint16",
"GlyphCount",
None,
None,
"Number of GlyphIDs in the Substitute array. This should always be greater than 0.",
),
(
"GlyphID",
"Substitute",
"GlyphCount",
0,
"String of GlyphIDs to substitute",
),
],
),
(
"AlternateSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"uint16",
"AlternateSetCount",
None,
None,
"Number of AlternateSet tables",
),
(
"Offset",
"AlternateSet",
"AlternateSetCount",
0,
"Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index",
),
],
),
(
"AlternateSet",
[
(
"uint16",
"GlyphCount",
None,
None,
"Number of GlyphIDs in the Alternate array",
),
(
"GlyphID",
"Alternate",
"GlyphCount",
0,
"Array of alternate GlyphIDs-in arbitrary order",
),
],
),
(
"LigatureSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
("uint16", "LigSetCount", None, None, "Number of LigatureSet tables"),
(
"Offset",
"LigatureSet",
"LigSetCount",
0,
"Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index",
),
],
),
(
"LigatureSet",
[
("uint16", "LigatureCount", None, None, "Number of Ligature tables"),
(
"Offset",
"Ligature",
"LigatureCount",
0,
"Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference",
),
],
),
(
"Ligature",
[
("GlyphID", "LigGlyph", None, None, "GlyphID of ligature to substitute"),
("uint16", "CompCount", None, None, "Number of components in the ligature"),
(
"GlyphID",
"Component",
"CompCount",
-1,
"Array of component GlyphIDs-start with the second component-ordered in writing direction",
),
],
),
(
"SubstLookupRecord",
[
(
"uint16",
"SequenceIndex",
None,
None,
"Index into current glyph sequence-first glyph = 0",
),
(
"uint16",
"LookupListIndex",
None,
None,
"Lookup to apply to that position-zero-based",
),
],
),
(
"ContextSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"uint16",
"SubRuleSetCount",
None,
None,
"Number of SubRuleSet tables-must equal GlyphCount in Coverage table",
),
(
"Offset",
"SubRuleSet",
"SubRuleSetCount",
0,
"Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index",
),
],
),
(
"SubRuleSet",
[
("uint16", "SubRuleCount", None, None, "Number of SubRule tables"),
(
"Offset",
"SubRule",
"SubRuleCount",
0,
"Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference",
),
],
),
(
"SubRule",
[
(
"uint16",
"GlyphCount",
None,
None,
"Total number of glyphs in input glyph sequence-includes the first glyph",
),
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
(
"GlyphID",
"Input",
"GlyphCount",
-1,
"Array of input GlyphIDs-start with second glyph",
),
(
"struct",
"SubstLookupRecord",
"SubstCount",
0,
"Array of SubstLookupRecords-in design order",
),
],
),
(
"ContextSubstFormat2",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"Offset",
"ClassDef",
None,
None,
"Offset to glyph ClassDef table-from beginning of Substitution table",
),
("uint16", "SubClassSetCount", None, None, "Number of SubClassSet tables"),
(
"Offset",
"SubClassSet",
"SubClassSetCount",
0,
"Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL",
),
],
),
(
"SubClassSet",
[
(
"uint16",
"SubClassRuleCount",
None,
None,
"Number of SubClassRule tables",
),
(
"Offset",
"SubClassRule",
"SubClassRuleCount",
0,
"Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference",
),
],
),
(
"SubClassRule",
[
(
"uint16",
"GlyphCount",
None,
None,
"Total number of classes specified for the context in the rule-includes the first class",
),
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
(
"uint16",
"Class",
"GlyphCount",
-1,
"Array of classes-beginning with the second class-to be matched to the input glyph class sequence",
),
(
"struct",
"SubstLookupRecord",
"SubstCount",
0,
"Array of Substitution lookups-in design order",
),
],
),
(
"ContextSubstFormat3",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 3"),
(
"uint16",
"GlyphCount",
None,
None,
"Number of glyphs in the input glyph sequence",
),
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
(
"Offset",
"Coverage",
"GlyphCount",
0,
"Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order",
),
(
"struct",
"SubstLookupRecord",
"SubstCount",
0,
"Array of SubstLookupRecords-in design order",
),
],
),
(
"ChainContextSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"uint16",
"ChainSubRuleSetCount",
None,
None,
"Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table",
),
(
"Offset",
"ChainSubRuleSet",
"ChainSubRuleSetCount",
0,
"Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index",
),
],
),
(
"ChainSubRuleSet",
[
(
"uint16",
"ChainSubRuleCount",
None,
None,
"Number of ChainSubRule tables",
),
(
"Offset",
"ChainSubRule",
"ChainSubRuleCount",
0,
"Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference",
),
],
),
(
"ChainSubRule",
[
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
),
(
"GlyphID",
"Backtrack",
"BacktrackGlyphCount",
0,
"Array of backtracking GlyphID's (to be matched before the input sequence)",
),
(
"uint16",
"InputGlyphCount",
None,
None,
"Total number of glyphs in the input sequence (includes the first glyph)",
),
(
"GlyphID",
"Input",
"InputGlyphCount",
-1,
"Array of input GlyphIDs (start with second glyph)",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)",
),
(
"GlyphID",
"LookAhead",
"LookAheadGlyphCount",
0,
"Array of lookahead GlyphID's (to be matched after the input sequence)",
),
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
(
"struct",
"SubstLookupRecord",
"SubstCount",
0,
"Array of SubstLookupRecords (in design order)",
),
],
),
(
"ChainContextSubstFormat2",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 2"),
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table-from beginning of Substitution table",
),
(
"Offset",
"BacktrackClassDef",
None,
None,
"Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table",
),
(
"Offset",
"InputClassDef",
None,
None,
"Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table",
),
(
"Offset",
"LookAheadClassDef",
None,
None,
"Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table",
),
(
"uint16",
"ChainSubClassSetCount",
None,
None,
"Number of ChainSubClassSet tables",
),
(
"Offset",
"ChainSubClassSet",
"ChainSubClassSetCount",
0,
"Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL",
),
],
),
(
"ChainSubClassSet",
[
(
"uint16",
"ChainSubClassRuleCount",
None,
None,
"Number of ChainSubClassRule tables",
),
(
"Offset",
"ChainSubClassRule",
"ChainSubClassRuleCount",
0,
"Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference",
),
],
),
(
"ChainSubClassRule",
[
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)",
),
(
"uint16",
"Backtrack",
"BacktrackGlyphCount",
0,
"Array of backtracking classes(to be matched before the input sequence)",
),
(
"uint16",
"InputGlyphCount",
None,
None,
"Total number of classes in the input sequence (includes the first class)",
),
(
"uint16",
"Input",
"InputGlyphCount",
-1,
"Array of input classes(start with second class; to be matched with the input glyph sequence)",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)",
),
(
"uint16",
"LookAhead",
"LookAheadGlyphCount",
0,
"Array of lookahead classes(to be matched after the input sequence)",
),
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
(
"struct",
"SubstLookupRecord",
"SubstCount",
0,
"Array of SubstLookupRecords (in design order)",
),
],
),
(
"ChainContextSubstFormat3",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 3"),
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Number of glyphs in the backtracking sequence",
),
(
"Offset",
"BacktrackCoverage",
"BacktrackGlyphCount",
0,
"Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
),
(
"uint16",
"InputGlyphCount",
None,
None,
"Number of glyphs in input sequence",
),
(
"Offset",
"InputCoverage",
"InputGlyphCount",
0,
"Array of offsets to coverage tables in input sequence, in glyph sequence order",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Number of glyphs in lookahead sequence",
),
(
"Offset",
"LookAheadCoverage",
"LookAheadGlyphCount",
0,
"Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
),
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"),
(
"struct",
"SubstLookupRecord",
"SubstCount",
0,
"Array of SubstLookupRecords, in design order",
),
],
),
(
"ExtensionSubstFormat1",
[
("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."),
(
"uint16",
"ExtensionLookupType",
None,
None,
"Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).",
),
(
"LOffset",
"ExtSubTable",
None,
None,
"Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)",
),
],
),
(
"ReverseChainSingleSubstFormat1",
[
("uint16", "SubstFormat", None, None, "Format identifier-format = 1"),
(
"Offset",
"Coverage",
None,
0,
"Offset to Coverage table - from beginning of Substitution table",
),
(
"uint16",
"BacktrackGlyphCount",
None,
None,
"Number of glyphs in the backtracking sequence",
),
(
"Offset",
"BacktrackCoverage",
"BacktrackGlyphCount",
0,
"Array of offsets to coverage tables in backtracking sequence, in glyph sequence order",
),
(
"uint16",
"LookAheadGlyphCount",
None,
None,
"Number of glyphs in lookahead sequence",
),
(
"Offset",
"LookAheadCoverage",
"LookAheadGlyphCount",
0,
"Array of offsets to coverage tables in lookahead sequence, in glyph sequence order",
),
(
"uint16",
"GlyphCount",
None,
None,
"Number of GlyphIDs in the Substitute array",
),
(
"GlyphID",
"Substitute",
"GlyphCount",
0,
"Array of substitute GlyphIDs-ordered by Coverage index",
),
],
),
#
# gdef
#
(
"GDEF",
[
(
"Version",
"Version",
None,
None,
"Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003",
),
(
"Offset",
"GlyphClassDef",
None,
None,
"Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)",
),
(
"Offset",
"AttachList",
None,
None,
"Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)",
),
(
"Offset",
"LigCaretList",
None,
None,
"Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)",
),
(
"Offset",
"MarkAttachClassDef",
None,
None,
"Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)",
),
(
"Offset",
"MarkGlyphSetsDef",
None,
"Version >= 0x00010002",
"Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)",
),
(
"LOffset",
"VarStore",
None,
"Version >= 0x00010003",
"Offset to variation store (may be NULL)",
),
],
),
(
"AttachList",
[
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table - from beginning of AttachList table",
),
(
"uint16",
"GlyphCount",
None,
None,
"Number of glyphs with attachment points",
),
(
"Offset",
"AttachPoint",
"GlyphCount",
0,
"Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order",
),
],
),
(
"AttachPoint",
[
(
"uint16",
"PointCount",
None,
None,
"Number of attachment points on this glyph",
),
(
"uint16",
"PointIndex",
"PointCount",
0,
"Array of contour point indices -in increasing numerical order",
),
],
),
(
"LigCaretList",
[
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table - from beginning of LigCaretList table",
),
("uint16", "LigGlyphCount", None, None, "Number of ligature glyphs"),
(
"Offset",
"LigGlyph",
"LigGlyphCount",
0,
"Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order",
),
],
),
(
"LigGlyph",
[
(
"uint16",
"CaretCount",
None,
None,
"Number of CaretValues for this ligature (components - 1)",
),
(
"Offset",
"CaretValue",
"CaretCount",
0,
"Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order",
),
],
),
(
"CaretValueFormat1",
[
("uint16", "CaretValueFormat", None, None, "Format identifier-format = 1"),
("int16", "Coordinate", None, None, "X or Y value, in design units"),
],
),
(
"CaretValueFormat2",
[
("uint16", "CaretValueFormat", None, None, "Format identifier-format = 2"),
("uint16", "CaretValuePoint", None, None, "Contour point index on glyph"),
],
),
(
"CaretValueFormat3",
[
("uint16", "CaretValueFormat", None, None, "Format identifier-format = 3"),
("int16", "Coordinate", None, None, "X or Y value, in design units"),
(
"Offset",
"DeviceTable",
None,
None,
"Offset to Device table for X or Y value-from beginning of CaretValue table",
),
],
),
(
"MarkGlyphSetsDef",
[
("uint16", "MarkSetTableFormat", None, None, "Format identifier == 1"),
("uint16", "MarkSetCount", None, None, "Number of mark sets defined"),
(
"LOffset",
"Coverage",
"MarkSetCount",
0,
"Array of offsets to mark set coverage tables.",
),
],
),
#
# base
#
(
"BASE",
[
(
"Version",
"Version",
None,
None,
"Version of the BASE table-initially 0x00010000",
),
(
"Offset",
"HorizAxis",
None,
None,
"Offset to horizontal Axis table-from beginning of BASE table-may be NULL",
),
(
"Offset",
"VertAxis",
None,
None,
"Offset to vertical Axis table-from beginning of BASE table-may be NULL",
),
(
"LOffset",
"VarStore",
None,
"Version >= 0x00010001",
"Offset to variation store (may be NULL)",
),
],
),
(
"Axis",
[
(
"Offset",
"BaseTagList",
None,
None,
"Offset to BaseTagList table-from beginning of Axis table-may be NULL",
),
(
"Offset",
"BaseScriptList",
None,
None,
"Offset to BaseScriptList table-from beginning of Axis table",
),
],
),
(
"BaseTagList",
[
(
"uint16",
"BaseTagCount",
None,
None,
"Number of baseline identification tags in this text direction-may be zero (0)",
),
(
"Tag",
"BaselineTag",
"BaseTagCount",
0,
"Array of 4-byte baseline identification tags-must be in alphabetical order",
),
],
),
(
"BaseScriptList",
[
(
"uint16",
"BaseScriptCount",
None,
None,
"Number of BaseScriptRecords defined",
),
(
"struct",
"BaseScriptRecord",
"BaseScriptCount",
0,
"Array of BaseScriptRecords-in alphabetical order by BaseScriptTag",
),
],
),
(
"BaseScriptRecord",
[
("Tag", "BaseScriptTag", None, None, "4-byte script identification tag"),
(
"Offset",
"BaseScript",
None,
None,
"Offset to BaseScript table-from beginning of BaseScriptList",
),
],
),
(
"BaseScript",
[
(
"Offset",
"BaseValues",
None,
None,
"Offset to BaseValues table-from beginning of BaseScript table-may be NULL",
),
(
"Offset",
"DefaultMinMax",
None,
None,
"Offset to MinMax table- from beginning of BaseScript table-may be NULL",
),
(
"uint16",
"BaseLangSysCount",
None,
None,
"Number of BaseLangSysRecords defined-may be zero (0)",
),
(
"struct",
"BaseLangSysRecord",
"BaseLangSysCount",
0,
"Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag",
),
],
),
(
"BaseLangSysRecord",
[
(
"Tag",
"BaseLangSysTag",
None,
None,
"4-byte language system identification tag",
),
(
"Offset",
"MinMax",
None,
None,
"Offset to MinMax table-from beginning of BaseScript table",
),
],
),
(
"BaseValues",
[
(
"uint16",
"DefaultIndex",
None,
None,
"Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList",
),
(
"uint16",
"BaseCoordCount",
None,
None,
"Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList",
),
(
"Offset",
"BaseCoord",
"BaseCoordCount",
0,
"Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList",
),
],
),
(
"MinMax",
[
(
"Offset",
"MinCoord",
None,
None,
"Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL",
),
(
"Offset",
"MaxCoord",
None,
None,
"Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL",
),
(
"uint16",
"FeatMinMaxCount",
None,
None,
"Number of FeatMinMaxRecords-may be zero (0)",
),
(
"struct",
"FeatMinMaxRecord",
"FeatMinMaxCount",
0,
"Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag",
),
],
),
(
"FeatMinMaxRecord",
[
(
"Tag",
"FeatureTableTag",
None,
None,
"4-byte feature identification tag-must match FeatureTag in FeatureList",
),
(
"Offset",
"MinCoord",
None,
None,
"Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL",
),
(
"Offset",
"MaxCoord",
None,
None,
"Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL",
),
],
),
(
"BaseCoordFormat1",
[
("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 1"),
("int16", "Coordinate", None, None, "X or Y value, in design units"),
],
),
(
"BaseCoordFormat2",
[
("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 2"),
("int16", "Coordinate", None, None, "X or Y value, in design units"),
("GlyphID", "ReferenceGlyph", None, None, "GlyphID of control glyph"),
(
"uint16",
"BaseCoordPoint",
None,
None,
"Index of contour point on the ReferenceGlyph",
),
],
),
(
"BaseCoordFormat3",
[
("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 3"),
("int16", "Coordinate", None, None, "X or Y value, in design units"),
(
"Offset",
"DeviceTable",
None,
None,
"Offset to Device table for X or Y value",
),
],
),
#
# jstf
#
(
"JSTF",
[
(
"Version",
"Version",
None,
None,
"Version of the JSTF table-initially set to 0x00010000",
),
(
"uint16",
"JstfScriptCount",
None,
None,
"Number of JstfScriptRecords in this table",
),
(
"struct",
"JstfScriptRecord",
"JstfScriptCount",
0,
"Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag",
),
],
),
(
"JstfScriptRecord",
[
("Tag", "JstfScriptTag", None, None, "4-byte JstfScript identification"),
(
"Offset",
"JstfScript",
None,
None,
"Offset to JstfScript table-from beginning of JSTF Header",
),
],
),
(
"JstfScript",
[
(
"Offset",
"ExtenderGlyph",
None,
None,
"Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL",
),
(
"Offset",
"DefJstfLangSys",
None,
None,
"Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL",
),
(
"uint16",
"JstfLangSysCount",
None,
None,
"Number of JstfLangSysRecords in this table- may be zero (0)",
),
(
"struct",
"JstfLangSysRecord",
"JstfLangSysCount",
0,
"Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag",
),
],
),
(
"JstfLangSysRecord",
[
("Tag", "JstfLangSysTag", None, None, "4-byte JstfLangSys identifier"),
(
"Offset",
"JstfLangSys",
None,
None,
"Offset to JstfLangSys table-from beginning of JstfScript table",
),
],
),
(
"ExtenderGlyph",
[
(
"uint16",
"GlyphCount",
None,
None,
"Number of Extender Glyphs in this script",
),
(
"GlyphID",
"ExtenderGlyph",
"GlyphCount",
0,
"GlyphIDs-in increasing numerical order",
),
],
),
(
"JstfLangSys",
[
(
"uint16",
"JstfPriorityCount",
None,
None,
"Number of JstfPriority tables",
),
(
"Offset",
"JstfPriority",
"JstfPriorityCount",
0,
"Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order",
),
],
),
(
"JstfPriority",
[
(
"Offset",
"ShrinkageEnableGSUB",
None,
None,
"Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
),
(
"Offset",
"ShrinkageDisableGSUB",
None,
None,
"Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
),
(
"Offset",
"ShrinkageEnableGPOS",
None,
None,
"Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
),
(
"Offset",
"ShrinkageDisableGPOS",
None,
None,
"Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL",
),
(
"Offset",
"ShrinkageJstfMax",
None,
None,
"Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL",
),
(
"Offset",
"ExtensionEnableGSUB",
None,
None,
"Offset to Extension Enable JstfGSUBModList table-may be NULL",
),
(
"Offset",
"ExtensionDisableGSUB",
None,
None,
"Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
),
(
"Offset",
"ExtensionEnableGPOS",
None,
None,
"Offset to Extension Enable JstfGSUBModList table-may be NULL",
),
(
"Offset",
"ExtensionDisableGPOS",
None,
None,
"Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL",
),
(
"Offset",
"ExtensionJstfMax",
None,
None,
"Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL",
),
],
),
(
"JstfGSUBModList",
[
(
"uint16",
"LookupCount",
None,
None,
"Number of lookups for this modification",
),
(
"uint16",
"GSUBLookupIndex",
"LookupCount",
0,
"Array of LookupIndex identifiers in GSUB-in increasing numerical order",
),
],
),
(
"JstfGPOSModList",
[
(
"uint16",
"LookupCount",
None,
None,
"Number of lookups for this modification",
),
(
"uint16",
"GPOSLookupIndex",
"LookupCount",
0,
"Array of LookupIndex identifiers in GPOS-in increasing numerical order",
),
],
),
(
"JstfMax",
[
(
"uint16",
"LookupCount",
None,
None,
"Number of lookup Indices for this modification",
),
(
"Offset",
"Lookup",
"LookupCount",
0,
"Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order",
),
],
),
#
# STAT
#
(
"STAT",
[
(
"Version",
"Version",
None,
None,
"Version of the table-initially set to 0x00010000, currently 0x00010002.",
),
(
"uint16",
"DesignAxisRecordSize",
None,
None,
"Size in bytes of each design axis record",
),
("uint16", "DesignAxisCount", None, None, "Number of design axis records"),
(
"LOffsetTo(AxisRecordArray)",
"DesignAxisRecord",
None,
None,
"Offset in bytes from the beginning of the STAT table to the start of the design axes array",
),
("uint16", "AxisValueCount", None, None, "Number of axis value tables"),
(
"LOffsetTo(AxisValueArray)",
"AxisValueArray",
None,
None,
"Offset in bytes from the beginning of the STAT table to the start of the axes value offset array",
),
(
"NameID",
"ElidedFallbackNameID",
None,
"Version >= 0x00010001",
"NameID to use when all style attributes are elided.",
),
],
),
(
"AxisRecordArray",
[
("AxisRecord", "Axis", "DesignAxisCount", 0, "Axis records"),
],
),
(
"AxisRecord",
[
(
"Tag",
"AxisTag",
None,
None,
"A tag identifying the axis of design variation",
),
(
"NameID",
"AxisNameID",
None,
None,
'The name ID for entries in the "name" table that provide a display string for this axis',
),
(
"uint16",
"AxisOrdering",
None,
None,
"A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names",
),
(
"uint8",
"MoreBytes",
"DesignAxisRecordSize",
-8,
"Extra bytes. Set to empty array.",
),
],
),
(
"AxisValueArray",
[
("Offset", "AxisValue", "AxisValueCount", 0, "Axis values"),
],
),
(
"AxisValueFormat1",
[
("uint16", "Format", None, None, "Format, = 1"),
(
"uint16",
"AxisIndex",
None,
None,
"Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
),
("STATFlags", "Flags", None, None, "Flags."),
("NameID", "ValueNameID", None, None, ""),
("Fixed", "Value", None, None, ""),
],
),
(
"AxisValueFormat2",
[
("uint16", "Format", None, None, "Format, = 2"),
(
"uint16",
"AxisIndex",
None,
None,
"Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
),
("STATFlags", "Flags", None, None, "Flags."),
("NameID", "ValueNameID", None, None, ""),
("Fixed", "NominalValue", None, None, ""),
("Fixed", "RangeMinValue", None, None, ""),
("Fixed", "RangeMaxValue", None, None, ""),
],
),
(
"AxisValueFormat3",
[
("uint16", "Format", None, None, "Format, = 3"),
(
"uint16",
"AxisIndex",
None,
None,
"Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
),
("STATFlags", "Flags", None, None, "Flags."),
("NameID", "ValueNameID", None, None, ""),
("Fixed", "Value", None, None, ""),
("Fixed", "LinkedValue", None, None, ""),
],
),
(
"AxisValueFormat4",
[
("uint16", "Format", None, None, "Format, = 4"),
(
"uint16",
"AxisCount",
None,
None,
"The total number of axes contributing to this axis-values combination.",
),
("STATFlags", "Flags", None, None, "Flags."),
("NameID", "ValueNameID", None, None, ""),
(
"struct",
"AxisValueRecord",
"AxisCount",
0,
"Array of AxisValue records that provide the combination of axis values, one for each contributing axis. ",
),
],
),
(
"AxisValueRecord",
[
(
"uint16",
"AxisIndex",
None,
None,
"Index into the axis record array identifying the axis of design variation to which the axis value record applies.",
),
("Fixed", "Value", None, None, "A numeric value for this attribute value."),
],
),
#
# Variation fonts
#
# GSUB/GPOS FeatureVariations
(
"FeatureVariations",
[
(
"Version",
"Version",
None,
None,
"Version of the table-initially set to 0x00010000",
),
(
"uint32",
"FeatureVariationCount",
None,
None,
"Number of records in the FeatureVariationRecord array",
),
(
"struct",
"FeatureVariationRecord",
"FeatureVariationCount",
0,
"Array of FeatureVariationRecord",
),
],
),
(
"FeatureVariationRecord",
[
(
"LOffset",
"ConditionSet",
None,
None,
"Offset to a ConditionSet table, from beginning of the FeatureVariations table.",
),
(
"LOffset",
"FeatureTableSubstitution",
None,
None,
"Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table",
),
],
),
(
"ConditionSet",
[
(
"uint16",
"ConditionCount",
None,
None,
"Number of condition tables in the ConditionTable array",
),
(
"LOffset",
"ConditionTable",
"ConditionCount",
0,
"Array of condition tables.",
),
],
),
(
"ConditionTableFormat1",
[
("uint16", "Format", None, None, "Format, = 1"),
(
"uint16",
"AxisIndex",
None,
None,
"Index for the variation axis within the fvar table, base 0.",
),
(
"F2Dot14",
"FilterRangeMinValue",
None,
None,
"Minimum normalized axis value of the font variation instances that satisfy this condition.",
),
(
"F2Dot14",
"FilterRangeMaxValue",
None,
None,
"Maximum value that satisfies this condition.",
),
],
),
(
"FeatureTableSubstitution",
[
(
"Version",
"Version",
None,
None,
"Version of the table-initially set to 0x00010000",
),
(
"uint16",
"SubstitutionCount",
None,
None,
"Number of records in the FeatureVariationRecords array",
),
(
"FeatureTableSubstitutionRecord",
"SubstitutionRecord",
"SubstitutionCount",
0,
"Array of FeatureTableSubstitutionRecord",
),
],
),
(
"FeatureTableSubstitutionRecord",
[
("uint16", "FeatureIndex", None, None, "The feature table index to match."),
(
"LOffset",
"Feature",
None,
None,
"Offset to an alternate feature table, from start of the FeatureTableSubstitution table.",
),
],
),
# VariationStore
(
"VarRegionAxis",
[
("F2Dot14", "StartCoord", None, None, ""),
("F2Dot14", "PeakCoord", None, None, ""),
("F2Dot14", "EndCoord", None, None, ""),
],
),
(
"VarRegion",
[
("struct", "VarRegionAxis", "RegionAxisCount", 0, ""),
],
),
(
"VarRegionList",
[
("uint16", "RegionAxisCount", None, None, ""),
("uint16", "RegionCount", None, None, ""),
("VarRegion", "Region", "RegionCount", 0, ""),
],
),
(
"VarData",
[
("uint16", "ItemCount", None, None, ""),
("uint16", "NumShorts", None, None, ""),
("uint16", "VarRegionCount", None, None, ""),
("uint16", "VarRegionIndex", "VarRegionCount", 0, ""),
("VarDataValue", "Item", "ItemCount", 0, ""),
],
),
(
"VarStore",
[
("uint16", "Format", None, None, "Set to 1."),
("LOffset", "VarRegionList", None, None, ""),
("uint16", "VarDataCount", None, None, ""),
("LOffset", "VarData", "VarDataCount", 0, ""),
],
),
# Variation helpers
(
"VarIdxMap",
[
("uint16", "EntryFormat", None, None, ""), # Automatically computed
("uint16", "MappingCount", None, None, ""), # Automatically computed
("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
],
),
(
"DeltaSetIndexMapFormat0",
[
("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 0"),
("uint8", "EntryFormat", None, None, ""), # Automatically computed
("uint16", "MappingCount", None, None, ""), # Automatically computed
("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
],
),
(
"DeltaSetIndexMapFormat1",
[
("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 1"),
("uint8", "EntryFormat", None, None, ""), # Automatically computed
("uint32", "MappingCount", None, None, ""), # Automatically computed
("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"),
],
),
# Glyph advance variations
(
"HVAR",
[
(
"Version",
"Version",
None,
None,
"Version of the HVAR table-initially = 0x00010000",
),
("LOffset", "VarStore", None, None, ""),
("LOffsetTo(VarIdxMap)", "AdvWidthMap", None, None, ""),
("LOffsetTo(VarIdxMap)", "LsbMap", None, None, ""),
("LOffsetTo(VarIdxMap)", "RsbMap", None, None, ""),
],
),
(
"VVAR",
[
(
"Version",
"Version",
None,
None,
"Version of the VVAR table-initially = 0x00010000",
),
("LOffset", "VarStore", None, None, ""),
("LOffsetTo(VarIdxMap)", "AdvHeightMap", None, None, ""),
("LOffsetTo(VarIdxMap)", "TsbMap", None, None, ""),
("LOffsetTo(VarIdxMap)", "BsbMap", None, None, ""),
("LOffsetTo(VarIdxMap)", "VOrgMap", None, None, "Vertical origin mapping."),
],
),
# Font-wide metrics variations
(
"MetricsValueRecord",
[
("Tag", "ValueTag", None, None, "4-byte font-wide measure identifier"),
("uint32", "VarIdx", None, None, "Combined outer-inner variation index"),
(
"uint8",
"MoreBytes",
"ValueRecordSize",
-8,
"Extra bytes. Set to empty array.",
),
],
),
(
"MVAR",
[
(
"Version",
"Version",
None,
None,
"Version of the MVAR table-initially = 0x00010000",
),
("uint16", "Reserved", None, None, "Set to 0"),
("uint16", "ValueRecordSize", None, None, ""),
("uint16", "ValueRecordCount", None, None, ""),
("Offset", "VarStore", None, None, ""),
("MetricsValueRecord", "ValueRecord", "ValueRecordCount", 0, ""),
],
),
#
# math
#
(
"MATH",
[
(
"Version",
"Version",
None,
None,
"Version of the MATH table-initially set to 0x00010000.",
),
(
"Offset",
"MathConstants",
None,
None,
"Offset to MathConstants table - from the beginning of MATH table.",
),
(
"Offset",
"MathGlyphInfo",
None,
None,
"Offset to MathGlyphInfo table - from the beginning of MATH table.",
),
(
"Offset",
"MathVariants",
None,
None,
"Offset to MathVariants table - from the beginning of MATH table.",
),
],
),
(
"MathValueRecord",
[
("int16", "Value", None, None, "The X or Y value in design units."),
(
"Offset",
"DeviceTable",
None,
None,
"Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.",
),
],
),
(
"MathConstants",
[
(
"int16",
"ScriptPercentScaleDown",
None,
None,
"Percentage of scaling down for script level 1. Suggested value: 80%.",
),
(
"int16",
"ScriptScriptPercentScaleDown",
None,
None,
"Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.",
),
(
"uint16",
"DelimitedSubFormulaMinHeight",
None,
None,
"Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.",
),
(
"uint16",
"DisplayOperatorMinHeight",
None,
None,
"Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.",
),
(
"MathValueRecord",
"MathLeading",
None,
None,
"White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.",
),
("MathValueRecord", "AxisHeight", None, None, "Axis height of the font."),
(
"MathValueRecord",
"AccentBaseHeight",
None,
None,
"Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.",
),
(
"MathValueRecord",
"FlattenedAccentBaseHeight",
None,
None,
"Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).",
),
(
"MathValueRecord",
"SubscriptShiftDown",
None,
None,
"The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.",
),
(
"MathValueRecord",
"SubscriptTopMax",
None,
None,
"Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.",
),
(
"MathValueRecord",
"SubscriptBaselineDropMin",
None,
None,
"Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.",
),
(
"MathValueRecord",
"SuperscriptShiftUp",
None,
None,
"Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.",
),
(
"MathValueRecord",
"SuperscriptShiftUpCramped",
None,
None,
"Standard shift of superscripts relative to the base, in cramped style.",
),
(
"MathValueRecord",
"SuperscriptBottomMin",
None,
None,
"Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.",
),
(
"MathValueRecord",
"SuperscriptBaselineDropMax",
None,
None,
"Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.",
),
(
"MathValueRecord",
"SubSuperscriptGapMin",
None,
None,
"Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.",
),
(
"MathValueRecord",
"SuperscriptBottomMaxWithSubscript",
None,
None,
"The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.",
),
(
"MathValueRecord",
"SpaceAfterScript",
None,
None,
"Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.",
),
(
"MathValueRecord",
"UpperLimitGapMin",
None,
None,
"Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.",
),
(
"MathValueRecord",
"UpperLimitBaselineRiseMin",
None,
None,
"Minimum distance between baseline of upper limit and (ink) top of the base operator.",
),
(
"MathValueRecord",
"LowerLimitGapMin",
None,
None,
"Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.",
),
(
"MathValueRecord",
"LowerLimitBaselineDropMin",
None,
None,
"Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.",
),
(
"MathValueRecord",
"StackTopShiftUp",
None,
None,
"Standard shift up applied to the top element of a stack.",
),
(
"MathValueRecord",
"StackTopDisplayStyleShiftUp",
None,
None,
"Standard shift up applied to the top element of a stack in display style.",
),
(
"MathValueRecord",
"StackBottomShiftDown",
None,
None,
"Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.",
),
(
"MathValueRecord",
"StackBottomDisplayStyleShiftDown",
None,
None,
"Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.",
),
(
"MathValueRecord",
"StackGapMin",
None,
None,
"Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.",
),
(
"MathValueRecord",
"StackDisplayStyleGapMin",
None,
None,
"Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.",
),
(
"MathValueRecord",
"StretchStackTopShiftUp",
None,
None,
"Standard shift up applied to the top element of the stretch stack.",
),
(
"MathValueRecord",
"StretchStackBottomShiftDown",
None,
None,
"Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.",
),
(
"MathValueRecord",
"StretchStackGapAboveMin",
None,
None,
"Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin",
),
(
"MathValueRecord",
"StretchStackGapBelowMin",
None,
None,
"Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.",
),
(
"MathValueRecord",
"FractionNumeratorShiftUp",
None,
None,
"Standard shift up applied to the numerator.",
),
(
"MathValueRecord",
"FractionNumeratorDisplayStyleShiftUp",
None,
None,
"Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.",
),
(
"MathValueRecord",
"FractionDenominatorShiftDown",
None,
None,
"Standard shift down applied to the denominator. Positive for moving in the downward direction.",
),
(
"MathValueRecord",
"FractionDenominatorDisplayStyleShiftDown",
None,
None,
"Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.",
),
(
"MathValueRecord",
"FractionNumeratorGapMin",
None,
None,
"Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness",
),
(
"MathValueRecord",
"FractionNumDisplayStyleGapMin",
None,
None,
"Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.",
),
(
"MathValueRecord",
"FractionRuleThickness",
None,
None,
"Thickness of the fraction bar. Suggested: default rule thickness.",
),
(
"MathValueRecord",
"FractionDenominatorGapMin",
None,
None,
"Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness",
),
(
"MathValueRecord",
"FractionDenomDisplayStyleGapMin",
None,
None,
"Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.",
),
(
"MathValueRecord",
"SkewedFractionHorizontalGap",
None,
None,
"Horizontal distance between the top and bottom elements of a skewed fraction.",
),
(
"MathValueRecord",
"SkewedFractionVerticalGap",
None,
None,
"Vertical distance between the ink of the top and bottom elements of a skewed fraction.",
),
(
"MathValueRecord",
"OverbarVerticalGap",
None,
None,
"Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.",
),
(
"MathValueRecord",
"OverbarRuleThickness",
None,
None,
"Thickness of overbar. Suggested: default rule thickness.",
),
(
"MathValueRecord",
"OverbarExtraAscender",
None,
None,
"Extra white space reserved above the overbar. Suggested: default rule thickness.",
),
(
"MathValueRecord",
"UnderbarVerticalGap",
None,
None,
"Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.",
),
(
"MathValueRecord",
"UnderbarRuleThickness",
None,
None,
"Thickness of underbar. Suggested: default rule thickness.",
),
(
"MathValueRecord",
"UnderbarExtraDescender",
None,
None,
"Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.",
),
(
"MathValueRecord",
"RadicalVerticalGap",
None,
None,
"Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.",
),
(
"MathValueRecord",
"RadicalDisplayStyleVerticalGap",
None,
None,
"Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.",
),
(
"MathValueRecord",
"RadicalRuleThickness",
None,
None,
"Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.",
),
(
"MathValueRecord",
"RadicalExtraAscender",
None,
None,
"Extra white space reserved above the radical. Suggested: RadicalRuleThickness.",
),
(
"MathValueRecord",
"RadicalKernBeforeDegree",
None,
None,
"Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.",
),
(
"MathValueRecord",
"RadicalKernAfterDegree",
None,
None,
"Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.",
),
(
"uint16",
"RadicalDegreeBottomRaisePercent",
None,
None,
"Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.",
),
],
),
(
"MathGlyphInfo",
[
(
"Offset",
"MathItalicsCorrectionInfo",
None,
None,
"Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.",
),
(
"Offset",
"MathTopAccentAttachment",
None,
None,
"Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.",
),
(
"Offset",
"ExtendedShapeCoverage",
None,
None,
"Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.",
),
(
"Offset",
"MathKernInfo",
None,
None,
"Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.",
),
],
),
(
"MathItalicsCorrectionInfo",
[
(
"Offset",
"Coverage",
None,
None,
"Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.",
),
(
"uint16",
"ItalicsCorrectionCount",
None,
None,
"Number of italics correction values. Should coincide with the number of covered glyphs.",
),
(
"MathValueRecord",
"ItalicsCorrection",
"ItalicsCorrectionCount",
0,
"Array of MathValueRecords defining italics correction values for each covered glyph.",
),
],
),
(
"MathTopAccentAttachment",
[
(
"Offset",
"TopAccentCoverage",
None,
None,
"Offset to Coverage table - from the beginning of MathTopAccentAttachment table.",
),
(
"uint16",
"TopAccentAttachmentCount",
None,
None,
"Number of top accent attachment point values. Should coincide with the number of covered glyphs",
),
(
"MathValueRecord",
"TopAccentAttachment",
"TopAccentAttachmentCount",
0,
"Array of MathValueRecords defining top accent attachment points for each covered glyph",
),
],
),
(
"MathKernInfo",
[
(
"Offset",
"MathKernCoverage",
None,
None,
"Offset to Coverage table - from the beginning of the MathKernInfo table.",
),
("uint16", "MathKernCount", None, None, "Number of MathKernInfoRecords."),
(
"MathKernInfoRecord",
"MathKernInfoRecords",
"MathKernCount",
0,
"Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.",
),
],
),
(
"MathKernInfoRecord",
[
(
"Offset",
"TopRightMathKern",
None,
None,
"Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.",
),
(
"Offset",
"TopLeftMathKern",
None,
None,
"Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.",
),
(
"Offset",
"BottomRightMathKern",
None,
None,
"Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.",
),
(
"Offset",
"BottomLeftMathKern",
None,
None,
"Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.",
),
],
),
(
"MathKern",
[
(
"uint16",
"HeightCount",
None,
None,
"Number of heights on which the kern value changes.",
),
(
"MathValueRecord",
"CorrectionHeight",
"HeightCount",
0,
"Array of correction heights at which the kern value changes. Sorted by the height value in design units.",
),
(
"MathValueRecord",
"KernValue",
"HeightCount",
1,
"Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.",
),
],
),
(
"MathVariants",
[
(
"uint16",
"MinConnectorOverlap",
None,
None,
"Minimum overlap of connecting glyphs during glyph construction, in design units.",
),
(
"Offset",
"VertGlyphCoverage",
None,
None,
"Offset to Coverage table - from the beginning of MathVariants table.",
),
(
"Offset",
"HorizGlyphCoverage",
None,
None,
"Offset to Coverage table - from the beginning of MathVariants table.",
),
(
"uint16",
"VertGlyphCount",
None,
None,
"Number of glyphs for which information is provided for vertically growing variants.",
),
(
"uint16",
"HorizGlyphCount",
None,
None,
"Number of glyphs for which information is provided for horizontally growing variants.",
),
(
"Offset",
"VertGlyphConstruction",
"VertGlyphCount",
0,
"Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.",
),
(
"Offset",
"HorizGlyphConstruction",
"HorizGlyphCount",
0,
"Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.",
),
],
),
(
"MathGlyphConstruction",
[
(
"Offset",
"GlyphAssembly",
None,
None,
"Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL",
),
(
"uint16",
"VariantCount",
None,
None,
"Count of glyph growing variants for this glyph.",
),
(
"MathGlyphVariantRecord",
"MathGlyphVariantRecord",
"VariantCount",
0,
"MathGlyphVariantRecords for alternative variants of the glyphs.",
),
],
),
(
"MathGlyphVariantRecord",
[
("GlyphID", "VariantGlyph", None, None, "Glyph ID for the variant."),
(
"uint16",
"AdvanceMeasurement",
None,
None,
"Advance width/height, in design units, of the variant, in the direction of requested glyph extension.",
),
],
),
(
"GlyphAssembly",
[
(
"MathValueRecord",
"ItalicsCorrection",
None,
None,
"Italics correction of this GlyphAssembly. Should not depend on the assembly size.",
),
("uint16", "PartCount", None, None, "Number of parts in this assembly."),
(
"GlyphPartRecord",
"PartRecords",
"PartCount",
0,
"Array of part records, from left to right and bottom to top.",
),
],
),
(
"GlyphPartRecord",
[
("GlyphID", "glyph", None, None, "Glyph ID for the part."),
(
"uint16",
"StartConnectorLength",
None,
None,
"Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.",
),
(
"uint16",
"EndConnectorLength",
None,
None,
"Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.",
),
(
"uint16",
"FullAdvance",
None,
None,
"Full advance width/height for this part, in the direction of the extension. In design units.",
),
(
"uint16",
"PartFlags",
None,
None,
"Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 0xFFFE Reserved",
),
],
),
##
## Apple Advanced Typography (AAT) tables
##
(
"AATLookupSegment",
[
("uint16", "lastGlyph", None, None, "Last glyph index in this segment."),
("uint16", "firstGlyph", None, None, "First glyph index in this segment."),
(
"uint16",
"value",
None,
None,
"A 16-bit offset from the start of the table to the data.",
),
],
),
#
# ankr
#
(
"ankr",
[
("struct", "AnchorPoints", None, None, "Anchor points table."),
],
),
(
"AnchorPointsFormat0",
[
("uint16", "Format", None, None, "Format of the anchor points table, = 0."),
("uint16", "Flags", None, None, "Flags. Currenty unused, set to zero."),
(
"AATLookupWithDataOffset(AnchorGlyphData)",
"Anchors",
None,
None,
"Table of with anchor overrides for each glyph.",
),
],
),
(
"AnchorGlyphData",
[
(
"uint32",
"AnchorPointCount",
None,
None,
"Number of anchor points for this glyph.",
),
(
"struct",
"AnchorPoint",
"AnchorPointCount",
0,
"Individual anchor points.",
),
],
),
(
"AnchorPoint",
[
("int16", "XCoordinate", None, None, "X coordinate of this anchor point."),
("int16", "YCoordinate", None, None, "Y coordinate of this anchor point."),
],
),
#
# bsln
#
(
"bsln",
[
(
"Version",
"Version",
None,
None,
"Version number of the AAT baseline table (0x00010000 for the initial version).",
),
("struct", "Baseline", None, None, "Baseline table."),
],
),
(
"BaselineFormat0",
[
("uint16", "Format", None, None, "Format of the baseline table, = 0."),
(
"uint16",
"DefaultBaseline",
None,
None,
"Default baseline value for all glyphs. This value can be from 0 through 31.",
),
(
"uint16",
"Delta",
32,
0,
"These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.",
),
],
),
(
"BaselineFormat1",
[
("uint16", "Format", None, None, "Format of the baseline table, = 1."),
(
"uint16",
"DefaultBaseline",
None,
None,
"Default baseline value for all glyphs. This value can be from 0 through 31.",
),
(
"uint16",
"Delta",
32,
0,
"These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.",
),
(
"AATLookup(uint16)",
"BaselineValues",
None,
None,
"Lookup table that maps glyphs to their baseline values.",
),
],
),
(
"BaselineFormat2",
[
("uint16", "Format", None, None, "Format of the baseline table, = 1."),
(
"uint16",
"DefaultBaseline",
None,
None,
"Default baseline value for all glyphs. This value can be from 0 through 31.",
),
(
"GlyphID",
"StandardGlyph",
None,
None,
"Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.",
),
(
"uint16",
"ControlPoint",
32,
0,
"Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.",
),
],
),
(
"BaselineFormat3",
[
("uint16", "Format", None, None, "Format of the baseline table, = 1."),
(
"uint16",
"DefaultBaseline",
None,
None,
"Default baseline value for all glyphs. This value can be from 0 through 31.",
),
(
"GlyphID",
"StandardGlyph",
None,
None,
"Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.",
),
(
"uint16",
"ControlPoint",
32,
0,
"Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.",
),
(
"AATLookup(uint16)",
"BaselineValues",
None,
None,
"Lookup table that maps glyphs to their baseline values.",
),
],
),
#
# cidg
#
(
"cidg",
[
("struct", "CIDGlyphMapping", None, None, "CID-to-glyph mapping table."),
],
),
(
"CIDGlyphMappingFormat0",
[
(
"uint16",
"Format",
None,
None,
"Format of the CID-to-glyph mapping table, = 0.",
),
("uint16", "DataFormat", None, None, "Currenty unused, set to zero."),
("uint32", "StructLength", None, None, "Size of the table in bytes."),
("uint16", "Registry", None, None, "The registry ID."),
(
"char64",
"RegistryName",
None,
None,
"The registry name in ASCII; unused bytes should be set to 0.",
),
("uint16", "Order", None, None, "The order ID."),
(
"char64",
"OrderName",
None,
None,
"The order name in ASCII; unused bytes should be set to 0.",
),
("uint16", "SupplementVersion", None, None, "The supplement version."),
(
"CIDGlyphMap",
"Mapping",
None,
None,
"A mapping from CIDs to the glyphs in the font, starting with CID 0. If a CID from the identified collection has no glyph in the font, 0xFFFF is used",
),
],
),
#
# feat
#
(
"feat",
[
(
"Version",
"Version",
None,
None,
"Version of the feat table-initially set to 0x00010000.",
),
("FeatureNames", "FeatureNames", None, None, "The feature names."),
],
),
(
"FeatureNames",
[
(
"uint16",
"FeatureNameCount",
None,
None,
"Number of entries in the feature name array.",
),
("uint16", "Reserved1", None, None, "Reserved (set to zero)."),
("uint32", "Reserved2", None, None, "Reserved (set to zero)."),
(
"FeatureName",
"FeatureName",
"FeatureNameCount",
0,
"The feature name array.",
),
],
),
(
"FeatureName",
[
("uint16", "FeatureType", None, None, "Feature type."),
(
"uint16",
"SettingsCount",
None,
None,
"The number of records in the setting name array.",
),
(
"LOffset",
"Settings",
None,
None,
"Offset to setting table for this feature.",
),
(
"uint16",
"FeatureFlags",
None,
None,
"Single-bit flags associated with the feature type.",
),
(
"NameID",
"FeatureNameID",
None,
None,
"The name table index for the feature name.",
),
],
),
(
"Settings",
[
("Setting", "Setting", "SettingsCount", 0, "The setting array."),
],
),
(
"Setting",
[
("uint16", "SettingValue", None, None, "The setting."),
(
"NameID",
"SettingNameID",
None,
None,
"The name table index for the setting name.",
),
],
),
#
# gcid
#
(
"gcid",
[
("struct", "GlyphCIDMapping", None, None, "Glyph to CID mapping table."),
],
),
(
"GlyphCIDMappingFormat0",
[
(
"uint16",
"Format",
None,
None,
"Format of the glyph-to-CID mapping table, = 0.",
),
("uint16", "DataFormat", None, None, "Currenty unused, set to zero."),
("uint32", "StructLength", None, None, "Size of the table in bytes."),
("uint16", "Registry", None, None, "The registry ID."),
(
"char64",
"RegistryName",
None,
None,
"The registry name in ASCII; unused bytes should be set to 0.",
),
("uint16", "Order", None, None, "The order ID."),
(
"char64",
"OrderName",
None,
None,
"The order name in ASCII; unused bytes should be set to 0.",
),
("uint16", "SupplementVersion", None, None, "The supplement version."),
(
"GlyphCIDMap",
"Mapping",
None,
None,
"The CIDs for the glyphs in the font, starting with glyph 0. If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used",
),
],
),
#
# lcar
#
(
"lcar",
[
(
"Version",
"Version",
None,
None,
"Version number of the ligature caret table (0x00010000 for the initial version).",
),
("struct", "LigatureCarets", None, None, "Ligature carets table."),
],
),
(
"LigatureCaretsFormat0",
[
(
"uint16",
"Format",
None,
None,
"Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.",
),
(
"AATLookup(LigCaretDistances)",
"Carets",
None,
None,
"Lookup table associating ligature glyphs with their caret positions, in font unit distances.",
),
],
),
(
"LigatureCaretsFormat1",
[
(
"uint16",
"Format",
None,
None,
"Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.",
),
(
"AATLookup(LigCaretPoints)",
"Carets",
None,
None,
"Lookup table associating ligature glyphs with their caret positions, as control points.",
),
],
),
(
"LigCaretDistances",
[
("uint16", "DivsionPointCount", None, None, "Number of division points."),
(
"int16",
"DivisionPoint",
"DivsionPointCount",
0,
"Distance in font units through which a subdivision is made orthogonally to the baseline.",
),
],
),
(
"LigCaretPoints",
[
("uint16", "DivsionPointCount", None, None, "Number of division points."),
(
"int16",
"DivisionPoint",
"DivsionPointCount",
0,
"The number of the control point through which a subdivision is made orthogonally to the baseline.",
),
],
),
#
# mort
#
(
"mort",
[
("Version", "Version", None, None, "Version of the mort table."),
(
"uint32",
"MorphChainCount",
None,
None,
"Number of metamorphosis chains.",
),
(
"MortChain",
"MorphChain",
"MorphChainCount",
0,
"Array of metamorphosis chains.",
),
],
),
(
"MortChain",
[
(
"Flags32",
"DefaultFlags",
None,
None,
"The default specification for subtables.",
),
(
"uint32",
"StructLength",
None,
None,
"Total byte count, including this header; must be a multiple of 4.",
),
(
"uint16",
"MorphFeatureCount",
None,
None,
"Number of metamorphosis feature entries.",
),
(
"uint16",
"MorphSubtableCount",
None,
None,
"The number of subtables in the chain.",
),
(
"struct",
"MorphFeature",
"MorphFeatureCount",
0,
"Array of metamorphosis features.",
),
(
"MortSubtable",
"MorphSubtable",
"MorphSubtableCount",
0,
"Array of metamorphosis subtables.",
),
],
),
(
"MortSubtable",
[
(
"uint16",
"StructLength",
None,
None,
"Total subtable length, including this header.",
),
(
"uint8",
"CoverageFlags",
None,
None,
"Most significant byte of coverage flags.",
),
("uint8", "MorphType", None, None, "Subtable type."),
(
"Flags32",
"SubFeatureFlags",
None,
None,
"The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).",
),
("SubStruct", "SubStruct", None, None, "SubTable."),
],
),
#
# morx
#
(
"morx",
[
("uint16", "Version", None, None, "Version of the morx table."),
("uint16", "Reserved", None, None, "Reserved (set to zero)."),
(
"uint32",
"MorphChainCount",
None,
None,
"Number of extended metamorphosis chains.",
),
(
"MorxChain",
"MorphChain",
"MorphChainCount",
0,
"Array of extended metamorphosis chains.",
),
],
),
(
"MorxChain",
[
(
"Flags32",
"DefaultFlags",
None,
None,
"The default specification for subtables.",
),
(
"uint32",
"StructLength",
None,
None,
"Total byte count, including this header; must be a multiple of 4.",
),
(
"uint32",
"MorphFeatureCount",
None,
None,
"Number of feature subtable entries.",
),
(
"uint32",
"MorphSubtableCount",
None,
None,
"The number of subtables in the chain.",
),
(
"MorphFeature",
"MorphFeature",
"MorphFeatureCount",
0,
"Array of metamorphosis features.",
),
(
"MorxSubtable",
"MorphSubtable",
"MorphSubtableCount",
0,
"Array of extended metamorphosis subtables.",
),
],
),
(
"MorphFeature",
[
("uint16", "FeatureType", None, None, "The type of feature."),
(
"uint16",
"FeatureSetting",
None,
None,
"The feature's setting (aka selector).",
),
(
"Flags32",
"EnableFlags",
None,
None,
"Flags for the settings that this feature and setting enables.",
),
(
"Flags32",
"DisableFlags",
None,
None,
"Complement of flags for the settings that this feature and setting disable.",
),
],
),
# Apple TrueType Reference Manual, chapter “The ‘morx’ table”,
# section “Metamorphosis Subtables”.
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
(
"MorxSubtable",
[
(
"uint32",
"StructLength",
None,
None,
"Total subtable length, including this header.",
),
(
"uint8",
"CoverageFlags",
None,
None,
"Most significant byte of coverage flags.",
),
("uint16", "Reserved", None, None, "Unused."),
("uint8", "MorphType", None, None, "Subtable type."),
(
"Flags32",
"SubFeatureFlags",
None,
None,
"The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).",
),
("SubStruct", "SubStruct", None, None, "SubTable."),
],
),
(
"StateHeader",
[
(
"uint32",
"ClassCount",
None,
None,
"Number of classes, which is the number of 16-bit entry indices in a single line in the state array.",
),
(
"uint32",
"MorphClass",
None,
None,
"Offset from the start of this state table header to the start of the class table.",
),
(
"uint32",
"StateArrayOffset",
None,
None,
"Offset from the start of this state table header to the start of the state array.",
),
(
"uint32",
"EntryTableOffset",
None,
None,
"Offset from the start of this state table header to the start of the entry table.",
),
],
),
(
"RearrangementMorph",
[
(
"STXHeader(RearrangementMorphAction)",
"StateTable",
None,
None,
"Finite-state transducer table for indic rearrangement.",
),
],
),
(
"ContextualMorph",
[
(
"STXHeader(ContextualMorphAction)",
"StateTable",
None,
None,
"Finite-state transducer for contextual glyph substitution.",
),
],
),
(
"LigatureMorph",
[
(
"STXHeader(LigatureMorphAction)",
"StateTable",
None,
None,
"Finite-state transducer for ligature substitution.",
),
],
),
(
"NoncontextualMorph",
[
(
"AATLookup(GlyphID)",
"Substitution",
None,
None,
"The noncontextual glyph substitution table.",
),
],
),
(
"InsertionMorph",
[
(
"STXHeader(InsertionMorphAction)",
"StateTable",
None,
None,
"Finite-state transducer for glyph insertion.",
),
],
),
(
"MorphClass",
[
(
"uint16",
"FirstGlyph",
None,
None,
"Glyph index of the first glyph in the class table.",
),
# ('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'),
# ('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'),
],
),
# If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below.
# ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
#
# prop
#
(
"prop",
[
(
"Fixed",
"Version",
None,
None,
"Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.",
),
("struct", "GlyphProperties", None, None, "Glyph properties."),
],
),
(
"GlyphPropertiesFormat0",
[
("uint16", "Format", None, None, "Format, = 0."),
(
"uint16",
"DefaultProperties",
None,
None,
"Default properties applied to a glyph. Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.",
),
],
),
(
"GlyphPropertiesFormat1",
[
("uint16", "Format", None, None, "Format, = 1."),
(
"uint16",
"DefaultProperties",
None,
None,
"Default properties applied to a glyph if that glyph is not present in the Properties lookup table.",
),
(
"AATLookup(uint16)",
"Properties",
None,
None,
"Lookup data associating glyphs with their properties.",
),
],
),
#
# opbd
#
(
"opbd",
[
(
"Version",
"Version",
None,
None,
"Version number of the optical bounds table (0x00010000 for the initial version).",
),
("struct", "OpticalBounds", None, None, "Optical bounds table."),
],
),
(
"OpticalBoundsFormat0",
[
(
"uint16",
"Format",
None,
None,
"Format of the optical bounds table, = 0.",
),
(
"AATLookup(OpticalBoundsDeltas)",
"OpticalBoundsDeltas",
None,
None,
"Lookup table associating glyphs with their optical bounds, given as deltas in font units.",
),
],
),
(
"OpticalBoundsFormat1",
[
(
"uint16",
"Format",
None,
None,
"Format of the optical bounds table, = 1.",
),
(
"AATLookup(OpticalBoundsPoints)",
"OpticalBoundsPoints",
None,
None,
"Lookup table associating glyphs with their optical bounds, given as references to control points.",
),
],
),
(
"OpticalBoundsDeltas",
[
(
"int16",
"Left",
None,
None,
"Delta value for the left-side optical edge.",
),
("int16", "Top", None, None, "Delta value for the top-side optical edge."),
(
"int16",
"Right",
None,
None,
"Delta value for the right-side optical edge.",
),
(
"int16",
"Bottom",
None,
None,
"Delta value for the bottom-side optical edge.",
),
],
),
(
"OpticalBoundsPoints",
[
(
"int16",
"Left",
None,
None,
"Control point index for the left-side optical edge, or -1 if this glyph has none.",
),
(
"int16",
"Top",
None,
None,
"Control point index for the top-side optical edge, or -1 if this glyph has none.",
),
(
"int16",
"Right",
None,
None,
"Control point index for the right-side optical edge, or -1 if this glyph has none.",
),
(
"int16",
"Bottom",
None,
None,
"Control point index for the bottom-side optical edge, or -1 if this glyph has none.",
),
],
),
#
# TSIC
#
(
"TSIC",
[
(
"Version",
"Version",
None,
None,
"Version of table initially set to 0x00010000.",
),
("uint16", "Flags", None, None, "TSIC flags - set to 0"),
("uint16", "AxisCount", None, None, "Axis count from fvar"),
("uint16", "RecordCount", None, None, "TSIC record count"),
("uint16", "Reserved", None, None, "Set to 0"),
("Tag", "AxisArray", "AxisCount", 0, "Array of axis tags in fvar order"),
(
"LocationRecord",
"RecordLocations",
"RecordCount",
0,
"Location in variation space of TSIC record",
),
("TSICRecord", "Record", "RecordCount", 0, "Array of TSIC records"),
],
),
(
"LocationRecord",
[
("F2Dot14", "Axis", "AxisCount", 0, "Axis record"),
],
),
(
"TSICRecord",
[
("uint16", "Flags", None, None, "Record flags - set to 0"),
("uint16", "NumCVTEntries", None, None, "Number of CVT number value pairs"),
("uint16", "NameLength", None, None, "Length of optional user record name"),
("uint16", "NameArray", "NameLength", 0, "Unicode 16 name"),
("uint16", "CVTArray", "NumCVTEntries", 0, "CVT number array"),
("int16", "CVTValueArray", "NumCVTEntries", 0, "CVT value"),
],
),
#
# COLR
#
(
"COLR",
[
("uint16", "Version", None, None, "Table version number (starts at 0)."),
(
"uint16",
"BaseGlyphRecordCount",
None,
None,
"Number of Base Glyph Records.",
),
(
"LOffset",
"BaseGlyphRecordArray",
None,
None,
"Offset (from beginning of COLR table) to Base Glyph records.",
),
(
"LOffset",
"LayerRecordArray",
None,
None,
"Offset (from beginning of COLR table) to Layer Records.",
),
("uint16", "LayerRecordCount", None, None, "Number of Layer Records."),
(
"LOffset",
"BaseGlyphList",
None,
"Version >= 1",
"Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.",
),
(
"LOffset",
"LayerList",
None,
"Version >= 1",
"Offset (from beginning of COLR table) to LayerList.",
),
(
"LOffset",
"ClipList",
None,
"Version >= 1",
"Offset to ClipList table (may be NULL)",
),
(
"LOffsetTo(DeltaSetIndexMap)",
"VarIndexMap",
None,
"Version >= 1",
"Offset to DeltaSetIndexMap table (may be NULL)",
),
(
"LOffset",
"VarStore",
None,
"Version >= 1",
"Offset to variation store (may be NULL)",
),
],
),
(
"BaseGlyphRecordArray",
[
(
"BaseGlyphRecord",
"BaseGlyphRecord",
"BaseGlyphRecordCount",
0,
"Base Glyph records.",
),
],
),
(
"BaseGlyphRecord",
[
(
"GlyphID",
"BaseGlyph",
None,
None,
"Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.",
),
(
"uint16",
"FirstLayerIndex",
None,
None,
"Index (from beginning of the Layer Records) to the layer record. There will be numLayers consecutive entries for this base glyph.",
),
(
"uint16",
"NumLayers",
None,
None,
"Number of color layers associated with this glyph.",
),
],
),
(
"LayerRecordArray",
[
("LayerRecord", "LayerRecord", "LayerRecordCount", 0, "Layer records."),
],
),
(
"LayerRecord",
[
(
"GlyphID",
"LayerGlyph",
None,
None,
"Glyph ID of layer glyph (must be in z-order from bottom to top).",
),
(
"uint16",
"PaletteIndex",
None,
None,
"Index value to use with a selected color palette.",
),
],
),
(
"BaseGlyphList",
[
(
"uint32",
"BaseGlyphCount",
None,
None,
"Number of Version-1 Base Glyph records",
),
(
"struct",
"BaseGlyphPaintRecord",
"BaseGlyphCount",
0,
"Array of Version-1 Base Glyph records",
),
],
),
(
"BaseGlyphPaintRecord",
[
("GlyphID", "BaseGlyph", None, None, "Glyph ID of reference glyph."),
(
"LOffset",
"Paint",
None,
None,
"Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.",
),
],
),
(
"LayerList",
[
("uint32", "LayerCount", None, None, "Number of Version-1 Layers"),
(
"LOffset",
"Paint",
"LayerCount",
0,
"Array of offsets to Paint tables, from the start of the LayerList table.",
),
],
),
(
"ClipListFormat1",
[
(
"uint8",
"Format",
None,
None,
"Format for ClipList with 16bit glyph IDs: 1",
),
("uint32", "ClipCount", None, None, "Number of Clip records."),
(
"struct",
"ClipRecord",
"ClipCount",
0,
"Array of Clip records sorted by glyph ID.",
),
],
),
(
"ClipRecord",
[
("uint16", "StartGlyphID", None, None, "First glyph ID in the range."),
("uint16", "EndGlyphID", None, None, "Last glyph ID in the range."),
("Offset24", "ClipBox", None, None, "Offset to a ClipBox table."),
],
),
(
"ClipBoxFormat1",
[
(
"uint8",
"Format",
None,
None,
"Format for ClipBox without variation: set to 1.",
),
("int16", "xMin", None, None, "Minimum x of clip box."),
("int16", "yMin", None, None, "Minimum y of clip box."),
("int16", "xMax", None, None, "Maximum x of clip box."),
("int16", "yMax", None, None, "Maximum y of clip box."),
],
),
(
"ClipBoxFormat2",
[
("uint8", "Format", None, None, "Format for variable ClipBox: set to 2."),
("int16", "xMin", None, None, "Minimum x of clip box. VarIndexBase + 0."),
("int16", "yMin", None, None, "Minimum y of clip box. VarIndexBase + 1."),
("int16", "xMax", None, None, "Maximum x of clip box. VarIndexBase + 2."),
("int16", "yMax", None, None, "Maximum y of clip box. VarIndexBase + 3."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# COLRv1 Affine2x3 uses the same column-major order to serialize a 2D
# Affine Transformation as the one used by fontTools.misc.transform.
# However, for historical reasons, the labels 'xy' and 'yx' are swapped.
# Their fundamental meaning is the same though.
# COLRv1 Affine2x3 follows the names found in FreeType and Cairo.
    # In all cases, the second element in the 6-tuple corresponds to the
    # y-part of the x basis vector, and the third to the x-part of the y
    # basis vector.
# See https://github.com/googlefonts/colr-gradients-spec/pull/85
(
"Affine2x3",
[
("Fixed", "xx", None, None, "x-part of x basis vector"),
("Fixed", "yx", None, None, "y-part of x basis vector"),
("Fixed", "xy", None, None, "x-part of y basis vector"),
("Fixed", "yy", None, None, "y-part of y basis vector"),
("Fixed", "dx", None, None, "Translation in x direction"),
("Fixed", "dy", None, None, "Translation in y direction"),
],
),
(
"VarAffine2x3",
[
("Fixed", "xx", None, None, "x-part of x basis vector. VarIndexBase + 0."),
("Fixed", "yx", None, None, "y-part of x basis vector. VarIndexBase + 1."),
("Fixed", "xy", None, None, "x-part of y basis vector. VarIndexBase + 2."),
("Fixed", "yy", None, None, "y-part of y basis vector. VarIndexBase + 3."),
(
"Fixed",
"dx",
None,
None,
"Translation in x direction. VarIndexBase + 4.",
),
(
"Fixed",
"dy",
None,
None,
"Translation in y direction. VarIndexBase + 5.",
),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
(
"ColorStop",
[
("F2Dot14", "StopOffset", None, None, ""),
("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"),
],
),
(
"VarColorStop",
[
("F2Dot14", "StopOffset", None, None, "VarIndexBase + 0."),
("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
(
"F2Dot14",
"Alpha",
None,
None,
"Values outsided [0.,1.] reserved. VarIndexBase + 1.",
),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
(
"ColorLine",
[
(
"ExtendMode",
"Extend",
None,
None,
"Enum {PAD = 0, REPEAT = 1, REFLECT = 2}",
),
("uint16", "StopCount", None, None, "Number of Color stops."),
("ColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."),
],
),
(
"VarColorLine",
[
(
"ExtendMode",
"Extend",
None,
None,
"Enum {PAD = 0, REPEAT = 1, REFLECT = 2}",
),
("uint16", "StopCount", None, None, "Number of Color stops."),
("VarColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."),
],
),
# PaintColrLayers
(
"PaintFormat1",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 1"),
(
"uint8",
"NumLayers",
None,
None,
"Number of offsets to Paint to read from LayerList.",
),
("uint32", "FirstLayerIndex", None, None, "Index into LayerList."),
],
),
# PaintSolid
(
"PaintFormat2",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 2"),
("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"),
],
),
# PaintVarSolid
(
"PaintFormat3",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 3"),
("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."),
(
"F2Dot14",
"Alpha",
None,
None,
"Values outsided [0.,1.] reserved. VarIndexBase + 0.",
),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintLinearGradient
(
"PaintFormat4",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 4"),
(
"Offset24",
"ColorLine",
None,
None,
"Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.",
),
("int16", "x0", None, None, ""),
("int16", "y0", None, None, ""),
("int16", "x1", None, None, ""),
("int16", "y1", None, None, ""),
("int16", "x2", None, None, ""),
("int16", "y2", None, None, ""),
],
),
# PaintVarLinearGradient
(
"PaintFormat5",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 5"),
(
"LOffset24To(VarColorLine)",
"ColorLine",
None,
None,
"Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.",
),
("int16", "x0", None, None, "VarIndexBase + 0."),
("int16", "y0", None, None, "VarIndexBase + 1."),
("int16", "x1", None, None, "VarIndexBase + 2."),
("int16", "y1", None, None, "VarIndexBase + 3."),
("int16", "x2", None, None, "VarIndexBase + 4."),
("int16", "y2", None, None, "VarIndexBase + 5."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintRadialGradient
(
"PaintFormat6",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 6"),
(
"Offset24",
"ColorLine",
None,
None,
"Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.",
),
("int16", "x0", None, None, ""),
("int16", "y0", None, None, ""),
("uint16", "r0", None, None, ""),
("int16", "x1", None, None, ""),
("int16", "y1", None, None, ""),
("uint16", "r1", None, None, ""),
],
),
# PaintVarRadialGradient
(
"PaintFormat7",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 7"),
(
"LOffset24To(VarColorLine)",
"ColorLine",
None,
None,
"Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.",
),
("int16", "x0", None, None, "VarIndexBase + 0."),
("int16", "y0", None, None, "VarIndexBase + 1."),
("uint16", "r0", None, None, "VarIndexBase + 2."),
("int16", "x1", None, None, "VarIndexBase + 3."),
("int16", "y1", None, None, "VarIndexBase + 4."),
("uint16", "r1", None, None, "VarIndexBase + 5."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintSweepGradient
(
"PaintFormat8",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 8"),
(
"Offset24",
"ColorLine",
None,
None,
"Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.",
),
("int16", "centerX", None, None, "Center x coordinate."),
("int16", "centerY", None, None, "Center y coordinate."),
(
"BiasedAngle",
"startAngle",
None,
None,
"Start of the angular range of the gradient.",
),
(
"BiasedAngle",
"endAngle",
None,
None,
"End of the angular range of the gradient.",
),
],
),
# PaintVarSweepGradient
(
"PaintFormat9",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 9"),
(
"LOffset24To(VarColorLine)",
"ColorLine",
None,
None,
"Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.",
),
("int16", "centerX", None, None, "Center x coordinate. VarIndexBase + 0."),
("int16", "centerY", None, None, "Center y coordinate. VarIndexBase + 1."),
(
"BiasedAngle",
"startAngle",
None,
None,
"Start of the angular range of the gradient. VarIndexBase + 2.",
),
(
"BiasedAngle",
"endAngle",
None,
None,
"End of the angular range of the gradient. VarIndexBase + 3.",
),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintGlyph
(
"PaintFormat10",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 10"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintGlyph table) to Paint subtable.",
),
("GlyphID", "Glyph", None, None, "Glyph ID for the source outline."),
],
),
# PaintColrGlyph
(
"PaintFormat11",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 11"),
(
"GlyphID",
"Glyph",
None,
None,
"Virtual glyph ID for a BaseGlyphList base glyph.",
),
],
),
# PaintTransform
(
"PaintFormat12",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 12"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintTransform table) to Paint subtable.",
),
(
"LOffset24To(Affine2x3)",
"Transform",
None,
None,
"2x3 matrix for 2D affine transformations.",
),
],
),
# PaintVarTransform
(
"PaintFormat13",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 13"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarTransform table) to Paint subtable.",
),
(
"LOffset24To(VarAffine2x3)",
"Transform",
None,
None,
"2x3 matrix for 2D affine transformations.",
),
],
),
# PaintTranslate
(
"PaintFormat14",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 14"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintTranslate table) to Paint subtable.",
),
("int16", "dx", None, None, "Translation in x direction."),
("int16", "dy", None, None, "Translation in y direction."),
],
),
# PaintVarTranslate
(
"PaintFormat15",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 15"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarTranslate table) to Paint subtable.",
),
(
"int16",
"dx",
None,
None,
"Translation in x direction. VarIndexBase + 0.",
),
(
"int16",
"dy",
None,
None,
"Translation in y direction. VarIndexBase + 1.",
),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintScale
(
"PaintFormat16",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 16"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintScale table) to Paint subtable.",
),
("F2Dot14", "scaleX", None, None, ""),
("F2Dot14", "scaleY", None, None, ""),
],
),
# PaintVarScale
(
"PaintFormat17",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 17"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarScale table) to Paint subtable.",
),
("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."),
("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintScaleAroundCenter
(
"PaintFormat18",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 18"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.",
),
("F2Dot14", "scaleX", None, None, ""),
("F2Dot14", "scaleY", None, None, ""),
("int16", "centerX", None, None, ""),
("int16", "centerY", None, None, ""),
],
),
# PaintVarScaleAroundCenter
(
"PaintFormat19",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 19"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarScaleAroundCenter table) to Paint subtable.",
),
("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."),
("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."),
("int16", "centerX", None, None, "VarIndexBase + 2."),
("int16", "centerY", None, None, "VarIndexBase + 3."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintScaleUniform
(
"PaintFormat20",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 20"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintScaleUniform table) to Paint subtable.",
),
("F2Dot14", "scale", None, None, ""),
],
),
# PaintVarScaleUniform
(
"PaintFormat21",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 21"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.",
),
("F2Dot14", "scale", None, None, "VarIndexBase + 0."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintScaleUniformAroundCenter
(
"PaintFormat22",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 22"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.",
),
("F2Dot14", "scale", None, None, ""),
("int16", "centerX", None, None, ""),
("int16", "centerY", None, None, ""),
],
),
# PaintVarScaleUniformAroundCenter
(
"PaintFormat23",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 23"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.",
),
("F2Dot14", "scale", None, None, "VarIndexBase + 0"),
("int16", "centerX", None, None, "VarIndexBase + 1"),
("int16", "centerY", None, None, "VarIndexBase + 2"),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintRotate
(
"PaintFormat24",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 24"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintRotate table) to Paint subtable.",
),
("Angle", "angle", None, None, ""),
],
),
# PaintVarRotate
(
"PaintFormat25",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 25"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarRotate table) to Paint subtable.",
),
("Angle", "angle", None, None, "VarIndexBase + 0."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintRotateAroundCenter
(
"PaintFormat26",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 26"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.",
),
("Angle", "angle", None, None, ""),
("int16", "centerX", None, None, ""),
("int16", "centerY", None, None, ""),
],
),
# PaintVarRotateAroundCenter
(
"PaintFormat27",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 27"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.",
),
("Angle", "angle", None, None, "VarIndexBase + 0."),
("int16", "centerX", None, None, "VarIndexBase + 1."),
("int16", "centerY", None, None, "VarIndexBase + 2."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintSkew
(
"PaintFormat28",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 28"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintSkew table) to Paint subtable.",
),
("Angle", "xSkewAngle", None, None, ""),
("Angle", "ySkewAngle", None, None, ""),
],
),
# PaintVarSkew
(
"PaintFormat29",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 29"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarSkew table) to Paint subtable.",
),
("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."),
("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintSkewAroundCenter
(
"PaintFormat30",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 30"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.",
),
("Angle", "xSkewAngle", None, None, ""),
("Angle", "ySkewAngle", None, None, ""),
("int16", "centerX", None, None, ""),
("int16", "centerY", None, None, ""),
],
),
# PaintVarSkewAroundCenter
(
"PaintFormat31",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 31"),
(
"Offset24",
"Paint",
None,
None,
"Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.",
),
("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."),
("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."),
("int16", "centerX", None, None, "VarIndexBase + 2."),
("int16", "centerY", None, None, "VarIndexBase + 3."),
(
"VarIndex",
"VarIndexBase",
None,
None,
"Base index into DeltaSetIndexMap.",
),
],
),
# PaintComposite
(
"PaintFormat32",
[
("uint8", "PaintFormat", None, None, "Format identifier-format = 32"),
(
"LOffset24To(Paint)",
"SourcePaint",
None,
None,
"Offset (from beginning of PaintComposite table) to source Paint subtable.",
),
(
"CompositeMode",
"CompositeMode",
None,
None,
"A CompositeMode enumeration value.",
),
(
"LOffset24To(Paint)",
"BackdropPaint",
None,
None,
"Offset (from beginning of PaintComposite table) to backdrop Paint subtable.",
),
],
),
#
# avar
#
(
"AxisValueMap",
[
(
"F2Dot14",
"FromCoordinate",
None,
None,
"A normalized coordinate value obtained using default normalization",
),
(
"F2Dot14",
"ToCoordinate",
None,
None,
"The modified, normalized coordinate value",
),
],
),
(
"AxisSegmentMap",
[
(
"uint16",
"PositionMapCount",
None,
None,
"The number of correspondence pairs for this axis",
),
(
"AxisValueMap",
"AxisValueMap",
"PositionMapCount",
0,
"The array of axis value map records for this axis",
),
],
),
(
"avar",
[
(
"Version",
"Version",
None,
None,
"Version of the avar table- 0x00010000 or 0x00020000",
),
("uint16", "Reserved", None, None, "Permanently reserved; set to zero"),
(
"uint16",
"AxisCount",
None,
None,
'The number of variation axes for this font. This must be the same number as axisCount in the "fvar" table',
),
(
"AxisSegmentMap",
"AxisSegmentMap",
"AxisCount",
0,
'The segment maps array — one segment map for each axis, in the order of axes specified in the "fvar" table',
),
(
"LOffsetTo(DeltaSetIndexMap)",
"VarIdxMap",
None,
"Version >= 0x00020000",
"",
),
("LOffset", "VarStore", None, "Version >= 0x00020000", ""),
],
),
]
def buildConverters(tableSpec, tableNamespace):
    """Given a table spec from otData.py, build a converter object for each
    field of the table. This is called for each table in otData.py, and
    the results are assigned to the corresponding class in otTables.py.

    Returns a ``(converters, convertersByName)`` pair: the ordered list of
    converter instances (one per spec row), plus a mapping from field name
    (and, for subtable/featureParams fields, subtable class name) to its
    converter for random access.
    """
    converters = []
    convertersByName = {}
    for tp, name, repeat, aux, descr in tableSpec:
        tableName = name
        if name.startswith("ValueFormat"):
            assert tp == "uint16"
            converterClass = ValueFormat
        elif name.endswith("Count") or name in ("StructLength", "MorphType"):
            # Count-like fields are computed at compile time, never set by hand.
            converterClass = {
                "uint8": ComputedUInt8,
                "uint16": ComputedUShort,
                "uint32": ComputedULong,
            }[tp]
        elif name == "SubTable":
            converterClass = SubTable
        elif name == "ExtSubTable":
            converterClass = ExtSubTable
        elif name == "SubStruct":
            converterClass = SubStruct
        elif name == "FeatureParams":
            converterClass = FeatureParams
        elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
            converterClass = StructWithLength
        else:
            if tp not in converterMapping and "(" not in tp:
                # A plain struct reference: the type name doubles as the
                # table-class name looked up below.
                tableName = tp
                converterClass = Struct
            else:
                # "Template" types like OffsetTo(AType) are evaluated against
                # the table namespace to build a parameterized converter.
                converterClass = eval(tp, tableNamespace, converterMapping)
        conv = converterClass(name, repeat, aux, description=descr)
        if conv.tableClass:
            # A "template" such as OffsetTo(AType) knows the table class already
            tableClass = conv.tableClass
        elif tp in ("MortChain", "MortSubtable", "MorxChain"):
            tableClass = tableNamespace.get(tp)
        else:
            tableClass = tableNamespace.get(tableName)
        if not conv.tableClass:
            conv.tableClass = tableClass
        if name in ("SubTable", "ExtSubTable", "SubStruct"):
            conv.lookupTypes = tableNamespace["lookupTypes"]
            # also create reverse mapping
            for t in conv.lookupTypes.values():
                for cls in t.values():
                    convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
        if name == "FeatureParams":
            conv.featureParamTypes = tableNamespace["featureParamTypes"]
            conv.defaultFeatureParams = tableNamespace["FeatureParams"]
            for cls in conv.featureParamTypes.values():
                convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
        converters.append(conv)
        # Field names must be unique within one table spec.
        assert name not in convertersByName, name
        convertersByName[name] = conv
    return converters, convertersByName
def _buildClasses():
    """Populate this module's namespace with one class per otData table,
    link Var/non-Var counterparts and registered aliases, and attach the
    per-field converters to every class.

    Runs once at import time and mutates ``globals()`` of this module,
    including the module-level ``lookupTypes`` and ``featureParamTypes``.
    """
    import re
    from .otData import otData
    # Matches format-switching subtable names such as "SingleSubstFormat2";
    # group 1 is the base name, group 2 the format number.
    formatPat = re.compile(r"([A-Za-z0-9]+)Format(\d+)$")
    namespace = globals()
    # populate module with classes
    for name, table in otData:
        baseClass = BaseTable
        m = formatPat.match(name)
        if m:
            # XxxFormatN subtable, we only add the "base" table
            name = m.group(1)
            # the first row of a format-switching otData table describes the Format;
            # the first column defines the type of the Format field.
            # Currently this can be either 'uint16' or 'uint8'.
            formatType = table[0][0]
            baseClass = getFormatSwitchingBaseTableClass(formatType)
        if name not in namespace:
            # the class doesn't exist yet, so the base implementation is used.
            cls = type(name, (baseClass,), {})
            if name in ("GSUB", "GPOS"):
                cls.DontShare = True
            namespace[name] = cls
    # link Var{Table} <-> {Table} (e.g. ColorStop <-> VarColorStop, etc.)
    for name, _ in otData:
        if name.startswith("Var") and len(name) > 3 and name[3:] in namespace:
            varType = namespace[name]
            noVarType = namespace[name[3:]]
            varType.NoVarType = noVarType
            noVarType.VarType = varType
    # register alias names that resolve to an already-built class
    for base, alts in _equivalents.items():
        base = namespace[base]
        for alt in alts:
            namespace[alt] = base
    global lookupTypes
    # Lookup-type number -> subtable class, per parent table.
    lookupTypes = {
        "GSUB": {
            1: SingleSubst,
            2: MultipleSubst,
            3: AlternateSubst,
            4: LigatureSubst,
            5: ContextSubst,
            6: ChainContextSubst,
            7: ExtensionSubst,
            8: ReverseChainSingleSubst,
        },
        "GPOS": {
            1: SinglePos,
            2: PairPos,
            3: CursivePos,
            4: MarkBasePos,
            5: MarkLigPos,
            6: MarkMarkPos,
            7: ContextPos,
            8: ChainContextPos,
            9: ExtensionPos,
        },
        "mort": {
            4: NoncontextualMorph,
        },
        "morx": {
            0: RearrangementMorph,
            1: ContextualMorph,
            2: LigatureMorph,
            # 3: Reserved,
            4: NoncontextualMorph,
            5: InsertionMorph,
        },
    }
    lookupTypes["JSTF"] = lookupTypes["GPOS"]  # JSTF contains GPOS
    for lookupEnum in lookupTypes.values():
        for enum, cls in lookupEnum.items():
            cls.LookupType = enum
    global featureParamTypes
    # Feature tag -> FeatureParams subclass; ssXX/cvXX tags are generated.
    featureParamTypes = {
        "size": FeatureParamsSize,
    }
    for i in range(1, 20 + 1):
        featureParamTypes["ss%02d" % i] = FeatureParamsStylisticSet
    for i in range(1, 99 + 1):
        featureParamTypes["cv%02d" % i] = FeatureParamsCharacterVariants
    # add converters to classes
    from .otConverters import buildConverters
    for name, table in otData:
        m = formatPat.match(name)
        if m:
            # XxxFormatN subtable, add converter to "base" table
            name, format = m.groups()
            format = int(format)
            cls = namespace[name]
            if not hasattr(cls, "converters"):
                cls.converters = {}
                cls.convertersByName = {}
            # skip row 0 (the Format field) -- handled by the base class
            converters, convertersByName = buildConverters(table[1:], namespace)
            cls.converters[format] = converters
            cls.convertersByName[format] = convertersByName
            # XXX Add staticSize?
        else:
            cls = namespace[name]
            cls.converters, cls.convertersByName = buildConverters(table, namespace)
            # XXX Add staticSize?
175,429 | import copy
from enum import IntEnum
from functools import reduce
from math import radians
import itertools
from collections import defaultdict, namedtuple
from fontTools.ttLib.tables.otTraverse import dfs_base_table
from fontTools.misc.arrayTools import quantizeRect
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform, Identity
from fontTools.misc.textTools import bytesjoin, pad, safeEval
from fontTools.pens.boundsPen import ControlBoundsPen
from fontTools.pens.transformPen import TransformPen
from .otBase import (
BaseTable,
FormatSwitchingBaseTable,
ValueRecord,
CountReference,
getFormatSwitchingBaseTableClass,
)
from fontTools.feaLib.lookupDebugInfo import LookupDebugInfo, LOOKUP_DEBUG_INFO_KEY
import logging
import struct
from typing import TYPE_CHECKING, Iterator, List, Optional, Set
def _getGlyphsFromCoverageTable(coverage):
if coverage is None:
# empty coverage table
return []
else:
return coverage.glyphs | null |
175,430 | from collections import namedtuple
from fontTools.misc import sstruct
from fontTools import ttLib
from fontTools import version
from fontTools.misc.transform import DecomposedTransform
from fontTools.misc.textTools import tostr, safeEval, pad
from fontTools.misc.arrayTools import calcIntBounds, pointInRect
from fontTools.misc.bezierTools import calcQuadraticBounds
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
otRound,
)
from numbers import Number
from . import DefaultTable
from . import ttProgram
import sys
import struct
import array
import logging
import os
from fontTools.misc import xmlWriter
from fontTools.misc.filenames import userNameToFileName
from fontTools.misc.loggingTools import deprecateFunction
from enum import IntFlag
# Per-point flag bits of a simple glyph in the OpenType 'glyf' table.
flagOnCurve = 0x01  # bit 0: point lies on the curve
flagXShort = 0x02  # bit 1: x delta is stored as a single unsigned byte
flagYShort = 0x04  # bit 2: y delta is stored as a single unsigned byte
flagXsame = 0x10  # bit 4: x delta omitted (same x), or sign bit when x is short
flagYsame = 0x20  # bit 5: y delta omitted (same y), or sign bit when y is short
The provided code snippet includes necessary dependencies for implementing the `flagBest` function. Write a Python function `def flagBest(x, y, onCurve)` to solve the following problem:
For a given x,y delta pair, returns the flag that packs this pair most efficiently, as well as the byte cost of using that flag.
Here is the function:
def flagBest(x, y, onCurve):
    """For a given x,y delta pair, return the flag that packs this pair
    most efficiently, together with the byte cost of that encoding."""

    def _axis(delta, shortBit, sameOrSignBit):
        # Zero deltas are omitted entirely (0 bytes); deltas in [-255, 255]
        # fit in one byte with the sign carried by the same/sign bit;
        # anything larger needs a signed 16-bit word (2 bytes).
        if delta == 0:
            return sameOrSignBit, 0
        if -255 <= delta <= 255:
            bits = shortBit
            if delta > 0:
                bits |= sameOrSignBit
            return bits, 1
        return 0, 2

    xBits, xCost = _axis(x, flagXShort, flagXsame)
    yBits, yCost = _axis(y, flagYShort, flagYsame)
    flag = (flagOnCurve if onCurve else 0) | xBits | yBits
    return flag, xCost + yCost
175,431 | from collections import namedtuple
from fontTools.misc import sstruct
from fontTools import ttLib
from fontTools import version
from fontTools.misc.transform import DecomposedTransform
from fontTools.misc.textTools import tostr, safeEval, pad
from fontTools.misc.arrayTools import calcIntBounds, pointInRect
from fontTools.misc.bezierTools import calcQuadraticBounds
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
otRound,
)
from numbers import Number
from . import DefaultTable
from . import ttProgram
import sys
import struct
import array
import logging
import os
from fontTools.misc import xmlWriter
from fontTools.misc.filenames import userNameToFileName
from fontTools.misc.loggingTools import deprecateFunction
from enum import IntFlag
flagOnCurve = 0x01
flagXShort = 0x02
flagYShort = 0x04
flagXsame = 0x10
flagYsame = 0x20
def flagFits(newFlag, oldFlag, mask):
def flagSupports(newFlag, oldFlag):
    """Return whether a point encoded under *oldFlag* could also be carried
    by *newFlag*: identical on-curve bit, and compatible x and y packing."""
    if (oldFlag & flagOnCurve) != (newFlag & flagOnCurve):
        return False
    if not flagFits(newFlag, oldFlag, flagXsame | flagXShort):
        return False
    return flagFits(newFlag, oldFlag, flagYsame | flagYShort)
175,432 | from collections import namedtuple
from fontTools.misc import sstruct
from fontTools import ttLib
from fontTools import version
from fontTools.misc.transform import DecomposedTransform
from fontTools.misc.textTools import tostr, safeEval, pad
from fontTools.misc.arrayTools import calcIntBounds, pointInRect
from fontTools.misc.bezierTools import calcQuadraticBounds
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
otRound,
)
from numbers import Number
from . import DefaultTable
from . import ttProgram
import sys
import struct
import array
import logging
import os
from fontTools.misc import xmlWriter
from fontTools.misc.filenames import userNameToFileName
from fontTools.misc.loggingTools import deprecateFunction
from enum import IntFlag
flagXShort = 0x02
flagYShort = 0x04
flagXsame = 0x10
flagYsame = 0x20
def flagEncodeCoord(flag, mask, coord, coordBytes):
    """Append the encoded bytes for one coordinate delta to *coordBytes*.

    The short/same bits of *flag* selected by *mask* are mapped through
    _flagSignBytes to an encoding width: 1 appends the delta as one byte,
    -1 appends the negated delta as one byte (one-byte magnitude of a
    negative delta), 2 appends a big-endian signed 16-bit value.  Other
    widths append nothing -- presumably the "same" case where the delta
    is omitted (TODO confirm against _flagSignBytes, not visible here).
    """
    byteCount = _flagSignBytes[flag & mask]
    if byteCount == 1:
        coordBytes.append(coord)
    elif byteCount == -1:
        coordBytes.append(-coord)
    elif byteCount == 2:
        coordBytes.extend(struct.pack(">h", coord))
def flagEncodeCoords(flag, x, y, xBytes, yBytes):
    """Encode an (x, y) delta pair, appending each axis to its byte buffer."""
    axes = (
        (flagXsame | flagXShort, x, xBytes),
        (flagYsame | flagYShort, y, yBytes),
    )
    for mask, delta, out in axes:
        flagEncodeCoord(flag, mask, delta, out)
175,433 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
tobytes,
tostr,
safeEval,
)
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import newTable
from . import DefaultTable
import struct
import logging
log = logging.getLogger(__name__)
def makeName(string, nameID, platformID, platEncID, langID):
    """Build a NameRecord holding *string* under the given name/platform/
    encoding/language identifiers."""
    record = NameRecord()
    record.string = string
    record.nameID = nameID
    record.platformID = platformID
    record.platEncID = platEncID
    record.langID = langID
    return record
_WINDOWS_LANGUAGE_CODES = {
lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()
}
The provided code snippet includes necessary dependencies for implementing the `_makeWindowsName` function. Write a Python function `def _makeWindowsName(name, nameID, language)` to solve the following problem:
Create a NameRecord for the Microsoft Windows platform 'language' is an arbitrary IETF BCP 47 language identifier such as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows does not support the desired language, the result will be None. Future versions of fonttools might return a NameRecord for the OpenType 'name' table format 1, but this is not implemented yet.
Here is the function:
def _makeWindowsName(name, nameID, language):
    """Create a NameRecord for the Microsoft Windows platform.

    *language* is an arbitrary IETF BCP 47 language identifier such as
    'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'.  Returns None (with a
    warning) when Microsoft Windows has no language ID for it; format-1
    name records are not implemented yet.
    """
    langID = _WINDOWS_LANGUAGE_CODES.get(language.lower())
    if langID is None:
        log.warning(
            "cannot add Windows name in language %s "
            "because fonttools does not yet support "
            "name table format 1" % language
        )
        return None
    return makeName(name, nameID, 3, 1, langID)
175,434 | from fontTools.misc import sstruct
from fontTools.misc.textTools import (
bytechr,
byteord,
bytesjoin,
strjoin,
tobytes,
tostr,
safeEval,
)
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import newTable
from . import DefaultTable
import struct
import logging
log = logging.getLogger(__name__)
def makeName(string, nameID, platformID, platEncID, langID):
    """Construct a NameRecord with the given text and identifier fields."""
    rec = NameRecord()
    rec.string = string
    rec.nameID = nameID
    rec.platformID = platformID
    rec.platEncID = platEncID
    rec.langID = langID
    return rec
# Reverse lookup: lowercase language tag -> classic Macintosh language code.
_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()}
# Classic Macintosh language code -> Macintosh script code, used to pick the
# legacy text encoding for platform-1 name records.
_MAC_LANGUAGE_TO_SCRIPT = {
    0: 0,  # langEnglish → smRoman
    1: 0,  # langFrench → smRoman
    2: 0,  # langGerman → smRoman
    3: 0,  # langItalian → smRoman
    4: 0,  # langDutch → smRoman
    5: 0,  # langSwedish → smRoman
    6: 0,  # langSpanish → smRoman
    7: 0,  # langDanish → smRoman
    8: 0,  # langPortuguese → smRoman
    9: 0,  # langNorwegian → smRoman
    10: 5,  # langHebrew → smHebrew
    11: 1,  # langJapanese → smJapanese
    12: 4,  # langArabic → smArabic
    13: 0,  # langFinnish → smRoman
    14: 6,  # langGreek → smGreek
    15: 0,  # langIcelandic → smRoman (modified)
    16: 0,  # langMaltese → smRoman
    17: 0,  # langTurkish → smRoman (modified)
    18: 0,  # langCroatian → smRoman (modified)
    19: 2,  # langTradChinese → smTradChinese
    20: 4,  # langUrdu → smArabic
    21: 9,  # langHindi → smDevanagari
    22: 21,  # langThai → smThai
    23: 3,  # langKorean → smKorean
    24: 29,  # langLithuanian → smCentralEuroRoman
    25: 29,  # langPolish → smCentralEuroRoman
    26: 29,  # langHungarian → smCentralEuroRoman
    27: 29,  # langEstonian → smCentralEuroRoman
    28: 29,  # langLatvian → smCentralEuroRoman
    29: 0,  # langSami → smRoman
    30: 0,  # langFaroese → smRoman (modified)
    31: 4,  # langFarsi → smArabic (modified)
    32: 7,  # langRussian → smCyrillic
    33: 25,  # langSimpChinese → smSimpChinese
    34: 0,  # langFlemish → smRoman
    35: 0,  # langIrishGaelic → smRoman (modified)
    36: 0,  # langAlbanian → smRoman
    37: 0,  # langRomanian → smRoman (modified)
    38: 29,  # langCzech → smCentralEuroRoman
    39: 29,  # langSlovak → smCentralEuroRoman
    40: 0,  # langSlovenian → smRoman (modified)
    41: 5,  # langYiddish → smHebrew
    42: 7,  # langSerbian → smCyrillic
    43: 7,  # langMacedonian → smCyrillic
    44: 7,  # langBulgarian → smCyrillic
    45: 7,  # langUkrainian → smCyrillic (modified)
    46: 7,  # langByelorussian → smCyrillic
    47: 7,  # langUzbek → smCyrillic
    48: 7,  # langKazakh → smCyrillic
    49: 7,  # langAzerbaijani → smCyrillic
    50: 4,  # langAzerbaijanAr → smArabic
    51: 24,  # langArmenian → smArmenian
    52: 23,  # langGeorgian → smGeorgian
    53: 7,  # langMoldavian → smCyrillic
    54: 7,  # langKirghiz → smCyrillic
    55: 7,  # langTajiki → smCyrillic
    56: 7,  # langTurkmen → smCyrillic
    57: 27,  # langMongolian → smMongolian
    58: 7,  # langMongolianCyr → smCyrillic
    59: 4,  # langPashto → smArabic
    60: 4,  # langKurdish → smArabic
    61: 4,  # langKashmiri → smArabic
    62: 4,  # langSindhi → smArabic
    63: 26,  # langTibetan → smTibetan
    64: 9,  # langNepali → smDevanagari
    65: 9,  # langSanskrit → smDevanagari
    66: 9,  # langMarathi → smDevanagari
    67: 13,  # langBengali → smBengali
    68: 13,  # langAssamese → smBengali
    69: 11,  # langGujarati → smGujarati
    70: 10,  # langPunjabi → smGurmukhi
    71: 12,  # langOriya → smOriya
    72: 17,  # langMalayalam → smMalayalam
    73: 16,  # langKannada → smKannada
    74: 14,  # langTamil → smTamil
    75: 15,  # langTelugu → smTelugu
    76: 18,  # langSinhalese → smSinhalese
    77: 19,  # langBurmese → smBurmese
    78: 20,  # langKhmer → smKhmer
    79: 22,  # langLao → smLao
    80: 30,  # langVietnamese → smVietnamese
    81: 0,  # langIndonesian → smRoman
    82: 0,  # langTagalog → smRoman
    83: 0,  # langMalayRoman → smRoman
    84: 4,  # langMalayArabic → smArabic
    85: 28,  # langAmharic → smEthiopic
    86: 28,  # langTigrinya → smEthiopic
    87: 28,  # langOromo → smEthiopic
    88: 0,  # langSomali → smRoman
    89: 0,  # langSwahili → smRoman
    90: 0,  # langKinyarwanda → smRoman
    91: 0,  # langRundi → smRoman
    92: 0,  # langNyanja → smRoman
    93: 0,  # langMalagasy → smRoman
    94: 0,  # langEsperanto → smRoman
    128: 0,  # langWelsh → smRoman (modified)
    129: 0,  # langBasque → smRoman
    130: 0,  # langCatalan → smRoman
    131: 0,  # langLatin → smRoman
    132: 0,  # langQuechua → smRoman
    133: 0,  # langGuarani → smRoman
    134: 0,  # langAymara → smRoman
    135: 7,  # langTatar → smCyrillic
    136: 4,  # langUighur → smArabic
    137: 26,  # langDzongkha → smTibetan
    138: 0,  # langJavaneseRom → smRoman
    139: 0,  # langSundaneseRom → smRoman
    140: 0,  # langGalician → smRoman
    141: 0,  # langAfrikaans → smRoman
    142: 0,  # langBreton → smRoman (modified)
    143: 28,  # langInuktitut → smEthiopic (modified)
    144: 0,  # langScottishGaelic → smRoman (modified)
    145: 0,  # langManxGaelic → smRoman (modified)
    146: 0,  # langIrishGaelicScript → smRoman (modified)
    147: 0,  # langTongan → smRoman
    148: 6,  # langGreekAncient → smRoman
    149: 0,  # langGreenlandic → smRoman
    150: 0,  # langAzerbaijanRoman → smRoman
    151: 0,  # langNynorsk → smRoman
}
def tobytes(s, encoding="ascii", errors="strict"):
    """Encode *s* with *encoding* if it is a str; otherwise coerce via bytes()."""
    if not isinstance(s, str):
        return bytes(s)
    return s.encode(encoding, errors)
def getEncoding(platformID, platEncID, langID, default=None):
    """Return the Python codec name for an OpenType platformID/encodingID/langID
    triplet, or *default* when the combination is unknown."""
    enc = _encodingMap.get(platformID, {}).get(platEncID, default)
    if isinstance(enc, dict):
        # Some platform encodings vary by language; those entries map to a
        # per-language dict whose Ellipsis key is the fallback codec.
        return enc.get(langID, enc[Ellipsis])
    return enc
The provided code snippet includes the dependencies needed to implement the `_makeMacName` function. Write a Python function `def _makeMacName(name, nameID, language, font=None)` that solves the following problem:
Create a NameRecord for Apple platforms. `language` is an arbitrary IETF BCP 47 language identifier such as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we create a Macintosh NameRecord that is understood by old applications (platform ID 1 and an old-style Macintosh language enum). If this is not possible, we create a Unicode NameRecord (platform ID 0) whose language points to the font's 'ltag' table. The latter can encode any string in any language, but legacy applications might not recognize the format (in which case they will ignore those names). `font` should be the TTFont for which you want to create a name. If `font` is None, we only return NameRecords for legacy Macintosh; in that case, the result will be None for names that need to be encoded with an 'ltag' table. See the section "The language identifier" in Apple's specification: https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
Here is the function:
def _makeMacName(name, nameID, language, font=None):
    """Create a NameRecord for Apple platforms.

    *language* is an arbitrary IETF BCP 47 language identifier such as
    'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'.  When the name can be
    expressed in a legacy Macintosh script encoding, a platform-1 record
    with an old-style language enum is returned so old applications still
    understand it.  Otherwise a Unicode (platform 0) record is created
    whose language index points into the font's 'ltag' table; that table
    is created on demand.  Without a *font* to hang the 'ltag' table on,
    such names cannot be stored and None is returned (with a warning).

    See "The language identifier" in Apple's specification:
    https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
    """
    macLang = _MAC_LANGUAGE_CODES.get(language.lower())
    macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang)
    if macLang is not None and macScript is not None:
        # Prefer a legacy Macintosh record, but only when the string is
        # actually representable in the legacy script encoding.
        legacyEncoding = getEncoding(1, macScript, macLang, default="ascii")
        try:
            tobytes(name, legacyEncoding, errors="strict")
        except UnicodeEncodeError:
            pass  # fall through to the Unicode + 'ltag' path below
        else:
            return makeName(name, nameID, 1, macScript, macLang)
    if font is None:
        log.warning(
            "cannot store language %s into 'ltag' table "
            "without having access to the TTFont object" % language
        )
        return None
    ltag = font.tables.get("ltag")
    if ltag is None:
        ltag = font["ltag"] = newTable("ltag")
    # 0 = Unicode; 4 = "Unicode 2.0 or later semantics (non-BMP characters
    # allowed)" -- the preferred platform-specific codes for Unicode are 3/4:
    # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html
    return makeName(name, nameID, 0, 4, ltag.addTag(language))
175,435 | from . import DefaultTable
import struct
def fixlongs(glyphID, textLength, textOffset):
    """Coerce the first two TSI index fields to int; pass the offset through."""
    gid = int(glyphID)
    length = int(textLength)
    return gid, length, textOffset
175,436 | from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
from typing import List
import re
import logging
def bitRepr(value, bits):
    """Return *value* as a big-endian binary string of exactly *bits* digits."""
    return "".join(
        "1" if (value >> shift) & 1 else "0" for shift in range(bits - 1, -1, -1)
    )
175,437 | from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
from typing import List
import re
import logging
# Mnemonics are all-caps alphanumerics beginning with a letter.
_mnemonicPat = re.compile(r"[A-Z][A-Z0-9]*$")
# NOTE(review): this lookup-table build references `_makeDict` and the
# `instructions` list, which appear later in this chunk — confirm the
# definition order in the full module.
opcodeDict, mnemonicDict = _makeDict(instructions)
def _makeDict(instructionList):
    """Build (opcodeDict, mnemonicDict) lookups from the instruction table.

    An instruction with *argBits* inline-argument bits occupies a run of
    2**argBits opcodes starting at its base opcode; every opcode in that
    run maps back to the same mnemonic with the base opcode as offset.
    """
    byOpcode = {}
    byMnemonic = {}
    for op, mnemonic, argBits, name, pops, pushes in instructionList:
        assert _mnemonicPat.match(mnemonic)
        byMnemonic[mnemonic] = op, argBits, name
        if not argBits:
            byOpcode[op] = mnemonic, 0, 0, name
        else:
            for variant in range(op, op + (1 << argBits)):
                byOpcode[variant] = mnemonic, argBits, op, name
    return byOpcode, byMnemonic
175,438 | from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
from typing import List
import re
import logging
_whiteRE = re.compile(r"\s*")
def _skipWhite(data, pos):
m = _whiteRE.match(data, pos)
newPos = m.regs[0][1]
assert newPos >= pos
return newPos | null |
175,439 | from __future__ import annotations
from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin
import array
from io import StringIO
from typing import List
import re
import logging
class Program(object):
    """A TrueType instruction program (glyph, 'fpgm' or 'prep' bytecode).

    The program is held in one of two interconvertible forms: raw
    ``bytecode`` (an array of opcode bytes) or textual ``assembly``
    (a list of instruction strings).  Whichever form is present is
    converted on demand by :meth:`getBytecode` / :meth:`getAssembly`.

    Fixes relative to the previous revision:
    * ``_assemble`` no longer emits a bogus byte-push opcode when a PUSH
      run contains no byte-sized values (the ``if nBytes:`` guard had an
      empty body, letting ``nBytes == 0`` fall through to ``op + 0 - 1``).
    * ``toXML`` catches ``Exception`` instead of a bare ``except:`` so
      ``KeyboardInterrupt``/``SystemExit`` are no longer swallowed.
    """

    def __init__(self) -> None:
        pass

    def fromBytecode(self, bytecode: bytes) -> None:
        """Load raw bytecode, dropping any cached assembly."""
        self.bytecode = array.array("B", bytecode)
        if hasattr(self, "assembly"):
            del self.assembly

    def fromAssembly(self, assembly: List[str] | str) -> None:
        """Load assembly (a list of lines or one string), dropping any cached bytecode."""
        if isinstance(assembly, list):
            self.assembly = assembly
        elif isinstance(assembly, str):
            self.assembly = assembly.splitlines()
        else:
            raise TypeError(f"expected str or List[str], got {type(assembly).__name__}")
        if hasattr(self, "bytecode"):
            del self.bytecode

    def getBytecode(self) -> bytes:
        """Return the program as raw bytecode, assembling on demand."""
        if not hasattr(self, "bytecode"):
            self._assemble()
        return self.bytecode.tobytes()

    def getAssembly(self, preserve=True) -> List[str]:
        """Return the program as assembly, disassembling on demand.

        With preserve=True, the PUSH groupings found in the bytecode are
        kept as-is so reassembly round-trips byte-for-byte; otherwise
        consecutive pushes are merged into generic PUSH pseudo-ops.
        """
        if not hasattr(self, "assembly"):
            self._disassemble(preserve=preserve)
        return self.assembly

    def toXML(self, writer, ttFont) -> None:
        """Write the program to TTX: as assembly when disassembly succeeds
        (and is enabled on the font), otherwise as a hex bytecode dump."""
        if (
            not hasattr(ttFont, "disassembleInstructions")
            or ttFont.disassembleInstructions
        ):
            try:
                assembly = self.getAssembly()
            except Exception:  # was a bare except; don't swallow KeyboardInterrupt
                import traceback

                tmp = StringIO()
                traceback.print_exc(file=tmp)
                msg = "An exception occurred during the decompilation of glyph program:\n\n"
                msg += tmp.getvalue()
                log.error(msg)
                # Fall back to a hex dump, embedding the traceback as a comment.
                writer.begintag("bytecode")
                writer.newline()
                writer.comment(msg.strip())
                writer.newline()
                writer.dumphex(self.getBytecode())
                writer.endtag("bytecode")
                writer.newline()
            else:
                if not assembly:
                    return
                writer.begintag("assembly")
                writer.newline()
                i = 0
                indent = 0
                nInstr = len(assembly)
                while i < nInstr:
                    instr = assembly[i]
                    if _unindentRE.match(instr):
                        indent -= 1
                    writer.write(writer.indentwhite * indent)
                    writer.write(instr)
                    writer.newline()
                    m = _pushCountPat.match(instr)
                    i = i + 1
                    if m:
                        # Emit the pushed values, 25 per output line.
                        nValues = int(m.group(1))
                        line: List[str] = []
                        j = 0
                        for j in range(nValues):
                            if j and not (j % 25):
                                writer.write(writer.indentwhite * indent)
                                writer.write(" ".join(line))
                                writer.newline()
                                line = []
                            line.append(assembly[i + j])
                        writer.write(writer.indentwhite * indent)
                        writer.write(" ".join(line))
                        writer.newline()
                        i = i + j + 1
                    if _indentRE.match(instr):
                        indent += 1
                writer.endtag("assembly")
                writer.newline()
        else:
            bytecode = self.getBytecode()
            if not bytecode:
                return
            writer.begintag("bytecode")
            writer.newline()
            writer.dumphex(bytecode)
            writer.endtag("bytecode")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont) -> None:
        """Read the program back from TTX (<assembly> or <bytecode>)."""
        if name == "assembly":
            self.fromAssembly(strjoin(content))
            self._assemble()
            del self.assembly
        else:
            assert name == "bytecode"
            self.fromBytecode(readHex(content))

    def _assemble(self) -> None:
        """Translate self.assembly into self.bytecode."""
        assembly = " ".join(getattr(self, "assembly", []))
        bytecode: List[int] = []
        push = bytecode.append
        lenAssembly = len(assembly)
        pos = _skipWhite(assembly, 0)
        while pos < lenAssembly:
            m = _tokenRE.match(assembly, pos)
            if m is None:
                raise tt_instructions_error(
                    "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15]
                )
            dummy, mnemonic, arg, number, comment = m.groups()
            pos = m.regs[0][1]
            if comment:
                pos = _skipWhite(assembly, pos)
                continue

            arg = arg.strip()
            if mnemonic.startswith("INSTR"):
                # Unknown instruction: emit its raw opcode.
                op = int(mnemonic[5:])
                push(op)
            elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
                op, argBits, name = mnemonicDict[mnemonic]
                if len(arg) != argBits:
                    raise tt_instructions_error(
                        "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)
                    )
                if arg:
                    arg = binary2num(arg)
                    push(op + arg)
                else:
                    push(op)
            else:
                # A push instruction: collect the following numbers (skipping
                # any interleaved comments) as its arguments.
                args = []
                pos = _skipWhite(assembly, pos)
                while pos < lenAssembly:
                    m = _tokenRE.match(assembly, pos)
                    if m is None:
                        raise tt_instructions_error(
                            "Syntax error in TT program (%s)" % assembly[pos : pos + 15]
                        )
                    dummy, _mnemonic, arg, number, comment = m.groups()
                    if number is None and comment is None:
                        break
                    pos = m.regs[0][1]
                    pos = _skipWhite(assembly, pos)
                    if comment is not None:
                        continue
                    args.append(int(number))
                nArgs = len(args)
                if mnemonic == "PUSH":
                    # Automatically choose the most compact representation
                    nWords = 0
                    while nArgs:
                        # Count leading word-sized values, then the byte-sized
                        # run that follows them.
                        while (
                            nWords < nArgs
                            and nWords < 255
                            and not (0 <= args[nWords] <= 255)
                        ):
                            nWords += 1
                        nBytes = 0
                        while (
                            nWords + nBytes < nArgs
                            and nBytes < 255
                            and 0 <= args[nWords + nBytes] <= 255
                        ):
                            nBytes += 1
                        if (
                            nBytes < 2
                            and nWords + nBytes < 255
                            and nWords + nBytes != nArgs
                        ):
                            # A lone byte between word runs is cheaper to
                            # write as a word than to start a byte push.
                            nWords += nBytes
                            continue
                        # Write words
                        if nWords:
                            if nWords <= 8:
                                op, argBits, name = streamMnemonicDict["PUSHW"]
                                op = op + nWords - 1
                                push(op)
                            else:
                                op, argBits, name = streamMnemonicDict["NPUSHW"]
                                push(op)
                                push(nWords)
                            for value in args[:nWords]:
                                assert -32768 <= value < 32768, (
                                    "PUSH value out of range %d" % value
                                )
                                push((value >> 8) & 0xFF)
                                push(value & 0xFF)
                        # Write bytes.  BUGFIX: only emit a byte-push opcode
                        # when there are bytes to write; previously an empty
                        # guard body let nBytes == 0 fall through and emit a
                        # bogus PUSHB opcode (op + 0 - 1).
                        if nBytes:
                            if nBytes <= 8:
                                op, argBits, name = streamMnemonicDict["PUSHB"]
                                op = op + nBytes - 1
                                push(op)
                            else:
                                op, argBits, name = streamMnemonicDict["NPUSHB"]
                                push(op)
                                push(nBytes)
                            for value in args[nWords : nWords + nBytes]:
                                push(value)
                        nTotal = nWords + nBytes
                        args = args[nTotal:]
                        nArgs -= nTotal
                        nWords = 0
                else:
                    # Write exactly what we've been asked to
                    words = mnemonic[-1] == "W"
                    op, argBits, name = streamMnemonicDict[mnemonic]
                    if mnemonic[0] != "N":
                        # PUSHB/PUSHW encode the count in the opcode (max 8).
                        assert nArgs <= 8, nArgs
                        op = op + nArgs - 1
                        push(op)
                    else:
                        # NPUSHB/NPUSHW carry an explicit count byte.
                        assert nArgs < 256
                        push(op)
                        push(nArgs)
                    if words:
                        for value in args:
                            assert -32768 <= value < 32768, (
                                "PUSHW value out of range %d" % value
                            )
                            push((value >> 8) & 0xFF)
                            push(value & 0xFF)
                    else:
                        for value in args:
                            assert 0 <= value < 256, (
                                "PUSHB value out of range %d" % value
                            )
                            push(value)
            pos = _skipWhite(assembly, pos)

        if bytecode:
            assert max(bytecode) < 256 and min(bytecode) >= 0
        self.bytecode = array.array("B", bytecode)

    def _disassemble(self, preserve=False) -> None:
        """Translate self.bytecode into self.assembly."""
        assembly = []
        i = 0
        bytecode = getattr(self, "bytecode", [])
        numBytecode = len(bytecode)
        while i < numBytecode:
            op = bytecode[i]
            try:
                mnemonic, argBits, argoffset, name = opcodeDict[op]
            except KeyError:
                if op in streamOpcodeDict:
                    values = []

                    # Merge consecutive PUSH operations
                    while bytecode[i] in streamOpcodeDict:
                        op = bytecode[i]
                        mnemonic, argBits, argoffset, name = streamOpcodeDict[op]
                        words = mnemonic[-1] == "W"
                        if argBits:
                            # Count is encoded in the opcode itself.
                            nValues = op - argoffset + 1
                        else:
                            # NPUSH*: explicit count byte follows the opcode.
                            i = i + 1
                            nValues = bytecode[i]
                        i = i + 1
                        assert nValues > 0
                        if not words:
                            for j in range(nValues):
                                value = bytecode[i]
                                values.append(repr(value))
                                i = i + 1
                        else:
                            for j in range(nValues):
                                # cast to signed int16
                                value = (bytecode[i] << 8) | bytecode[i + 1]
                                if value >= 0x8000:
                                    value = value - 0x10000
                                values.append(repr(value))
                                i = i + 2
                        if preserve:
                            break

                    if not preserve:
                        mnemonic = "PUSH"
                    nValues = len(values)
                    if nValues == 1:
                        assembly.append("%s[ ] /* 1 value pushed */" % mnemonic)
                    else:
                        assembly.append(
                            "%s[ ] /* %s values pushed */" % (mnemonic, nValues)
                        )
                    assembly.extend(values)
                else:
                    assembly.append("INSTR%d[ ]" % op)
                    i = i + 1
            else:
                if argBits:
                    assembly.append(
                        mnemonic
                        + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)
                    )
                else:
                    assembly.append(mnemonic + "[ ] /* %s */" % name)
                i = i + 1
        self.assembly = assembly

    def __bool__(self) -> bool:
        """
        >>> p = Program()
        >>> bool(p)
        False
        >>> bc = array.array("B", [0])
        >>> p.fromBytecode(bc)
        >>> bool(p)
        True
        >>> p.bytecode.pop()
        0
        >>> bool(p)
        False

        >>> p = Program()
        >>> asm = ['SVTCA[0]']
        >>> p.fromAssembly(asm)
        >>> bool(p)
        True
        >>> p.assembly.pop()
        'SVTCA[0]'
        >>> bool(p)
        False
        """
        return (hasattr(self, "assembly") and len(self.assembly) > 0) or (
            hasattr(self, "bytecode") and len(self.bytecode) > 0
        )

    __nonzero__ = __bool__

    def __eq__(self, other) -> bool:
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other) -> bool:
        result = self.__eq__(other)
        return result if result is NotImplemented else not result
The provided code snippet includes the dependencies needed to implement the `_test` function. Write a Python function `def _test()` that satisfies the following doctest:
>>> _test()
True
Here is the function:
def _test():
    """
    >>> _test()
    True
    """
    # Round-trip check: the fixture below is a real glyph-program blob
    # ('prep'-style bytecode, octal-escaped).  Disassembling it with
    # preserve=True and reassembling must reproduce it byte-for-byte.
    bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
 \212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
 F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
    p = Program()
    p.fromBytecode(bc)
    asm = p.getAssembly(preserve=True)
    p.fromAssembly(asm)
    # Prints True when the reassembled bytecode matches the fixture exactly.
    print(bc == p.getBytecode())
175,440 | from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
import logging
log = logging.getLogger(__name__)
def tostr(s, encoding="ascii", errors="strict"):
def byteord(c):
def unpackPStrings(data, n):
    """Extract *n* Pascal strings (length byte + latin-1 bytes) from *data*.

    Runs that extend past the end of the buffer yield "" instead of
    raising; any size mismatch is logged as a warning.
    """
    strings = []
    pos = 0
    total = len(data)
    for _ in range(n):
        if pos >= total:
            length = 0
        else:
            length = byteord(data[pos])
            pos += 1
        if pos + length - 1 >= total:
            name = ""
        else:
            name = tostr(data[pos : pos + length], encoding="latin1")
        strings.append(name)
        pos += length
    if pos < total:
        log.warning("%d extra bytes in post.stringData array", total - pos)
    elif total < pos:
        log.warning("not enough data in post.stringData array")
    return strings
175,441 | from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
import logging
def tobytes(s, encoding="ascii", errors="strict"):
    """Return *s* as bytes, encoding str input with *encoding*/*errors*."""
    return s.encode(encoding, errors) if isinstance(s, str) else bytes(s)
def bytechr(n):
    """Return a single-byte bytes object for the integer *n* (0-255)."""
    return bytes((n,))
def packPStrings(strings):
    """Concatenate *strings* as Pascal strings: length byte + latin-1 bytes."""
    chunks = []
    for s in strings:
        chunks.append(bytechr(len(s)))
        chunks.append(tobytes(s, encoding="latin1"))
    return b"".join(chunks)
175,442 | from fontTools.misc.textTools import bytesjoin
from fontTools.misc import sstruct
from . import E_B_D_T_
from .BitmapGlyphMetrics import (
BigGlyphMetrics,
bigGlyphMetricsFormat,
SmallGlyphMetrics,
smallGlyphMetricsFormat,
)
from .E_B_D_T_ import (
BitmapGlyph,
BitmapPlusSmallMetricsMixin,
BitmapPlusBigMetricsMixin,
)
import struct
def _removeUnsupportedForColor(dataFunctions):
dataFunctions = dict(dataFunctions)
del dataFunctions["row"]
return dataFunctions | null |
175,443 | import struct, warnings
try:
import lz4
except ImportError:
lz4 = None
else:
import lz4.block
def decompress(data):
    """Return (payload, scheme) for a Graphite table.

    The second 32-bit word of the table packs the compression scheme into
    its top 5 bits and the uncompressed size into the low 27 bits.
    Scheme 0 is uncompressed; scheme 1 is LZ4 (decoded only when the lz4
    module is available); anything else is left untouched with a warning.
    """
    (compression,) = struct.unpack(">L", data[4:8])
    scheme = compression >> 27
    size = compression & 0x07FFFFFF
    if scheme == 1 and lz4:
        expanded = lz4.block.decompress(struct.pack("<L", size) + data[8:])
        if len(expanded) != size:
            warnings.warn("Table decompression failed.")
        else:
            data = expanded
    elif scheme != 0:
        warnings.warn("Table is compressed with an unsupported compression scheme")
    return (data, scheme)
175,444 | import struct, warnings
try:
import lz4
except ImportError:
lz4 = None
else:
import lz4.block
def compress(scheme, data):
    """Wrap *data* with a Graphite compression header for *scheme*.

    Scheme 0 returns the data unchanged; scheme 1 LZ4-compresses it
    (when the lz4 module is available); any other scheme warns and
    returns the data unchanged.
    """
    header = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF))
    if scheme == 0:
        return data
    if scheme == 1 and lz4:
        payload = lz4.block.compress(
            data, mode="high_compression", compression=16, store_size=False
        )
        return header + payload
    warnings.warn("Table failed to compress by unsupported compression scheme")
    return data
175,445 | import struct, warnings
def _entries(attrs, sameval):
ak = 0
vals = []
lastv = 0
for k, v in attrs:
if len(vals) and (k != ak + 1 or (sameval and v != lastv)):
yield (ak - len(vals) + 1, len(vals), vals)
vals = []
ak = k
vals.append(v)
lastv = v
yield (ak - len(vals) + 1, len(vals), vals)
def entries(attributes, sameval=False):
    """Group a {key: value} mapping into runs of consecutive integer keys."""
    ordered = sorted(attributes.items(), key=lambda item: int(item[0]))
    return _entries(ordered, sameval)
175,446 | import struct, warnings
def bininfo(num, size=1):
    """Pack the (count, searchRange, entrySelector, rangeShift) binary-search
    header used by several OpenType tables; all zeros when *num* is 0."""
    if not num:
        return struct.pack(">4H", 0, 0, 0, 0)
    # Largest power of two <= num, and its exponent.
    selector = 0
    searchRange = 1
    while searchRange <= num:
        searchRange *= 2
        selector += 1
    selector -= 1
    searchRange //= 2
    searchRange *= size
    shift = num * size - searchRange
    return struct.pack(">4H", num, searchRange, selector, shift)
175,447 | import struct, warnings
def num2tag(n):
    """Render *n* as a decimal string, or — when large enough to be a packed
    ASCII tag (>= 0x200000) — as the tag's characters with NULs stripped."""
    if n < 0x200000:
        return str(n)
    raw = struct.pack(">L", n)
    return raw.replace(b"\000", b"").decode()
175,448 | import struct, warnings
def tag2num(n):
    """Parse *n* as a decimal number, else pack it as an ASCII tag.

    Non-numeric input is padded/truncated to 4 characters and unpacked as
    a big-endian 32-bit value.  NOTE(review): only one pad space is
    appended before truncation, so tags shorter than 3 characters would
    not reach 4 bytes — confirm short tags never occur here.
    """
    try:
        return int(n)
    except ValueError:
        padded = (n + " ")[:4]
        return struct.unpack(">L", padded.encode("ascii"))[0]
175,449 | from collections import deque
from typing import Callable, Deque, Iterable, List, Optional, Tuple
from .otBase import BaseTable
class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]):
    """A path of subtable entries, printable as dotted names with indices."""

    def __str__(self) -> str:
        parts = []
        for entry in self:
            if entry.index is None:
                parts.append(entry.name)
            else:
                parts.append(f"{entry.name}[{entry.index}]")
        return ".".join(parts)
def _traverse_ot_data(
    root: BaseTable,
    root_accessor: Optional[str],
    skip_root: bool,
    predicate: Optional[Callable[[SubTablePath], bool]],
    add_to_frontier_fn: AddToFrontierFn,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Generic traversal over an OpenType subtable graph rooted at *root*.

    Yields one SubTablePath per visited table.  Traversal order is
    controlled by *add_to_frontier_fn*, which decides where new paths go
    in the work queue (e.g. append for BFS, prepend for DFS).  Paths for
    which *predicate* returns False are pruned (not yielded, children not
    expanded).  With *skip_root* the root itself is not yielded, only its
    subtables onward.
    """
    # no visited because general otData cannot cycle (forward-offset only)
    if root_accessor is None:
        root_accessor = type(root).__name__

    if predicate is None:
        # Default: visit everything.
        def predicate(path):
            return True

    if iter_subtables_fn is None:
        # Default child enumeration: the table's own iterSubTables().
        def iter_subtables_fn(table):
            return table.iterSubTables()

    frontier: Deque[SubTablePath] = deque()

    root_entry = BaseTable.SubTableEntry(root_accessor, root)
    if not skip_root:
        frontier.append((root_entry,))
    else:
        add_to_frontier_fn(
            frontier,
            [
                (root_entry, subtable_entry)
                for subtable_entry in iter_subtables_fn(root)
            ],
        )

    while frontier:
        # path is (value, attr_name) tuples. attr_name is attr of parent to get value
        path = frontier.popleft()
        current = path[-1].value

        if not predicate(path):
            continue

        yield SubTablePath(path)

        new_entries = [
            path + (subtable_entry,) for subtable_entry in iter_subtables_fn(current)
        ]

        add_to_frontier_fn(frontier, new_entries)
class Callable(BaseTypingInstance):
    """Represents a typing.Callable[[params], result] annotation."""

    def py__call__(self, arguments):
        """
        def x() -> Callable[[Callable[..., _T]], _T]: ...
        """
        # Generic slot 0 holds the parameter types, slot 1 the return type.
        try:
            param_values = self._generics_manager[0]
            result_values = self._generics_manager[1]
        except IndexError:
            debug.warning('Callable[...] defined without two arguments')
            return NO_VALUES
        from jedi.inference.gradual.annotation import infer_return_for_callable
        return infer_return_for_callable(arguments, param_values, result_values)

    def py__get__(self, instance, class_value):
        # Callables are not descriptors; accessing one yields itself.
        return ValueSet([self])
# typeshed-style stubs: Optional is a typing special form; Iterable is the
# structural protocol with the single abstract method __iter__.
Optional: _SpecialForm = ...
class Iterable(Protocol[_T_co]):
    def __iter__(self) -> Iterator[_T_co]: ...
class BaseTable(object):
"""Generic base class for all OpenType (sub)tables."""
def __getattr__(self, attr):
reader = self.__dict__.get("reader")
if reader:
del self.reader
font = self.font
del self.font
self.decompile(reader, font)
return getattr(self, attr)
raise AttributeError(attr)
def ensureDecompiled(self, recurse=False):
reader = self.__dict__.get("reader")
if reader:
del self.reader
font = self.font
del self.font
self.decompile(reader, font)
if recurse:
for subtable in self.iterSubTables():
subtable.value.ensureDecompiled(recurse)
def __getstate__(self):
# before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader
# https://github.com/fonttools/fonttools/issues/2965
if "reader" in self.__dict__:
state = self.__dict__.copy()
state["reader"] = self.__dict__["reader"].copy()
return state
return self.__dict__
    def getRecordSize(cls, reader):
        """Sum the byte sizes of all field converters, or NotImplemented when
        any field (or its repeat count) has no statically known size."""
        # NOTE(review): first parameter is `cls` but no @classmethod decorator
        # is visible here — confirm the decorator wasn't lost in extraction.
        totalSize = 0
        for conv in cls.converters:
            size = conv.getRecordSize(reader)
            if size is NotImplemented:
                return NotImplemented
            countValue = 1
            if conv.repeat:
                if conv.repeat in reader:
                    # Repeat count was propagated onto the reader.
                    countValue = reader[conv.repeat] + conv.aux
                else:
                    return NotImplemented
            totalSize += size * countValue
        return totalSize
def getConverters(self):
return self.converters
def getConverterByName(self, name):
return self.convertersByName[name]
def populateDefaults(self, propagator=None):
for conv in self.getConverters():
if conv.repeat:
if not hasattr(self, conv.name):
setattr(self, conv.name, [])
countValue = len(getattr(self, conv.name)) - conv.aux
try:
count_conv = self.getConverterByName(conv.repeat)
setattr(self, conv.repeat, countValue)
except KeyError:
# conv.repeat is a propagated count
if propagator and conv.repeat in propagator:
propagator[conv.repeat].setValue(countValue)
else:
if conv.aux and not eval(conv.aux, None, self.__dict__):
continue
if hasattr(self, conv.name):
continue # Warn if it should NOT be present?!
if hasattr(conv, "writeNullOffset"):
setattr(self, conv.name, None) # Warn?
# elif not conv.isCount:
# # Warn?
# pass
if hasattr(conv, "DEFAULT"):
# OptionalValue converters (e.g. VarIndex)
setattr(self, conv.name, conv.DEFAULT)
def decompile(self, reader, font):
self.readFormat(reader)
table = {}
self.__rawTable = table # for debugging
for conv in self.getConverters():
if conv.name == "SubTable":
conv = conv.getConverter(reader.tableTag, table["LookupType"])
if conv.name == "ExtSubTable":
conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"])
if conv.name == "FeatureParams":
conv = conv.getConverter(reader["FeatureTag"])
if conv.name == "SubStruct":
conv = conv.getConverter(reader.tableTag, table["MorphType"])
try:
if conv.repeat:
if isinstance(conv.repeat, int):
countValue = conv.repeat
elif conv.repeat in table:
countValue = table[conv.repeat]
else:
# conv.repeat is a propagated count
countValue = reader[conv.repeat]
countValue += conv.aux
table[conv.name] = conv.readArray(reader, font, table, countValue)
else:
if conv.aux and not eval(conv.aux, None, table):
continue
table[conv.name] = conv.read(reader, font, table)
if conv.isPropagated:
reader[conv.name] = table[conv.name]
except Exception as e:
name = conv.name
e.args = e.args + (name,)
raise
if hasattr(self, "postRead"):
self.postRead(table, font)
else:
self.__dict__.update(table)
del self.__rawTable # succeeded, get rid of debugging info
def compile(self, writer, font):
self.ensureDecompiled()
# TODO Following hack to be removed by rewriting how FormatSwitching tables
# are handled.
# https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
if hasattr(self, "preWrite"):
deleteFormat = not hasattr(self, "Format")
table = self.preWrite(font)
deleteFormat = deleteFormat and hasattr(self, "Format")
else:
deleteFormat = False
table = self.__dict__.copy()
# some count references may have been initialized in a custom preWrite; we set
# these in the writer's state beforehand (instead of sequentially) so they will
# be propagated to all nested subtables even if the count appears in the current
# table only *after* the offset to the subtable that it is counting.
for conv in self.getConverters():
if conv.isCount and conv.isPropagated:
value = table.get(conv.name)
if isinstance(value, CountReference):
writer[conv.name] = value
if hasattr(self, "sortCoverageLast"):
writer.sortCoverageLast = 1
if hasattr(self, "DontShare"):
writer.DontShare = True
if hasattr(self.__class__, "LookupType"):
writer["LookupType"].setValue(self.__class__.LookupType)
self.writeFormat(writer)
for conv in self.getConverters():
value = table.get(
conv.name
) # TODO Handle defaults instead of defaulting to None!
if conv.repeat:
if value is None:
value = []
countValue = len(value) - conv.aux
if isinstance(conv.repeat, int):
assert len(value) == conv.repeat, "expected %d values, got %d" % (
conv.repeat,
len(value),
)
elif conv.repeat in table:
CountReference(table, conv.repeat, value=countValue)
else:
# conv.repeat is a propagated count
writer[conv.repeat].setValue(countValue)
try:
conv.writeArray(writer, font, table, value)
except Exception as e:
e.args = e.args + (conv.name + "[]",)
raise
elif conv.isCount:
# Special-case Count values.
# Assumption: a Count field will *always* precede
# the actual array(s).
# We need a default value, as it may be set later by a nested
# table. We will later store it here.
# We add a reference: by the time the data is assembled
# the Count value will be filled in.
# We ignore the current count value since it will be recomputed,
# unless it's a CountReference that was already initialized in a custom preWrite.
if isinstance(value, CountReference):
ref = value
ref.size = conv.staticSize
writer.writeData(ref)
table[conv.name] = ref.getValue()
else:
ref = writer.writeCountReference(table, conv.name, conv.staticSize)
table[conv.name] = None
if conv.isPropagated:
writer[conv.name] = ref
elif conv.isLookupType:
# We make sure that subtables have the same lookup type,
# and that the type is the same as the one set on the
# Lookup object, if any is set.
if conv.name not in table:
table[conv.name] = None
ref = writer.writeCountReference(
table, conv.name, conv.staticSize, table[conv.name]
)
writer["LookupType"] = ref
else:
if conv.aux and not eval(conv.aux, None, table):
continue
try:
conv.write(writer, font, table, value)
except Exception as e:
name = value.__class__.__name__ if value is not None else conv.name
e.args = e.args + (name,)
raise
if conv.isPropagated:
writer[conv.name] = value
if deleteFormat:
del self.Format
def readFormat(self, reader):
pass
def writeFormat(self, writer):
pass
def toXML(self, xmlWriter, font, attrs=None, name=None):
tableName = name if name else self.__class__.__name__
if attrs is None:
attrs = []
if hasattr(self, "Format"):
attrs = attrs + [("Format", self.Format)]
xmlWriter.begintag(tableName, attrs)
xmlWriter.newline()
self.toXML2(xmlWriter, font)
xmlWriter.endtag(tableName)
xmlWriter.newline()
def toXML2(self, xmlWriter, font):
# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
# This is because in TTX our parent writes our main tag, and in otBase.py we
# do it ourselves. I think I'm getting schizophrenic...
for conv in self.getConverters():
if conv.repeat:
value = getattr(self, conv.name, [])
for i in range(len(value)):
item = value[i]
conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)])
else:
if conv.aux and not eval(conv.aux, None, vars(self)):
continue
value = getattr(
self, conv.name, None
) # TODO Handle defaults instead of defaulting to None!
conv.xmlWrite(xmlWriter, font, value, conv.name, [])
def fromXML(self, name, attrs, content, font):
try:
conv = self.getConverterByName(name)
except KeyError:
raise # XXX on KeyError, raise nice error
value = conv.xmlRead(attrs, content, font)
if conv.repeat:
seq = getattr(self, conv.name, None)
if seq is None:
seq = []
setattr(self, conv.name, seq)
seq.append(value)
else:
setattr(self, conv.name, value)
def __ne__(self, other):
result = self.__eq__(other)
return result if result is NotImplemented else not result
def __eq__(self, other):
if type(self) != type(other):
return NotImplemented
self.ensureDecompiled()
other.ensureDecompiled()
return self.__dict__ == other.__dict__
class SubTableEntry(NamedTuple):
"""See BaseTable.iterSubTables()"""
name: str
value: "BaseTable"
index: Optional[int] = None # index into given array, None for single values
def iterSubTables(self) -> Iterator[SubTableEntry]:
"""Yield (name, value, index) namedtuples for all subtables of current table.
A sub-table is an instance of BaseTable (or subclass thereof) that is a child
of self, the current parent table.
The tuples also contain the attribute name (str) of the of parent table to get
a subtable, and optionally, for lists of subtables (i.e. attributes associated
with a converter that has a 'repeat'), an index into the list containing the
given subtable value.
This method can be useful to traverse trees of otTables.
"""
for conv in self.getConverters():
name = conv.name
value = getattr(self, name, None)
if value is None:
continue
if isinstance(value, BaseTable):
yield self.SubTableEntry(name, value)
elif isinstance(value, list):
yield from (
self.SubTableEntry(name, v, index=i)
for i, v in enumerate(value)
if isinstance(v, BaseTable)
)
# instance (not @class)method for consistency with FormatSwitchingBaseTable
def getVariableAttrs(self):
return getVariableAttrs(self.__class__)
The provided code snippet includes necessary dependencies for implementing the `dfs_base_table` function. Write a Python function `def dfs_base_table( root: BaseTable, root_accessor: Optional[str] = None, skip_root: bool = False, predicate: Optional[Callable[[SubTablePath], bool]] = None, iter_subtables_fn: Optional[ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] ] = None, ) -> Iterable[SubTablePath]` to solve the following problem:
Depth-first search tree of BaseTables. Args: root (BaseTable): the root of the tree. root_accessor (Optional[str]): attribute name for the root table, if any (mostly useful for debugging). skip_root (Optional[bool]): if True, the root itself is not visited, only its children. predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out paths. If True, the path is yielded and its subtables are added to the queue. If False, the path is skipped and its subtables are not traversed. iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]): function to iterate over subtables of a table. If None, the default BaseTable.iterSubTables() is used. Yields: SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples for each of the nodes in the tree. The last entry in a path is the current subtable, whereas preceding ones refer to its parent tables all the way up to the root.
Here is the function:
def dfs_base_table(
    root: BaseTable,
    root_accessor: Optional[str] = None,
    skip_root: bool = False,
    predicate: Optional[Callable[[SubTablePath], bool]] = None,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Depth-first (pre-order) traversal of a tree of BaseTables.

    Args:
        root (BaseTable): the root of the tree.
        root_accessor (Optional[str]): attribute name for the root table, if any
            (mostly useful for debugging).
        skip_root (Optional[bool]): if True, only the root's children are
            visited, not the root itself.
        predicate (Optional[Callable[[SubTablePath], bool]]): filter; when it
            returns False for a path, that path is neither yielded nor
            descended into.
        iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
            alternative enumerator of a table's subtables; defaults to
            BaseTable.iterSubTables().

    Yields:
        SubTablePath: one path per visited node; the last entry is the node
        itself, the preceding entries its ancestors up to the root.
    """

    def _push_front(frontier, new_entries):
        # Reverse before extendleft so sibling order is preserved and the
        # children are visited before anything already queued (depth-first).
        frontier.extendleft(reversed(new_entries))

    yield from _traverse_ot_data(
        root,
        root_accessor,
        skip_root,
        predicate,
        _push_front,
        iter_subtables_fn,
    )
175,450 | from collections import deque
from typing import Callable, Deque, Iterable, List, Optional, Tuple
from .otBase import BaseTable
class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]):
    def __str__(self) -> str:
        """Render the path as dotted attribute access, e.g. ``Lookup[0].SubTable[1]``."""
        parts = []
        for entry in self:
            if entry.index is None:
                parts.append(entry.name)
            else:
                parts.append(f"{entry.name}[{entry.index}]")
        return ".".join(parts)
def _traverse_ot_data(
    root: BaseTable,
    root_accessor: Optional[str],
    skip_root: bool,
    predicate: Optional[Callable[[SubTablePath], bool]],
    add_to_frontier_fn: AddToFrontierFn,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Shared walker behind dfs_base_table/bfs_base_table.

    `add_to_frontier_fn` decides the visiting order: pushing new paths to the
    front of the deque gives DFS, appending to the back gives BFS.
    """
    # No visited-set needed: otData offsets are forward-only, so the tree
    # cannot contain cycles.
    if root_accessor is None:
        root_accessor = type(root).__name__
    predicate = (lambda _path: True) if predicate is None else predicate
    iter_subtables_fn = (
        (lambda table: table.iterSubTables())
        if iter_subtables_fn is None
        else iter_subtables_fn
    )
    frontier: Deque[SubTablePath] = deque()
    root_entry = BaseTable.SubTableEntry(root_accessor, root)
    if skip_root:
        add_to_frontier_fn(
            frontier,
            [
                (root_entry, subtable_entry)
                for subtable_entry in iter_subtables_fn(root)
            ],
        )
    else:
        frontier.append((root_entry,))
    while frontier:
        # Each queued path is a tuple of SubTableEntry; the last one is the
        # node to visit next.
        path = frontier.popleft()
        if not predicate(path):
            continue
        yield SubTablePath(path)
        current = path[-1].value
        add_to_frontier_fn(
            frontier,
            [path + (entry,) for entry in iter_subtables_fn(current)],
        )
class Callable(BaseTypingInstance):
    def py__call__(self, arguments):
        """Infer the result of calling a ``Callable[[params], result]`` annotation."""
        # Generics layout convention: index 0 holds the parameter list,
        # index 1 holds the result type.
        manager = self._generics_manager
        try:
            params = manager[0]
            results = manager[1]
        except IndexError:
            debug.warning('Callable[...] defined without two arguments')
            return NO_VALUES
        # Imported lazily to avoid a circular import with the annotation module.
        from jedi.inference.gradual.annotation import infer_return_for_callable
        return infer_return_for_callable(arguments, params, results)

    def py__get__(self, instance, class_value):
        # Descriptor access does not specialize anything; hand back self.
        return ValueSet([self])
Optional: _SpecialForm = ...
class Iterable(Protocol[_T_co]):
    # Structural protocol (typeshed-style stub): anything whose __iter__
    # returns an Iterator[_T_co] matches; the "..." body is intentional.
    def __iter__(self) -> Iterator[_T_co]: ...
class BaseTable(object):
    """Generic base class for all OpenType (sub)tables."""

    def __getattr__(self, attr):
        # Lazy decompilation: if a 'reader' is stashed on the instance, the
        # table has not been parsed yet.  Parse it now, then retry the lookup.
        reader = self.__dict__.get("reader")
        if reader:
            del self.reader
            font = self.font
            del self.font
            self.decompile(reader, font)
            return getattr(self, attr)
        raise AttributeError(attr)

    def ensureDecompiled(self, recurse=False):
        # Force parsing of a lazily-loaded table and, when `recurse` is True,
        # of every subtable reachable below it.
        reader = self.__dict__.get("reader")
        if reader:
            del self.reader
            font = self.font
            del self.font
            self.decompile(reader, font)
        if recurse:
            for subtable in self.iterSubTables():
                subtable.value.ensureDecompiled(recurse)

    def __getstate__(self):
        # before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader
        # https://github.com/fonttools/fonttools/issues/2965
        if "reader" in self.__dict__:
            state = self.__dict__.copy()
            state["reader"] = self.__dict__["reader"].copy()
            return state
        return self.__dict__

    def getRecordSize(cls, reader):
        # Static byte size of one record, or NotImplemented when the size
        # cannot be computed without parsing.
        # NOTE(review): takes `cls` — presumably decorated @classmethod in the
        # original module; confirm upstream.
        totalSize = 0
        for conv in cls.converters:
            size = conv.getRecordSize(reader)
            if size is NotImplemented:
                return NotImplemented
            countValue = 1
            if conv.repeat:
                if conv.repeat in reader:
                    countValue = reader[conv.repeat] + conv.aux
                else:
                    return NotImplemented
            totalSize += size * countValue
        return totalSize

    def getConverters(self):
        # Converter list describing this table's binary layout.
        return self.converters

    def getConverterByName(self, name):
        return self.convertersByName[name]

    def populateDefaults(self, propagator=None):
        # Fill in defaults for missing fields so the table can be compiled
        # without every attribute having been set explicitly.
        for conv in self.getConverters():
            if conv.repeat:
                if not hasattr(self, conv.name):
                    setattr(self, conv.name, [])
                countValue = len(getattr(self, conv.name)) - conv.aux
                try:
                    count_conv = self.getConverterByName(conv.repeat)
                    setattr(self, conv.repeat, countValue)
                except KeyError:
                    # conv.repeat is a propagated count
                    if propagator and conv.repeat in propagator:
                        propagator[conv.repeat].setValue(countValue)
            else:
                if conv.aux and not eval(conv.aux, None, self.__dict__):
                    continue
                if hasattr(self, conv.name):
                    continue  # Warn if it should NOT be present?!
                if hasattr(conv, "writeNullOffset"):
                    setattr(self, conv.name, None)  # Warn?
                # elif not conv.isCount:
                #     # Warn?
                #     pass
                if hasattr(conv, "DEFAULT"):
                    # OptionalValue converters (e.g. VarIndex)
                    setattr(self, conv.name, conv.DEFAULT)

    def decompile(self, reader, font):
        # Parse binary data into attributes, driven by the converter list.
        self.readFormat(reader)
        table = {}
        self.__rawTable = table  # for debugging
        for conv in self.getConverters():
            # These converters depend on values read earlier in this table
            # (or propagated by a parent) and must be specialized first.
            if conv.name == "SubTable":
                conv = conv.getConverter(reader.tableTag, table["LookupType"])
            if conv.name == "ExtSubTable":
                conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"])
            if conv.name == "FeatureParams":
                conv = conv.getConverter(reader["FeatureTag"])
            if conv.name == "SubStruct":
                conv = conv.getConverter(reader.tableTag, table["MorphType"])
            try:
                if conv.repeat:
                    if isinstance(conv.repeat, int):
                        countValue = conv.repeat
                    elif conv.repeat in table:
                        countValue = table[conv.repeat]
                    else:
                        # conv.repeat is a propagated count
                        countValue = reader[conv.repeat]
                    countValue += conv.aux
                    table[conv.name] = conv.readArray(reader, font, table, countValue)
                else:
                    if conv.aux and not eval(conv.aux, None, table):
                        continue
                    table[conv.name] = conv.read(reader, font, table)
                    if conv.isPropagated:
                        reader[conv.name] = table[conv.name]
            except Exception as e:
                # Tag the exception with the field name to ease debugging.
                name = conv.name
                e.args = e.args + (name,)
                raise
        if hasattr(self, "postRead"):
            self.postRead(table, font)
        else:
            self.__dict__.update(table)
        del self.__rawTable  # succeeded, get rid of debugging info

    def compile(self, writer, font):
        # Serialize this table into `writer`, driven by the converter list.
        self.ensureDecompiled()
        # TODO Following hack to be removed by rewriting how FormatSwitching tables
        # are handled.
        # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
        if hasattr(self, "preWrite"):
            deleteFormat = not hasattr(self, "Format")
            table = self.preWrite(font)
            deleteFormat = deleteFormat and hasattr(self, "Format")
        else:
            deleteFormat = False
            table = self.__dict__.copy()
        # some count references may have been initialized in a custom preWrite; we set
        # these in the writer's state beforehand (instead of sequentially) so they will
        # be propagated to all nested subtables even if the count appears in the current
        # table only *after* the offset to the subtable that it is counting.
        for conv in self.getConverters():
            if conv.isCount and conv.isPropagated:
                value = table.get(conv.name)
                if isinstance(value, CountReference):
                    writer[conv.name] = value
        if hasattr(self, "sortCoverageLast"):
            writer.sortCoverageLast = 1
        if hasattr(self, "DontShare"):
            writer.DontShare = True
        if hasattr(self.__class__, "LookupType"):
            writer["LookupType"].setValue(self.__class__.LookupType)
        self.writeFormat(writer)
        for conv in self.getConverters():
            value = table.get(
                conv.name
            )  # TODO Handle defaults instead of defaulting to None!
            if conv.repeat:
                if value is None:
                    value = []
                countValue = len(value) - conv.aux
                if isinstance(conv.repeat, int):
                    assert len(value) == conv.repeat, "expected %d values, got %d" % (
                        conv.repeat,
                        len(value),
                    )
                elif conv.repeat in table:
                    CountReference(table, conv.repeat, value=countValue)
                else:
                    # conv.repeat is a propagated count
                    writer[conv.repeat].setValue(countValue)
                try:
                    conv.writeArray(writer, font, table, value)
                except Exception as e:
                    e.args = e.args + (conv.name + "[]",)
                    raise
            elif conv.isCount:
                # Special-case Count values.
                # Assumption: a Count field will *always* precede
                # the actual array(s).
                # We need a default value, as it may be set later by a nested
                # table. We will later store it here.
                # We add a reference: by the time the data is assembled
                # the Count value will be filled in.
                # We ignore the current count value since it will be recomputed,
                # unless it's a CountReference that was already initialized in a custom preWrite.
                if isinstance(value, CountReference):
                    ref = value
                    ref.size = conv.staticSize
                    writer.writeData(ref)
                    table[conv.name] = ref.getValue()
                else:
                    ref = writer.writeCountReference(table, conv.name, conv.staticSize)
                    table[conv.name] = None
                if conv.isPropagated:
                    writer[conv.name] = ref
            elif conv.isLookupType:
                # We make sure that subtables have the same lookup type,
                # and that the type is the same as the one set on the
                # Lookup object, if any is set.
                if conv.name not in table:
                    table[conv.name] = None
                ref = writer.writeCountReference(
                    table, conv.name, conv.staticSize, table[conv.name]
                )
                writer["LookupType"] = ref
            else:
                if conv.aux and not eval(conv.aux, None, table):
                    continue
                try:
                    conv.write(writer, font, table, value)
                except Exception as e:
                    name = value.__class__.__name__ if value is not None else conv.name
                    e.args = e.args + (name,)
                    raise
                if conv.isPropagated:
                    writer[conv.name] = value
        if deleteFormat:
            del self.Format

    def readFormat(self, reader):
        # No-op here; format-switching subclasses read their Format field.
        pass

    def writeFormat(self, writer):
        # No-op here; format-switching subclasses write their Format field.
        pass

    def toXML(self, xmlWriter, font, attrs=None, name=None):
        # Write this table as an XML element (tag defaults to the class name),
        # exposing the Format as an attribute when present.
        tableName = name if name else self.__class__.__name__
        if attrs is None:
            attrs = []
        if hasattr(self, "Format"):
            attrs = attrs + [("Format", self.Format)]
        xmlWriter.begintag(tableName, attrs)
        xmlWriter.newline()
        self.toXML2(xmlWriter, font)
        xmlWriter.endtag(tableName)
        xmlWriter.newline()

    def toXML2(self, xmlWriter, font):
        # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
        # This is because in TTX our parent writes our main tag, and in otBase.py we
        # do it ourselves. I think I'm getting schizophrenic...
        for conv in self.getConverters():
            if conv.repeat:
                value = getattr(self, conv.name, [])
                for i in range(len(value)):
                    item = value[i]
                    conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)])
            else:
                if conv.aux and not eval(conv.aux, None, vars(self)):
                    continue
                value = getattr(
                    self, conv.name, None
                )  # TODO Handle defaults instead of defaulting to None!
                conv.xmlWrite(xmlWriter, font, value, conv.name, [])

    def fromXML(self, name, attrs, content, font):
        # Reconstruct one field from its XML element; repeated fields are
        # accumulated into a list attribute.
        try:
            conv = self.getConverterByName(name)
        except KeyError:
            raise  # XXX on KeyError, raise nice error
        value = conv.xmlRead(attrs, content, font)
        if conv.repeat:
            seq = getattr(self, conv.name, None)
            if seq is None:
                seq = []
                setattr(self, conv.name, seq)
            seq.append(value)
        else:
            setattr(self, conv.name, value)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        # Both sides must be fully parsed before their __dict__s can compare.
        self.ensureDecompiled()
        other.ensureDecompiled()
        return self.__dict__ == other.__dict__

    class SubTableEntry(NamedTuple):
        """See BaseTable.iterSubTables()"""

        name: str
        value: "BaseTable"
        index: Optional[int] = None  # index into given array, None for single values

    def iterSubTables(self) -> Iterator[SubTableEntry]:
        """Yield (name, value, index) namedtuples for all subtables of current table.

        A sub-table is an instance of BaseTable (or subclass thereof) that is a child
        of self, the current parent table.
        The tuples also contain the attribute name (str) of the of parent table to get
        a subtable, and optionally, for lists of subtables (i.e. attributes associated
        with a converter that has a 'repeat'), an index into the list containing the
        given subtable value.
        This method can be useful to traverse trees of otTables.
        """
        for conv in self.getConverters():
            name = conv.name
            value = getattr(self, name, None)
            if value is None:
                continue
            if isinstance(value, BaseTable):
                yield self.SubTableEntry(name, value)
            elif isinstance(value, list):
                yield from (
                    self.SubTableEntry(name, v, index=i)
                    for i, v in enumerate(value)
                    if isinstance(v, BaseTable)
                )

    # instance (not @class)method for consistency with FormatSwitchingBaseTable
    def getVariableAttrs(self):
        return getVariableAttrs(self.__class__)
The provided code snippet includes necessary dependencies for implementing the `bfs_base_table` function. Write a Python function `def bfs_base_table( root: BaseTable, root_accessor: Optional[str] = None, skip_root: bool = False, predicate: Optional[Callable[[SubTablePath], bool]] = None, iter_subtables_fn: Optional[ Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] ] = None, ) -> Iterable[SubTablePath]` to solve the following problem:
Breadth-first search tree of BaseTables. Args: root (BaseTable): the root of the tree. root_accessor (Optional[str]): attribute name for the root table, if any (mostly useful for debugging). skip_root (Optional[bool]): if True, the root itself is not visited, only its children. predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out paths. If True, the path is yielded and its subtables are added to the queue. If False, the path is skipped and its subtables are not traversed. iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]): function to iterate over subtables of a table. If None, the default BaseTable.iterSubTables() is used. Yields: SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples for each of the nodes in the tree. The last entry in a path is the current subtable, whereas preceding ones refer to its parent tables all the way up to the root.
Here is the function:
def bfs_base_table(
    root: BaseTable,
    root_accessor: Optional[str] = None,
    skip_root: bool = False,
    predicate: Optional[Callable[[SubTablePath], bool]] = None,
    iter_subtables_fn: Optional[
        Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]
    ] = None,
) -> Iterable[SubTablePath]:
    """Breadth-first (level-order) traversal of a tree of BaseTables.

    Args:
        root (BaseTable): the root of the tree.
        root_accessor (Optional[str]): attribute name for the root table, if any
            (mostly useful for debugging).
        skip_root (Optional[bool]): if True, only the root's children are
            visited, not the root itself.
        predicate (Optional[Callable[[SubTablePath], bool]]): filter; when it
            returns False for a path, that path is neither yielded nor
            descended into.
        iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]):
            alternative enumerator of a table's subtables; defaults to
            BaseTable.iterSubTables().

    Yields:
        SubTablePath: one path per visited node; the last entry is the node
        itself, the preceding entries its ancestors up to the root.
    """

    def _push_back(frontier, new_entries):
        # Append to the tail of the deque: nodes are visited level by level.
        frontier.extend(new_entries)

    yield from _traverse_ot_data(
        root,
        root_accessor,
        skip_root,
        predicate,
        _push_back,
        iter_subtables_fn,
    )
175,451 | from collections import UserDict, deque
from functools import partial
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import array
import itertools
import logging
import struct
import sys
import fontTools.ttLib.tables.TupleVariation as tv
def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices):
    """Serialize one glyph's tuple variations as raw 'gvar' per-glyph data.

    Returns b"" when the glyph has no variations; otherwise the 4-byte
    GlyphVariationData header, the tuple headers, the serialized data, and a
    pad byte if needed to keep the total length even.
    """
    count, tuples, data = tv.compileTupleVariationStore(
        variations, pointCount, axisTags, sharedCoordIndices
    )
    if count == 0:
        return b""
    # Header: tupleVariationCount, then offsetToData (header + tuple headers).
    header = struct.pack(">HH", count, 4 + len(tuples))
    parts = [header, tuples, data]
    if (len(tuples) + len(data)) % 2:
        parts.append(b"\0")  # pad to an even byte boundary
    return b"".join(parts)
175,452 | from collections import UserDict, deque
from functools import partial
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import array
import itertools
import logging
import struct
import sys
import fontTools.ttLib.tables.TupleVariation as tv
def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
    """Parse one glyph's raw 'gvar' data into a list of tuple variations.

    Args:
        pointCount: number of points in the glyph (incl. phantom points).
        sharedTuples: shared peak tuples from the gvar table header.
        axisTags: ordered list of variation axis tags.
        data: the raw per-glyph GlyphVariationData bytes.

    Returns:
        A list of TupleVariation objects; empty if `data` is too short to
        contain the 4-byte GlyphVariationData header.
    """
    if len(data) < 4:
        return []
    # Header: tupleVariationCount, offsetToData (both uint16, big-endian).
    # (Removed an unused local that merely aliased offsetToData.)
    tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
    return tv.decompileTupleVariationStore(
        "gvar",
        axisTags,
        tupleVariationCount,
        pointCount,
        sharedTuples,
        data,
        4,
        offsetToData,
    )
175,453 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
def disassemble(aCode):
def writecode(tag, writer, instrs):
    """Emit `instrs` as disassembled text lines wrapped in an XML element `tag`."""
    writer.begintag(tag)
    writer.newline()
    for text_line in disassemble(instrs):
        writer.write(text_line)
        writer.newline()
    writer.endtag(tag)
    writer.newline()
175,454 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
def assemble(instrs):
    """Assemble textual graphite instructions back into bytecode.

    Unrecognized lines are silently skipped; parameter bytes follow each
    opcode according to its format string in aCode_map (-1 means a
    length-prefixed byte list, 0 means no parameters).
    """
    pieces = []
    for line in instrs:
        m = instre.match(line)
        if m is None or m.group(1) not in aCode_map:
            continue
        opcode, parmfmt = aCode_map[m.group(1)]
        pieces.append(struct.pack("B", opcode))
        if m.group(2):
            if parmfmt == 0:
                continue
            parms = [int(x) for x in re.split(r",\s*", m.group(2))]
            if parmfmt == -1:
                # Variable-length: a count byte followed by that many bytes.
                n = len(parms)
                pieces.append(struct.pack("%dB" % (n + 1), n, *parms))
            else:
                pieces.append(struct.pack(parmfmt, *parms))
    return b"".join(pieces)
def content_string(contents):
    """Concatenate the text pieces of an XML content list, skipping element tuples."""
    text = "".join(piece for piece in contents if not isinstance(piece, tuple))
    return text.strip()
def readcode(content):
    """Parse XML text content into instruction lines and assemble them to bytecode."""
    stripped = (ln.strip() for ln in content_string(content).split("\n"))
    return assemble([ln for ln in stripped if ln])
175,455 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
def writesimple(tag, self, writer, *attrkeys):
    """Write one self-closing XML tag whose attributes mirror `attrkeys` of `self`."""
    attrs = {key: getattr(self, key) for key in attrkeys}
    writer.simpletag(tag, **attrs)
    writer.newline()
175,456 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
safeEval = ast.literal_eval
def getSimple(self, attrs, *attr_list):
    """Copy the listed XML attributes onto `self` as ints (values parsed via safeEval)."""
    for key in attr_list:
        if key in attrs:
            setattr(self, key, int(safeEval(attrs[key])))
175,457 | from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
def wrapline(writer, dat, length=80):
    """Write the words in `dat` space-separated, starting a new output line
    once the accumulated text exceeds `length` characters."""
    line = ""
    for word in dat:
        if len(line) > length:
            # Flush without the trailing separator space.
            writer.write(line[:-1])
            writer.newline()
            line = ""
        line += word + " "
    if line:
        writer.write(line[:-1])
        writer.newline()
175,458 | from fontTools.ttLib.ttVisitor import TTVisitor
import fontTools.ttLib as ttLib
import fontTools.ttLib.tables.otBase as otBase
import fontTools.ttLib.tables.otTables as otTables
from fontTools.cffLib import VarStoreData
import fontTools.cffLib.specializer as cffSpecializer
from fontTools.varLib import builder
from fontTools.misc.fixedTools import otRound
from fontTools.ttLib.tables._g_l_y_f import VarComponentFlags
class ScalerVisitor(TTVisitor):
    # Visitor that multiplies coordinate-like values in a font by a constant
    # factor (used when changing units-per-em).
    def __init__(self, scaleFactor):
        # scaleFactor: multiplier applied to every visited value.
        self.scaleFactor = scaleFactor

    def scale(self, v):
        # otRound rounds the scaled value back to an integer font unit.
        return otRound(v * self.scaleFactor)
# NOTE(review): this bare tuple is almost certainly the argument of a
# stripped `@ScalerVisitor.register_attrs(...)` decorator; as written it
# is a no-op expression statement -- confirm against the original
# fontTools scaleUpem source.  Each entry pairs a table/record class
# with the numeric attribute names to scale when changing units-per-em.
(
    (ttLib.getTableClass("head"), ("unitsPerEm", "xMin", "yMin", "xMax", "yMax")),
    (ttLib.getTableClass("post"), ("underlinePosition", "underlineThickness")),
    # NOTE(review): ("defaultVertOriginY") is a bare string, not a
    # 1-tuple; this only works if the registration helper normalizes
    # bare strings to tuples -- verify.
    (ttLib.getTableClass("VORG"), ("defaultVertOriginY")),
    (
        ttLib.getTableClass("hhea"),
        (
            "ascent",
            "descent",
            "lineGap",
            "advanceWidthMax",
            "minLeftSideBearing",
            "minRightSideBearing",
            "xMaxExtent",
            "caretOffset",
        ),
    ),
    (
        ttLib.getTableClass("vhea"),
        (
            "ascent",
            "descent",
            "lineGap",
            "advanceHeightMax",
            "minTopSideBearing",
            "minBottomSideBearing",
            "yMaxExtent",
            "caretOffset",
        ),
    ),
    (
        ttLib.getTableClass("OS/2"),
        (
            "xAvgCharWidth",
            "ySubscriptXSize",
            "ySubscriptYSize",
            "ySubscriptXOffset",
            "ySubscriptYOffset",
            "ySuperscriptXSize",
            "ySuperscriptYSize",
            "ySuperscriptXOffset",
            "ySuperscriptYOffset",
            "yStrikeoutSize",
            "yStrikeoutPosition",
            "sTypoAscender",
            "sTypoDescender",
            "sTypoLineGap",
            "usWinAscent",
            "usWinDescent",
            "sxHeight",
            "sCapHeight",
        ),
    ),
    (
        otTables.ValueRecord,
        ("XAdvance", "YAdvance", "XPlacement", "YPlacement"),
    ),  # GPOS
    (otTables.Anchor, ("XCoordinate", "YCoordinate")),  # GPOS
    # NOTE(review): the next three second elements are bare strings too.
    (otTables.CaretValue, ("Coordinate")),  # GDEF
    (otTables.BaseCoord, ("Coordinate")),  # BASE
    (otTables.MathValueRecord, ("Value")),  # MATH
    (otTables.ClipBox, ("xMin", "yMin", "xMax", "yMax")),  # COLR
)
def visit(visitor, obj, attr, value):
    """Replace a single numeric attribute of *obj* with its scaled value."""
    scaled = visitor.scale(value)
    setattr(obj, attr, scaled)
# NOTE(review): stray expression -- likely the argument list of a
# stripped `@ScalerVisitor.register_attr(...)`-style decorator for the
# following function; on its own it builds a tuple and discards it.
(ttLib.getTableClass("hmtx"), ttLib.getTableClass("vmtx")), "metrics"
def visit(visitor, obj, attr, metrics):
    """Scale every (advance, sidebearing) pair of an hmtx/vmtx mapping.

    The mapping is updated in place; keys are glyph names.
    """
    for glyph_name, (advance, sidebearing) in metrics.items():
        metrics[glyph_name] = (visitor.scale(advance), visitor.scale(sidebearing))
def visit(visitor, obj, attr, VOriginRecords):
    """Scale each per-glyph vertical origin of a VORG mapping in place."""
    for glyph_name, origin in VOriginRecords.items():
        VOriginRecords[glyph_name] = visitor.scale(origin)
def visit(visitor, obj, attr, glyphs):
    """Scale the glyphs of a glyf table in place.

    *glyphs* maps glyph name -> glyph object.  For every glyph the
    bounding-box attributes are scaled first; then exactly one of three
    cases applies: composite glyphs get their component offsets scaled,
    variable-composite glyphs get their component transforms scaled, and
    simple glyphs get their outline coordinates scaled.
    """
    for g in glyphs.values():
        # Scale the bounding box if present (getattr default handles
        # glyphs without one).
        # NOTE(review): this loop variable shadows the `attr` parameter;
        # `attr` is never used afterwards, so it is harmless, but a
        # rename would be clearer.
        for attr in ("xMin", "xMax", "yMin", "yMax"):
            v = getattr(g, attr, None)
            if v is not None:
                setattr(g, attr, visitor.scale(v))
        if g.isComposite():
            # Composite glyph: only the x/y placement of each component
            # is scaled here; the referenced glyphs are scaled when they
            # are visited themselves.
            for component in g.components:
                component.x = visitor.scale(component.x)
                component.y = visitor.scale(component.y)
            continue
        if g.isVarComposite():
            # Variable-composite glyph: scale only the translation and
            # transform-center terms; the remaining transform fields
            # (rotation/scale/skew) are not touched here.
            for component in g.components:
                for attr in ("translateX", "translateY", "tCenterX", "tCenterY"):
                    v = getattr(component.transform, attr)
                    setattr(component.transform, attr, visitor.scale(v))
            continue
        if hasattr(g, "coordinates"):
            # Simple glyph: scale every outline point in place.
            coordinates = g.coordinates
            for i, (x, y) in enumerate(coordinates):
                coordinates[i] = visitor.scale(x), visitor.scale(y)
def visit(visitor, obj, attr, variations):
    """Scale gvar variation deltas in place.

    For ordinary glyphs every non-None (dx, dy) delta is scaled
    directly.  For variable-composite glyphs the delta array is laid out
    per component according to its flags, so the components are walked
    and only entries scaled here that the code treats as coordinate
    values (translate, transform-center, and the trailing phantom
    points); rotation/scale/skew slots are skipped.
    """
    # VarComposites are a pain to handle :-(
    # NOTE(review): assumes the visitor exposes the font being processed
    # as `visitor.font` -- presumably set by the TTVisitor driver;
    # confirm against the visitor base class.
    glyfTable = visitor.font["glyf"]
    for glyphName, varlist in variations.items():
        glyph = glyfTable[glyphName]
        isVarComposite = glyph.isVarComposite()
        for var in varlist:
            coordinates = var.coordinates
            if not isVarComposite:
                # Regular glyph: deltas are (dx, dy) pairs; None marks
                # an inferred delta with nothing to scale.
                for i, xy in enumerate(coordinates):
                    if xy is None:
                        continue
                    coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
                continue
            # VarComposite glyph
            # `i` indexes into `coordinates` and must stay in sync with
            # the flag checks below -- each flag group consumes slots in
            # this exact order.
            i = 0
            for component in glyph.components:
                if component.flags & VarComponentFlags.AXES_HAVE_VARIATION:
                    # One slot per axis-location value; skipped unscaled.
                    i += len(component.location)
                if component.flags & (
                    VarComponentFlags.HAVE_TRANSLATE_X
                    | VarComponentFlags.HAVE_TRANSLATE_Y
                ):
                    # Translation slot: both members scaled.
                    xy = coordinates[i]
                    coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
                    i += 1
                if component.flags & VarComponentFlags.HAVE_ROTATION:
                    i += 1  # rotation slot: skipped unscaled
                if component.flags & (
                    VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
                ):
                    i += 1  # scale slot: skipped unscaled
                if component.flags & (
                    VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y
                ):
                    i += 1  # skew slot: skipped unscaled
                if component.flags & (
                    VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
                ):
                    # Transform-center slot: both members scaled.
                    xy = coordinates[i]
                    coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
                    i += 1
            # Phantom points
            # The four trailing entries are always present and scaled;
            # the assert guards the slot accounting above.
            assert i + 4 == len(coordinates)
            for i in range(i, len(coordinates)):
                xy = coordinates[i]
                coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
def visit(visitor, obj, attr, kernTables):
    """Scale every kerning value in each subtable of a kern table."""
    for subtable in kernTables:
        pairs = subtable.kernTable
        for pair, value in pairs.items():
            pairs[pair] = visitor.scale(value)
def visit(visitor, obj, attr, cff):
    """Scale a CFF/CFF2 table in place.

    Desubroutinizes first so every charstring can be rewritten
    independently, scales all charstring operands, then scales the
    scalar/list fields on the top dict and on each private dict, and
    divides FontMatrix by the factor (presumably to compensate for the
    enlarged outlines).
    """
    cff.desubroutinize()
    topDict = cff.topDictIndex[0]
    varStore = getattr(topDict, "VarStore", None)
    # CFF2 variable fonts need the region count to parse blended
    # operands; plain CFF has no VarStore and passes None.
    getNumRegions = varStore.getNumRegions if varStore is not None else None
    privates = set()
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            # Collect private dicts so each is scaled exactly once in
            # the loop at the bottom (several glyphs share one).
            privates.add(c.private)
            commands = cffSpecializer.programToCommands(
                c.program, getNumRegions=getNumRegions
            )
            for op, args in commands:
                if op == "vsindex":
                    # vsindex's operand is an index, not a coordinate.
                    continue
                # NOTE(review): _cff_scale is defined elsewhere in this
                # module; presumably it scales `args` in place.
                _cff_scale(visitor, args)
            c.program[:] = cffSpecializer.commandsToProgram(commands)
    # Annoying business of scaling numbers that do not matter whatsoever
    for attr in (
        "UnderlinePosition",
        "UnderlineThickness",
        "FontBBox",
        "StrokeWidth",
    ):
        value = getattr(topDict, attr, None)
        if value is None:
            continue
        if isinstance(value, list):
            _cff_scale(visitor, value)
        else:
            setattr(topDict, attr, visitor.scale(value))
    # FontMatrix is divided (not multiplied) by the factor -- the
    # inverse of the outline scaling above.
    for i in range(6):
        topDict.FontMatrix[i] /= visitor.scaleFactor
    for private in privates:
        for attr in (
            "BlueValues",
            "OtherBlues",
            "FamilyBlues",
            "FamilyOtherBlues",
            # "BlueScale",
            # "BlueShift",
            # "BlueFuzz",
            "StdHW",
            "StdVW",
            "StemSnapH",
            "StemSnapV",
            "defaultWidthX",
            "nominalWidthX",
        ):
            value = getattr(private, attr, None)
            if value is None:
                continue
            if isinstance(value, list):
                _cff_scale(visitor, value)
            else:
                setattr(private, attr, visitor.scale(value))
def visit(visitor, varData):
    """Scale every delta in a VarData table, then refresh its word sizes.

    calculateNumShorts() must run after the rewrite because scaled
    deltas may need a different storage width.
    """
    for deltas in varData.Item:
        deltas[:] = [visitor.scale(d) for d in deltas]
    varData.calculateNumShorts()
def visit(visitor, record):
    """Interpose a scaling Paint above the record's existing Paint graph.

    NOTE(review): the decorator registering this visitor for a specific
    record type was lost in extraction; returning True presumably tells
    the visitor driver to keep descending -- confirm against the
    TTVisitor contract.
    """
    oldPaint = record.Paint
    # _setup_scale_paint (defined elsewhere in this module) presumably
    # configures `scale` as a scaling Paint with the given factor.
    scale = otTables.Paint()
    _setup_scale_paint(scale, visitor.scaleFactor)
    # The old graph becomes the child of the new scaling node.
    scale.Paint = oldPaint
    record.Paint = scale
    return True
def visit(visitor, paint):
    """Counter-scale the subtree under a PaintGlyph node in place.

    Non-PaintGlyph nodes are left alone (return True -- presumably
    "keep traversing").  A PaintGlyph is cloned into a new node, the
    original node is converted into an inverse-scale wrapper around the
    clone, and False is returned after manually recursing -- presumably
    to stop the driver from revisiting the moved subtree.  Confirm the
    return-value semantics against the TTVisitor contract.
    """
    if paint.Format != otTables.PaintFormat.PaintGlyph:
        return True
    # Copy the PaintGlyph's fields onto a fresh node...
    newPaint = otTables.Paint()
    newPaint.Format = paint.Format
    newPaint.Paint = paint.Paint
    newPaint.Glyph = paint.Glyph
    # ...and strip them from the original, which is repurposed below.
    del paint.Paint
    del paint.Glyph
    # Turn the original node into a scale by the INVERSE factor,
    # wrapping the clone (1 / scaleFactor undoes the glyph scaling).
    _setup_scale_paint(paint, 1 / visitor.scaleFactor)
    paint.Paint = newPaint
    # Manually visit the moved child graph, then halt the driver here.
    visitor.visit(newPaint.Paint)
    return False
The provided code snippet includes necessary dependencies for implementing the `scale_upem` function. Write a Python function `def scale_upem(font, new_upem)` to solve the following problem:
Change the units-per-EM of font to the new value.
Here is the function:
def scale_upem(font, new_upem):
    """Change the units-per-EM of font to the new value."""
    current_upem = font["head"].unitsPerEm
    scaler = ScalerVisitor(new_upem / current_upem)
    scaler.visit(font)
175,459 | from io import BytesIO
from types import SimpleNamespace
from fontTools.misc.textTools import Tag
from fontTools.misc import sstruct
from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
import struct
from collections import OrderedDict
import logging
# Default zlib level used by compress(): a speed/size compromise.
ZLIB_COMPRESSION_LEVEL = 6
# When True, compress() routes through the zopfli library instead of
# zlib (see the branch in compress()).
USE_ZOPFLI = False
# Maps zlib-style levels 1-9 to zopfli iteration counts.
ZOPFLI_LEVELS = {
    # 0: 0, # can't do 0 iterations...
    1: 1,
    2: 3,
    3: 5,
    4: 8,
    5: 10,
    6: 15,
    7: 25,
    8: 50,
    9: 100,
}
The provided code snippet includes necessary dependencies for implementing the `compress` function. Write a Python function `def compress(data, level=ZLIB_COMPRESSION_LEVEL)` to solve the following problem:
Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True, zopfli is used instead of the zlib module. The compression 'level' must be between 0 and 9. 1 gives best speed, 9 gives best compression (0 gives no compression at all). The default value is a compromise between speed and compression (6).
Here is the function:
def compress(data, level=ZLIB_COMPRESSION_LEVEL):
    """Compress 'data' to Zlib format.

    If the module-level 'USE_ZOPFLI' flag is True, the zopfli library is
    used instead of the zlib module.  The compression 'level' must be
    between 0 and 9: 1 gives best speed, 9 gives best compression, 0
    gives no compression at all.  The default (6) is a compromise
    between speed and compression.
    """
    if level < 0 or level > 9:
        raise ValueError("Bad compression level: %s" % level)
    # Zopfli cannot express "no compression", so level 0 always goes
    # through zlib.
    if USE_ZOPFLI and level != 0:
        from zopfli.zlib import compress

        return compress(data, numiterations=ZOPFLI_LEVELS[level])
    from zlib import compress

    return compress(data, level)
175,460 | from io import BytesIO
from types import SimpleNamespace
from fontTools.misc.textTools import Tag
from fontTools.misc import sstruct
from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
import struct
from collections import OrderedDict
import logging
ttcHeaderFormat = """
> # big endian
TTCTag: 4s # "ttcf"
Version: L # 0x00010000 or 0x00020000
numFonts: L # number of fonts
# OffsetTable[numFonts]: L # array with offsets from beginning of file
# ulDsigTag: L # version 2.0 only
# ulDsigLength: L # version 2.0 only
# ulDsigOffset: L # version 2.0 only
"""
ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat)
class SimpleNamespace:
def __init__(self, **kwargs: Any) -> None:
def __getattribute__(self, name: str) -> Any:
def __setattr__(self, name: str, value: Any) -> None:
def __delattr__(self, name: str) -> None:
class TTLibError(Exception):
def readTTCHeader(file):
    """Read and validate the 'ttcf' header at the start of *file*.

    Returns a SimpleNamespace with the unpacked header fields plus an
    `offsetTable` tuple of per-font offsets.  Raises TTLibError when the
    file is too short or does not carry the 'ttcf' tag.
    """
    file.seek(0)
    data = file.read(ttcHeaderSize)
    if len(data) != ttcHeaderSize:
        raise TTLibError("Not a Font Collection (not enough data)")
    header = SimpleNamespace()
    sstruct.unpack(ttcHeaderFormat, data, header)
    if header.TTCTag != "ttcf":
        raise TTLibError("Not a Font Collection")
    assert header.Version in (0x00010000, 0x00020000), (
        "unrecognized TTC version 0x%08x" % header.Version
    )
    # One 32-bit offset per font follows the fixed header.
    header.offsetTable = struct.unpack(
        ">%dL" % header.numFonts, file.read(header.numFonts * 4)
    )
    if header.Version == 0x00020000:
        pass  # ignoring version 2.0 signatures
    return header
175,461 | from io import BytesIO
from types import SimpleNamespace
from fontTools.misc.textTools import Tag
from fontTools.misc import sstruct
from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
import struct
from collections import OrderedDict
import logging
ttcHeaderFormat = """
> # big endian
TTCTag: 4s # "ttcf"
Version: L # 0x00010000 or 0x00020000
numFonts: L # number of fonts
# OffsetTable[numFonts]: L # array with offsets from beginning of file
# ulDsigTag: L # version 2.0 only
# ulDsigLength: L # version 2.0 only
# ulDsigOffset: L # version 2.0 only
"""
class SimpleNamespace:
def __init__(self, **kwargs: Any) -> None:
def __getattribute__(self, name: str) -> Any:
def __setattr__(self, name: str, value: Any) -> None:
def __delattr__(self, name: str) -> None:
def writeTTCHeader(file, numFonts):
    """Write a version-1.0 'ttcf' header for *numFonts* fonts to *file*.

    The per-font offset table is written as zero placeholders; the
    caller patches the real offsets later.  Returns the file position of
    the offset table.
    """
    header = SimpleNamespace()
    header.TTCTag = "ttcf"
    header.Version = 0x00010000
    header.numFonts = numFonts
    file.seek(0)
    file.write(sstruct.pack(ttcHeaderFormat, header))
    offset = file.tell()
    file.write(struct.pack(">%dL" % numFonts, *([0] * numFonts)))
    return offset
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.