ZTWHHH committed on
Commit
7dd5b35
·
verified ·
1 Parent(s): 7803540

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py +5 -0
  2. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/BitmapGlyphMetrics.py +64 -0
  3. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_B_L_C_.py +9 -0
  4. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py +13 -0
  5. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py +157 -0
  6. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py +296 -0
  7. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py +17 -0
  8. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/DefaultTable.py +49 -0
  9. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py +710 -0
  10. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py +42 -0
  11. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py +5 -0
  12. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py +5 -0
  13. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py +234 -0
  14. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_o_c.py +84 -0
  15. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/H_V_A_R_.py +5 -0
  16. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py +5 -0
  17. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/L_T_S_H_.py +48 -0
  18. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/M_E_T_A_.py +345 -0
  19. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/O_S_2f_2.py +745 -0
  20. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py +5 -0
  21. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/S_V_G_.py +215 -0
  22. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/S__i_l_f.py +1037 -0
  23. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_J_.py +5 -0
  24. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py +5 -0
  25. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py +164 -0
  26. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__3.py +20 -0
  27. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py +846 -0
  28. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/V_A_R_C_.py +5 -0
  29. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__init__.py +97 -0
  30. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/C_F_F__2.cpython-310.pyc +0 -0
  31. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/C_O_L_R_.cpython-310.pyc +0 -0
  32. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc +0 -0
  33. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc +0 -0
  34. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_o_c.cpython-310.pyc +0 -0
  35. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc +0 -0
  36. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_A_T_H_.cpython-310.pyc +0 -0
  37. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/O_S_2f_2.cpython-310.pyc +0 -0
  38. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_l.cpython-310.pyc +0 -0
  39. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_P_.cpython-310.pyc +0 -0
  40. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc +0 -0
  41. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc +0 -0
  42. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc +0 -0
  43. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_v_a_r.cpython-310.pyc +0 -0
  44. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_h_h_e_a.cpython-310.pyc +0 -0
  45. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc +0 -0
  46. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_x.cpython-310.pyc +0 -0
  47. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otTables.cpython-310.pyc +0 -0
  48. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/_b_s_l_n.py +6 -0
  49. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_m_a_p.py +1576 -0
  50. valley/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_v_a_r.py +86 -0
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
+ class table_B_A_S_E_(BaseTTXConverter):
5
+ pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/BitmapGlyphMetrics.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Since bitmap glyph metrics are shared between EBLC and EBDT
2
+ # this class gets its own python file.
3
+ from fontTools.misc import sstruct
4
+ from fontTools.misc.textTools import safeEval
5
+ import logging
6
+
7
+
8
+ log = logging.getLogger(__name__)
9
+
10
+ bigGlyphMetricsFormat = """
11
+ > # big endian
12
+ height: B
13
+ width: B
14
+ horiBearingX: b
15
+ horiBearingY: b
16
+ horiAdvance: B
17
+ vertBearingX: b
18
+ vertBearingY: b
19
+ vertAdvance: B
20
+ """
21
+
22
+ smallGlyphMetricsFormat = """
23
+ > # big endian
24
+ height: B
25
+ width: B
26
+ BearingX: b
27
+ BearingY: b
28
+ Advance: B
29
+ """
30
+
31
+
32
+ class BitmapGlyphMetrics(object):
33
+ def toXML(self, writer, ttFont):
34
+ writer.begintag(self.__class__.__name__)
35
+ writer.newline()
36
+ for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
37
+ writer.simpletag(metricName, value=getattr(self, metricName))
38
+ writer.newline()
39
+ writer.endtag(self.__class__.__name__)
40
+ writer.newline()
41
+
42
+ def fromXML(self, name, attrs, content, ttFont):
43
+ metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
44
+ for element in content:
45
+ if not isinstance(element, tuple):
46
+ continue
47
+ name, attrs, content = element
48
+ # Make sure this is a metric that is needed by GlyphMetrics.
49
+ if name in metricNames:
50
+ vars(self)[name] = safeEval(attrs["value"])
51
+ else:
52
+ log.warning(
53
+ "unknown name '%s' being ignored in %s.",
54
+ name,
55
+ self.__class__.__name__,
56
+ )
57
+
58
+
59
+ class BigGlyphMetrics(BitmapGlyphMetrics):
60
+ binaryFormat = bigGlyphMetricsFormat
61
+
62
+
63
+ class SmallGlyphMetrics(BitmapGlyphMetrics):
64
+ binaryFormat = smallGlyphMetricsFormat
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_B_L_C_.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2013 Google, Inc. All Rights Reserved.
2
+ #
3
+ # Google Author(s): Matt Fontaine
4
+
5
+ from . import E_B_L_C_
6
+
7
+
8
+ class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
9
+ dependencies = ["CBDT"]
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import BytesIO
2
+ from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
3
+
4
+
5
+ class table_C_F_F__2(table_C_F_F_):
6
+ def decompile(self, data, otFont):
7
+ self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
8
+ assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
9
+
10
+ def compile(self, otFont):
11
+ f = BytesIO()
12
+ self.cff.compile(f, otFont, isCFF2=True)
13
+ return f.getvalue()
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2013 Google, Inc. All Rights Reserved.
2
+ #
3
+ # Google Author(s): Behdad Esfahbod
4
+
5
+ from fontTools.misc.textTools import safeEval
6
+ from . import DefaultTable
7
+
8
+
9
+ class table_C_O_L_R_(DefaultTable.DefaultTable):
10
+ """This table is structured so that you can treat it like a dictionary keyed by glyph name.
11
+
12
+ ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.
13
+
14
+ ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
15
+ """
16
+
17
+ @staticmethod
18
+ def _decompileColorLayersV0(table):
19
+ if not table.LayerRecordArray:
20
+ return {}
21
+ colorLayerLists = {}
22
+ layerRecords = table.LayerRecordArray.LayerRecord
23
+ numLayerRecords = len(layerRecords)
24
+ for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
25
+ baseGlyph = baseRec.BaseGlyph
26
+ firstLayerIndex = baseRec.FirstLayerIndex
27
+ numLayers = baseRec.NumLayers
28
+ assert firstLayerIndex + numLayers <= numLayerRecords
29
+ layers = []
30
+ for i in range(firstLayerIndex, firstLayerIndex + numLayers):
31
+ layerRec = layerRecords[i]
32
+ layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
33
+ colorLayerLists[baseGlyph] = layers
34
+ return colorLayerLists
35
+
36
+ def _toOTTable(self, ttFont):
37
+ from . import otTables
38
+ from fontTools.colorLib.builder import populateCOLRv0
39
+
40
+ tableClass = getattr(otTables, self.tableTag)
41
+ table = tableClass()
42
+ table.Version = self.version
43
+
44
+ populateCOLRv0(
45
+ table,
46
+ {
47
+ baseGlyph: [(layer.name, layer.colorID) for layer in layers]
48
+ for baseGlyph, layers in self.ColorLayers.items()
49
+ },
50
+ glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
51
+ )
52
+ return table
53
+
54
+ def decompile(self, data, ttFont):
55
+ from .otBase import OTTableReader
56
+ from . import otTables
57
+
58
+ # We use otData to decompile, but we adapt the decompiled otTables to the
59
+ # existing COLR v0 API for backward compatibility.
60
+ reader = OTTableReader(data, tableTag=self.tableTag)
61
+ tableClass = getattr(otTables, self.tableTag)
62
+ table = tableClass()
63
+ table.decompile(reader, ttFont)
64
+
65
+ self.version = table.Version
66
+ if self.version == 0:
67
+ self.ColorLayers = self._decompileColorLayersV0(table)
68
+ else:
69
+ # for new versions, keep the raw otTables around
70
+ self.table = table
71
+
72
+ def compile(self, ttFont):
73
+ from .otBase import OTTableWriter
74
+
75
+ if hasattr(self, "table"):
76
+ table = self.table
77
+ else:
78
+ table = self._toOTTable(ttFont)
79
+
80
+ writer = OTTableWriter(tableTag=self.tableTag)
81
+ table.compile(writer, ttFont)
82
+ return writer.getAllData()
83
+
84
+ def toXML(self, writer, ttFont):
85
+ if hasattr(self, "table"):
86
+ self.table.toXML2(writer, ttFont)
87
+ else:
88
+ writer.simpletag("version", value=self.version)
89
+ writer.newline()
90
+ for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
91
+ writer.begintag("ColorGlyph", name=baseGlyph)
92
+ writer.newline()
93
+ for layer in self.ColorLayers[baseGlyph]:
94
+ layer.toXML(writer, ttFont)
95
+ writer.endtag("ColorGlyph")
96
+ writer.newline()
97
+
98
+ def fromXML(self, name, attrs, content, ttFont):
99
+ if name == "version": # old COLR v0 API
100
+ setattr(self, name, safeEval(attrs["value"]))
101
+ elif name == "ColorGlyph":
102
+ if not hasattr(self, "ColorLayers"):
103
+ self.ColorLayers = {}
104
+ glyphName = attrs["name"]
105
+ for element in content:
106
+ if isinstance(element, str):
107
+ continue
108
+ layers = []
109
+ for element in content:
110
+ if isinstance(element, str):
111
+ continue
112
+ layer = LayerRecord()
113
+ layer.fromXML(element[0], element[1], element[2], ttFont)
114
+ layers.append(layer)
115
+ self.ColorLayers[glyphName] = layers
116
+ else: # new COLR v1 API
117
+ from . import otTables
118
+
119
+ if not hasattr(self, "table"):
120
+ tableClass = getattr(otTables, self.tableTag)
121
+ self.table = tableClass()
122
+ self.table.fromXML(name, attrs, content, ttFont)
123
+ self.table.populateDefaults()
124
+ self.version = self.table.Version
125
+
126
+ def __getitem__(self, glyphName):
127
+ if not isinstance(glyphName, str):
128
+ raise TypeError(f"expected str, found {type(glyphName).__name__}")
129
+ return self.ColorLayers[glyphName]
130
+
131
+ def __setitem__(self, glyphName, value):
132
+ if not isinstance(glyphName, str):
133
+ raise TypeError(f"expected str, found {type(glyphName).__name__}")
134
+ if value is not None:
135
+ self.ColorLayers[glyphName] = value
136
+ elif glyphName in self.ColorLayers:
137
+ del self.ColorLayers[glyphName]
138
+
139
+ def __delitem__(self, glyphName):
140
+ del self.ColorLayers[glyphName]
141
+
142
+
143
+ class LayerRecord(object):
144
+ def __init__(self, name=None, colorID=None):
145
+ self.name = name
146
+ self.colorID = colorID
147
+
148
+ def toXML(self, writer, ttFont):
149
+ writer.simpletag("layer", name=self.name, colorID=self.colorID)
150
+ writer.newline()
151
+
152
+ def fromXML(self, eltname, attrs, content, ttFont):
153
+ for name, value in attrs.items():
154
+ if name == "name":
155
+ setattr(self, name, value)
156
+ else:
157
+ setattr(self, name, safeEval(value))
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2013 Google, Inc. All Rights Reserved.
2
+ #
3
+ # Google Author(s): Behdad Esfahbod
4
+
5
+ from fontTools.misc.textTools import bytesjoin, safeEval
6
+ from . import DefaultTable
7
+ import array
8
+ from collections import namedtuple
9
+ import struct
10
+ import sys
11
+
12
+
13
+ class table_C_P_A_L_(DefaultTable.DefaultTable):
14
+ NO_NAME_ID = 0xFFFF
15
+ DEFAULT_PALETTE_TYPE = 0
16
+
17
+ def __init__(self, tag=None):
18
+ DefaultTable.DefaultTable.__init__(self, tag)
19
+ self.palettes = []
20
+ self.paletteTypes = []
21
+ self.paletteLabels = []
22
+ self.paletteEntryLabels = []
23
+
24
+ def decompile(self, data, ttFont):
25
+ (
26
+ self.version,
27
+ self.numPaletteEntries,
28
+ numPalettes,
29
+ numColorRecords,
30
+ goffsetFirstColorRecord,
31
+ ) = struct.unpack(">HHHHL", data[:12])
32
+ assert (
33
+ self.version <= 1
34
+ ), "Version of CPAL table is higher than I know how to handle"
35
+ self.palettes = []
36
+ pos = 12
37
+ for i in range(numPalettes):
38
+ startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
39
+ assert startIndex + self.numPaletteEntries <= numColorRecords
40
+ pos += 2
41
+ palette = []
42
+ ppos = goffsetFirstColorRecord + startIndex * 4
43
+ for j in range(self.numPaletteEntries):
44
+ palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
45
+ ppos += 4
46
+ self.palettes.append(palette)
47
+ if self.version == 0:
48
+ offsetToPaletteTypeArray = 0
49
+ offsetToPaletteLabelArray = 0
50
+ offsetToPaletteEntryLabelArray = 0
51
+ else:
52
+ pos = 12 + numPalettes * 2
53
+ (
54
+ offsetToPaletteTypeArray,
55
+ offsetToPaletteLabelArray,
56
+ offsetToPaletteEntryLabelArray,
57
+ ) = struct.unpack(">LLL", data[pos : pos + 12])
58
+ self.paletteTypes = self._decompileUInt32Array(
59
+ data,
60
+ offsetToPaletteTypeArray,
61
+ numPalettes,
62
+ default=self.DEFAULT_PALETTE_TYPE,
63
+ )
64
+ self.paletteLabels = self._decompileUInt16Array(
65
+ data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
66
+ )
67
+ self.paletteEntryLabels = self._decompileUInt16Array(
68
+ data,
69
+ offsetToPaletteEntryLabelArray,
70
+ self.numPaletteEntries,
71
+ default=self.NO_NAME_ID,
72
+ )
73
+
74
+ def _decompileUInt16Array(self, data, offset, numElements, default=0):
75
+ if offset == 0:
76
+ return [default] * numElements
77
+ result = array.array("H", data[offset : offset + 2 * numElements])
78
+ if sys.byteorder != "big":
79
+ result.byteswap()
80
+ assert len(result) == numElements, result
81
+ return result.tolist()
82
+
83
+ def _decompileUInt32Array(self, data, offset, numElements, default=0):
84
+ if offset == 0:
85
+ return [default] * numElements
86
+ result = array.array("I", data[offset : offset + 4 * numElements])
87
+ if sys.byteorder != "big":
88
+ result.byteswap()
89
+ assert len(result) == numElements, result
90
+ return result.tolist()
91
+
92
+ def compile(self, ttFont):
93
+ colorRecordIndices, colorRecords = self._compileColorRecords()
94
+ paletteTypes = self._compilePaletteTypes()
95
+ paletteLabels = self._compilePaletteLabels()
96
+ paletteEntryLabels = self._compilePaletteEntryLabels()
97
+ numColorRecords = len(colorRecords) // 4
98
+ offsetToFirstColorRecord = 12 + len(colorRecordIndices)
99
+ if self.version >= 1:
100
+ offsetToFirstColorRecord += 12
101
+ header = struct.pack(
102
+ ">HHHHL",
103
+ self.version,
104
+ self.numPaletteEntries,
105
+ len(self.palettes),
106
+ numColorRecords,
107
+ offsetToFirstColorRecord,
108
+ )
109
+ if self.version == 0:
110
+ dataList = [header, colorRecordIndices, colorRecords]
111
+ else:
112
+ pos = offsetToFirstColorRecord + len(colorRecords)
113
+ if len(paletteTypes) == 0:
114
+ offsetToPaletteTypeArray = 0
115
+ else:
116
+ offsetToPaletteTypeArray = pos
117
+ pos += len(paletteTypes)
118
+ if len(paletteLabels) == 0:
119
+ offsetToPaletteLabelArray = 0
120
+ else:
121
+ offsetToPaletteLabelArray = pos
122
+ pos += len(paletteLabels)
123
+ if len(paletteEntryLabels) == 0:
124
+ offsetToPaletteEntryLabelArray = 0
125
+ else:
126
+ offsetToPaletteEntryLabelArray = pos
127
+ pos += len(paletteLabels)
128
+ header1 = struct.pack(
129
+ ">LLL",
130
+ offsetToPaletteTypeArray,
131
+ offsetToPaletteLabelArray,
132
+ offsetToPaletteEntryLabelArray,
133
+ )
134
+ dataList = [
135
+ header,
136
+ colorRecordIndices,
137
+ header1,
138
+ colorRecords,
139
+ paletteTypes,
140
+ paletteLabels,
141
+ paletteEntryLabels,
142
+ ]
143
+ return bytesjoin(dataList)
144
+
145
+ def _compilePalette(self, palette):
146
+ assert len(palette) == self.numPaletteEntries
147
+ pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
148
+ return bytesjoin([pack(color) for color in palette])
149
+
150
+ def _compileColorRecords(self):
151
+ colorRecords, colorRecordIndices, pool = [], [], {}
152
+ for palette in self.palettes:
153
+ packedPalette = self._compilePalette(palette)
154
+ if packedPalette in pool:
155
+ index = pool[packedPalette]
156
+ else:
157
+ index = len(colorRecords)
158
+ colorRecords.append(packedPalette)
159
+ pool[packedPalette] = index
160
+ colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
161
+ return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
162
+
163
+ def _compilePaletteTypes(self):
164
+ if self.version == 0 or not any(self.paletteTypes):
165
+ return b""
166
+ assert len(self.paletteTypes) == len(self.palettes)
167
+ result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
168
+ assert len(result) == 4 * len(self.palettes)
169
+ return result
170
+
171
+ def _compilePaletteLabels(self):
172
+ if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
173
+ return b""
174
+ assert len(self.paletteLabels) == len(self.palettes)
175
+ result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
176
+ assert len(result) == 2 * len(self.palettes)
177
+ return result
178
+
179
+ def _compilePaletteEntryLabels(self):
180
+ if self.version == 0 or all(
181
+ l == self.NO_NAME_ID for l in self.paletteEntryLabels
182
+ ):
183
+ return b""
184
+ assert len(self.paletteEntryLabels) == self.numPaletteEntries
185
+ result = bytesjoin(
186
+ [struct.pack(">H", label) for label in self.paletteEntryLabels]
187
+ )
188
+ assert len(result) == 2 * self.numPaletteEntries
189
+ return result
190
+
191
+ def toXML(self, writer, ttFont):
192
+ numPalettes = len(self.palettes)
193
+ paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
194
+ paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
195
+ writer.simpletag("version", value=self.version)
196
+ writer.newline()
197
+ writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
198
+ writer.newline()
199
+ for index, palette in enumerate(self.palettes):
200
+ attrs = {"index": index}
201
+ paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
202
+ paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
203
+ if self.version > 0 and paletteLabel != self.NO_NAME_ID:
204
+ attrs["label"] = paletteLabel
205
+ if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
206
+ attrs["type"] = paletteType
207
+ writer.begintag("palette", **attrs)
208
+ writer.newline()
209
+ if (
210
+ self.version > 0
211
+ and paletteLabel != self.NO_NAME_ID
212
+ and ttFont
213
+ and "name" in ttFont
214
+ ):
215
+ name = ttFont["name"].getDebugName(paletteLabel)
216
+ if name is not None:
217
+ writer.comment(name)
218
+ writer.newline()
219
+ assert len(palette) == self.numPaletteEntries
220
+ for cindex, color in enumerate(palette):
221
+ color.toXML(writer, ttFont, cindex)
222
+ writer.endtag("palette")
223
+ writer.newline()
224
+ if self.version > 0 and not all(
225
+ l == self.NO_NAME_ID for l in self.paletteEntryLabels
226
+ ):
227
+ writer.begintag("paletteEntryLabels")
228
+ writer.newline()
229
+ for index, label in enumerate(self.paletteEntryLabels):
230
+ if label != self.NO_NAME_ID:
231
+ writer.simpletag("label", index=index, value=label)
232
+ if self.version > 0 and label and ttFont and "name" in ttFont:
233
+ name = ttFont["name"].getDebugName(label)
234
+ if name is not None:
235
+ writer.comment(name)
236
+ writer.newline()
237
+ writer.endtag("paletteEntryLabels")
238
+ writer.newline()
239
+
240
+ def fromXML(self, name, attrs, content, ttFont):
241
+ if name == "palette":
242
+ self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
243
+ self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
244
+ palette = []
245
+ for element in content:
246
+ if isinstance(element, str):
247
+ continue
248
+ attrs = element[1]
249
+ color = Color.fromHex(attrs["value"])
250
+ palette.append(color)
251
+ self.palettes.append(palette)
252
+ elif name == "paletteEntryLabels":
253
+ colorLabels = {}
254
+ for element in content:
255
+ if isinstance(element, str):
256
+ continue
257
+ elementName, elementAttr, _ = element
258
+ if elementName == "label":
259
+ labelIndex = safeEval(elementAttr["index"])
260
+ nameID = safeEval(elementAttr["value"])
261
+ colorLabels[labelIndex] = nameID
262
+ self.paletteEntryLabels = [
263
+ colorLabels.get(i, self.NO_NAME_ID)
264
+ for i in range(self.numPaletteEntries)
265
+ ]
266
+ elif "value" in attrs:
267
+ value = safeEval(attrs["value"])
268
+ setattr(self, name, value)
269
+ if name == "numPaletteEntries":
270
+ self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
271
+
272
+
273
+ class Color(namedtuple("Color", "blue green red alpha")):
274
+ def hex(self):
275
+ return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
276
+
277
+ def __repr__(self):
278
+ return self.hex()
279
+
280
+ def toXML(self, writer, ttFont, index=None):
281
+ writer.simpletag("color", value=self.hex(), index=index)
282
+ writer.newline()
283
+
284
+ @classmethod
285
+ def fromHex(cls, value):
286
+ if value[0] == "#":
287
+ value = value[1:]
288
+ red = int(value[0:2], 16)
289
+ green = int(value[2:4], 16)
290
+ blue = int(value[4:6], 16)
291
+ alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
292
+ return cls(red=red, green=green, blue=blue, alpha=alpha)
293
+
294
+ @classmethod
295
+ def fromRGBA(cls, red, green, blue, alpha):
296
+ return cls(red=red, green=green, blue=blue, alpha=alpha)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from . import DefaultTable
4
+
5
+
6
+ class table_D__e_b_g(DefaultTable.DefaultTable):
7
+ def decompile(self, data, ttFont):
8
+ self.data = json.loads(data)
9
+
10
+ def compile(self, ttFont):
11
+ return json.dumps(self.data).encode("utf-8")
12
+
13
+ def toXML(self, writer, ttFont):
14
+ writer.writecdata(json.dumps(self.data, indent=2))
15
+
16
+ def fromXML(self, name, attrs, content, ttFont):
17
+ self.data = json.loads(content)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/DefaultTable.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc.textTools import Tag
2
+ from fontTools.ttLib import getClassTag
3
+
4
+
5
+ class DefaultTable(object):
6
+ dependencies = []
7
+
8
+ def __init__(self, tag=None):
9
+ if tag is None:
10
+ tag = getClassTag(self.__class__)
11
+ self.tableTag = Tag(tag)
12
+
13
+ def decompile(self, data, ttFont):
14
+ self.data = data
15
+
16
+ def compile(self, ttFont):
17
+ return self.data
18
+
19
+ def toXML(self, writer, ttFont, **kwargs):
20
+ if hasattr(self, "ERROR"):
21
+ writer.comment("An error occurred during the decompilation of this table")
22
+ writer.newline()
23
+ writer.comment(self.ERROR)
24
+ writer.newline()
25
+ writer.begintag("hexdata")
26
+ writer.newline()
27
+ writer.dumphex(self.compile(ttFont))
28
+ writer.endtag("hexdata")
29
+ writer.newline()
30
+
31
+ def fromXML(self, name, attrs, content, ttFont):
32
+ from fontTools.misc.textTools import readHex
33
+ from fontTools import ttLib
34
+
35
+ if name != "hexdata":
36
+ raise ttLib.TTLibError("can't handle '%s' element" % name)
37
+ self.decompile(readHex(content), ttFont)
38
+
39
+ def __repr__(self):
40
+ return "<'%s' table at %x>" % (self.tableTag, id(self))
41
+
42
+ def __eq__(self, other):
43
+ if type(self) != type(other):
44
+ return NotImplemented
45
+ return self.__dict__ == other.__dict__
46
+
47
+ def __ne__(self, other):
48
+ result = self.__eq__(other)
49
+ return result if result is NotImplemented else not result
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py ADDED
@@ -0,0 +1,710 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from . import DefaultTable
3
+ from fontTools.misc.textTools import bytesjoin, safeEval
4
+ from .BitmapGlyphMetrics import (
5
+ BigGlyphMetrics,
6
+ bigGlyphMetricsFormat,
7
+ SmallGlyphMetrics,
8
+ smallGlyphMetricsFormat,
9
+ )
10
+ import struct
11
+ import itertools
12
+ from collections import deque
13
+ import logging
14
+
15
+
16
+ log = logging.getLogger(__name__)
17
+
18
+ eblcHeaderFormat = """
19
+ > # big endian
20
+ version: 16.16F
21
+ numSizes: I
22
+ """
23
+ # The table format string is split to handle sbitLineMetrics simply.
24
+ bitmapSizeTableFormatPart1 = """
25
+ > # big endian
26
+ indexSubTableArrayOffset: I
27
+ indexTablesSize: I
28
+ numberOfIndexSubTables: I
29
+ colorRef: I
30
+ """
31
+ # The compound type for hori and vert.
32
+ sbitLineMetricsFormat = """
33
+ > # big endian
34
+ ascender: b
35
+ descender: b
36
+ widthMax: B
37
+ caretSlopeNumerator: b
38
+ caretSlopeDenominator: b
39
+ caretOffset: b
40
+ minOriginSB: b
41
+ minAdvanceSB: b
42
+ maxBeforeBL: b
43
+ minAfterBL: b
44
+ pad1: b
45
+ pad2: b
46
+ """
47
+ # hori and vert go between the two parts.
48
+ bitmapSizeTableFormatPart2 = """
49
+ > # big endian
50
+ startGlyphIndex: H
51
+ endGlyphIndex: H
52
+ ppemX: B
53
+ ppemY: B
54
+ bitDepth: B
55
+ flags: b
56
+ """
57
+
58
+ indexSubTableArrayFormat = ">HHL"
59
+ indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
60
+
61
+ indexSubHeaderFormat = ">HHL"
62
+ indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
63
+
64
+ codeOffsetPairFormat = ">HH"
65
+ codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
66
+
67
+
68
+ class table_E_B_L_C_(DefaultTable.DefaultTable):
69
+ dependencies = ["EBDT"]
70
+
71
+ # This method can be overridden in subclasses to support new formats
72
+ # without changing the other implementation. Also can be used as a
73
+ # convenience method for converting a font file to an alternative format.
74
+ def getIndexFormatClass(self, indexFormat):
75
+ return eblc_sub_table_classes[indexFormat]
76
+
77
+ def decompile(self, data, ttFont):
78
+ # Save the original data because offsets are from the start of the table.
79
+ origData = data
80
+ i = 0
81
+
82
+ dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
83
+ i += 8
84
+
85
+ self.strikes = []
86
+ for curStrikeIndex in range(self.numSizes):
87
+ curStrike = Strike()
88
+ self.strikes.append(curStrike)
89
+ curTable = curStrike.bitmapSizeTable
90
+ dummy = sstruct.unpack2(
91
+ bitmapSizeTableFormatPart1, data[i : i + 16], curTable
92
+ )
93
+ i += 16
94
+ for metric in ("hori", "vert"):
95
+ metricObj = SbitLineMetrics()
96
+ vars(curTable)[metric] = metricObj
97
+ dummy = sstruct.unpack2(
98
+ sbitLineMetricsFormat, data[i : i + 12], metricObj
99
+ )
100
+ i += 12
101
+ dummy = sstruct.unpack(
102
+ bitmapSizeTableFormatPart2, data[i : i + 8], curTable
103
+ )
104
+ i += 8
105
+
106
+ for curStrike in self.strikes:
107
+ curTable = curStrike.bitmapSizeTable
108
+ for subtableIndex in range(curTable.numberOfIndexSubTables):
109
+ i = (
110
+ curTable.indexSubTableArrayOffset
111
+ + subtableIndex * indexSubTableArraySize
112
+ )
113
+
114
+ tup = struct.unpack(
115
+ indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
116
+ )
117
+ (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
118
+ i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
119
+
120
+ tup = struct.unpack(
121
+ indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
122
+ )
123
+ (indexFormat, imageFormat, imageDataOffset) = tup
124
+
125
+ indexFormatClass = self.getIndexFormatClass(indexFormat)
126
+ indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
127
+ indexSubTable.firstGlyphIndex = firstGlyphIndex
128
+ indexSubTable.lastGlyphIndex = lastGlyphIndex
129
+ indexSubTable.additionalOffsetToIndexSubtable = (
130
+ additionalOffsetToIndexSubtable
131
+ )
132
+ indexSubTable.indexFormat = indexFormat
133
+ indexSubTable.imageFormat = imageFormat
134
+ indexSubTable.imageDataOffset = imageDataOffset
135
+ indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
136
+ curStrike.indexSubTables.append(indexSubTable)
137
+
138
+ def compile(self, ttFont):
139
+ dataList = []
140
+ self.numSizes = len(self.strikes)
141
+ dataList.append(sstruct.pack(eblcHeaderFormat, self))
142
+
143
+ # Data size of the header + bitmapSizeTable needs to be calculated
144
+ # in order to form offsets. This value will hold the size of the data
145
+ # in dataList after all the data is consolidated in dataList.
146
+ dataSize = len(dataList[0])
147
+
148
+ # The table will be structured in the following order:
149
+ # (0) header
150
+ # (1) Each bitmapSizeTable [1 ... self.numSizes]
151
+ # (2) Alternate between indexSubTableArray and indexSubTable
152
+ # for each bitmapSizeTable present.
153
+ #
154
+ # The issue is maintaining the proper offsets when table information
155
+ # gets moved around. All offsets and size information must be recalculated
156
+ # when building the table to allow editing within ttLib and also allow easy
157
+ # import/export to and from XML. All of this offset information is lost
158
+ # when exporting to XML so everything must be calculated fresh so importing
159
+ # from XML will work cleanly. Only byte offset and size information is
160
+ # calculated fresh. Count information like numberOfIndexSubTables is
161
+ # checked through assertions. If the information in this table was not
162
+ # touched or was changed properly then these types of values should match.
163
+ #
164
+ # The table will be rebuilt the following way:
165
+ # (0) Precompute the size of all the bitmapSizeTables. This is needed to
166
+ # compute the offsets properly.
167
+ # (1) For each bitmapSizeTable compute the indexSubTable and
168
+ # indexSubTableArray pair. The indexSubTable must be computed first
169
+ # so that the offset information in indexSubTableArray can be
170
+ # calculated. Update the data size after each pairing.
171
+ # (2) Build each bitmapSizeTable.
172
+ # (3) Consolidate all the data into the main dataList in the correct order.
173
+
174
+ for _ in self.strikes:
175
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
176
+ dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
177
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
178
+
179
+ indexSubTablePairDataList = []
180
+ for curStrike in self.strikes:
181
+ curTable = curStrike.bitmapSizeTable
182
+ curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
183
+ curTable.indexSubTableArrayOffset = dataSize
184
+
185
+ # Precompute the size of the indexSubTableArray. This information
186
+ # is important for correctly calculating the new value for
187
+ # additionalOffsetToIndexSubtable.
188
+ sizeOfSubTableArray = (
189
+ curTable.numberOfIndexSubTables * indexSubTableArraySize
190
+ )
191
+ lowerBound = dataSize
192
+ dataSize += sizeOfSubTableArray
193
+ upperBound = dataSize
194
+
195
+ indexSubTableDataList = []
196
+ for indexSubTable in curStrike.indexSubTables:
197
+ indexSubTable.additionalOffsetToIndexSubtable = (
198
+ dataSize - curTable.indexSubTableArrayOffset
199
+ )
200
+ glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
201
+ indexSubTable.firstGlyphIndex = min(glyphIds)
202
+ indexSubTable.lastGlyphIndex = max(glyphIds)
203
+ data = indexSubTable.compile(ttFont)
204
+ indexSubTableDataList.append(data)
205
+ dataSize += len(data)
206
+ curTable.startGlyphIndex = min(
207
+ ist.firstGlyphIndex for ist in curStrike.indexSubTables
208
+ )
209
+ curTable.endGlyphIndex = max(
210
+ ist.lastGlyphIndex for ist in curStrike.indexSubTables
211
+ )
212
+
213
+ for i in curStrike.indexSubTables:
214
+ data = struct.pack(
215
+ indexSubHeaderFormat,
216
+ i.firstGlyphIndex,
217
+ i.lastGlyphIndex,
218
+ i.additionalOffsetToIndexSubtable,
219
+ )
220
+ indexSubTablePairDataList.append(data)
221
+ indexSubTablePairDataList.extend(indexSubTableDataList)
222
+ curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
223
+
224
+ for curStrike in self.strikes:
225
+ curTable = curStrike.bitmapSizeTable
226
+ data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
227
+ dataList.append(data)
228
+ for metric in ("hori", "vert"):
229
+ metricObj = vars(curTable)[metric]
230
+ data = sstruct.pack(sbitLineMetricsFormat, metricObj)
231
+ dataList.append(data)
232
+ data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
233
+ dataList.append(data)
234
+ dataList.extend(indexSubTablePairDataList)
235
+
236
+ return bytesjoin(dataList)
237
+
238
+ def toXML(self, writer, ttFont):
239
+ writer.simpletag("header", [("version", self.version)])
240
+ writer.newline()
241
+ for curIndex, curStrike in enumerate(self.strikes):
242
+ curStrike.toXML(curIndex, writer, ttFont)
243
+
244
+ def fromXML(self, name, attrs, content, ttFont):
245
+ if name == "header":
246
+ self.version = safeEval(attrs["version"])
247
+ elif name == "strike":
248
+ if not hasattr(self, "strikes"):
249
+ self.strikes = []
250
+ strikeIndex = safeEval(attrs["index"])
251
+ curStrike = Strike()
252
+ curStrike.fromXML(name, attrs, content, ttFont, self)
253
+
254
+ # Grow the strike array to the appropriate size. The XML format
255
+ # allows for the strike index value to be out of order.
256
+ if strikeIndex >= len(self.strikes):
257
+ self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
258
+ assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
259
+ self.strikes[strikeIndex] = curStrike
260
+
261
+
262
class Strike(object):
    """One EBLC strike: a bitmapSizeTable plus its index subtables."""

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        writer.begintag("strike", [("index", strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment(
            "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
        )
        writer.newline()
        for subTable in self.indexSubTables:
            subTable.toXML(writer, ttFont)
        writer.endtag("strike")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        # Each child element is either the size table or one index subtable;
        # the subtable class is chosen from the element name's format suffix.
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "bitmapSizeTable":
                self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
            elif name.startswith(_indexSubTableSubclassPrefix):
                fmt = safeEval(name[len(_indexSubTableSubclassPrefix) :])
                subTableClass = locator.getIndexFormatClass(fmt)
                subTable = subTableClass(None, None)
                subTable.indexFormat = fmt
                subTable.fromXML(name, attrs, content, ttFont)
                self.indexSubTables.append(subTable)
294
+
295
+
296
class BitmapSizeTable(object):
    """Per-strike metadata record (sizes, metrics, ppem, flags).

    Offset/count fields are recalculated at compile time and therefore
    excluded from the XML round trip.
    """

    # Returns all the simple metric names that bitmap size table
    # cares about in terms of XML creation.
    def _getXMLMetricNames(self):
        dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
        dataNames = {**dataNames, **sstruct.getformat(bitmapSizeTableFormatPart2)[1]}
        # Skip the first 3 data names because they are byte offsets and counts.
        return list(dataNames.keys())[3:]

    def toXML(self, writer, ttFont):
        writer.begintag("bitmapSizeTable")
        writer.newline()
        for metric in ("hori", "vert"):
            getattr(self, metric).toXML(metric, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("bitmapSizeTable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Create a lookup for all the simple names that make sense to
        # bitmap size table. Only read the information from these names.
        dataNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "sbitLineMetrics":
                direction = attrs["direction"]
                assert direction in (
                    "hori",
                    "vert",
                ), "SbitLineMetrics direction specified invalid."
                metricObj = SbitLineMetrics()
                metricObj.fromXML(name, attrs, content, ttFont)
                vars(self)[direction] = metricObj
            elif name in dataNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
337
+
338
+
339
class SbitLineMetrics(object):
    """Line metrics for one direction ("hori" or "vert") of a strike."""

    def toXML(self, name, writer, ttFont):
        writer.begintag("sbitLineMetrics", [("direction", name)])
        writer.newline()
        for fieldName in sstruct.getformat(sbitLineMetricsFormat)[1]:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()
        writer.endtag("sbitLineMetrics")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only accept element names that are actual metric fields.
        knownFields = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            childName, childAttrs, _ = element
            if childName in knownFields:
                setattr(self, childName, safeEval(childAttrs["value"]))
357
+
358
+
359
# Important information about the naming scheme. Used for identifying subtables.
_indexSubTableSubclassPrefix = "eblc_index_sub_table_"


class EblcIndexSubTable(object):
    """Abstract base for EBLC index subtables.

    Holds raw data for lazy decompilation and implements the shared
    indexSubHeader (de)compilation plus the glyph-name based XML format.
    Subclasses implement decompile()/compile() for their indexFormat.
    """

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data, self.ttFont

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            # 'data' is deleted after decompile; avoid infinite recursion.
            raise AttributeError(attr)
        self.decompile()
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Presence of 'data' means the subtable has not been decompiled yet.
        if hasattr(self, "data"):
            self.decompile()

    # This method just takes care of the indexSubHeader. Implementing subclasses
    # should call it to compile the indexSubHeader and then continue compiling
    # the remainder of their unique format.
    def compile(self, ttFont):
        return struct.pack(
            indexSubHeaderFormat,
            self.indexFormat,
            self.imageFormat,
            self.imageDataOffset,
        )

    # Creates the XML for bitmap glyphs. Each index sub table basically makes
    # the same XML except for specific metric information that is written
    # out via a method call that a subclass implements optionally.
    def toXML(self, writer, ttFont):
        writer.begintag(
            self.__class__.__name__,
            [
                ("imageFormat", self.imageFormat),
                ("firstGlyphIndex", self.firstGlyphIndex),
                ("lastGlyphIndex", self.lastGlyphIndex),
            ],
        )
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Write out the names as thats all thats needed to rebuild etc.
        # For font debugging of consecutive formats the ids are also written.
        # The ids are not read when moving from the XML format.
        glyphIds = map(ttFont.getGlyphID, self.names)
        for glyphName, glyphId in zip(self.names, glyphIds):
            writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Read all the attributes. Even though the glyph indices are
        # recalculated, they are still read in case there needs to
        # be an immediate export of the data.
        self.imageFormat = safeEval(attrs["imageFormat"])
        self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
        self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])

        self.readMetrics(name, attrs, content, ttFont)

        self.names = []
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "glyphLoc":
                self.names.append(attrs["name"])

    # A helper method that writes the metrics for the index sub table. It also
    # is responsible for writing the image size for fixed size data since fixed
    # size is not recalculated on compile. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # A helper method that is the inverse of writeMetrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    # This method is for fixed glyph data sizes. There are formats where
    # the glyph data is fixed but are actually composite glyphs. To handle
    # this the font spec in indexSubTable makes the data the size of the
    # fixed size by padding the component arrays. This function abstracts
    # out this padding process. Input is data unpadded. Output is data
    # padded only in fixed formats. Default behavior is to return the data.
    def padBitmapData(self, data):
        return data

    # Remove any of the glyph locations and names that are flagged as skipped.
    # This only occurs in formats {1,3}.
    def removeSkipGlyphs(self):
        # Determines if a name, location pair is a valid data location.
        # Skip glyphs are marked when the size is equal to zero.
        def isValidLocation(args):
            (name, (startByte, endByte)) = args
            return startByte < endByte

        # Remove all skip glyphs.
        dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
        # NOTE(review): zip(*dataPairs) raises if every glyph in the range was
        # a skip glyph (empty dataPairs) — presumably never occurs in valid
        # fonts; confirm before relying on this with arbitrary input.
        self.names, self.locations = list(map(list, zip(*dataPairs)))
469
+
470
+
471
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
    """Build a mixin class handling an offset-array index subtable whose
    per-offset struct code is *formatStringForDataType* ("L" for format 1,
    "H" for format 3)."""
    # Prep the data size for the offset array data format.
    dataFormat = ">" + formatStringForDataType
    offsetDataSize = struct.calcsize(dataFormat)

    class OffsetArrayIndexSubTableMixin(object):
        def decompile(self):
            # numGlyphs + 1 offsets are stored (the extra one terminates the
            # last glyph's data range); read numGlyphs + 2 boundary points to
            # slice them out pairwise.
            numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
            indexingOffsets = [
                glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
            ]
            indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
            offsetArray = [
                struct.unpack(dataFormat, self.data[slice(*loc)])[0]
                for loc in indexingLocations
            ]

            glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            # Stored offsets are relative to imageDataOffset; make absolute.
            modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
            self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))

            self.names = list(map(self.ttFont.getGlyphName, glyphIds))
            self.removeSkipGlyphs()
            del self.data, self.ttFont

        def compile(self, ttFont):
            # First make sure that all the data lines up properly. Formats 1 and 3
            # must have all its data lined up consecutively. If not this will fail.
            for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
                assert (
                    curLoc[1] == nxtLoc[0]
                ), "Data must be consecutive in indexSubTable offset formats"

            glyphIds = list(map(ttFont.getGlyphID, self.names))
            # Make sure that all ids are sorted strictly increasing.
            assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))

            # Run a simple algorithm to add skip glyphs to the data locations at
            # the places where an id is not present.
            idQueue = deque(glyphIds)
            locQueue = deque(self.locations)
            allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            allLocations = []
            for curId in allGlyphIds:
                if curId != idQueue[0]:
                    # Skip glyph: zero-length location at the next data start.
                    allLocations.append((locQueue[0][0], locQueue[0][0]))
                else:
                    idQueue.popleft()
                    allLocations.append(locQueue.popleft())

            # Now that all the locations are collected, pack them appropriately into
            # offsets. This is the form where offset[i] is the location and
            # offset[i+1]-offset[i] is the size of the data location.
            offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
            # Image data offset must be less than or equal to the minimum of locations.
            # This offset may change the value for round tripping but is safer and
            # allows imageDataOffset to not be required to be in the XML version.
            self.imageDataOffset = min(offsets)
            offsetArray = [offset - self.imageDataOffset for offset in offsets]

            dataList = [EblcIndexSubTable.compile(self, ttFont)]
            dataList += [
                struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
            ]
            # Take care of any padding issues. Only occurs in format 3.
            if offsetDataSize * len(offsetArray) % 4 != 0:
                dataList.append(struct.pack(dataFormat, 0))
            return bytesjoin(dataList)

    return OffsetArrayIndexSubTableMixin
544
+
545
+
546
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
    def writeMetrics(self, writer, ttFont):
        # imageSize is authoritative for fixed-size formats and is not
        # recalculated on compile, so it must survive the XML round trip.
        writer.simpletag("imageSize", value=self.imageSize)
        writer.newline()
        self.metrics.toXML(writer, ttFont)

    def readMetrics(self, name, attrs, content, ttFont):
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "imageSize":
                self.imageSize = safeEval(attrs["value"])
            elif name == BigGlyphMetrics.__name__:
                self.metrics = BigGlyphMetrics()
                self.metrics.fromXML(name, attrs, content, ttFont)
            elif name == SmallGlyphMetrics.__name__:
                # Fixed-size formats carry big metrics only.
                log.warning(
                    "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
                )

    def padBitmapData(self, data):
        # Make sure that the data isn't bigger than the fixed size.
        assert len(data) <= self.imageSize, (
            "Data in indexSubTable format %d must be less than the fixed size."
            % self.indexFormat
        )
        # Pad the data so that it matches the fixed size.
        pad = (self.imageSize - len(data)) * b"\0"
        return data + pad
579
+
580
+
581
class eblc_index_sub_table_1(
    _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
):
    # Format 1: proportional glyphs with 32-bit ("L") offsets.
    pass
585
+
586
+
587
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Format 2: all glyphs share one fixed image size and big metrics."""

    def decompile(self):
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        self.metrics = BigGlyphMetrics()
        sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
        glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        # Locations are implicit: consecutive fixed-size slots.
        offsets = [
            self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Make sure all the ids are consecutive. This is required by Format 2.
        assert glyphIds == list(
            range(self.firstGlyphIndex, self.lastGlyphIndex + 1)
        ), "Format 2 ids must be consecutive."
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        return bytesjoin(dataList)
612
+
613
+
614
class eblc_index_sub_table_3(
    _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
):
    # Format 3: proportional glyphs with 16-bit ("H") offsets.
    pass
618
+
619
+
620
class eblc_index_sub_table_4(EblcIndexSubTable):
    """Format 4: sparse glyph set stored as (glyphID, offset) code pairs."""

    def decompile(self):
        (numGlyphs,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        # numGlyphs + 1 pairs are stored; the final pair terminates the last
        # glyph's data range.
        indexingOffsets = [
            glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs + 2)
        ]
        indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
        glyphArray = [
            struct.unpack(codeOffsetPairFormat, data[slice(*loc)])
            for loc in indexingLocations
        ]
        glyphIds, offsets = list(map(list, zip(*glyphArray)))
        # There are one too many glyph ids. Get rid of the last one.
        glyphIds.pop()

        offsets = [offset + self.imageDataOffset for offset in offsets]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        # First make sure that all the data lines up properly. Format 4
        # must have all its data lined up consecutively. If not this will fail.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert (
                curLoc[1] == nxtLoc[0]
            ), "Data must be consecutive in indexSubTable format 4"

        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Create an iterator over the ids plus a padding value.
        idsPlusPad = list(itertools.chain(glyphIds, [0]))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        tmp = [
            struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)
        ]
        dataList += tmp
        data = bytesjoin(dataList)
        return data
667
+
668
+
669
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Format 5: fixed image size plus an explicit sparse glyph-ID array."""

    def decompile(self):
        self.origDataLen = 0
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        self.metrics, data = sstruct.unpack2(
            bigGlyphMetricsFormat, data, BigGlyphMetrics()
        )
        (numGlyphs,) = struct.unpack(">L", data[:4])
        data = data[4:]
        glyphIds = [
            struct.unpack(">H", data[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs)
        ]

        # Locations are implicit: consecutive fixed-size slots in listed order.
        offsets = [
            self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList += [struct.pack(">H", curId) for curId in glyphIds]
        # Pad the 16-bit glyph-ID array to a 32-bit boundary.
        if len(glyphIds) % 2 == 1:
            dataList.append(struct.pack(">H", 0))
        return bytesjoin(dataList)
701
+
702
+
703
# Dictionary of indexFormat to the class representing that format.
# Used by table_E_B_L_C_.getIndexFormatClass(); subclasses may extend it.
eblc_sub_table_classes = {
    1: eblc_index_sub_table_1,
    2: eblc_index_sub_table_2,
    3: eblc_index_sub_table_3,
    4: eblc_index_sub_table_4,
    5: eblc_index_sub_table_5,
}
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.timeTools import timestampFromString, timestampToString
from . import DefaultTable

# Binary layout of the FFTM table: a version field followed by three
# 64-bit timestamps (seconds since the TrueType epoch).
FFTMFormat = """
		>	# big endian
		version:        I
		FFTimeStamp:    Q
		sourceCreated:  Q
		sourceModified: Q
"""
+
14
+
15
class table_F_F_T_M_(DefaultTable.DefaultTable):
    """'FFTM' table: FontForge's timestamp record (non-standard table
    written by FontForge; holds its own build stamp plus the source
    file's creation and modification times)."""

    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(FFTMFormat, data, self)

    def compile(self, ttFont):
        return sstruct.pack(FFTMFormat, self)

    def toXML(self, writer, ttFont):
        writer.comment(
            "FontForge's timestamp, font source creation and modification dates"
        )
        writer.newline()
        _, fieldNames, _ = sstruct.getformat(FFTMFormat)
        for fieldName in fieldNames:
            fieldValue = getattr(self, fieldName)
            # Timestamps are serialized human-readable, not as raw integers.
            if fieldName in ("FFTimeStamp", "sourceCreated", "sourceModified"):
                fieldValue = timestampToString(fieldValue)
            writer.simpletag(fieldName, value=fieldValue)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        rawValue = attrs["value"]
        if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
            parsed = timestampFromString(rawValue)
        else:
            parsed = safeEval(rawValue)
        setattr(self, name, parsed)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from .otBase import BaseTTXConverter


class table_G_D_E_F_(BaseTTXConverter):
    """Glyph Definition table; all (de)compilation and XML handling is
    inherited from BaseTTXConverter / the generated otData machinery."""

    pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from .otBase import BaseTTXConverter


class table_G_P_O_S_(BaseTTXConverter):
    """Glyph Positioning table; all (de)compilation and XML handling is
    inherited from BaseTTXConverter / the generated otData machinery."""

    pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval

# from itertools import *
from functools import partial
from . import DefaultTable
from . import grUtils
import struct


# Common header shared by all Glat versions (version as 16.16 fixed).
Glat_format_0 = """
    >        # big endian
    version: 16.16F
"""

# Version 3 header adds a compression/flags word.
Glat_format_3 = """
    >
    version: 16.16F
    compression:L    # compression scheme or reserved
"""

# Attribute-run header, version 1: 8-bit fields.
Glat_format_1_entry = """
    >
    attNum:  B    # Attribute number of first attribute
    num:     B    # Number of attributes in this run
"""
# Attribute-run header, versions 2 and 3: 16-bit fields.
Glat_format_23_entry = """
    >
    attNum:  H    # Attribute number of first attribute
    num:     H    # Number of attributes in this run
"""

Glat_format_3_octabox_metrics = """
    >
    subboxBitmap:  H    # Which subboxes exist on 4x4 grid
    diagNegMin:    B    # Defines minimum negatively-sloped diagonal (si)
    diagNegMax:    B    # Defines maximum negatively-sloped diagonal (sa)
    diagPosMin:    B    # Defines minimum positively-sloped diagonal (di)
    diagPosMax:    B    # Defines maximum positively-sloped diagonal (da)
"""

Glat_format_3_subbox_entry = """
    >
    left:          B    # xi
    right:         B    # xa
    bottom:        B    # yi
    top:           B    # ya
    diagNegMin:    B    # Defines minimum negatively-sloped diagonal (si)
    diagNegMax:    B    # Defines maximum negatively-sloped diagonal (sa)
    diagPosMin:    B    # Defines minimum positively-sloped diagonal (di)
    diagPosMax:    B    # Defines maximum positively-sloped diagonal (da)
"""
54
+
55
+
56
class _Object:
    # Bare attribute container used as an sstruct unpack target.
    pass
58
+
59
+
60
class _Dict(dict):
    # dict subclass so extra attributes (e.g. .octabox) can be attached
    # to an attribute mapping.
    pass
62
+
63
+
64
class table_G__l_a_t(DefaultTable.DefaultTable):
    """
    Support Graphite Glat tables.

    Glat stores runs of per-glyph attribute values used by the Graphite
    engine; the companion Gloc table holds each glyph's byte offset into
    this table's data. Versions 1 and 2 differ only in the width of the
    run headers (8-bit vs 16-bit); version 3 may additionally be
    compressed and may carry octabox hull metrics per glyph.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Compression scheme for version 3 tables (0 = none).
        self.scheme = 0

    def decompile(self, data, ttFont):
        sstruct.unpack2(Glat_format_0, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version <= 1.9:
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Glat_format_3, data, self)
            # Bit 0 of the compression word flags octabox metrics presence.
            self.hasOctaboxes = (self.compression & 1) == 1
            decoder = self.decompileAttributes3

        # Gloc yields the byte offset of each glyph's attribute data;
        # consecutive offset pairs delimit one glyph's slice.
        gloc = ttFont["Gloc"]
        self.attributes = {}
        count = 0
        for s, e in zip(gloc, gloc[1:]):
            self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
            count += 1

    def decompileAttributes12(self, data, fmt):
        """Decode runs of (attNum, num) headers followed by ``num`` signed
        16-bit attribute values; returns {attribute index: value}."""
        attributes = _Dict()
        while len(data) > 3:
            e, data = sstruct.unpack2(fmt, data, _Object())
            keys = range(e.attNum, e.attNum + e.num)
            if len(data) >= 2 * e.num:
                vals = struct.unpack_from((">%dh" % e.num), data)
                attributes.update(zip(keys, vals))
            data = data[2 * e.num :]
        return attributes

    def decompileAttributes3(self, data):
        """Version 3 decoder: optional octabox metrics, then the same run
        encoding as version 2."""
        if self.hasOctaboxes:
            o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
            numsub = bin(o.subboxBitmap).count("1")
            o.subboxes = []
            for b in range(numsub):
                if len(data) >= 8:
                    subbox, data = sstruct.unpack2(
                        Glat_format_3_subbox_entry, data, _Object()
                    )
                    o.subboxes.append(subbox)
        attrs = self.decompileAttributes12(data, Glat_format_23_entry)
        if self.hasOctaboxes:
            attrs.octabox = o
        return attrs

    def compile(self, ttFont):
        data = sstruct.pack(Glat_format_0, self)
        if self.version <= 1.9:
            encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            # Fixed: version 2 uses 16-bit run headers (Glat_format_23_entry),
            # matching decompile(); using the 8-bit format here produced
            # corrupt binary on round trip.
            encoder = partial(self.compileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
            data = sstruct.pack(Glat_format_3, self)
            encoder = self.compileAttributes3

        # Record per-glyph offsets as we append, and hand them to Gloc.
        glocs = []
        for n in range(len(self.attributes)):
            glocs.append(len(data))
            data += encoder(self.attributes[ttFont.getGlyphName(n)])
        glocs.append(len(data))
        ttFont["Gloc"].set(glocs)

        if self.version >= 3.0:
            data = grUtils.compress(self.scheme, data)
        return data

    def compileAttributes12(self, attrs, fmt):
        """Inverse of decompileAttributes12; grUtils.entries() groups the
        mapping into maximal consecutive runs."""
        data = b""
        for e in grUtils.entries(attrs):
            data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
                (">%dh" % len(e[2])), *e[2]
            )
        return data

    def compileAttributes3(self, attrs):
        if self.hasOctaboxes:
            o = attrs.octabox
            data = sstruct.pack(Glat_format_3_octabox_metrics, o)
            numsub = bin(o.subboxBitmap).count("1")
            for b in range(numsub):
                data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
        else:
            # Fixed: must be bytes, not str — concatenated with the bytes
            # returned by compileAttributes12 below (str + bytes raised
            # TypeError for v3 tables without octaboxes).
            data = b""
        return data + self.compileAttributes12(attrs, Glat_format_23_entry)

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
        writer.newline()
        for n, a in sorted(
            self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
        ):
            writer.begintag("glyph", name=n)
            writer.newline()
            if hasattr(a, "octabox"):
                o = a.octabox
                formatstring, names, fixes = sstruct.getformat(
                    Glat_format_3_octabox_metrics
                )
                vals = {}
                for k in names:
                    if k == "subboxBitmap":
                        continue
                    # Byte values are serialized as percentages of 255.
                    vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
                vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
                writer.begintag("octaboxes", **vals)
                writer.newline()
                formatstring, names, fixes = sstruct.getformat(
                    Glat_format_3_subbox_entry
                )
                for s in o.subboxes:
                    vals = {}
                    for k in names:
                        vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
                    writer.simpletag("octabox", **vals)
                    writer.newline()
                writer.endtag("octaboxes")
                writer.newline()
            for k, v in sorted(a.items()):
                writer.simpletag("attribute", index=k, value=v)
                writer.newline()
            writer.endtag("glyph")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
            self.scheme = int(safeEval(attrs["compressionScheme"]))
        if name != "glyph":
            return
        if not hasattr(self, "attributes"):
            self.attributes = {}
        gname = attrs["name"]
        attributes = _Dict()
        for element in content:
            if not isinstance(element, tuple):
                continue
            tag, attrs, subcontent = element
            if tag == "attribute":
                k = int(safeEval(attrs["index"]))
                v = int(safeEval(attrs["value"]))
                attributes[k] = v
            elif tag == "octaboxes":
                self.hasOctaboxes = True
                o = _Object()
                o.subboxBitmap = int(attrs["bitmap"], 16)
                o.subboxes = []
                del attrs["bitmap"]
                # Percentages round-trip back to 0..255 byte values.
                for k, v in attrs.items():
                    setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
                for element in subcontent:
                    if not isinstance(element, tuple):
                        continue
                    (tag, attrs, subcontent) = element
                    so = _Object()
                    for k, v in attrs.items():
                        setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
                    o.subboxes.append(so)
                attributes.octabox = o
        self.attributes[gname] = attributes
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_o_c.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.textTools import safeEval
3
+ from . import DefaultTable
4
+ import array
5
+ import sys
6
+
7
+
8
+ Gloc_header = """
9
+ > # big endian
10
+ version: 16.16F # Table version
11
+ flags: H # bit 0: 1=long format, 0=short format
12
+ # bit 1: 1=attribute names, 0=no names
13
+ numAttribs: H # NUmber of attributes
14
+ """
15
+
16
+
17
class table_G__l_o_c(DefaultTable.DefaultTable):
    """
    Support Graphite Gloc tables
    """

    dependencies = ["Glat"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.attribIds = None
        self.numAttribs = 0

    def decompile(self, data, ttFont):
        """Parse header, glyph offsets, and the optional attribute-id array."""
        _, data = sstruct.unpack2(Gloc_header, data, self)
        flags = self.flags
        del self.flags
        # Bit 0 selects 32-bit offsets; bit 2 (== flags & 2) means a trailing
        # array of numAttribs 16-bit attribute ids follows the offsets.
        offsetsEnd = len(data) - self.numAttribs * (flags & 2)
        self.locations = array.array("I" if flags & 1 else "H")
        self.locations.frombytes(data[:offsetsEnd])
        if sys.byteorder != "big":
            self.locations.byteswap()
        self.attribIds = array.array("H")
        if flags & 2:
            self.attribIds.frombytes(data[-self.numAttribs * 2 :])
            if sys.byteorder != "big":
                self.attribIds.byteswap()

    @staticmethod
    def _bigEndianBytes(arr):
        # The array holds native-endian values; emit big-endian bytes while
        # leaving the array itself unchanged afterwards.
        if sys.byteorder != "big":
            arr.byteswap()
        payload = arr.tobytes()
        if sys.byteorder != "big":
            arr.byteswap()
        return payload

    def compile(self, ttFont):
        """Serialize the header followed by offsets and optional attribute ids."""
        flags = (bool(self.attribIds) << 1) | (self.locations.typecode == "I")
        header = sstruct.pack(
            Gloc_header,
            dict(version=1.0, flags=flags, numAttribs=self.numAttribs),
        )
        chunks = [header, self._bigEndianBytes(self.locations)]
        if self.attribIds:
            chunks.append(self._bigEndianBytes(self.attribIds))
        return b"".join(chunks)

    def set(self, locations):
        # 32-bit entries are only needed when some offset overflows 16 bits.
        needLong = max(locations) >= 65536
        self.locations = array.array("I" if needLong else "H", locations)

    def toXML(self, writer, ttFont):
        writer.simpletag("attributes", number=self.numAttribs)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "attributes":
            self.numAttribs = int(safeEval(attrs["number"]))

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)

    def __iter__(self):
        return iter(self.locations)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/H_V_A_R_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_H_V_A_R_(BaseTTXConverter):
    """Horizontal Metrics Variations table (``HVAR``).

    All (de)serialization is handled by the generic ``otBase`` converter
    machinery; no table-specific logic is required here.
    """
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_J_S_T_F_(BaseTTXConverter):
    """Justification table (``JSTF``).

    All (de)serialization is handled by the generic ``otBase`` converter
    machinery; no table-specific logic is required here.
    """
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/L_T_S_H_.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc.textTools import safeEval
2
+ from . import DefaultTable
3
+ import struct
4
+ import array
5
+
6
+ # XXX I've lowered the strictness, to make sure Apple's own Chicago
7
+ # XXX gets through. They're looking into it, I hope to raise the standards
8
+ # XXX back to normal eventually.
9
+
10
+
11
class table_L_T_S_H_(DefaultTable.DefaultTable):
    """Linear Threshold table.

    ``self.yPels`` maps each glyph name to a single byte value read from /
    written to the binary table, indexed by glyph id.
    """

    def decompile(self, data, ttFont):
        version, numGlyphs = struct.unpack(">HH", data[:4])
        payload = data[4:]
        assert version == 0, "unknown version: %s" % version
        # Strictness deliberately relaxed (remainder < 4 rather than == 0)
        # so Apple's own Chicago font still loads.
        assert (len(payload) % numGlyphs) < 4, "numGlyphs doesn't match data length"
        # assert numGlyphs == ttFont['maxp'].numGlyphs  # not true in Chicago!
        values = array.array("B")
        values.frombytes(payload)
        self.yPels = {
            ttFont.getGlyphName(gid): values[gid] for gid in range(numGlyphs)
        }

    def compile(self, ttFont):
        version = 0
        numGlyphs = len(self.yPels)
        # assert len(self.yPels) == ttFont['maxp'].numGlyphs  # not true in Chicago!
        values = [0] * numGlyphs
        for glyphName, yPel in self.yPels.items():
            values[ttFont.getGlyphID(glyphName)] = yPel
        return struct.pack(">HH", version, numGlyphs) + array.array("B", values).tobytes()

    def toXML(self, writer, ttFont):
        for glyphName in sorted(self.yPels):
            writer.simpletag("yPel", name=glyphName, value=self.yPels[glyphName])
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "yPels"):
            self.yPels = {}
        if name != "yPel":
            return  # ignore unknown tags
        self.yPels[attrs["name"]] = safeEval(attrs["value"])
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/M_E_T_A_.py ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.textTools import byteord, safeEval
3
+ from . import DefaultTable
4
+ import pdb
5
+ import struct
6
+
7
+
8
+ METAHeaderFormat = """
9
+ > # big endian
10
+ tableVersionMajor: H
11
+ tableVersionMinor: H
12
+ metaEntriesVersionMajor: H
13
+ metaEntriesVersionMinor: H
14
+ unicodeVersion: L
15
+ metaFlags: H
16
+ nMetaRecs: H
17
+ """
18
+ # This record is followed by nMetaRecs of METAGlyphRecordFormat.
19
+ # This in turn is followd by as many METAStringRecordFormat entries
20
+ # as specified by the METAGlyphRecordFormat entries
21
+ # this is followed by the strings specifried in the METAStringRecordFormat
22
+ METAGlyphRecordFormat = """
23
+ > # big endian
24
+ glyphID: H
25
+ nMetaEntry: H
26
+ """
27
+ # This record is followd by a variable data length field:
28
+ # USHORT or ULONG hdrOffset
29
+ # Offset from start of META table to the beginning
30
+ # of this glyphs array of ns Metadata string entries.
31
+ # Size determined by metaFlags field
32
+ # METAGlyphRecordFormat entries must be sorted by glyph ID
33
+
34
+ METAStringRecordFormat = """
35
+ > # big endian
36
+ labelID: H
37
+ stringLen: H
38
+ """
39
+ # This record is followd by a variable data length field:
40
+ # USHORT or ULONG stringOffset
41
+ # METAStringRecordFormat entries must be sorted in order of labelID
42
+ # There may be more than one entry with the same labelID
43
+ # There may be more than one strign with the same content.
44
+
45
+ # Strings shall be Unicode UTF-8 encoded, and null-terminated.
46
+
47
# Registered META labelID values and their human-readable names.
METALabelDict = {
    0: "MojikumiX4051",  # An integer in the range 1-20
    1: "UNIUnifiedBaseChars",
    2: "BaseFontName",
    3: "Language",
    4: "CreationDate",
    5: "FoundryName",
    6: "FoundryCopyright",
    7: "OwnerURI",
    8: "WritingScript",
    10: "StrokeCount",
    11: "IndexingRadical",
}


def getLabelString(labelID):
    """Return the descriptive name for *labelID*, or "Unknown label"."""
    return str(METALabelDict.get(labelID, "Unknown label"))
68
+
69
+
70
class table_M_E_T_A_(DefaultTable.DefaultTable):
    """Apple 'META' glyph-metadata table.

    Associates glyphs with (labelID, UTF-8 string) records.  The header's
    ``metaFlags`` field selects the offset width: 0 means 16-bit offsets,
    1 means 32-bit offsets.
    """

    dependencies = []

    def decompile(self, data, ttFont):
        """Unpack the header, glyph records, string records and string data."""
        dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
        self.glyphRecords = []
        for i in range(self.nMetaRecs):
            glyphRecord, newData = sstruct.unpack2(
                METAGlyphRecordFormat, newData, GlyphRecord()
            )
            if self.metaFlags == 0:
                [glyphRecord.offset] = struct.unpack(">H", newData[:2])
                newData = newData[2:]
            elif self.metaFlags == 1:
                # 32-bit offsets must be unpacked as ">L"; the previous ">H"
                # on a 4-byte slice always raised struct.error.
                [glyphRecord.offset] = struct.unpack(">L", newData[:4])
                newData = newData[4:]
            else:
                assert 0, (
                    "The metaFlags field in the META table header has a value other than 0 or 1 :"
                    + str(self.metaFlags)
                )
            glyphRecord.stringRecs = []
            newData = data[glyphRecord.offset :]
            for j in range(glyphRecord.nMetaEntry):
                stringRec, newData = sstruct.unpack2(
                    METAStringRecordFormat, newData, StringRecord()
                )
                if self.metaFlags == 0:
                    [stringRec.offset] = struct.unpack(">H", newData[:2])
                    newData = newData[2:]
                else:
                    # metaFlags == 1: 32-bit string offset (">L", not ">H").
                    [stringRec.offset] = struct.unpack(">L", newData[:4])
                    newData = newData[4:]
                stringRec.string = data[
                    stringRec.offset : stringRec.offset + stringRec.stringLen
                ]
                glyphRecord.stringRecs.append(stringRec)
            self.glyphRecords.append(glyphRecord)

    def compile(self, ttFont):
        """Serialize the table, retrying with wider/narrower offsets as needed.

        The loop recompiles whenever an offset overflows 16 bits (widen to
        32-bit offsets) or all offsets fit again (narrow back), until the
        chosen ``metaFlags`` width is consistent with the computed offsets.
        """
        offsetOK = 0
        self.nMetaRecs = len(self.glyphRecords)
        count = 0
        while offsetOK != 1:
            count = count + 1
            if count > 4:
                # NOTE(review): leftover debugger hook; the width flip-flops
                # at most twice, so reaching this indicates a logic error.
                pdb.set_trace()
            metaData = sstruct.pack(METAHeaderFormat, self)
            stringRecsOffset = len(metaData) + self.nMetaRecs * (
                6 + 2 * (self.metaFlags & 1)
            )
            stringRecSize = 6 + 2 * (self.metaFlags & 1)
            for glyphRec in self.glyphRecords:
                glyphRec.offset = stringRecsOffset
                if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
                    # Offset no longer fits in a USHORT: switch to ULONGs.
                    self.metaFlags = self.metaFlags + 1
                    offsetOK = -1
                    break
                metaData = metaData + glyphRec.compile(self)
                stringRecsOffset = stringRecsOffset + (
                    glyphRec.nMetaEntry * stringRecSize
                )
                # this will be the String Record offset for the next GlyphRecord.
            if offsetOK == -1:
                offsetOK = 0
                continue

            # metaData now contains the header and all of the GlyphRecords. Its length should bw
            # the offset to the first StringRecord.
            stringOffset = stringRecsOffset
            for glyphRec in self.glyphRecords:
                assert glyphRec.offset == len(
                    metaData
                ), "Glyph record offset did not compile correctly! for rec:" + str(
                    glyphRec
                )
                for stringRec in glyphRec.stringRecs:
                    stringRec.offset = stringOffset
                    if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
                        self.metaFlags = self.metaFlags + 1
                        offsetOK = -1
                        break
                    metaData = metaData + stringRec.compile(self)
                    stringOffset = stringOffset + stringRec.stringLen
            if offsetOK == -1:
                offsetOK = 0
                continue

            if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
                # Everything fits in USHORTs after all: narrow and recompile.
                self.metaFlags = self.metaFlags - 1
                continue
            else:
                offsetOK = 1

            # metaData now contains the header and all of the GlyphRecords and all of the String Records.
            # Its length should be the offset to the first string datum.
            for glyphRec in self.glyphRecords:
                for stringRec in glyphRec.stringRecs:
                    assert stringRec.offset == len(
                        metaData
                    ), "String offset did not compile correctly! for string:" + str(
                        stringRec.string
                    )
                    metaData = metaData + stringRec.string

        return metaData

    def toXML(self, writer, ttFont):
        writer.comment(
            "Lengths and number of entries in this table will be recalculated by the compiler"
        )
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        for glyphRec in self.glyphRecords:
            glyphRec.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GlyphRecord":
            if not hasattr(self, "glyphRecords"):
                self.glyphRecords = []
            glyphRec = GlyphRecord()
            self.glyphRecords.append(glyphRec)
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                glyphRec.fromXML(name, attrs, content, ttFont)
            glyphRec.offset = -1
            glyphRec.nMetaEntry = len(glyphRec.stringRecs)
        else:
            setattr(self, name, safeEval(attrs["value"]))
205
+
206
+
207
class GlyphRecord(object):
    """One glyph entry of the META table: glyph id plus its string records."""

    def __init__(self):
        self.glyphID = -1
        self.nMetaEntry = -1
        self.offset = -1
        self.stringRecs = []

    def toXML(self, writer, ttFont):
        writer.begintag("GlyphRecord")
        writer.newline()
        writer.simpletag("glyphID", value=self.glyphID)
        writer.newline()
        writer.simpletag("nMetaEntry", value=self.nMetaEntry)
        writer.newline()
        for rec in self.stringRecs:
            rec.toXML(writer, ttFont)
        writer.endtag("GlyphRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name != "StringRecord":
            setattr(self, name, safeEval(attrs["value"]))
            return
        rec = StringRecord()
        self.stringRecs.append(rec)
        # StringRecord.fromXML walks `content` itself; this loop merely
        # repeats the call once per non-text child element.
        for element in content:
            if isinstance(element, str):
                continue
            rec.fromXML(name, attrs, content, ttFont)
            rec.stringLen = len(rec.string)

    def compile(self, parentTable):
        # Offset field width is chosen by the parent table's metaFlags.
        data = sstruct.pack(METAGlyphRecordFormat, self)
        if parentTable.metaFlags == 0:
            datum = struct.pack(">H", self.offset)
        elif parentTable.metaFlags == 1:
            datum = struct.pack(">L", self.offset)
        return data + datum

    def __repr__(self):
        return "GlyphRecord[ glyphID: %s, nMetaEntry: %s, offset: %s ]" % (
            self.glyphID,
            self.nMetaEntry,
            self.offset,
        )
257
+
258
+
259
+ # XXX The following two functions are really broken around UTF-8 vs Unicode
260
+
261
+
262
def mapXMLToUTF8(string):
    """Decode ``&#x..;`` (or double-escaped ``&amp;#x..;``) hex character
    references in *string* and return the result as UTF-8 bytes.

    Inverse of :func:`mapUTF8toXML`.
    """
    uString = str()
    strLen = len(string)
    i = 0
    while i < strLen:
        prefixLen = 0
        if string[i : i + 3] == "&#x":
            prefixLen = 3
        elif string[i : i + 7] == "&amp;#x":
            prefixLen = 7
        if prefixLen:
            i = i + prefixLen
            j = i
            while string[i] != ";":
                i = i + 1
            valStr = string[j:i]

            # int(valStr, 16) instead of eval("0x"+valStr): same result for
            # well-formed hex, without evaluating document-supplied text.
            uString = uString + chr(int(valStr, 16))
        else:
            uString = uString + chr(byteord(string[i]))
        i = i + 1

    return uString.encode("utf_8")
285
+
286
+
287
def mapUTF8toXML(string):
    """Decode UTF-8 bytes and escape every control or non-ASCII character
    as a ``&#x..;`` hex reference, returning a str."""
    parts = []
    for ch in string.decode("utf_8"):
        code = ord(ch)
        # Printable ASCII passes through; everything else gets escaped.
        if 0x1F < code < 0x80:
            parts.append(ch)
        else:
            parts.append("&#x" + hex(code)[2:] + ";")
    return "".join(parts)
297
+
298
+
299
class StringRecord(object):
    """One (labelID, string) metadata record of the META table."""

    def toXML(self, writer, ttFont):
        writer.begintag("StringRecord")
        writer.newline()
        writer.simpletag("labelID", value=self.labelID)
        writer.comment(getLabelString(self.labelID))
        writer.newline()
        writer.newline()
        writer.simpletag("string", value=mapUTF8toXML(self.string))
        writer.newline()
        writer.endtag("StringRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        for element in content:
            if isinstance(element, str):
                continue
            tag, tagAttrs, _ = element
            value = tagAttrs["value"]
            if tag == "string":
                self.string = mapXMLToUTF8(value)
            else:
                setattr(self, tag, safeEval(value))

    def compile(self, parentTable):
        # Offset field width is chosen by the parent table's metaFlags.
        data = sstruct.pack(METAStringRecordFormat, self)
        if parentTable.metaFlags == 0:
            datum = struct.pack(">H", self.offset)
        elif parentTable.metaFlags == 1:
            datum = struct.pack(">L", self.offset)
        return data + datum

    def __repr__(self):
        # Kept as plain concatenation to mirror the original exactly
        # (self.string is concatenated as-is).
        return (
            "StringRecord [ labelID: "
            + str(self.labelID)
            + " aka "
            + getLabelString(self.labelID)
            + ", offset: "
            + str(self.offset)
            + ", length: "
            + str(self.stringLen)
            + ", string: "
            + self.string
            + " ]"
        )
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/O_S_2f_2.py ADDED
@@ -0,0 +1,745 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.roundTools import otRound
3
+ from fontTools.misc.textTools import safeEval, num2binary, binary2num
4
+ from fontTools.ttLib.tables import DefaultTable
5
+ import bisect
6
+ import logging
7
+
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+ # panose classification
12
+
13
+ panoseFormat = """
14
+ bFamilyType: B
15
+ bSerifStyle: B
16
+ bWeight: B
17
+ bProportion: B
18
+ bContrast: B
19
+ bStrokeVariation: B
20
+ bArmStyle: B
21
+ bLetterForm: B
22
+ bMidline: B
23
+ bXHeight: B
24
+ """
25
+
26
+
27
class Panose(object):
    """The ten-byte PANOSE classification record of the OS/2 table."""

    def __init__(self, **kwargs):
        _, names, _ = sstruct.getformat(panoseFormat)
        # Each PANOSE field defaults to 0 unless supplied as a keyword.
        for name in names:
            setattr(self, name, kwargs.pop(name, 0))
        for k in kwargs:
            raise TypeError(f"Panose() got an unexpected keyword argument {k!r}")

    def toXML(self, writer, ttFont):
        _, names, _ = sstruct.getformat(panoseFormat)
        for name in names:
            writer.simpletag(name, value=getattr(self, name))
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        setattr(self, name, safeEval(attrs["value"]))
43
+
44
+
45
+ # 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
46
+
47
+ OS2_format_0 = """
48
+ > # big endian
49
+ version: H # version
50
+ xAvgCharWidth: h # average character width
51
+ usWeightClass: H # degree of thickness of strokes
52
+ usWidthClass: H # aspect ratio
53
+ fsType: H # type flags
54
+ ySubscriptXSize: h # subscript horizontal font size
55
+ ySubscriptYSize: h # subscript vertical font size
56
+ ySubscriptXOffset: h # subscript x offset
57
+ ySubscriptYOffset: h # subscript y offset
58
+ ySuperscriptXSize: h # superscript horizontal font size
59
+ ySuperscriptYSize: h # superscript vertical font size
60
+ ySuperscriptXOffset: h # superscript x offset
61
+ ySuperscriptYOffset: h # superscript y offset
62
+ yStrikeoutSize: h # strikeout size
63
+ yStrikeoutPosition: h # strikeout position
64
+ sFamilyClass: h # font family class and subclass
65
+ panose: 10s # panose classification number
66
+ ulUnicodeRange1: L # character range
67
+ ulUnicodeRange2: L # character range
68
+ ulUnicodeRange3: L # character range
69
+ ulUnicodeRange4: L # character range
70
+ achVendID: 4s # font vendor identification
71
+ fsSelection: H # font selection flags
72
+ usFirstCharIndex: H # first unicode character index
73
+ usLastCharIndex: H # last unicode character index
74
+ sTypoAscender: h # typographic ascender
75
+ sTypoDescender: h # typographic descender
76
+ sTypoLineGap: h # typographic line gap
77
+ usWinAscent: H # Windows ascender
78
+ usWinDescent: H # Windows descender
79
+ """
80
+
81
+ OS2_format_1_addition = """
82
+ ulCodePageRange1: L
83
+ ulCodePageRange2: L
84
+ """
85
+
86
+ OS2_format_2_addition = (
87
+ OS2_format_1_addition
88
+ + """
89
+ sxHeight: h
90
+ sCapHeight: h
91
+ usDefaultChar: H
92
+ usBreakChar: H
93
+ usMaxContext: H
94
+ """
95
+ )
96
+
97
+ OS2_format_5_addition = (
98
+ OS2_format_2_addition
99
+ + """
100
+ usLowerOpticalPointSize: H
101
+ usUpperOpticalPointSize: H
102
+ """
103
+ )
104
+
105
+ bigendian = " > # big endian\n"
106
+
107
+ OS2_format_1 = OS2_format_0 + OS2_format_1_addition
108
+ OS2_format_2 = OS2_format_0 + OS2_format_2_addition
109
+ OS2_format_5 = OS2_format_0 + OS2_format_5_addition
110
+ OS2_format_1_addition = bigendian + OS2_format_1_addition
111
+ OS2_format_2_addition = bigendian + OS2_format_2_addition
112
+ OS2_format_5_addition = bigendian + OS2_format_5_addition
113
+
114
+
115
class table_O_S_2f_2(DefaultTable.DefaultTable):
    """the OS/2 table"""

    dependencies = ["head"]

    def decompile(self, data, ttFont):
        """Unpack the version-dependent binary layout into attributes."""
        dummy, data = sstruct.unpack2(OS2_format_0, data, self)

        if self.version == 1:
            dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
        elif self.version in (2, 3, 4):
            dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
        elif self.version == 5:
            dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
            # Optical point sizes are stored in twentieths of a point.
            self.usLowerOpticalPointSize /= 20
            self.usUpperOpticalPointSize /= 20
        elif self.version != 0:
            from fontTools import ttLib

            raise ttLib.TTLibError(
                "unknown format for OS/2 table: version %s" % self.version
            )
        if len(data):
            log.warning("too much 'OS/2' table data")

        self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())

    def compile(self, ttFont):
        """Pack the table, warning about inconsistent style flags first."""
        self.updateFirstAndLastCharIndex(ttFont)
        # Temporarily replace the Panose object with its packed bytes so the
        # whole struct can be packed; restored before returning.
        panose = self.panose
        head = ttFont["head"]
        if (self.fsSelection & 1) and not (head.macStyle & 1 << 1):
            log.warning(
                "fsSelection bit 0 (italic) and "
                "head table macStyle bit 1 (italic) should match"
            )
        if (self.fsSelection & 1 << 5) and not (head.macStyle & 1):
            log.warning(
                "fsSelection bit 5 (bold) and "
                "head table macStyle bit 0 (bold) should match"
            )
        # mask 0b100001 == bits 0 (italic) and 5 (bold); parenthesized for
        # clarity (same value as the former `1 + (1 << 5)` via precedence).
        if (self.fsSelection & 1 << 6) and (self.fsSelection & (1 + (1 << 5))):
            log.warning(
                "fsSelection bit 6 (regular) is set, "
                "bits 0 (italic) and 5 (bold) must be clear"
            )
        if self.version < 4 and self.fsSelection & 0b1110000000:
            log.warning(
                "fsSelection bits 7, 8 and 9 are only defined in "
                "OS/2 table version 4 and up: version %s",
                self.version,
            )
        self.panose = sstruct.pack(panoseFormat, self.panose)
        if self.version == 0:
            data = sstruct.pack(OS2_format_0, self)
        elif self.version == 1:
            data = sstruct.pack(OS2_format_1, self)
        elif self.version in (2, 3, 4):
            data = sstruct.pack(OS2_format_2, self)
        elif self.version == 5:
            d = self.__dict__.copy()
            d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20)
            d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20)
            data = sstruct.pack(OS2_format_5, d)
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError(
                "unknown format for OS/2 table: version %s" % self.version
            )
        self.panose = panose
        return data

    def toXML(self, writer, ttFont):
        writer.comment(
            "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
            "will be recalculated by the compiler"
        )
        writer.newline()
        if self.version == 1:
            format = OS2_format_1
        elif self.version in (2, 3, 4):
            format = OS2_format_2
        elif self.version == 5:
            format = OS2_format_5
        else:
            format = OS2_format_0
        formatstring, names, fixes = sstruct.getformat(format)
        for name in names:
            value = getattr(self, name)
            if name == "panose":
                writer.begintag("panose")
                writer.newline()
                value.toXML(writer, ttFont)
                writer.endtag("panose")
            elif name in (
                "ulUnicodeRange1",
                "ulUnicodeRange2",
                "ulUnicodeRange3",
                "ulUnicodeRange4",
                "ulCodePageRange1",
                "ulCodePageRange2",
            ):
                writer.simpletag(name, value=num2binary(value))
            elif name in ("fsType", "fsSelection"):
                writer.simpletag(name, value=num2binary(value, 16))
            elif name == "achVendID":
                writer.simpletag(name, value=repr(value)[1:-1])
            else:
                writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "panose":
            self.panose = panose = Panose()
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    panose.fromXML(name, attrs, content, ttFont)
        elif name in (
            "ulUnicodeRange1",
            "ulUnicodeRange2",
            "ulUnicodeRange3",
            "ulUnicodeRange4",
            "ulCodePageRange1",
            "ulCodePageRange2",
            "fsType",
            "fsSelection",
        ):
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "achVendID":
            setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
        else:
            setattr(self, name, safeEval(attrs["value"]))

    def updateFirstAndLastCharIndex(self, ttFont):
        """Recompute usFirst/LastCharIndex from the Unicode cmap subtables."""
        if "cmap" not in ttFont:
            return
        codes = set()
        for table in getattr(ttFont["cmap"], "tables", []):
            if table.isUnicode():
                codes.update(table.cmap.keys())
        if codes:
            minCode = min(codes)
            maxCode = max(codes)
            # USHORT cannot hold codepoints greater than 0xFFFF
            self.usFirstCharIndex = min(0xFFFF, minCode)
            self.usLastCharIndex = min(0xFFFF, maxCode)

    # misspelled attributes kept for legacy reasons

    @property
    def usMaxContex(self):
        return self.usMaxContext

    @usMaxContex.setter
    def usMaxContex(self, value):
        self.usMaxContext = value

    @property
    def fsFirstCharIndex(self):
        return self.usFirstCharIndex

    @fsFirstCharIndex.setter
    def fsFirstCharIndex(self, value):
        self.usFirstCharIndex = value

    @property
    def fsLastCharIndex(self):
        return self.usLastCharIndex

    @fsLastCharIndex.setter
    def fsLastCharIndex(self, value):
        self.usLastCharIndex = value

    def getUnicodeRanges(self):
        """Return the set of 'ulUnicodeRange*' bits currently enabled."""
        bits = set()
        ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
        ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
        for i in range(32):
            if ul1 & (1 << i):
                bits.add(i)
            if ul2 & (1 << i):
                bits.add(i + 32)
            if ul3 & (1 << i):
                bits.add(i + 64)
            if ul4 & (1 << i):
                bits.add(i + 96)
        return bits

    def setUnicodeRanges(self, bits):
        """Set the 'ulUnicodeRange*' fields to the specified 'bits'."""
        ul1, ul2, ul3, ul4 = 0, 0, 0, 0
        for bit in bits:
            if 0 <= bit < 32:
                ul1 |= 1 << bit
            elif 32 <= bit < 64:
                ul2 |= 1 << (bit - 32)
            elif 64 <= bit < 96:
                ul3 |= 1 << (bit - 64)
            elif 96 <= bit < 123:
                ul4 |= 1 << (bit - 96)
            else:
                raise ValueError("expected 0 <= int <= 122, found: %r" % bit)
        self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
        self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4

    def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
        """Intersect the codepoints in the font's Unicode cmap subtables with
        the Unicode block ranges defined in the OpenType specification (v1.7),
        and set the respective 'ulUnicodeRange*' bits if there is at least ONE
        intersection.
        If 'pruneOnly' is True, only clear unused bits with NO intersection.
        """
        unicodes = set()
        for table in ttFont["cmap"].tables:
            if table.isUnicode():
                unicodes.update(table.cmap.keys())
        if pruneOnly:
            empty = intersectUnicodeRanges(unicodes, inverse=True)
            bits = self.getUnicodeRanges() - empty
        else:
            bits = intersectUnicodeRanges(unicodes)
        self.setUnicodeRanges(bits)
        return bits

    def getCodePageRanges(self):
        """Return the set of 'ulCodePageRange*' bits currently enabled."""
        bits = set()
        if self.version < 1:
            # Code page ranges only exist from version 1 onwards.
            return bits
        ul1, ul2 = self.ulCodePageRange1, self.ulCodePageRange2
        for i in range(32):
            if ul1 & (1 << i):
                bits.add(i)
            if ul2 & (1 << i):
                bits.add(i + 32)
        return bits

    def setCodePageRanges(self, bits):
        """Set the 'ulCodePageRange*' fields to the specified 'bits'."""
        ul1, ul2 = 0, 0
        for bit in bits:
            if 0 <= bit < 32:
                ul1 |= 1 << bit
            elif 32 <= bit < 64:
                ul2 |= 1 << (bit - 32)
            else:
                # {bit!r} (conversion), not the invalid format spec {bit:r}
                # which itself raised "Unknown format code 'r'".
                raise ValueError(f"expected 0 <= int <= 63, found: {bit!r}")
        if self.version < 1:
            # Upgrade to version 1, the first to carry code page ranges.
            self.version = 1
        self.ulCodePageRange1, self.ulCodePageRange2 = ul1, ul2

    def recalcCodePageRanges(self, ttFont, pruneOnly=False):
        """Recompute code page range bits from the font's Unicode cmaps.

        Relies on module-level ``calcCodePageRanges`` (defined elsewhere in
        this module) for the codepoint-to-codepage mapping.
        """
        unicodes = set()
        for table in ttFont["cmap"].tables:
            if table.isUnicode():
                unicodes.update(table.cmap.keys())
        bits = calcCodePageRanges(unicodes)
        if pruneOnly:
            bits &= self.getCodePageRanges()
        # when no codepage ranges can be enabled, fall back to enabling bit 0
        # (Latin 1) so that the font works in MS Word:
        # https://github.com/googlei18n/fontmake/issues/468
        if not bits:
            bits = {0}
        self.setCodePageRanges(bits)
        return bits

    def recalcAvgCharWidth(self, ttFont):
        """Recalculate xAvgCharWidth using metrics from ttFont's 'hmtx' table.

        Set it to 0 if the unlikely event 'hmtx' table is not found.
        """
        avg_width = 0
        hmtx = ttFont.get("hmtx")
        if hmtx is not None:
            widths = [width for width, _ in hmtx.metrics.values() if width > 0]
            if widths:
                avg_width = otRound(sum(widths) / len(widths))
        self.xAvgCharWidth = avg_width
        return avg_width
398
+
399
+
400
+ # Unicode ranges data from the OpenType OS/2 table specification v1.7
401
+
402
+ OS2_UNICODE_RANGES = (
403
+ (("Basic Latin", (0x0000, 0x007F)),),
404
+ (("Latin-1 Supplement", (0x0080, 0x00FF)),),
405
+ (("Latin Extended-A", (0x0100, 0x017F)),),
406
+ (("Latin Extended-B", (0x0180, 0x024F)),),
407
+ (
408
+ ("IPA Extensions", (0x0250, 0x02AF)),
409
+ ("Phonetic Extensions", (0x1D00, 0x1D7F)),
410
+ ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)),
411
+ ),
412
+ (
413
+ ("Spacing Modifier Letters", (0x02B0, 0x02FF)),
414
+ ("Modifier Tone Letters", (0xA700, 0xA71F)),
415
+ ),
416
+ (
417
+ ("Combining Diacritical Marks", (0x0300, 0x036F)),
418
+ ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)),
419
+ ),
420
+ (("Greek and Coptic", (0x0370, 0x03FF)),),
421
+ (("Coptic", (0x2C80, 0x2CFF)),),
422
+ (
423
+ ("Cyrillic", (0x0400, 0x04FF)),
424
+ ("Cyrillic Supplement", (0x0500, 0x052F)),
425
+ ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)),
426
+ ("Cyrillic Extended-B", (0xA640, 0xA69F)),
427
+ ),
428
+ (("Armenian", (0x0530, 0x058F)),),
429
+ (("Hebrew", (0x0590, 0x05FF)),),
430
+ (("Vai", (0xA500, 0xA63F)),),
431
+ (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))),
432
+ (("NKo", (0x07C0, 0x07FF)),),
433
+ (("Devanagari", (0x0900, 0x097F)),),
434
+ (("Bengali", (0x0980, 0x09FF)),),
435
+ (("Gurmukhi", (0x0A00, 0x0A7F)),),
436
+ (("Gujarati", (0x0A80, 0x0AFF)),),
437
+ (("Oriya", (0x0B00, 0x0B7F)),),
438
+ (("Tamil", (0x0B80, 0x0BFF)),),
439
+ (("Telugu", (0x0C00, 0x0C7F)),),
440
+ (("Kannada", (0x0C80, 0x0CFF)),),
441
+ (("Malayalam", (0x0D00, 0x0D7F)),),
442
+ (("Thai", (0x0E00, 0x0E7F)),),
443
+ (("Lao", (0x0E80, 0x0EFF)),),
444
+ (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))),
445
+ (("Balinese", (0x1B00, 0x1B7F)),),
446
+ (("Hangul Jamo", (0x1100, 0x11FF)),),
447
+ (
448
+ ("Latin Extended Additional", (0x1E00, 0x1EFF)),
449
+ ("Latin Extended-C", (0x2C60, 0x2C7F)),
450
+ ("Latin Extended-D", (0xA720, 0xA7FF)),
451
+ ),
452
+ (("Greek Extended", (0x1F00, 0x1FFF)),),
453
+ (
454
+ ("General Punctuation", (0x2000, 0x206F)),
455
+ ("Supplemental Punctuation", (0x2E00, 0x2E7F)),
456
+ ),
457
+ (("Superscripts And Subscripts", (0x2070, 0x209F)),),
458
+ (("Currency Symbols", (0x20A0, 0x20CF)),),
459
+ (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),),
460
+ (("Letterlike Symbols", (0x2100, 0x214F)),),
461
+ (("Number Forms", (0x2150, 0x218F)),),
462
+ (
463
+ ("Arrows", (0x2190, 0x21FF)),
464
+ ("Supplemental Arrows-A", (0x27F0, 0x27FF)),
465
+ ("Supplemental Arrows-B", (0x2900, 0x297F)),
466
+ ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)),
467
+ ),
468
+ (
469
+ ("Mathematical Operators", (0x2200, 0x22FF)),
470
+ ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)),
471
+ ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)),
472
+ ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)),
473
+ ),
474
+ (("Miscellaneous Technical", (0x2300, 0x23FF)),),
475
+ (("Control Pictures", (0x2400, 0x243F)),),
476
+ (("Optical Character Recognition", (0x2440, 0x245F)),),
477
+ (("Enclosed Alphanumerics", (0x2460, 0x24FF)),),
478
+ (("Box Drawing", (0x2500, 0x257F)),),
479
+ (("Block Elements", (0x2580, 0x259F)),),
480
+ (("Geometric Shapes", (0x25A0, 0x25FF)),),
481
+ (("Miscellaneous Symbols", (0x2600, 0x26FF)),),
482
+ (("Dingbats", (0x2700, 0x27BF)),),
483
+ (("CJK Symbols And Punctuation", (0x3000, 0x303F)),),
484
+ (("Hiragana", (0x3040, 0x309F)),),
485
+ (
486
+ ("Katakana", (0x30A0, 0x30FF)),
487
+ ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)),
488
+ ),
489
+ (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))),
490
+ (("Hangul Compatibility Jamo", (0x3130, 0x318F)),),
491
+ (("Phags-pa", (0xA840, 0xA87F)),),
492
+ (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),),
493
+ (("CJK Compatibility", (0x3300, 0x33FF)),),
494
+ (("Hangul Syllables", (0xAC00, 0xD7AF)),),
495
+ (("Non-Plane 0 *", (0xD800, 0xDFFF)),),
496
+ (("Phoenician", (0x10900, 0x1091F)),),
497
+ (
498
+ ("CJK Unified Ideographs", (0x4E00, 0x9FFF)),
499
+ ("CJK Radicals Supplement", (0x2E80, 0x2EFF)),
500
+ ("Kangxi Radicals", (0x2F00, 0x2FDF)),
501
+ ("Ideographic Description Characters", (0x2FF0, 0x2FFF)),
502
+ ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)),
503
+ ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)),
504
+ ("Kanbun", (0x3190, 0x319F)),
505
+ ),
506
+ (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),),
507
+ (
508
+ ("CJK Strokes", (0x31C0, 0x31EF)),
509
+ ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)),
510
+ ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)),
511
+ ),
512
+ (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),),
513
+ (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),),
514
+ (("Combining Half Marks", (0xFE20, 0xFE2F)),),
515
+ (
516
+ ("Vertical Forms", (0xFE10, 0xFE1F)),
517
+ ("CJK Compatibility Forms", (0xFE30, 0xFE4F)),
518
+ ),
519
+ (("Small Form Variants", (0xFE50, 0xFE6F)),),
520
+ (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),),
521
+ (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),),
522
+ (("Specials", (0xFFF0, 0xFFFF)),),
523
+ (("Tibetan", (0x0F00, 0x0FFF)),),
524
+ (("Syriac", (0x0700, 0x074F)),),
525
+ (("Thaana", (0x0780, 0x07BF)),),
526
+ (("Sinhala", (0x0D80, 0x0DFF)),),
527
+ (("Myanmar", (0x1000, 0x109F)),),
528
+ (
529
+ ("Ethiopic", (0x1200, 0x137F)),
530
+ ("Ethiopic Supplement", (0x1380, 0x139F)),
531
+ ("Ethiopic Extended", (0x2D80, 0x2DDF)),
532
+ ),
533
+ (("Cherokee", (0x13A0, 0x13FF)),),
534
+ (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),),
535
+ (("Ogham", (0x1680, 0x169F)),),
536
+ (("Runic", (0x16A0, 0x16FF)),),
537
+ (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))),
538
+ (("Mongolian", (0x1800, 0x18AF)),),
539
+ (("Braille Patterns", (0x2800, 0x28FF)),),
540
+ (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))),
541
+ (
542
+ ("Tagalog", (0x1700, 0x171F)),
543
+ ("Hanunoo", (0x1720, 0x173F)),
544
+ ("Buhid", (0x1740, 0x175F)),
545
+ ("Tagbanwa", (0x1760, 0x177F)),
546
+ ),
547
+ (("Old Italic", (0x10300, 0x1032F)),),
548
+ (("Gothic", (0x10330, 0x1034F)),),
549
+ (("Deseret", (0x10400, 0x1044F)),),
550
+ (
551
+ ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)),
552
+ ("Musical Symbols", (0x1D100, 0x1D1FF)),
553
+ ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)),
554
+ ),
555
+ (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),),
556
+ (
557
+ ("Private Use (plane 15)", (0xF0000, 0xFFFFD)),
558
+ ("Private Use (plane 16)", (0x100000, 0x10FFFD)),
559
+ ),
560
+ (
561
+ ("Variation Selectors", (0xFE00, 0xFE0F)),
562
+ ("Variation Selectors Supplement", (0xE0100, 0xE01EF)),
563
+ ),
564
+ (("Tags", (0xE0000, 0xE007F)),),
565
+ (("Limbu", (0x1900, 0x194F)),),
566
+ (("Tai Le", (0x1950, 0x197F)),),
567
+ (("New Tai Lue", (0x1980, 0x19DF)),),
568
+ (("Buginese", (0x1A00, 0x1A1F)),),
569
+ (("Glagolitic", (0x2C00, 0x2C5F)),),
570
+ (("Tifinagh", (0x2D30, 0x2D7F)),),
571
+ (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),),
572
+ (("Syloti Nagri", (0xA800, 0xA82F)),),
573
+ (
574
+ ("Linear B Syllabary", (0x10000, 0x1007F)),
575
+ ("Linear B Ideograms", (0x10080, 0x100FF)),
576
+ ("Aegean Numbers", (0x10100, 0x1013F)),
577
+ ),
578
+ (("Ancient Greek Numbers", (0x10140, 0x1018F)),),
579
+ (("Ugaritic", (0x10380, 0x1039F)),),
580
+ (("Old Persian", (0x103A0, 0x103DF)),),
581
+ (("Shavian", (0x10450, 0x1047F)),),
582
+ (("Osmanya", (0x10480, 0x104AF)),),
583
+ (("Cypriot Syllabary", (0x10800, 0x1083F)),),
584
+ (("Kharoshthi", (0x10A00, 0x10A5F)),),
585
+ (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),),
586
+ (
587
+ ("Cuneiform", (0x12000, 0x123FF)),
588
+ ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)),
589
+ ),
590
+ (("Counting Rod Numerals", (0x1D360, 0x1D37F)),),
591
+ (("Sundanese", (0x1B80, 0x1BBF)),),
592
+ (("Lepcha", (0x1C00, 0x1C4F)),),
593
+ (("Ol Chiki", (0x1C50, 0x1C7F)),),
594
+ (("Saurashtra", (0xA880, 0xA8DF)),),
595
+ (("Kayah Li", (0xA900, 0xA92F)),),
596
+ (("Rejang", (0xA930, 0xA95F)),),
597
+ (("Cham", (0xAA00, 0xAA5F)),),
598
+ (("Ancient Symbols", (0x10190, 0x101CF)),),
599
+ (("Phaistos Disc", (0x101D0, 0x101FF)),),
600
+ (
601
+ ("Carian", (0x102A0, 0x102DF)),
602
+ ("Lycian", (0x10280, 0x1029F)),
603
+ ("Lydian", (0x10920, 0x1093F)),
604
+ ),
605
+ (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))),
606
+ )
607
+
608
+
609
# Lazily-built parallel arrays for bisect lookups of Unicode range bits:
# _unicodeStarts[i] holds a block's first codepoint, and the matching
# (last codepoint, bit number) pair lives at _unicodeValues[i + 1].
# Index 0 of _unicodeValues is a sentinel for codes before any start.
_unicodeStarts = []
_unicodeValues = [None]


def _getUnicodeRanges():
    """Return the (starts, values) lookup arrays for OS2_UNICODE_RANGES.

    The arrays are built on first use — flattened from the per-bit block
    lists and sorted by block start so they can be binary-searched — and
    cached at module level for subsequent calls.
    """
    if not _unicodeStarts:
        flattened = sorted(
            (start, (stop, bit))
            for bit, blocks in enumerate(OS2_UNICODE_RANGES)
            for _, (start, stop) in blocks
        )
        for start, value in flattened:
            _unicodeStarts.append(start)
            _unicodeValues.append(value)
    return _unicodeStarts, _unicodeValues
625
+
626
+
627
def intersectUnicodeRanges(unicodes, inverse=False):
    """Intersect a sequence of (int) Unicode codepoints with the Unicode block
    ranges defined in the OpenType specification v1.7, and return the set of
    'ulUnicodeRanges' bits for which there is at least ONE intersection.
    If 'inverse' is True, return the bits for which there is NO intersection.

    >>> intersectUnicodeRanges([0x0410]) == {9}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
    ... set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
    True
    """
    unicodes = set(unicodes)
    unicodestarts, unicodevalues = _getUnicodeRanges()
    bits = set()
    for code in unicodes:
        # binary-search the sorted block starts; the paired value tells us
        # the block's last codepoint and which range bit it belongs to
        stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)]
        if code <= stop:
            bits.add(bit)
    # The spec says that bit 57 ("Non Plane 0") implies that there's
    # at least one codepoint beyond the BMP; so I also include all
    # the non-BMP codepoints here
    if any(0x10000 <= code < 0x110000 for code in unicodes):
        bits.add(57)
    return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
654
+
655
+
656
def calcCodePageRanges(unicodes):
    """Given a set of Unicode codepoints (integers), calculate the
    corresponding OS/2 CodePage range bits.
    This is a direct translation of FontForge implementation:
    https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
    """
    bits = set()
    # marker conditions shared by several code pages, hoisted out of the loop
    has_ascii = set(range(0x20, 0x7E)).issubset(unicodes)
    has_lineart = ord("┤") in unicodes
    has_radical = ord("√") in unicodes

    for code in unicodes:
        # each codepoint is matched against at most ONE marker character
        if code == ord("Þ") and has_ascii:
            bits.add(0)  # Latin 1
        elif code == ord("Ľ") and has_ascii:
            bits.add(1)  # Latin 2: Eastern Europe
            if has_lineart:
                bits.add(58)  # Latin 2
        elif code == ord("Б"):
            bits.add(2)  # Cyrillic
            if ord("Ѕ") in unicodes and has_lineart:
                bits.add(57)  # IBM Cyrillic
            if ord("╜") in unicodes and has_lineart:
                bits.add(49)  # MS-DOS Russian
        elif code == ord("Ά"):
            bits.add(3)  # Greek
            if has_lineart and ord("½") in unicodes:
                bits.add(48)  # IBM Greek
            if has_lineart and has_radical:
                bits.add(60)  # Greek, former 437 G
        elif code == ord("İ") and has_ascii:
            bits.add(4)  # Turkish
            if has_lineart:
                bits.add(56)  # IBM turkish
        elif code == ord("א"):
            bits.add(5)  # Hebrew
            if has_lineart and has_radical:
                bits.add(53)  # Hebrew
        elif code == ord("ر"):
            bits.add(6)  # Arabic
            if has_radical:
                bits.add(51)  # Arabic
            if has_lineart:
                bits.add(61)  # Arabic; ASMO 708
        elif code == ord("ŗ") and has_ascii:
            bits.add(7)  # Windows Baltic
            if has_lineart:
                bits.add(59)  # MS-DOS Baltic
        elif code == ord("₫") and has_ascii:
            bits.add(8)  # Vietnamese
        elif code == ord("ๅ"):
            bits.add(16)  # Thai
        elif code == ord("エ"):
            bits.add(17)  # JIS/Japan
        elif code == ord("ㄅ"):
            bits.add(18)  # Chinese: Simplified
        elif code == ord("ㄱ"):
            bits.add(19)  # Korean wansung
        elif code == ord("央"):
            bits.add(20)  # Chinese: Traditional
        elif code == ord("곴"):
            bits.add(21)  # Korean Johab
        elif code == ord("♥") and has_ascii:
            bits.add(30)  # OEM Character Set
        # TODO: Symbol bit has a special meaning (check the spec), we need
        # to confirm if this is wanted by default.
        # elif chr(0xF000) <= char <= chr(0xF0FF):
        #     codepageRanges.add(31)  # Symbol Character Set
        elif code == ord("þ") and has_ascii and has_lineart:
            bits.add(54)  # MS-DOS Icelandic
        elif code == ord("╚") and has_ascii:
            bits.add(62)  # WE/Latin 1
            bits.add(63)  # US
        elif has_ascii and has_lineart and has_radical:
            if code == ord("Å"):
                bits.add(50)  # MS-DOS Nordic
            elif code == ord("é"):
                bits.add(52)  # MS-DOS Canadian French
            elif code == ord("õ"):
                bits.add(55)  # MS-DOS Portuguese

    if has_ascii and ord("‰") in unicodes and ord("∑") in unicodes:
        bits.add(29)  # Macintosh Character Set (US Roman)

    return bits
740
+
741
+
742
if __name__ == "__main__":
    # Run this module's doctests (e.g. the intersectUnicodeRanges examples)
    # and exit with the number of failures as the process status.
    import doctest, sys

    sys.exit(doctest.testmod().failed)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_S_T_A_T_(BaseTTXConverter):
    """The 'STAT' table.

    All binary (de)compilation and XML round-tripping is inherited
    generically from BaseTTXConverter; no table-specific logic is needed.
    """

    pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/S_V_G_.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compiles/decompiles SVG table.
2
+
3
+ https://docs.microsoft.com/en-us/typography/opentype/spec/svg
4
+
5
+ The XML format is:
6
+
7
+ .. code-block:: xml
8
+
9
+ <SVG>
10
+ <svgDoc endGlyphID="1" startGlyphID="1">
11
+ <![CDATA[ <complete SVG doc> ]]
12
+ </svgDoc>
13
+ ...
14
+ <svgDoc endGlyphID="n" startGlyphID="m">
15
+ <![CDATA[ <complete SVG doc> ]]
16
+ </svgDoc>
17
+ </SVG>
18
+ """
19
+
20
+ from fontTools.misc.textTools import bytesjoin, safeEval, strjoin, tobytes, tostr
21
+ from fontTools.misc import sstruct
22
+ from . import DefaultTable
23
+ from collections.abc import Sequence
24
+ from dataclasses import dataclass, astuple
25
+ from io import BytesIO
26
+ import struct
27
+ import logging
28
+
29
+
30
log = logging.getLogger(__name__)


# sstruct description of the fixed 'SVG ' table header (big endian).
SVG_format_0 = """
    >   # big endian
    version:                 H
    offsetToSVGDocIndex:     L
    reserved:                L
"""

SVG_format_0Size = sstruct.calcsize(SVG_format_0)

# sstruct description of one SVG Document Index entry (big endian).
doc_index_entry_format_0 = """
    >   # big endian
    startGlyphID:            H
    endGlyphID:              H
    svgDocOffset:            L
    svgDocLength:            L
"""

doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
51
+
52
+
53
class table_S_V_G_(DefaultTable.DefaultTable):
    def decompile(self, data, ttFont):
        """Parse the binary 'SVG ' table into self.docList.

        Each index entry becomes one SVGDocument; gzip-compressed documents
        (detected by the 1F 8B magic bytes) are inflated and flagged via
        the 'compressed' attribute.
        """
        self.docList = []
        # Version 0 is the standardized version of the table; and current.
        # https://www.microsoft.com/typography/otspec/svg.htm
        sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
        if self.version != 0:
            log.warning(
                "Unknown SVG table version '%s'. Decompiling as version 0.",
                self.version,
            )
        # read in SVG Documents Index
        # data starts with the first entry of the entry list.
        pos = subTableStart = self.offsetToSVGDocIndex
        self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0]
        pos += 2
        if self.numEntries > 0:
            data2 = data[pos:]
            entries = []
            for i in range(self.numEntries):
                record_data = data2[
                    i
                    * doc_index_entry_format_0Size : (i + 1)
                    * doc_index_entry_format_0Size
                ]
                docIndexEntry = sstruct.unpack(
                    doc_index_entry_format_0, record_data, DocumentIndexEntry()
                )
                entries.append(docIndexEntry)

            for entry in entries:
                # document offsets are relative to the start of the index
                start = entry.svgDocOffset + subTableStart
                end = start + entry.svgDocLength
                doc = data[start:end]
                compressed = False
                if doc.startswith(b"\x1f\x8b"):
                    # gzip magic number: inflate before decoding
                    import gzip

                    bytesIO = BytesIO(doc)
                    with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
                        doc = gunzipper.read()
                    del bytesIO
                    compressed = True
                doc = tostr(doc, "utf_8")
                self.docList.append(
                    SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
                )

    def compile(self, ttFont):
        """Serialize self.docList back into binary 'SVG ' table data.

        Byte-identical documents are deduplicated (they share one offset),
        and documents flagged compressed are re-gzipped reproducibly
        (mtime=0) when that actually saves space.
        """
        version = 0
        offsetToSVGDocIndex = (
            SVG_format_0Size  # I start the SVGDocIndex right after the header.
        )
        # get SGVDoc info.
        docList = []
        entryList = []
        numEntries = len(self.docList)
        datum = struct.pack(">H", numEntries)
        entryList.append(datum)
        curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
        seenDocs = {}
        allCompressed = getattr(self, "compressed", False)
        for i, doc in enumerate(self.docList):
            if isinstance(doc, (list, tuple)):
                # legacy 3-item form; normalize to SVGDocument in place
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            docBytes = tobytes(doc.data, encoding="utf_8")
            if (allCompressed or doc.compressed) and not docBytes.startswith(
                b"\x1f\x8b"
            ):
                import gzip

                bytesIO = BytesIO()
                # mtime=0 strips the useless timestamp and makes gzip output reproducible;
                # equivalent to `gzip -n`
                with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
                    gzipper.write(docBytes)
                gzipped = bytesIO.getvalue()
                if len(gzipped) < len(docBytes):
                    # only keep the compressed form when it is smaller
                    docBytes = gzipped
                del gzipped, bytesIO
            docLength = len(docBytes)
            if docBytes in seenDocs:
                # duplicate document: point this entry at the earlier copy
                docOffset = seenDocs[docBytes]
            else:
                docOffset = curOffset
                curOffset += docLength
                seenDocs[docBytes] = docOffset
                docList.append(docBytes)
            entry = struct.pack(
                ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength
            )
            entryList.append(entry)
        entryList.extend(docList)
        svgDocData = bytesjoin(entryList)

        reserved = 0
        header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
        data = [header, svgDocData]
        data = bytesjoin(data)
        return data

    def toXML(self, writer, ttFont):
        """Write each document as an <svgDoc> element wrapping CDATA."""
        for i, doc in enumerate(self.docList):
            if isinstance(doc, (list, tuple)):
                # legacy 3-item form; normalize to SVGDocument in place
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
            if doc.compressed:
                attrs["compressed"] = 1
            writer.begintag("svgDoc", **attrs)
            writer.newline()
            writer.writecdata(doc.data)
            writer.newline()
            writer.endtag("svgDoc")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read one <svgDoc> element back into self.docList."""
        if name == "svgDoc":
            if not hasattr(self, "docList"):
                self.docList = []
            doc = strjoin(content)
            doc = doc.strip()
            startGID = int(attrs["startGlyphID"])
            endGID = int(attrs["endGlyphID"])
            compressed = bool(safeEval(attrs.get("compressed", "0")))
            self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
        else:
            log.warning("Unknown %s %s", name, content)
182
+
183
+
184
class DocumentIndexEntry(object):
    """One record of the SVG Document Index: a glyph range plus the
    offset/length of its SVG document within the table."""

    def __init__(self):
        # All four fields are populated later by sstruct.unpack.
        self.startGlyphID = None  # USHORT
        self.endGlyphID = None  # USHORT
        self.svgDocOffset = None  # ULONG
        self.svgDocLength = None  # ULONG

    def __repr__(self):
        return (
            "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s"
            % (
                self.startGlyphID,
                self.endGlyphID,
                self.svgDocOffset,
                self.svgDocLength,
            )
        )
196
+
197
+
198
@dataclass
class SVGDocument(Sequence):
    """A single SVG document applying to glyphs startGlyphID..endGlyphID.

    Historically the SVG table's docList held plain 3-item lists
    [doc, startGlyphID, endGlyphID]; the 'compressed' flag was added later.
    To stay compatible with code that treats entries as length-3 sequences,
    sequence access (indexing, iteration, len) exposes only the first three
    fields; 'compressed' remains attribute-only: doc.compressed, never doc[3].
    """

    data: str
    startGlyphID: int
    endGlyphID: int
    compressed: bool = False

    def __getitem__(self, index):
        # expose only the legacy 3-item view
        return (self.data, self.startGlyphID, self.endGlyphID)[index]

    def __len__(self):
        return 3
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/S__i_l_f.py ADDED
@@ -0,0 +1,1037 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.fixedTools import floatToFixedToStr
3
+ from fontTools.misc.textTools import byteord, safeEval
4
+
5
+ # from itertools import *
6
+ from . import DefaultTable
7
+ from . import grUtils
8
+ from array import array
9
+ from functools import reduce
10
+ import struct, re, sys
11
+
12
+ Silf_hdr_format = """
13
+ >
14
+ version: 16.16F
15
+ """
16
+
17
+ Silf_hdr_format_3 = """
18
+ >
19
+ version: 16.16F
20
+ compilerVersion: L
21
+ numSilf: H
22
+ x
23
+ x
24
+ """
25
+
26
+ Silf_part1_format_v3 = """
27
+ >
28
+ ruleVersion: 16.16F
29
+ passOffset: H
30
+ pseudosOffset: H
31
+ """
32
+
33
+ Silf_part1_format = """
34
+ >
35
+ maxGlyphID: H
36
+ extraAscent: h
37
+ extraDescent: h
38
+ numPasses: B
39
+ iSubst: B
40
+ iPos: B
41
+ iJust: B
42
+ iBidi: B
43
+ flags: B
44
+ maxPreContext: B
45
+ maxPostContext: B
46
+ attrPseudo: B
47
+ attrBreakWeight: B
48
+ attrDirectionality: B
49
+ attrMirroring: B
50
+ attrSkipPasses: B
51
+ numJLevels: B
52
+ """
53
+
54
+ Silf_justify_format = """
55
+ >
56
+ attrStretch: B
57
+ attrShrink: B
58
+ attrStep: B
59
+ attrWeight: B
60
+ runto: B
61
+ x
62
+ x
63
+ x
64
+ """
65
+
66
+ Silf_part2_format = """
67
+ >
68
+ numLigComp: H
69
+ numUserDefn: B
70
+ maxCompPerLig: B
71
+ direction: B
72
+ attCollisions: B
73
+ x
74
+ x
75
+ x
76
+ numCritFeatures: B
77
+ """
78
+
79
+ Silf_pseudomap_format = """
80
+ >
81
+ unicode: L
82
+ nPseudo: H
83
+ """
84
+
85
+ Silf_pseudomap_format_h = """
86
+ >
87
+ unicode: H
88
+ nPseudo: H
89
+ """
90
+
91
+ Silf_classmap_format = """
92
+ >
93
+ numClass: H
94
+ numLinear: H
95
+ """
96
+
97
+ Silf_lookupclass_format = """
98
+ >
99
+ numIDs: H
100
+ searchRange: H
101
+ entrySelector: H
102
+ rangeShift: H
103
+ """
104
+
105
+ Silf_lookuppair_format = """
106
+ >
107
+ glyphId: H
108
+ index: H
109
+ """
110
+
111
+ Silf_pass_format = """
112
+ >
113
+ flags: B
114
+ maxRuleLoop: B
115
+ maxRuleContext: B
116
+ maxBackup: B
117
+ numRules: H
118
+ fsmOffset: H
119
+ pcCode: L
120
+ rcCode: L
121
+ aCode: L
122
+ oDebug: L
123
+ numRows: H
124
+ numTransitional: H
125
+ numSuccess: H
126
+ numColumns: H
127
+ """
128
+
129
+ aCode_info = (
130
+ ("NOP", 0),
131
+ ("PUSH_BYTE", "b"),
132
+ ("PUSH_BYTE_U", "B"),
133
+ ("PUSH_SHORT", ">h"),
134
+ ("PUSH_SHORT_U", ">H"),
135
+ ("PUSH_LONG", ">L"),
136
+ ("ADD", 0),
137
+ ("SUB", 0),
138
+ ("MUL", 0),
139
+ ("DIV", 0),
140
+ ("MIN", 0),
141
+ ("MAX", 0),
142
+ ("NEG", 0),
143
+ ("TRUNC8", 0),
144
+ ("TRUNC16", 0),
145
+ ("COND", 0),
146
+ ("AND", 0), # x10
147
+ ("OR", 0),
148
+ ("NOT", 0),
149
+ ("EQUAL", 0),
150
+ ("NOT_EQ", 0),
151
+ ("LESS", 0),
152
+ ("GTR", 0),
153
+ ("LESS_EQ", 0),
154
+ ("GTR_EQ", 0),
155
+ ("NEXT", 0),
156
+ ("NEXT_N", "b"),
157
+ ("COPY_NEXT", 0),
158
+ ("PUT_GLYPH_8BIT_OBS", "B"),
159
+ ("PUT_SUBS_8BIT_OBS", "bBB"),
160
+ ("PUT_COPY", "b"),
161
+ ("INSERT", 0),
162
+ ("DELETE", 0), # x20
163
+ ("ASSOC", -1),
164
+ ("CNTXT_ITEM", "bB"),
165
+ ("ATTR_SET", "B"),
166
+ ("ATTR_ADD", "B"),
167
+ ("ATTR_SUB", "B"),
168
+ ("ATTR_SET_SLOT", "B"),
169
+ ("IATTR_SET_SLOT", "BB"),
170
+ ("PUSH_SLOT_ATTR", "Bb"),
171
+ ("PUSH_GLYPH_ATTR_OBS", "Bb"),
172
+ ("PUSH_GLYPH_METRIC", "Bbb"),
173
+ ("PUSH_FEAT", "Bb"),
174
+ ("PUSH_ATT_TO_GATTR_OBS", "Bb"),
175
+ ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
176
+ ("PUSH_ISLOT_ATTR", "Bbb"),
177
+ ("PUSH_IGLYPH_ATTR", "Bbb"),
178
+ ("POP_RET", 0), # x30
179
+ ("RET_ZERO", 0),
180
+ ("RET_TRUE", 0),
181
+ ("IATTR_SET", "BB"),
182
+ ("IATTR_ADD", "BB"),
183
+ ("IATTR_SUB", "BB"),
184
+ ("PUSH_PROC_STATE", "B"),
185
+ ("PUSH_VERSION", 0),
186
+ ("PUT_SUBS", ">bHH"),
187
+ ("PUT_SUBS2", 0),
188
+ ("PUT_SUBS3", 0),
189
+ ("PUT_GLYPH", ">H"),
190
+ ("PUSH_GLYPH_ATTR", ">Hb"),
191
+ ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
192
+ ("BITOR", 0),
193
+ ("BITAND", 0),
194
+ ("BITNOT", 0), # x40
195
+ ("BITSET", ">HH"),
196
+ ("SET_FEAT", "Bb"),
197
+ )
198
+ aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)])
199
+
200
+
201
def disassemble(aCode):
    """Disassemble Graphite action-code bytecode into a list of readable
    instruction strings like "PUSH_BYTE(3)".

    Opcodes outside the defined table are rendered as the table's first
    entry (NOP).  If the code ends in the middle of an instruction's
    operands, the list built so far is returned.
    """
    codelen = len(aCode)
    pc = 0
    res = []
    while pc < codelen:
        opcode = byteord(aCode[pc : pc + 1])
        # Use >= so that opcode == len(aCode_info) falls back to NOP;
        # the previous `>` comparison let that value raise IndexError.
        if opcode >= len(aCode_info):
            instr = aCode_info[0]
        else:
            instr = aCode_info[opcode]
        pc += 1
        if instr[1] != 0 and pc >= codelen:
            # operands expected but the code ends here: stop gracefully
            return res
        if instr[1] == -1:
            # variable-length operand list: first byte is the count
            count = byteord(aCode[pc])
            fmt = "%dB" % count
            pc += 1
        elif instr[1] == 0:
            fmt = ""
        else:
            fmt = instr[1]
        if fmt == "":
            res.append(instr[0])
            continue
        parms = struct.unpack_from(fmt, aCode[pc:])
        res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")")
        pc += struct.calcsize(fmt)
    return res
229
+
230
+
231
# Matches one textual instruction: "MNEMONIC" or "MNEMONIC(arg, arg, ...)".
# Group 1 is the mnemonic (with trailing whitespace), group 2 the raw
# comma-separated argument string (or None when there are no parentheses).
instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
232
+
233
+
234
def assemble(instrs):
    """Assemble textual instruction strings back into action-code bytes.

    Lines that do not parse, or whose mnemonic is unknown, are skipped.
    """
    out = b""
    for line in instrs:
        match = instre.match(line)
        if match is None or match.group(1) not in aCode_map:
            continue
        opcode, parmfmt = aCode_map[match.group(1)]
        out += struct.pack("B", opcode)
        if match.group(2):
            if parmfmt == 0:
                # arguments given for an instruction that takes none: ignore
                continue
            parms = [int(p) for p in re.split(r",\s*", match.group(2))]
            if parmfmt == -1:
                # variable-length operand list: emit the count first
                count = len(parms)
                out += struct.pack(("%dB" % (count + 1)), count, *parms)
            else:
                out += struct.pack(parmfmt, *parms)
    return out
252
+
253
+
254
def writecode(tag, writer, instrs):
    """Write action-code bytes as disassembled text inside an XML tag."""
    writer.begintag(tag)
    writer.newline()
    for line in disassemble(instrs):
        writer.write(line)
        writer.newline()
    writer.endtag(tag)
    writer.newline()
262
+
263
+
264
def readcode(content):
    """Parse XML text content (one instruction per line) into bytecode."""
    stripped = (line.strip() for line in content_string(content).split("\n"))
    return assemble([line for line in stripped if line])
272
+
273
+
274
# Attribute-name groups used when round-tripping the Silf header to/from
# XML; each tuple lists the fields serialized on one XML element.
attrs_info = (
    "flags",
    "extraAscent",
    "extraDescent",
    "maxGlyphID",
    "numLigComp",
    "numUserDefn",
    "maxCompPerLig",
    "direction",
    "lbGID",
)
attrs_passindexes = ("iSubst", "iPos", "iJust", "iBidi")
attrs_contexts = ("maxPreContext", "maxPostContext")
attrs_attributes = (
    "attrPseudo",
    "attrBreakWeight",
    "attrDirectionality",
    "attrMirroring",
    "attrSkipPasses",
    "attCollisions",
)
# Per-pass fields, split into general attributes and FSM table sizes.
pass_attrs_info = (
    "flags",
    "maxRuleLoop",
    "maxRuleContext",
    "maxBackup",
    "minRulePreContext",
    "maxRulePreContext",
    "collisionThreshold",
)
pass_attrs_fsm = ("numRows", "numTransitional", "numSuccess", "numColumns")
305
+
306
+
307
def writesimple(tag, self, writer, *attrkeys):
    """Write the named attributes of *self* as one simple XML tag."""
    writer.simpletag(tag, **{key: getattr(self, key) for key in attrkeys})
    writer.newline()
311
+
312
+
313
def getSimple(self, attrs, *attr_list):
    """Copy the listed XML attributes onto *self* as evaluated integers.

    Names absent from *attrs* are silently skipped.
    """
    for key in attr_list:
        if key in attrs:
            setattr(self, key, int(safeEval(attrs[key])))
317
+
318
+
319
def content_string(contents):
    """Concatenate the text parts of parsed XML content, skipping child
    elements (which appear as tuples), and strip surrounding whitespace."""
    text = "".join(part for part in contents if not isinstance(part, tuple))
    return text.strip()
326
+
327
+
328
def wrapline(writer, dat, length=80):
    """Write the tokens in *dat* space-separated, starting a new output
    line once the accumulated line exceeds *length* characters."""
    line = ""
    for token in dat:
        if len(line) > length:
            # flush without the trailing separator space
            writer.write(line[:-1])
            writer.newline()
            line = ""
        line += token + " "
    if line:
        writer.write(line[:-1])
        writer.newline()
339
+
340
+
341
class _Object:
    # Minimal attribute bag; used as the destination object for
    # sstruct.unpack2 when decompiling sub-records (e.g. justify levels).
    pass
343
+
344
+
345
class table_S__i_l_f(DefaultTable.DefaultTable):
    """Silf table support"""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # one Silf object per rendering description in the table
        self.silfs = []

    def decompile(self, data, ttFont):
        """Parse the binary Silf header and each Silf subtable.

        Versions >= 5.0 may be compressed (scheme recorded in the header);
        versions < 3.0 use a shorter 8-byte header without compilerVersion.
        """
        sstruct.unpack2(Silf_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version >= 5.0:
            # decompress first, then re-read the full v3-style header
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)
        elif self.version < 3.0:
            # NOTE(review): struct.unpack returns a 1-tuple here, so for
            # version < 3.0 fonts numSilf stays a tuple; the "%dL" format
            # below still works with a 1-tuple — confirm this is intended.
            self.numSilf = struct.unpack(">H", data[4:6])
            self.scheme = 0
            self.compilerVersion = 0
            base = 8
        else:
            self.scheme = 0
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)

        # the header is followed by an array of 32-bit subtable offsets
        silfoffsets = struct.unpack_from((">%dL" % self.numSilf), data[base:])
        for offset in silfoffsets:
            s = Silf()
            self.silfs.append(s)
            s.decompile(data[offset:], ttFont, self.version)

    def compile(self, ttFont):
        """Build the binary Silf table: header, offset array, subtables."""
        self.numSilf = len(self.silfs)
        if self.version < 3.0:
            hdr = sstruct.pack(Silf_hdr_format, self)
            hdr += struct.pack(">HH", self.numSilf, 0)
        else:
            hdr = sstruct.pack(Silf_hdr_format_3, self)
        # subtable data begins after the header and the offset array
        offset = len(hdr) + 4 * self.numSilf
        data = b""
        for s in self.silfs:
            hdr += struct.pack(">L", offset)
            subdata = s.compile(ttFont, self.version)
            offset += len(subdata)
            data += subdata
        if self.version >= 5.0:
            # compress with the scheme recorded at decompile/fromXML time
            return grUtils.compress(self.scheme, hdr + data)
        return hdr + data

    def toXML(self, writer, ttFont):
        """Write the table as a <version> tag plus one <silf> per subtable."""
        writer.comment("Attributes starting with _ are informative only")
        writer.newline()
        writer.simpletag(
            "version",
            version=self.version,
            compilerVersion=self.compilerVersion,
            compressionScheme=self.scheme,
        )
        writer.newline()
        for s in self.silfs:
            writer.begintag("silf")
            writer.newline()
            s.toXML(writer, ttFont, self.version)
            writer.endtag("silf")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read the <version> tag or one <silf> subtable from XML."""
        if name == "version":
            self.scheme = int(safeEval(attrs["compressionScheme"]))
            self.version = float(safeEval(attrs["version"]))
            self.compilerVersion = int(safeEval(attrs["compilerVersion"]))
            return
        if name == "silf":
            s = Silf()
            self.silfs.append(s)
            for element in content:
                if not isinstance(element, tuple):
                    # skip bare text between child elements
                    continue
                tag, attrs, subcontent = element
                s.fromXML(tag, attrs, subcontent, ttFont, self.version)
424
+
425
+
426
class Silf(object):
    """A particular Silf subtable.

    Holds one Graphite rendering description: justification levels, critical
    features, script tags, pseudo-glyph mappings, glyph classes, and the list
    of rule passes.

    Fix: ``compile`` previously read ``self.numCritFeaturs`` (typo), raising
    AttributeError whenever critFeatures was non-empty.
    """

    def __init__(self):
        self.passes = []
        self.scriptTags = []
        self.critFeatures = []
        self.jLevels = []
        self.pMap = {}

    def decompile(self, data, ttFont, version=2.0):
        """Parse one Silf subtable from raw bytes."""
        if version >= 3.0:
            _, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
            self.ruleVersion = float(
                floatToFixedToStr(self.ruleVersion, precisionBits=16)
            )
        _, data = sstruct.unpack2(Silf_part1_format, data, self)
        for jlevel in range(self.numJLevels):
            j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
            self.jLevels.append(j)
        _, data = sstruct.unpack2(Silf_part2_format, data, self)
        if self.numCritFeatures:
            self.critFeatures = struct.unpack_from(
                (">%dH" % self.numCritFeatures), data
            )
        # +1 skips the reserved byte that follows the critical-features array.
        data = data[self.numCritFeatures * 2 + 1 :]
        (numScriptTag,) = struct.unpack_from("B", data)
        if numScriptTag:
            self.scriptTags = [
                struct.unpack("4s", data[x : x + 4])[0].decode("ascii")
                for x in range(1, 1 + 4 * numScriptTag, 4)
            ]
        data = data[1 + 4 * numScriptTag :]
        (self.lbGID,) = struct.unpack(">H", data[:2])
        if self.numPasses:
            # numPasses+1 offsets: the extra one marks the end of the last pass.
            self.oPasses = struct.unpack(
                (">%dL" % (self.numPasses + 1)), data[2 : 6 + 4 * self.numPasses]
            )
        data = data[6 + 4 * self.numPasses :]
        (numPseudo,) = struct.unpack(">H", data[:2])
        for i in range(numPseudo):
            # v3+ pseudo records key on a 32-bit unicode; earlier on 16-bit.
            if version >= 3.0:
                pseudo = sstruct.unpack(
                    Silf_pseudomap_format, data[8 + 6 * i : 14 + 6 * i], _Object()
                )
            else:
                pseudo = sstruct.unpack(
                    Silf_pseudomap_format_h, data[8 + 4 * i : 12 + 4 * i], _Object()
                )
            self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
        data = data[8 + 6 * numPseudo :]
        # Total bytes consumed so far; pass offsets in the file are absolute,
        # so subtract currpos to index into the remaining data.
        currpos = (
            sstruct.calcsize(Silf_part1_format)
            + sstruct.calcsize(Silf_justify_format) * self.numJLevels
            + sstruct.calcsize(Silf_part2_format)
            + 2 * self.numCritFeatures
            + 1
            + 1
            + 4 * numScriptTag
            + 6
            + 4 * self.numPasses
            + 8
            + 6 * numPseudo
        )
        if version >= 3.0:
            currpos += sstruct.calcsize(Silf_part1_format_v3)
        self.classes = Classes()
        self.classes.decompile(data, ttFont, version)
        for i in range(self.numPasses):
            p = Pass()
            self.passes.append(p)
            p.decompile(
                data[self.oPasses[i] - currpos : self.oPasses[i + 1] - currpos],
                ttFont,
                version,
            )

    def compile(self, ttFont, version=2.0):
        """Serialize this subtable; returns the packed bytes."""
        self.numPasses = len(self.passes)
        self.numJLevels = len(self.jLevels)
        self.numCritFeatures = len(self.critFeatures)
        numPseudo = len(self.pMap)
        data = b""
        if version >= 3.0:
            hdroffset = sstruct.calcsize(Silf_part1_format_v3)
        else:
            hdroffset = 0
        data += sstruct.pack(Silf_part1_format, self)
        for j in self.jLevels:
            data += sstruct.pack(Silf_justify_format, j)
        data += sstruct.pack(Silf_part2_format, self)
        if self.numCritFeatures:
            # BUGFIX: was self.numCritFeaturs (typo) -> AttributeError.
            data += struct.pack((">%dH" % self.numCritFeatures), *self.critFeatures)
        data += struct.pack("BB", 0, len(self.scriptTags))
        if len(self.scriptTags):
            tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
            data += b"".join(tdata)
        data += struct.pack(">H", self.lbGID)
        self.passOffset = len(data)

        data1 = grUtils.bininfo(numPseudo, 6)
        currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
        self.pseudosOffset = currpos + len(data1)
        for u, p in sorted(self.pMap.items()):
            data1 += struct.pack(
                (">LH" if version >= 3.0 else ">HH"), u, ttFont.getGlyphID(p)
            )
        data1 += self.classes.compile(ttFont, version)
        currpos += len(data1)
        data2 = b""
        datao = b""
        for i, p in enumerate(self.passes):
            base = currpos + len(data2)
            datao += struct.pack(">L", base)
            data2 += p.compile(ttFont, base, version)
        # Trailing offset marks the end of the final pass.
        datao += struct.pack(">L", currpos + len(data2))

        if version >= 3.0:
            data3 = sstruct.pack(Silf_part1_format_v3, self)
        else:
            data3 = b""
        return data3 + data + datao + data1 + data2

    def toXML(self, writer, ttFont, version=2.0):
        """Write this subtable's content as XML."""
        if version >= 3.0:
            writer.simpletag("version", ruleVersion=self.ruleVersion)
            writer.newline()
        writesimple("info", self, writer, *attrs_info)
        writesimple("passindexes", self, writer, *attrs_passindexes)
        writesimple("contexts", self, writer, *attrs_contexts)
        writesimple("attributes", self, writer, *attrs_attributes)
        if len(self.jLevels):
            writer.begintag("justifications")
            writer.newline()
            jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
            for i, j in enumerate(self.jLevels):
                attrs = dict([(k, getattr(j, k)) for k in jnames])
                writer.simpletag("justify", **attrs)
                writer.newline()
            writer.endtag("justifications")
            writer.newline()
        if len(self.critFeatures):
            writer.begintag("critFeatures")
            writer.newline()
            writer.write(" ".join(map(str, self.critFeatures)))
            writer.newline()
            writer.endtag("critFeatures")
            writer.newline()
        if len(self.scriptTags):
            writer.begintag("scriptTags")
            writer.newline()
            writer.write(" ".join(self.scriptTags))
            writer.newline()
            writer.endtag("scriptTags")
            writer.newline()
        if self.pMap:
            writer.begintag("pseudoMap")
            writer.newline()
            for k, v in sorted(self.pMap.items()):
                writer.simpletag("pseudo", unicode=hex(k), pseudo=v)
                writer.newline()
            writer.endtag("pseudoMap")
            writer.newline()
        self.classes.toXML(writer, ttFont, version)
        if len(self.passes):
            writer.begintag("passes")
            writer.newline()
            for i, p in enumerate(self.passes):
                writer.begintag("pass", _index=i)
                writer.newline()
                p.toXML(writer, ttFont, version)
                writer.endtag("pass")
                writer.newline()
            writer.endtag("passes")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        """Rebuild this subtable from XML elements."""
        if name == "version":
            self.ruleVersion = float(safeEval(attrs.get("ruleVersion", "0")))
        if name == "info":
            getSimple(self, attrs, *attrs_info)
        elif name == "passindexes":
            getSimple(self, attrs, *attrs_passindexes)
        elif name == "contexts":
            getSimple(self, attrs, *attrs_contexts)
        elif name == "attributes":
            getSimple(self, attrs, *attrs_attributes)
        elif name == "justifications":
            for element in content:
                if not isinstance(element, tuple):
                    continue
                (tag, attrs, subcontent) = element
                if tag == "justify":
                    j = _Object()
                    for k, v in attrs.items():
                        setattr(j, k, int(v))
                    self.jLevels.append(j)
        elif name == "critFeatures":
            self.critFeatures = []
            element = content_string(content)
            self.critFeatures.extend(map(int, element.split()))
        elif name == "scriptTags":
            self.scriptTags = []
            element = content_string(content)
            for n in element.split():
                self.scriptTags.append(n)
        elif name == "pseudoMap":
            self.pMap = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                (tag, attrs, subcontent) = element
                if tag == "pseudo":
                    k = int(attrs["unicode"], 16)
                    v = attrs["pseudo"]
                    self.pMap[k] = v
        elif name == "classes":
            self.classes = Classes()
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, attrs, subcontent = element
                self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
        elif name == "passes":
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, attrs, subcontent = element
                if tag == "pass":
                    p = Pass()
                    for e in subcontent:
                        if not isinstance(e, tuple):
                            continue
                        p.fromXML(e[0], e[1], e[2], ttFont, version)
                    self.passes.append(p)
661
+
662
+
663
class Classes(object):
    """Glyph-class map of a Silf subtable.

    ``linear`` holds lists of glyph names (class index is the glyph's position
    in the list); ``nonLinear`` holds {glyphName: index} dicts.

    Fix: ``decompile`` previously passed a *generator expression* to
    ``self.linear.append`` (the append's own parentheses made the genexp
    valid), so ``compile`` crashed on ``len(l)`` and any second iteration saw
    an exhausted generator.  A real list is built now.
    """

    def __init__(self):
        self.linear = []
        self.nonLinear = []

    def decompile(self, data, ttFont, version=2.0):
        """Parse the class map from raw bytes."""
        sstruct.unpack2(Silf_classmap_format, data, self)
        # Class offsets widened from uint16 to uint32 in v4.0.
        if version >= 4.0:
            oClasses = struct.unpack(
                (">%dL" % (self.numClass + 1)), data[4 : 8 + 4 * self.numClass]
            )
        else:
            oClasses = struct.unpack(
                (">%dH" % (self.numClass + 1)), data[4 : 6 + 2 * self.numClass]
            )
        for s, e in zip(oClasses[: self.numLinear], oClasses[1 : self.numLinear + 1]):
            # BUGFIX: materialize a list (was a generator expression).
            self.linear.append(
                [
                    ttFont.getGlyphName(x)
                    for x in struct.unpack((">%dH" % ((e - s) // 2)), data[s:e])
                ]
            )
        for s, e in zip(
            oClasses[self.numLinear : self.numClass],
            oClasses[self.numLinear + 1 : self.numClass + 1],
        ):
            # +8 skips the lookup header (bininfo) preceding the pairs.
            nonLinids = [
                struct.unpack(">HH", data[x : x + 4]) for x in range(s + 8, e, 4)
            ]
            nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
            self.nonLinear.append(nonLin)

    def compile(self, ttFont, version=2.0):
        """Serialize the class map; returns the packed bytes."""
        data = b""
        oClasses = []
        if version >= 4.0:
            offset = 8 + 4 * (len(self.linear) + len(self.nonLinear))
        else:
            offset = 6 + 2 * (len(self.linear) + len(self.nonLinear))
        for l in self.linear:
            oClasses.append(len(data) + offset)
            gs = [ttFont.getGlyphID(x) for x in l]
            data += struct.pack((">%dH" % len(l)), *gs)
        for l in self.nonLinear:
            oClasses.append(len(data) + offset)
            gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()]
            data += grUtils.bininfo(len(gs))
            data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)])
        oClasses.append(len(data) + offset)
        self.numClass = len(oClasses) - 1
        self.numLinear = len(self.linear)
        return (
            sstruct.pack(Silf_classmap_format, self)
            + struct.pack(
                ((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), *oClasses
            )
            + data
        )

    def toXML(self, writer, ttFont, version=2.0):
        """Write linear and non-linear classes as XML."""
        writer.begintag("classes")
        writer.newline()
        writer.begintag("linearClasses")
        writer.newline()
        for i, l in enumerate(self.linear):
            writer.begintag("linear", _index=i)
            writer.newline()
            wrapline(writer, l)
            writer.endtag("linear")
            writer.newline()
        writer.endtag("linearClasses")
        writer.newline()
        writer.begintag("nonLinearClasses")
        writer.newline()
        for i, l in enumerate(self.nonLinear):
            writer.begintag("nonLinear", _index=i + self.numLinear)
            writer.newline()
            for inp, ind in l.items():
                writer.simpletag("map", glyph=inp, index=ind)
                writer.newline()
            writer.endtag("nonLinear")
            writer.newline()
        writer.endtag("nonLinearClasses")
        writer.newline()
        writer.endtag("classes")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        """Rebuild the class map from XML elements."""
        if name == "linearClasses":
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, attrs, subcontent = element
                if tag == "linear":
                    l = content_string(subcontent).split()
                    self.linear.append(l)
        elif name == "nonLinearClasses":
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, attrs, subcontent = element
                if tag == "nonLinear":
                    l = {}
                    for e in subcontent:
                        if not isinstance(e, tuple):
                            continue
                        tag, attrs, subsubcontent = e
                        if tag == "map":
                            l[attrs["glyph"]] = int(safeEval(attrs["index"]))
                    self.nonLinear.append(l)
771
+
772
+
773
class Pass(object):
    """One rule pass of a Silf subtable: the FSM, rules, and action code.

    Fix: ``decompile`` built ``self.actions`` with ``""`` (str) placeholders
    while ``ruleConstraints`` used ``b""``; ``compile`` later does
    ``b"".join(self.actions)``, which fails on a str element.  The placeholder
    is now ``b""`` in both lists.
    """

    def __init__(self):
        self.colMap = {}
        self.rules = []
        self.rulePreContexts = []
        self.ruleSortKeys = []
        self.ruleConstraints = []
        self.passConstraints = b""
        self.actions = []
        self.stateTrans = []
        self.startStates = []

    def decompile(self, data, ttFont, version=2.0):
        """Parse one pass from raw bytes."""
        _, data = sstruct.unpack2(Silf_pass_format, data, self)
        (numRange, _, _, _) = struct.unpack(">4H", data[:8])
        data = data[8:]
        # Glyph-range -> column map for the FSM.
        for i in range(numRange):
            (first, last, col) = struct.unpack(">3H", data[6 * i : 6 * i + 6])
            for g in range(first, last + 1):
                self.colMap[ttFont.getGlyphName(g)] = col
        data = data[6 * numRange :]
        oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
        data = data[2 + 2 * self.numSuccess :]
        rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
        self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])]
        data = data[2 * oRuleMap[-1] :]
        (self.minRulePreContext, self.maxRulePreContext) = struct.unpack("BB", data[:2])
        numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
        self.startStates = struct.unpack(
            (">%dH" % numStartStates), data[2 : 2 + numStartStates * 2]
        )
        data = data[2 + numStartStates * 2 :]
        self.ruleSortKeys = struct.unpack(
            (">%dH" % self.numRules), data[: 2 * self.numRules]
        )
        data = data[2 * self.numRules :]
        self.rulePreContexts = struct.unpack(
            ("%dB" % self.numRules), data[: self.numRules]
        )
        data = data[self.numRules :]
        (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
        oConstraints = list(
            struct.unpack(
                (">%dH" % (self.numRules + 1)), data[3 : 5 + self.numRules * 2]
            )
        )
        data = data[5 + self.numRules * 2 :]
        oActions = list(
            struct.unpack((">%dH" % (self.numRules + 1)), data[: 2 + self.numRules * 2])
        )
        data = data[2 * self.numRules + 2 :]
        # Transition rows are big-endian uint16 on disk; byteswap on LE hosts.
        for i in range(self.numTransitional):
            a = array(
                "H", data[i * self.numColumns * 2 : (i + 1) * self.numColumns * 2]
            )
            if sys.byteorder != "big":
                a.byteswap()
            self.stateTrans.append(a)
        data = data[self.numTransitional * self.numColumns * 2 + 1 :]
        self.passConstraints = data[:pConstraint]
        data = data[pConstraint:]
        # A zero offset means "no constraint"; backfill so slices are valid.
        for i in range(len(oConstraints) - 2, -1, -1):
            if oConstraints[i] == 0:
                oConstraints[i] = oConstraints[i + 1]
        self.ruleConstraints = [
            (data[s:e] if (e - s > 1) else b"")
            for (s, e) in zip(oConstraints, oConstraints[1:])
        ]
        data = data[oConstraints[-1] :]
        # BUGFIX: empty placeholder was "" (str); must be b"" so that
        # compile()'s b"".join(self.actions) works.
        self.actions = [
            (data[s:e] if (e - s > 1) else b"")
            for (s, e) in zip(oActions, oActions[1:])
        ]
        data = data[oActions[-1] :]
        # not using debug

    def compile(self, ttFont, base, version=2.0):
        """Serialize this pass; ``base`` is its absolute offset in the table."""
        # build it all up backwards
        oActions = reduce(
            lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.actions + [b""], (0, [])
        )[1]
        oConstraints = reduce(
            lambda a, x: (a[0] + len(x), a[1] + [a[0]]),
            self.ruleConstraints + [b""],
            (1, []),
        )[1]
        constraintCode = b"\000" + b"".join(self.ruleConstraints)
        transes = []
        for t in self.stateTrans:
            # Swap to big-endian for output, then swap back in place.
            if sys.byteorder != "big":
                t.byteswap()
            transes.append(t.tobytes())
            if sys.byteorder != "big":
                t.byteswap()
        if not len(transes):
            self.startStates = [0]
        oRuleMap = reduce(
            lambda a, x: (a[0] + len(x), a[1] + [a[0]]), self.rules + [[]], (0, [])
        )[1]
        passRanges = []
        gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
        for e in grUtils.entries(gidcolmap, sameval=True):
            if e[1]:
                passRanges.append((e[0], e[0] + e[1] - 1, e[2][0]))
        self.numRules = len(self.actions)
        self.fsmOffset = (
            sstruct.calcsize(Silf_pass_format)
            + 8
            + len(passRanges) * 6
            + len(oRuleMap) * 2
            + 2 * oRuleMap[-1]
            + 2
            + 2 * len(self.startStates)
            + 3 * self.numRules
            + 3
            + 4 * self.numRules
            + 4
        )
        self.pcCode = (
            self.fsmOffset + 2 * self.numTransitional * self.numColumns + 1 + base
        )
        self.rcCode = self.pcCode + len(self.passConstraints)
        self.aCode = self.rcCode + len(constraintCode)
        self.oDebug = 0
        # now generate output
        data = sstruct.pack(Silf_pass_format, self)
        data += grUtils.bininfo(len(passRanges), 6)
        data += b"".join(struct.pack(">3H", *p) for p in passRanges)
        data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
        # NOTE(review): self.rules holds tuples after decompile() but lists
        # after fromXML(); reduce(a + x) assumes homogeneous sequences.
        flatrules = reduce(lambda a, x: a + x, self.rules, [])
        data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
        data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
        data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
        data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
        data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
        data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
        data += struct.pack((">%dH" % (self.numRules + 1)), *oConstraints)
        data += struct.pack((">%dH" % (self.numRules + 1)), *oActions)
        return (
            data
            + b"".join(transes)
            + struct.pack("B", 0)
            + self.passConstraints
            + constraintCode
            + b"".join(self.actions)
        )

    def toXML(self, writer, ttFont, version=2.0):
        """Write this pass's content as XML."""
        writesimple("info", self, writer, *pass_attrs_info)
        writesimple("fsminfo", self, writer, *pass_attrs_fsm)
        writer.begintag("colmap")
        writer.newline()
        wrapline(
            writer,
            [
                "{}={}".format(*x)
                for x in sorted(
                    self.colMap.items(), key=lambda x: ttFont.getGlyphID(x[0])
                )
            ],
        )
        writer.endtag("colmap")
        writer.newline()
        writer.begintag("staterulemap")
        writer.newline()
        for i, r in enumerate(self.rules):
            writer.simpletag(
                "state",
                number=self.numRows - self.numSuccess + i,
                rules=" ".join(map(str, r)),
            )
            writer.newline()
        writer.endtag("staterulemap")
        writer.newline()
        writer.begintag("rules")
        writer.newline()
        for i in range(len(self.actions)):
            writer.begintag(
                "rule",
                index=i,
                precontext=self.rulePreContexts[i],
                sortkey=self.ruleSortKeys[i],
            )
            writer.newline()
            if len(self.ruleConstraints[i]):
                writecode("constraint", writer, self.ruleConstraints[i])
            writecode("action", writer, self.actions[i])
            writer.endtag("rule")
            writer.newline()
        writer.endtag("rules")
        writer.newline()
        if len(self.passConstraints):
            writecode("passConstraint", writer, self.passConstraints)
        if len(self.stateTrans):
            writer.begintag("fsm")
            writer.newline()
            writer.begintag("starts")
            writer.write(" ".join(map(str, self.startStates)))
            writer.endtag("starts")
            writer.newline()
            for i, s in enumerate(self.stateTrans):
                writer.begintag("row", _i=i)
                # no newlines here
                writer.write(" ".join(map(str, s)))
                writer.endtag("row")
                writer.newline()
            writer.endtag("fsm")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        """Rebuild this pass from XML elements."""
        if name == "info":
            getSimple(self, attrs, *pass_attrs_info)
        elif name == "fsminfo":
            getSimple(self, attrs, *pass_attrs_fsm)
        elif name == "colmap":
            e = content_string(content)
            for w in e.split():
                x = w.split("=")
                if len(x) != 2 or x[0] == "" or x[1] == "":
                    continue
                self.colMap[x[0]] = int(x[1])
        elif name == "staterulemap":
            for e in content:
                if not isinstance(e, tuple):
                    continue
                tag, a, c = e
                if tag == "state":
                    self.rules.append([int(x) for x in a["rules"].split(" ")])
        elif name == "rules":
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, a, c = element
                if tag != "rule":
                    continue
                self.rulePreContexts.append(int(a["precontext"]))
                self.ruleSortKeys.append(int(a["sortkey"]))
                con = b""
                act = b""
                for e in c:
                    if not isinstance(e, tuple):
                        continue
                    tag, a, subc = e
                    if tag == "constraint":
                        con = readcode(subc)
                    elif tag == "action":
                        act = readcode(subc)
                self.actions.append(act)
                self.ruleConstraints.append(con)
        elif name == "passConstraint":
            self.passConstraints = readcode(content)
        elif name == "fsm":
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, a, c = element
                if tag == "row":
                    s = array("H")
                    e = content_string(c)
                    s.extend(map(int, e.split()))
                    self.stateTrans.append(s)
                elif tag == "starts":
                    s = []
                    e = content_string(c)
                    s.extend(map(int, e.split()))
                    self.startStates = s
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_J_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .T_S_I_V_ import table_T_S_I_V_
2
+
3
+
4
class table_T_S_I_J_(table_T_S_I_V_):
    # TSIJ stores VTT source text exactly like TSIV; only the table tag
    # differs, so the base class provides all behavior.
    pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .T_S_I_V_ import table_T_S_I_V_
2
+
3
+
4
class table_T_S_I_P_(table_T_S_I_V_):
    # TSIP stores VTT source text exactly like TSIV; only the table tag
    # differs, so the base class provides all behavior.
    pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
2
+ tool to store its hinting source data.
3
+
4
+ TSI1 contains the text of the glyph programs in the form of low-level assembly
5
+ code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
6
+ """
7
+
8
+ from . import DefaultTable
9
+ from fontTools.misc.loggingTools import LogMixin
10
+ from fontTools.misc.textTools import strjoin, tobytes, tostr
11
+
12
+
13
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
    """VTT's TSI1 table: glyph hinting programs as low-level assembly text.

    Record lengths/offsets live in the companion index table (TSI0).  The
    decoding rules for the 0x8000 "overflow" length sentinel are intricate
    and order-dependent, so the logic below is kept exactly as-is.
    """

    # Sentinel glyph IDs in the index table that name the "extra" programs
    # (font-wide, not per-glyph).
    extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}

    # Tag of the companion index table holding (glyphID, length, offset) records.
    indextable = "TSI0"

    def decompile(self, data, ttFont):
        # Populates self.glyphPrograms (per-glyph) and self.extraPrograms
        # ('ppgm'/'cvt'/'fpgm'/...), both {name: utf-8 text}.
        totalLength = len(data)
        indextable = ttFont[self.indextable]
        # First pass handles normal glyph records, second the "extra" records.
        for indices, isExtra in zip(
            (indextable.indices, indextable.extra_indices), (False, True)
        ):
            programs = {}
            for i, (glyphID, textLength, textOffset) in enumerate(indices):
                if isExtra:
                    name = self.extras[glyphID]
                else:
                    name = ttFont.getGlyphName(glyphID)
                if textOffset > totalLength:
                    self.log.warning("textOffset > totalLength; %r skipped" % name)
                    continue
                if textLength < 0x8000:
                    # If the length stored in the record is less than 32768, then use
                    # that as the length of the record.
                    pass
                elif textLength == 0x8000:
                    # If the length is 32768, compute the actual length as follows:
                    isLast = i == (len(indices) - 1)
                    if isLast:
                        if isExtra:
                            # For the last "extra" record (the very last record of the
                            # table), the length is the difference between the total
                            # length of the TSI1 table and the textOffset of the final
                            # record.
                            nextTextOffset = totalLength
                        else:
                            # For the last "normal" record (the last record just prior
                            # to the record containing the "magic number"), the length
                            # is the difference between the textOffset of the record
                            # following the "magic number" (0xFFFE) record (i.e. the
                            # first "extra" record), and the textOffset of the last
                            # "normal" record.
                            nextTextOffset = indextable.extra_indices[0][2]
                    else:
                        # For all other records with a length of 0x8000, the length is
                        # the difference between the textOffset of the record in
                        # question and the textOffset of the next record.
                        nextTextOffset = indices[i + 1][2]
                    assert nextTextOffset >= textOffset, "entries not sorted by offset"
                    if nextTextOffset > totalLength:
                        self.log.warning(
                            "nextTextOffset > totalLength; %r truncated" % name
                        )
                        nextTextOffset = totalLength
                    textLength = nextTextOffset - textOffset
                else:
                    from fontTools import ttLib

                    raise ttLib.TTLibError(
                        "%r textLength (%d) must not be > 32768" % (name, textLength)
                    )
                text = data[textOffset : textOffset + textLength]
                assert len(text) == textLength
                text = tostr(text, encoding="utf-8")
                # Empty programs are dropped entirely.
                if text:
                    programs[name] = text
            if isExtra:
                self.extraPrograms = programs
            else:
                self.glyphPrograms = programs

    def compile(self, ttFont):
        # Serializes all programs, writing the (length, offset) records into
        # the companion index table as a side effect.
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        data = b""
        indextable = ttFont[self.indextable]
        glyphNames = ttFont.getGlyphOrder()

        indices = []
        for i in range(len(glyphNames)):
            if len(data) % 2:
                data = (
                    data + b"\015"
                )  # align on 2-byte boundaries, fill with return chars. Yum.
            name = glyphNames[i]
            if name in self.glyphPrograms:
                text = tobytes(self.glyphPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            # Lengths >= 32768 are stored as the 0x8000 sentinel; the real
            # length is reconstructed from the next record's offset.
            if textLength >= 0x8000:
                textLength = 0x8000
            indices.append((i, textLength, len(data)))
            data = data + text

        extra_indices = []
        codes = sorted(self.extras.items())
        for i in range(len(codes)):
            if len(data) % 2:
                data = (
                    data + b"\015"
                )  # align on 2-byte boundaries, fill with return chars.
            code, name = codes[i]
            if name in self.extraPrograms:
                text = tobytes(self.extraPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000
            extra_indices.append((code, textLength, len(data)))
            data = data + text
        indextable.set(indices, extra_indices)
        return data

    def toXML(self, writer, ttFont):
        # Dumps each non-empty program verbatim; CR line endings become LF
        # for readability in the XML dump.
        names = sorted(self.glyphPrograms.keys())
        writer.newline()
        for name in names:
            text = self.glyphPrograms[name]
            if not text:
                continue
            writer.begintag("glyphProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("glyphProgram")
            writer.newline()
            writer.newline()
        extra_names = sorted(self.extraPrograms.keys())
        for name in extra_names:
            text = self.extraPrograms[name]
            if not text:
                continue
            writer.begintag("extraProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("extraProgram")
            writer.newline()
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Inverse of toXML: LF back to CR; the first and last lines are the
        # newline padding added by toXML and are stripped.
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        lines = strjoin(content).replace("\r", "\n").split("\n")
        text = "\r".join(lines[1:-1])
        if name == "glyphProgram":
            self.glyphPrograms[attrs["name"]] = text
        elif name == "extraProgram":
            self.extraPrograms[attrs["name"]] = text
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__3.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
2
+ tool to store its hinting source data.
3
+
4
+ TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code.
5
+ """
6
+
7
+ from fontTools import ttLib
8
+
9
+ superclass = ttLib.getTableClass("TSI1")
10
+
11
+
12
class table_T_S_I__3(superclass):
    """TSI3 table: VTTTalk glyph program text.

    Shares TSI1's record layout entirely; only the sentinel names and the
    companion index table differ.
    """

    # The four 0xFFFx sentinel slots are unused here.
    extras = {
        0xFFFA: "reserved0",
        0xFFFB: "reserved1",
        0xFFFC: "reserved2",
        0xFFFD: "reserved3",
    }

    # Offsets/lengths for TSI3 records live in TSI2 (not TSI0).
    indextable = "TSI2"
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py ADDED
@@ -0,0 +1,846 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc.fixedTools import (
2
+ fixedToFloat as fi2fl,
3
+ floatToFixed as fl2fi,
4
+ floatToFixedToStr as fl2str,
5
+ strToFixedToFloat as str2fl,
6
+ otRound,
7
+ )
8
+ from fontTools.misc.textTools import safeEval
9
+ import array
10
+ from collections import Counter, defaultdict
11
+ import io
12
+ import logging
13
+ import struct
14
+ import sys
15
+
16
+
17
+ # https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
18
+
19
+ EMBEDDED_PEAK_TUPLE = 0x8000
20
+ INTERMEDIATE_REGION = 0x4000
21
+ PRIVATE_POINT_NUMBERS = 0x2000
22
+
23
+ DELTAS_ARE_ZERO = 0x80
24
+ DELTAS_ARE_WORDS = 0x40
25
+ DELTAS_ARE_LONGS = 0xC0
26
+ DELTAS_SIZE_MASK = 0xC0
27
+ DELTA_RUN_COUNT_MASK = 0x3F
28
+
29
+ POINTS_ARE_WORDS = 0x80
30
+ POINT_RUN_COUNT_MASK = 0x7F
31
+
32
+ TUPLES_SHARE_POINT_NUMBERS = 0x8000
33
+ TUPLE_COUNT_MASK = 0x0FFF
34
+ TUPLE_INDEX_MASK = 0x0FFF
35
+
36
+ log = logging.getLogger(__name__)
37
+
38
+
39
class TupleVariation(object):
    """One variation tuple: a region of the design space plus per-point deltas.

    ``axes`` maps axis tags to (min, peak, max) triples in normalized
    coordinates.  ``coordinates`` holds one delta per point — an (x, y)
    pair for 'gvar', a plain number for 'cvar' — with ``None`` marking
    points whose deltas are omitted (to be inferred, for 'gvar').
    """

    def __init__(self, axes, coordinates):
        # Copy both inputs so later mutation does not alias the caller's data.
        self.axes = axes.copy()
        self.coordinates = list(coordinates)

    def __repr__(self):
        axes = ",".join(
            sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
        )
        return "<TupleVariation %s %s>" % (axes, self.coordinates)

    def __eq__(self, other):
        return self.coordinates == other.coordinates and self.axes == other.axes

    def getUsedPoints(self):
        """Return the set of point indices that carry explicit deltas.

        An empty frozenset means "all points used"; ``None`` means the
        variation uses no points at all.
        """
        # Empty set means "all points used".
        if None not in self.coordinates:
            return frozenset()
        used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
        # Return None if no points used.
        return used if used else None

    def hasImpact(self):
        """Returns True if this TupleVariation has any visible impact.

        If the result is False, the TupleVariation can be omitted from the font
        without making any visible difference.
        """
        return any(c is not None for c in self.coordinates)

    def toXML(self, writer, axisTags):
        """Write this variation as a <tuple> element (coords, then deltas)."""
        writer.begintag("tuple")
        writer.newline()
        for axis in axisTags:
            value = self.axes.get(axis)
            if value is not None:
                minValue, value, maxValue = value
                defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
                defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
                # Only emit min/max when they differ from the defaults the
                # peak value implies, keeping the XML compact.
                if minValue == defaultMinValue and maxValue == defaultMaxValue:
                    writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
                else:
                    attrs = [
                        ("axis", axis),
                        ("min", fl2str(minValue, 14)),
                        ("value", fl2str(value, 14)),
                        ("max", fl2str(maxValue, 14)),
                    ]
                    writer.simpletag("coord", attrs)
                writer.newline()
        wrote_any_deltas = False
        for i, delta in enumerate(self.coordinates):
            if type(delta) == tuple and len(delta) == 2:
                # 'gvar'-style (x, y) point delta.
                writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
                writer.newline()
                wrote_any_deltas = True
            elif type(delta) == int:
                # 'cvar'-style scalar CVT delta.
                writer.simpletag("delta", cvt=i, value=delta)
                writer.newline()
                wrote_any_deltas = True
            elif delta is not None:
                log.error("bad delta format")
                writer.comment("bad delta #%d" % i)
                writer.newline()
                wrote_any_deltas = True
        if not wrote_any_deltas:
            writer.comment("no deltas")
            writer.newline()
        writer.endtag("tuple")
        writer.newline()

    def fromXML(self, name, attrs, _content):
        """Populate axes/coordinates from one <coord> or <delta> element."""
        if name == "coord":
            axis = attrs["axis"]
            value = str2fl(attrs["value"], 14)
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            minValue = str2fl(attrs.get("min", defaultMinValue), 14)
            maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
            self.axes[axis] = (minValue, value, maxValue)
        elif name == "delta":
            if "pt" in attrs:
                point = safeEval(attrs["pt"])
                x = safeEval(attrs["x"])
                y = safeEval(attrs["y"])
                self.coordinates[point] = (x, y)
            elif "cvt" in attrs:
                cvt = safeEval(attrs["cvt"])
                value = safeEval(attrs["value"])
                self.coordinates[cvt] = value
            else:
                log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))

    # NOTE: the mutable default for sharedCoordIndices is safe here because
    # the dict is only read (via .get), never mutated.
    def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
        """Compile to a (tupleData, auxData) pair of byte strings.

        tupleData is the TupleVariationHeader (data size, flags, optional
        embedded peak and intermediate coordinates); auxData is the
        serialized private point numbers (if any) followed by the packed
        deltas.  Returns (b"", b"") when there is nothing to encode.
        """
        assert set(self.axes.keys()) <= set(axisTags), (
            "Unknown axis tag found.",
            self.axes.keys(),
            axisTags,
        )

        tupleData = []
        auxData = []

        if pointData is None:
            usedPoints = self.getUsedPoints()
            if usedPoints is None:  # Nothing to encode
                return b"", b""
            pointData = self.compilePoints(usedPoints)

        coord = self.compileCoord(axisTags)
        # Reference a shared peak tuple by index when possible; otherwise
        # embed the peak in the header.
        flags = sharedCoordIndices.get(coord)
        if flags is None:
            flags = EMBEDDED_PEAK_TUPLE
            tupleData.append(coord)

        intermediateCoord = self.compileIntermediateCoord(axisTags)
        if intermediateCoord is not None:
            flags |= INTERMEDIATE_REGION
            tupleData.append(intermediateCoord)

        # pointData of b'' implies "use shared points".
        if pointData:
            flags |= PRIVATE_POINT_NUMBERS
            auxData.append(pointData)

        auxData.append(self.compileDeltas())
        auxData = b"".join(auxData)

        tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
        return b"".join(tupleData), auxData

    def compileCoord(self, axisTags):
        """Pack the peak coordinate as big-endian F2Dot14 values, one per
        axis in axisTags order (0 for axes this variation does not use)."""
        result = []
        axes = self.axes
        for axis in axisTags:
            triple = axes.get(axis)
            if triple is None:
                result.append(b"\0\0")
            else:
                result.append(struct.pack(">h", fl2fi(triple[1], 14)))
        return b"".join(result)

    def compileIntermediateCoord(self, axisTags):
        """Pack intermediate start/end coordinates, or return None when the
        region is fully implied by the peak (so it can be omitted)."""
        needed = False
        for axis in axisTags:
            minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
                needed = True
                break
        if not needed:
            return None
        minCoords = []
        maxCoords = []
        for axis in axisTags:
            minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
            minCoords.append(struct.pack(">h", fl2fi(minValue, 14)))
            maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14)))
        return b"".join(minCoords + maxCoords)

    @staticmethod
    def decompileCoord_(axisTags, data, offset):
        """Read one F2Dot14 value per axis; return ({axis: float}, newOffset)."""
        coord = {}
        pos = offset
        for axis in axisTags:
            coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14)
            pos += 2
        return coord, pos

    @staticmethod
    def compilePoints(points):
        """Pack a set of point indices into the run-length encoded form."""
        # If the set consists of all points in the glyph, it gets encoded with
        # a special encoding: a single zero byte.
        #
        # To use this optimization, points passed in must be empty set.
        # The following two lines are not strictly necessary as the main code
        # below would emit the same. But this is most common and faster.
        if not points:
            return b"\0"

        # In the 'gvar' table, the packing of point numbers is a little surprising.
        # It consists of multiple runs, each being a delta-encoded list of integers.
        # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
        # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
        # There are two types of runs, with values being either 8 or 16 bit unsigned
        # integers.
        points = list(points)
        points.sort()
        numPoints = len(points)

        result = bytearray()
        # The binary representation starts with the total number of points in the set,
        # encoded into one or two bytes depending on the value.
        if numPoints < 0x80:
            result.append(numPoints)
        else:
            result.append((numPoints >> 8) | 0x80)
            result.append(numPoints & 0xFF)

        MAX_RUN_LENGTH = 127
        pos = 0
        lastValue = 0
        while pos < numPoints:
            runLength = 0

            # Reserve one byte for the run header; it is patched below once
            # the run's length and encoding are known.
            headerPos = len(result)
            result.append(0)

            useByteEncoding = None
            while pos < numPoints and runLength <= MAX_RUN_LENGTH:
                curValue = points[pos]
                delta = curValue - lastValue
                if useByteEncoding is None:
                    useByteEncoding = 0 <= delta <= 0xFF
                if useByteEncoding and (delta > 0xFF or delta < 0):
                    # we need to start a new run (which will not use byte encoding)
                    break
                # TODO This never switches back to a byte-encoding from a short-encoding.
                # That's suboptimal.
                if useByteEncoding:
                    result.append(delta)
                else:
                    result.append(delta >> 8)
                    result.append(delta & 0xFF)
                lastValue = curValue
                pos += 1
                runLength += 1
            if useByteEncoding:
                result[headerPos] = runLength - 1
            else:
                result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS

        return result

    @staticmethod
    def decompilePoints_(numPoints, data, offset, tableTag):
        """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
        assert tableTag in ("cvar", "gvar")
        pos = offset
        numPointsInData = data[pos]
        pos += 1
        if (numPointsInData & POINTS_ARE_WORDS) != 0:
            numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
            pos += 1
        # A count of zero is the special "all points" encoding.
        if numPointsInData == 0:
            return (range(numPoints), pos)

        result = []
        while len(result) < numPointsInData:
            runHeader = data[pos]
            pos += 1
            numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
            point = 0
            if (runHeader & POINTS_ARE_WORDS) != 0:
                points = array.array("H")
                pointsSize = numPointsInRun * 2
            else:
                points = array.array("B")
                pointsSize = numPointsInRun
            points.frombytes(data[pos : pos + pointsSize])
            if sys.byteorder != "big":
                points.byteswap()

            assert len(points) == numPointsInRun
            pos += pointsSize

            result.extend(points)

        # Convert relative to absolute
        absolute = []
        current = 0
        for delta in result:
            current += delta
            absolute.append(current)
        result = absolute
        del absolute

        badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
        if badPoints:
            log.warning(
                "point %s out of range in '%s' table"
                % (",".join(sorted(badPoints)), tableTag)
            )
        return (result, pos)

    def compileDeltas(self):
        """Pack this variation's non-None deltas; for 'gvar' data the x run
        is emitted first, followed by the y run."""
        deltaX = []
        deltaY = []
        if self.getCoordWidth() == 2:
            for c in self.coordinates:
                if c is None:
                    continue
                deltaX.append(c[0])
                deltaY.append(c[1])
        else:
            for c in self.coordinates:
                if c is None:
                    continue
                deltaX.append(c)
        bytearr = bytearray()
        self.compileDeltaValues_(deltaX, bytearr)
        self.compileDeltaValues_(deltaY, bytearr)
        return bytearr

    @staticmethod
    def compileDeltaValues_(deltas, bytearr=None):
        """[value1, value2, value3, ...] --> bytearray

        Emits a sequence of runs. Each run starts with a
        byte-sized header whose 6 least significant bits
        (header & 0x3F) indicate how many values are encoded
        in this run. The stored length is the actual length
        minus one; run lengths are thus in the range [1..64].
        If the header byte has its most significant bit (0x80)
        set, all values in this run are zero, and no data
        follows. Otherwise, the header byte is followed by
        ((header & 0x3F) + 1) signed values. If (header &
        0x40) is clear, the delta values are stored as signed
        bytes; if (header & 0x40) is set, the delta values are
        signed 16-bit integers.
        """  # Explaining the format because the 'gvar' spec is hard to understand.
        if bytearr is None:
            bytearr = bytearray()
        pos = 0
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            # Each encoder consumes as many values as fit its width and
            # returns the position of the first value it did not consume.
            if value == 0:
                pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
            elif -128 <= value <= 127:
                pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
            elif -32768 <= value <= 32767:
                pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
            else:
                pos = TupleVariation.encodeDeltaRunAsLongs_(deltas, pos, bytearr)
        return bytearr

    @staticmethod
    def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
        # Emit headers covering a run of zero deltas; no payload follows.
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas and deltas[pos] == 0:
            pos += 1
        runLength = pos - offset
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_ZERO | 63)
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
        return pos

    @staticmethod
    def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
        # Emit a run of deltas that all fit in signed bytes.
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            if not (-128 <= value <= 127):
                break
            # Within a byte-encoded run of deltas, a single zero
            # is best stored literally as 0x00 value. However,
            # if are two or more zeroes in a sequence, it is
            # better to start a new run. For example, the sequence
            # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
            # (04 0F 0F 00 0F 0F) when storing the zero value
            # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
            # when starting a new run.
            if value == 0 and pos + 1 < numDeltas and deltas[pos + 1] == 0:
                break
            pos += 1
        runLength = pos - offset
        while runLength >= 64:
            bytearr.append(63)
            bytearr.extend(array.array("b", deltas[offset : offset + 64]))
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(runLength - 1)
            bytearr.extend(array.array("b", deltas[offset:pos]))
        return pos

    @staticmethod
    def encodeDeltaRunAsWords_(deltas, offset, bytearr):
        # Emit a run of deltas as signed 16-bit big-endian words.
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]

            # Within a word-encoded run of deltas, it is easiest
            # to start a new run (with a different encoding)
            # whenever we encounter a zero value. For example,
            # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
            # storing the zero literally (42 66 66 00 00 77 77),
            # and equally 7 bytes when starting a new run
            # (40 66 66 80 40 77 77).
            if value == 0:
                break

            # Within a word-encoded run of deltas, a single value
            # in the range (-128..127) should be encoded literally
            # because it is more compact. For example, the sequence
            # [0x6666, 2, 0x7777] becomes 7 bytes when storing
            # the value literally (42 66 66 00 02 77 77), but 8 bytes
            # when starting a new run (40 66 66 00 02 40 77 77).
            if (
                (-128 <= value <= 127)
                and pos + 1 < numDeltas
                and (-128 <= deltas[pos + 1] <= 127)
            ):
                break

            if not (-32768 <= value <= 32767):
                break

            pos += 1
        runLength = pos - offset
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_WORDS | 63)
            a = array.array("h", deltas[offset : offset + 64])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
            a = array.array("h", deltas[offset:pos])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
        return pos

    @staticmethod
    def encodeDeltaRunAsLongs_(deltas, offset, bytearr):
        # Emit a run of deltas as signed 32-bit big-endian values.
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            if -32768 <= value <= 32767:
                break
            pos += 1
        runLength = pos - offset
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_LONGS | 63)
            a = array.array("i", deltas[offset : offset + 64])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_LONGS | (runLength - 1))
            a = array.array("i", deltas[offset:pos])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
        return pos

    @staticmethod
    def decompileDeltas_(numDeltas, data, offset=0):
        """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
        result = []
        pos = offset
        # When numDeltas is None, consume runs until the data is exhausted.
        while len(result) < numDeltas if numDeltas is not None else pos < len(data):
            runHeader = data[pos]
            pos += 1
            numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
            if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_ZERO:
                result.extend([0] * numDeltasInRun)
            else:
                if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_LONGS:
                    deltas = array.array("i")
                    deltasSize = numDeltasInRun * 4
                elif (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_WORDS:
                    deltas = array.array("h")
                    deltasSize = numDeltasInRun * 2
                else:
                    deltas = array.array("b")
                    deltasSize = numDeltasInRun
                deltas.frombytes(data[pos : pos + deltasSize])
                if sys.byteorder != "big":
                    deltas.byteswap()
                assert len(deltas) == numDeltasInRun, (len(deltas), numDeltasInRun)
                pos += deltasSize
                result.extend(deltas)
        assert numDeltas is None or len(result) == numDeltas
        return (result, pos)

    @staticmethod
    def getTupleSize_(flags, axisCount):
        """Size in bytes of a TupleVariationHeader with the given flags."""
        size = 4
        if (flags & EMBEDDED_PEAK_TUPLE) != 0:
            size += axisCount * 2
        if (flags & INTERMEDIATE_REGION) != 0:
            size += axisCount * 4
        return size

    def getCoordWidth(self):
        """Return 2 if coordinates are (x, y) as in gvar, 1 if single values
        as in cvar, or 0 if empty.
        """
        firstDelta = next((c for c in self.coordinates if c is not None), None)
        if firstDelta is None:
            return 0  # empty or has no impact
        if type(firstDelta) in (int, float):
            return 1
        if type(firstDelta) is tuple and len(firstDelta) == 2:
            return 2
        raise TypeError(
            "invalid type of delta; expected (int or float) number, or "
            "Tuple[number, number]: %r" % firstDelta
        )

    def scaleDeltas(self, scalar):
        """Multiply all deltas in place by `scalar`; None entries stay None."""
        if scalar == 1.0:
            return  # no change
        coordWidth = self.getCoordWidth()
        self.coordinates = [
            (
                None
                if d is None
                else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar)
            )
            for d in self.coordinates
        ]

    def roundDeltas(self):
        """Round all deltas in place with otRound; None entries stay None."""
        coordWidth = self.getCoordWidth()
        self.coordinates = [
            (
                None
                if d is None
                else otRound(d) if coordWidth == 1 else (otRound(d[0]), otRound(d[1]))
            )
            for d in self.coordinates
        ]

    def calcInferredDeltas(self, origCoords, endPts):
        """Replace None ('gvar' inferred) deltas with explicit IUP deltas.

        Raises TypeError for 'cvar'-style variations and ValueError when
        origCoords does not match the number of points.
        """
        from fontTools.varLib.iup import iup_delta

        if self.getCoordWidth() == 1:
            raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
        if None in self.coordinates:
            if len(self.coordinates) != len(origCoords):
                raise ValueError(
                    "Expected len(origCoords) == %d; found %d"
                    % (len(self.coordinates), len(origCoords))
                )
            self.coordinates = iup_delta(self.coordinates, origCoords, endPts)

    def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
        """Drop deltas that IUP interpolation can recover within `tolerance`,
        keeping the optimized set only if its compiled form is smaller."""
        from fontTools.varLib.iup import iup_delta_optimize

        if None in self.coordinates:
            return  # already optimized

        deltaOpt = iup_delta_optimize(
            self.coordinates, origCoords, endPts, tolerance=tolerance
        )
        if None in deltaOpt:
            if isComposite and all(d is None for d in deltaOpt):
                # Fix for macOS composites
                # https://github.com/fonttools/fonttools/issues/1381
                deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
            # Use "optimized" version only if smaller...
            varOpt = TupleVariation(self.axes, deltaOpt)

            # Shouldn't matter that this is different from fvar...?
            axisTags = sorted(self.axes.keys())
            tupleData, auxData = self.compile(axisTags)
            unoptimizedLength = len(tupleData) + len(auxData)
            tupleData, auxData = varOpt.compile(axisTags)
            optimizedLength = len(tupleData) + len(auxData)

            if optimizedLength < unoptimizedLength:
                self.coordinates = varOpt.coordinates

    def __imul__(self, scalar):
        self.scaleDeltas(scalar)
        return self

    def __iadd__(self, other):
        """Add another TupleVariation's deltas to this one, element-wise."""
        if not isinstance(other, TupleVariation):
            return NotImplemented
        deltas1 = self.coordinates
        length = len(deltas1)
        deltas2 = other.coordinates
        if len(deltas2) != length:
            raise ValueError("cannot sum TupleVariation deltas with different lengths")
        # 'None' values have different meanings in gvar vs cvar TupleVariations:
        # within the gvar, when deltas are not provided explicitly for some points,
        # they need to be inferred; whereas for the 'cvar' table, if deltas are not
        # provided for some CVT values, then no adjustments are made (i.e. None == 0).
        # Thus, we cannot sum deltas for gvar TupleVariations if they contain
        # inferred deltas (the latter need to be computed first using
        # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
        # deltas as if they are zeros.
        if self.getCoordWidth() == 2:
            for i, d2 in zip(range(length), deltas2):
                d1 = deltas1[i]
                try:
                    deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
                except TypeError:
                    raise ValueError("cannot sum gvar deltas with inferred points")
        else:
            for i, d2 in zip(range(length), deltas2):
                d1 = deltas1[i]
                if d1 is not None and d2 is not None:
                    deltas1[i] = d1 + d2
                elif d1 is None and d2 is not None:
                    deltas1[i] = d2
                # elif d2 is None do nothing
        return self
653
+
654
+
655
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
    """Parse `sharedTupleCount` shared peak tuples from `data` at `offset`.

    Returns a list of coordinate dicts (axis tag -> normalized float),
    in the order the tuples appear in the binary data.
    """
    sharedTuples = []
    pos = offset
    while len(sharedTuples) < sharedTupleCount:
        coord, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
        sharedTuples.append(coord)
    return sharedTuples
661
+
662
+
663
def compileSharedTuples(
    axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
):
    """Pick the compiled peak tuples that are worth sharing.

    Counts how often each compiled peak coordinate occurs among
    `variations` and returns those used more than once, at most
    MAX_NUM_SHARED_COORDS of them, ordered by descending frequency
    (ties broken by the coordinate bytes).
    """
    frequencies = Counter(var.compileCoord(axisTags) for var in variations)
    # In python < 3.7, most_common() ordering is non-deterministic,
    # so sort explicitly to keep the output stable across versions.
    ranked = sorted(
        frequencies.most_common(MAX_NUM_SHARED_COORDS),
        key=lambda item: (-item[1], item[0]),
    )
    return [coord for coord, count in ranked if count > 1]
677
+
678
+
679
def compileTupleVariationStore(
    variations, pointCount, axisTags, sharedTupleIndices, useSharedPoints=True
):
    """Compile a list of TupleVariations into a tuple variation store.

    Returns (tupleVariationCount, tuples, data): the 16-bit count field
    (possibly with TUPLES_SHARE_POINT_NUMBERS set), the concatenated
    TupleVariationHeaders, and the concatenated serialized data blocks.
    Variations with no used points are dropped.
    """
    # pointCount is actually unused. Keeping for API compat.
    del pointCount
    newVariations = []
    pointDatas = []
    # Compile all points and figure out sharing if desired
    sharedPoints = None

    # Collect, count, and compile point-sets for all variation sets
    pointSetCount = defaultdict(int)
    for v in variations:
        points = v.getUsedPoints()
        if points is None:  # Empty variations
            continue
        pointSetCount[points] += 1
        newVariations.append(v)
        pointDatas.append(points)
    variations = newVariations
    del newVariations

    if not variations:
        return (0, b"", b"")

    n = len(variations[0].coordinates)
    assert all(
        len(v.coordinates) == n for v in variations
    ), "Variation sets have different sizes"

    compiledPoints = {
        pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount
    }

    tupleVariationCount = len(variations)
    tuples = []
    data = []

    if useSharedPoints:
        # Find point-set which saves most bytes.
        def key(pn):
            pointSet = pn[0]
            count = pn[1]
            # Bytes saved = size of one encoding times the number of
            # variations that can reuse it instead of re-emitting it.
            return len(compiledPoints[pointSet]) * (count - 1)

        sharedPoints = max(pointSetCount.items(), key=key)[0]

        data.append(compiledPoints[sharedPoints])
        tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS

        # b'' implies "use shared points"
        pointDatas = [
            compiledPoints[points] if points != sharedPoints else b""
            for points in pointDatas
        ]

    for v, p in zip(variations, pointDatas):
        thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)

        tuples.append(thisTuple)
        data.append(thisData)

    tuples = b"".join(tuples)
    data = b"".join(data)
    return tupleVariationCount, tuples, data
744
+
745
+
746
def decompileTupleVariationStore(
    tableTag,
    axisTags,
    tupleVariationCount,
    pointCount,
    sharedTuples,
    data,
    pos,
    dataPos,
):
    """Parse a tuple variation store into a list of TupleVariations.

    `pos` is the offset of the first TupleVariationHeader in `data`;
    `dataPos` is the offset of the serialized data block. When the
    TUPLES_SHARE_POINT_NUMBERS flag is set, the shared point numbers are
    read from the start of the data block and applied to every variation
    that does not carry private point numbers.
    """
    numAxes = len(axisTags)
    result = []
    if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
        sharedPoints, dataPos = TupleVariation.decompilePoints_(
            pointCount, data, dataPos, tableTag
        )
    else:
        sharedPoints = []
    for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
        dataSize, flags = struct.unpack(">HH", data[pos : pos + 4])
        tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
        tupleData = data[pos : pos + tupleSize]
        pointDeltaData = data[dataPos : dataPos + dataSize]
        result.append(
            decompileTupleVariation_(
                pointCount,
                sharedTuples,
                sharedPoints,
                tableTag,
                axisTags,
                tupleData,
                pointDeltaData,
            )
        )
        # Headers and data blocks advance independently.
        pos += tupleSize
        dataPos += dataSize
    return result
783
+
784
+
785
def decompileTupleVariation_(
    pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData
):
    """Parse one variation: its header bytes (`data`) plus its serialized
    point-numbers/deltas block (`tupleData`). Returns a TupleVariation."""
    assert tableTag in ("cvar", "gvar"), tableTag
    flags = struct.unpack(">H", data[2:4])[0]
    pos = 4
    # The peak is either embedded in the header or referenced by index
    # into the shared tuples array.
    if (flags & EMBEDDED_PEAK_TUPLE) == 0:
        peak = sharedTuples[flags & TUPLE_INDEX_MASK]
    else:
        peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    if (flags & INTERMEDIATE_REGION) != 0:
        start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
        end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    else:
        start, end = inferRegion_(peak)
    axes = {}
    for axis in axisTags:
        region = start[axis], peak[axis], end[axis]
        # Axes at (0, 0, 0) have no effect and are omitted.
        if region != (0.0, 0.0, 0.0):
            axes[axis] = region
    # Reset the cursor: point numbers and deltas live in tupleData.
    pos = 0
    if (flags & PRIVATE_POINT_NUMBERS) != 0:
        points, pos = TupleVariation.decompilePoints_(
            pointCount, tupleData, pos, tableTag
        )
    else:
        points = sharedPoints

    deltas = [None] * pointCount

    if tableTag == "cvar":
        deltas_cvt, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        for p, delta in zip(points, deltas_cvt):
            # Out-of-range point numbers are silently dropped.
            if 0 <= p < pointCount:
                deltas[p] = delta

    elif tableTag == "gvar":
        # 'gvar' stores all x deltas first, then all y deltas.
        deltas_x, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        deltas_y, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        for p, x, y in zip(points, deltas_x, deltas_y):
            if 0 <= p < pointCount:
                deltas[p] = (x, y)

    return TupleVariation(axes, deltas)
829
+
830
+
831
def inferRegion_(peak):
    """Infer start and end for a (non-intermediate) region

    This helper function computes the applicability region for
    variation tuples whose INTERMEDIATE_REGION flag is not set in the
    TupleVariationHeader structure. Variation tuples apply only to
    certain regions of the variation space; outside that region, the
    tuple has no effect. To make the binary encoding more compact,
    TupleVariationHeaders can omit the intermediateStartTuple and
    intermediateEndTuple fields.
    """
    # Each axis spans from its peak (when negative) or zero up to its
    # peak (when positive) or zero: -0.3 -> (-0.3, 0.0); 0.7 -> (0.0, 0.7).
    start = {axis: min(value, 0.0) for axis, value in peak.items()}
    end = {axis: max(value, 0.0) for axis, value in peak.items()}
    return (start, end)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/V_A_R_C_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
# Converter for the 'VARC' table. All parsing and serialization is
# inherited from the otData-driven BaseTTXConverter machinery.
class table_V_A_R_C_(BaseTTXConverter):
    pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__init__.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
2
def _moduleFinderHint():
    """Dummy function to let modulefinder know what tables may be
    dynamically imported. Generated by MetaTools/buildTableList.py.

    >>> _moduleFinderHint()
    """
    # Generated list — do not edit by hand; one import per table converter
    # module so static analyzers see the dynamic imports.
    from . import B_A_S_E_
    from . import C_B_D_T_
    from . import C_B_L_C_
    from . import C_F_F_
    from . import C_F_F__2
    from . import C_O_L_R_
    from . import C_P_A_L_
    from . import D_S_I_G_
    from . import D__e_b_g
    from . import E_B_D_T_
    from . import E_B_L_C_
    from . import F_F_T_M_
    from . import F__e_a_t
    from . import G_D_E_F_
    from . import G_M_A_P_
    from . import G_P_K_G_
    from . import G_P_O_S_
    from . import G_S_U_B_
    from . import G__l_a_t
    from . import G__l_o_c
    from . import H_V_A_R_
    from . import J_S_T_F_
    from . import L_T_S_H_
    from . import M_A_T_H_
    from . import M_E_T_A_
    from . import M_V_A_R_
    from . import O_S_2f_2
    from . import S_I_N_G_
    from . import S_T_A_T_
    from . import S_V_G_
    from . import S__i_l_f
    from . import S__i_l_l
    from . import T_S_I_B_
    from . import T_S_I_C_
    from . import T_S_I_D_
    from . import T_S_I_J_
    from . import T_S_I_P_
    from . import T_S_I_S_
    from . import T_S_I_V_
    from . import T_S_I__0
    from . import T_S_I__1
    from . import T_S_I__2
    from . import T_S_I__3
    from . import T_S_I__5
    from . import T_T_F_A_
    from . import V_A_R_C_
    from . import V_D_M_X_
    from . import V_O_R_G_
    from . import V_V_A_R_
    from . import _a_n_k_r
    from . import _a_v_a_r
    from . import _b_s_l_n
    from . import _c_i_d_g
    from . import _c_m_a_p
    from . import _c_v_a_r
    from . import _c_v_t
    from . import _f_e_a_t
    from . import _f_p_g_m
    from . import _f_v_a_r
    from . import _g_a_s_p
    from . import _g_c_i_d
    from . import _g_l_y_f
    from . import _g_v_a_r
    from . import _h_d_m_x
    from . import _h_e_a_d
    from . import _h_h_e_a
    from . import _h_m_t_x
    from . import _k_e_r_n
    from . import _l_c_a_r
    from . import _l_o_c_a
    from . import _l_t_a_g
    from . import _m_a_x_p
    from . import _m_e_t_a
    from . import _m_o_r_t
    from . import _m_o_r_x
    from . import _n_a_m_e
    from . import _o_p_b_d
    from . import _p_o_s_t
    from . import _p_r_e_p
    from . import _p_r_o_p
    from . import _s_b_i_x
    from . import _t_r_a_k
    from . import _v_h_e_a
    from . import _v_m_t_x
92
+
93
+
94
if __name__ == "__main__":
    import doctest, sys

    # Run this module's doctests and use the failure count as exit status.
    sys.exit(doctest.testmod().failed)
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/C_F_F__2.cpython-310.pyc ADDED
Binary file (889 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/C_O_L_R_.cpython-310.pyc ADDED
Binary file (5.25 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc ADDED
Binary file (386 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc ADDED
Binary file (386 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_o_c.cpython-310.pyc ADDED
Binary file (3 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_A_T_H_.cpython-310.pyc ADDED
Binary file (386 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/O_S_2f_2.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_l.cpython-310.pyc ADDED
Binary file (3 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_P_.cpython-310.pyc ADDED
Binary file (386 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc ADDED
Binary file (386 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc ADDED
Binary file (5.09 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_v_a_r.cpython-310.pyc ADDED
Binary file (8.5 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_h_h_e_a.cpython-310.pyc ADDED
Binary file (3.94 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc ADDED
Binary file (2.49 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_x.cpython-310.pyc ADDED
Binary file (386 Bytes). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otTables.cpython-310.pyc ADDED
Binary file (69.7 kB). View file
 
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/_b_s_l_n.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
5
+ class table__b_s_l_n(BaseTTXConverter):
6
+ pass
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_m_a_p.py ADDED
@@ -0,0 +1,1576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc.textTools import bytesjoin, safeEval, readHex
2
+ from fontTools.misc.encodingTools import getEncoding
3
+ from fontTools.ttLib import getSearchRange
4
+ from fontTools.unicode import Unicode
5
+ from . import DefaultTable
6
+ import sys
7
+ import struct
8
+ import array
9
+ import logging
10
+
11
+
12
+ log = logging.getLogger(__name__)
13
+
14
+
15
+ def _make_map(font, chars, gids):
16
+ assert len(chars) == len(gids)
17
+ glyphNames = font.getGlyphNameMany(gids)
18
+ cmap = {}
19
+ for char, gid, name in zip(chars, gids, glyphNames):
20
+ if gid == 0:
21
+ continue
22
+ cmap[char] = name
23
+ return cmap
24
+
25
+
26
+ class table__c_m_a_p(DefaultTable.DefaultTable):
27
+ """Character to Glyph Index Mapping Table
28
+
29
+ This class represents the `cmap <https://docs.microsoft.com/en-us/typography/opentype/spec/cmap>`_
30
+ table, which maps between input characters (in Unicode or other system encodings)
31
+ and glyphs within the font. The ``cmap`` table contains one or more subtables
32
+ which determine the mapping of of characters to glyphs across different platforms
33
+ and encoding systems.
34
+
35
+ ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access
36
+ to the subtables, although it is normally easier to retrieve individual subtables
37
+ through the utility methods described below. To add new subtables to a font,
38
+ first determine the subtable format (if in doubt use format 4 for glyphs within
39
+ the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation
40
+ Sequences) construct subtable objects with ``CmapSubtable.newSubtable(format)``,
41
+ and append them to the ``.tables`` list.
42
+
43
+ Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap``
44
+ attribute.
45
+
46
+ Example::
47
+
48
+ cmap4_0_3 = CmapSubtable.newSubtable(4)
49
+ cmap4_0_3.platformID = 0
50
+ cmap4_0_3.platEncID = 3
51
+ cmap4_0_3.language = 0
52
+ cmap4_0_3.cmap = { 0xC1: "Aacute" }
53
+
54
+ cmap = newTable("cmap")
55
+ cmap.tableVersion = 0
56
+ cmap.tables = [cmap4_0_3]
57
+ """
58
+
59
+ def getcmap(self, platformID, platEncID):
60
+ """Returns the first subtable which matches the given platform and encoding.
61
+
62
+ Args:
63
+ platformID (int): The platform ID. Use 0 for Unicode, 1 for Macintosh
64
+ (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows.
65
+ encodingID (int): Encoding ID. Interpretation depends on the platform ID.
66
+ See the OpenType specification for details.
67
+
68
+ Returns:
69
+ An object which is a subclass of :py:class:`CmapSubtable` if a matching
70
+ subtable is found within the font, or ``None`` otherwise.
71
+ """
72
+
73
+ for subtable in self.tables:
74
+ if subtable.platformID == platformID and subtable.platEncID == platEncID:
75
+ return subtable
76
+ return None # not found
77
+
78
+ def getBestCmap(
79
+ self,
80
+ cmapPreferences=(
81
+ (3, 10),
82
+ (0, 6),
83
+ (0, 4),
84
+ (3, 1),
85
+ (0, 3),
86
+ (0, 2),
87
+ (0, 1),
88
+ (0, 0),
89
+ ),
90
+ ):
91
+ """Returns the 'best' Unicode cmap dictionary available in the font
92
+ or ``None``, if no Unicode cmap subtable is available.
93
+
94
+ By default it will search for the following (platformID, platEncID)
95
+ pairs in order::
96
+
97
+ (3, 10), # Windows Unicode full repertoire
98
+ (0, 6), # Unicode full repertoire (format 13 subtable)
99
+ (0, 4), # Unicode 2.0 full repertoire
100
+ (3, 1), # Windows Unicode BMP
101
+ (0, 3), # Unicode 2.0 BMP
102
+ (0, 2), # Unicode ISO/IEC 10646
103
+ (0, 1), # Unicode 1.1
104
+ (0, 0) # Unicode 1.0
105
+
106
+ This particular order matches what HarfBuzz uses to choose what
107
+ subtable to use by default. This order prefers the largest-repertoire
108
+ subtable, and among those, prefers the Windows-platform over the
109
+ Unicode-platform as the former has wider support.
110
+
111
+ This order can be customized via the ``cmapPreferences`` argument.
112
+ """
113
+ for platformID, platEncID in cmapPreferences:
114
+ cmapSubtable = self.getcmap(platformID, platEncID)
115
+ if cmapSubtable is not None:
116
+ return cmapSubtable.cmap
117
+ return None # None of the requested cmap subtables were found
118
+
119
+ def buildReversed(self):
120
+ """Builds a reverse mapping dictionary
121
+
122
+ Iterates over all Unicode cmap tables and returns a dictionary mapping
123
+ glyphs to sets of codepoints, such as::
124
+
125
+ {
126
+ 'one': {0x31}
127
+ 'A': {0x41,0x391}
128
+ }
129
+
130
+ The values are sets of Unicode codepoints because
131
+ some fonts map different codepoints to the same glyph.
132
+ For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391
133
+ GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph.
134
+ """
135
+ result = {}
136
+ for subtable in self.tables:
137
+ if subtable.isUnicode():
138
+ for codepoint, name in subtable.cmap.items():
139
+ result.setdefault(name, set()).add(codepoint)
140
+ return result
141
+
142
+ def decompile(self, data, ttFont):
143
+ tableVersion, numSubTables = struct.unpack(">HH", data[:4])
144
+ self.tableVersion = int(tableVersion)
145
+ self.tables = tables = []
146
+ seenOffsets = {}
147
+ for i in range(numSubTables):
148
+ platformID, platEncID, offset = struct.unpack(
149
+ ">HHl", data[4 + i * 8 : 4 + (i + 1) * 8]
150
+ )
151
+ platformID, platEncID = int(platformID), int(platEncID)
152
+ format, length = struct.unpack(">HH", data[offset : offset + 4])
153
+ if format in [8, 10, 12, 13]:
154
+ format, reserved, length = struct.unpack(
155
+ ">HHL", data[offset : offset + 8]
156
+ )
157
+ elif format in [14]:
158
+ format, length = struct.unpack(">HL", data[offset : offset + 6])
159
+
160
+ if not length:
161
+ log.error(
162
+ "cmap subtable is reported as having zero length: platformID %s, "
163
+ "platEncID %s, format %s offset %s. Skipping table.",
164
+ platformID,
165
+ platEncID,
166
+ format,
167
+ offset,
168
+ )
169
+ continue
170
+ table = CmapSubtable.newSubtable(format)
171
+ table.platformID = platformID
172
+ table.platEncID = platEncID
173
+ # Note that by default we decompile only the subtable header info;
174
+ # any other data gets decompiled only when an attribute of the
175
+ # subtable is referenced.
176
+ table.decompileHeader(data[offset : offset + int(length)], ttFont)
177
+ if offset in seenOffsets:
178
+ table.data = None # Mark as decompiled
179
+ table.cmap = tables[seenOffsets[offset]].cmap
180
+ else:
181
+ seenOffsets[offset] = i
182
+ tables.append(table)
183
+ if ttFont.lazy is False: # Be lazy for None and True
184
+ self.ensureDecompiled()
185
+
186
+ def ensureDecompiled(self, recurse=False):
187
+ # The recurse argument is unused, but part of the signature of
188
+ # ensureDecompiled across the library.
189
+ for st in self.tables:
190
+ st.ensureDecompiled()
191
+
192
+ def compile(self, ttFont):
193
+ self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
194
+ numSubTables = len(self.tables)
195
+ totalOffset = 4 + 8 * numSubTables
196
+ data = struct.pack(">HH", self.tableVersion, numSubTables)
197
+ tableData = b""
198
+ seen = (
199
+ {}
200
+ ) # Some tables are the same object reference. Don't compile them twice.
201
+ done = (
202
+ {}
203
+ ) # Some tables are different objects, but compile to the same data chunk
204
+ for table in self.tables:
205
+ offset = seen.get(id(table.cmap))
206
+ if offset is None:
207
+ chunk = table.compile(ttFont)
208
+ offset = done.get(chunk)
209
+ if offset is None:
210
+ offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(
211
+ tableData
212
+ )
213
+ tableData = tableData + chunk
214
+ data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
215
+ return data + tableData
216
+
217
+ def toXML(self, writer, ttFont):
218
+ writer.simpletag("tableVersion", version=self.tableVersion)
219
+ writer.newline()
220
+ for table in self.tables:
221
+ table.toXML(writer, ttFont)
222
+
223
+ def fromXML(self, name, attrs, content, ttFont):
224
+ if name == "tableVersion":
225
+ self.tableVersion = safeEval(attrs["version"])
226
+ return
227
+ if name[:12] != "cmap_format_":
228
+ return
229
+ if not hasattr(self, "tables"):
230
+ self.tables = []
231
+ format = safeEval(name[12:])
232
+ table = CmapSubtable.newSubtable(format)
233
+ table.platformID = safeEval(attrs["platformID"])
234
+ table.platEncID = safeEval(attrs["platEncID"])
235
+ table.fromXML(name, attrs, content, ttFont)
236
+ self.tables.append(table)
237
+
238
+
239
+ class CmapSubtable(object):
240
+ """Base class for all cmap subtable formats.
241
+
242
+ Subclasses which handle the individual subtable formats are named
243
+ ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass`
244
+ to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a
245
+ new subtable object for a given format.
246
+
247
+ The object exposes a ``.cmap`` attribute, which contains a dictionary mapping
248
+ character codepoints to glyph names.
249
+ """
250
+
251
+ @staticmethod
252
+ def getSubtableClass(format):
253
+ """Return the subtable class for a format."""
254
+ return cmap_classes.get(format, cmap_format_unknown)
255
+
256
+ @staticmethod
257
+ def newSubtable(format):
258
+ """Return a new instance of a subtable for the given format
259
+ ."""
260
+ subtableClass = CmapSubtable.getSubtableClass(format)
261
+ return subtableClass(format)
262
+
263
+ def __init__(self, format):
264
+ self.format = format
265
+ self.data = None
266
+ self.ttFont = None
267
+ self.platformID = None #: The platform ID of this subtable
268
+ self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``)
269
+ self.language = (
270
+ None #: The language ID of this subtable (Macintosh platform only)
271
+ )
272
+
273
+ def ensureDecompiled(self, recurse=False):
274
+ # The recurse argument is unused, but part of the signature of
275
+ # ensureDecompiled across the library.
276
+ if self.data is None:
277
+ return
278
+ self.decompile(None, None) # use saved data.
279
+ self.data = None # Once this table has been decompiled, make sure we don't
280
+ # just return the original data. Also avoids recursion when
281
+ # called with an attribute that the cmap subtable doesn't have.
282
+
283
+ def __getattr__(self, attr):
284
+ # allow lazy decompilation of subtables.
285
+ if attr[:2] == "__": # don't handle requests for member functions like '__lt__'
286
+ raise AttributeError(attr)
287
+ if self.data is None:
288
+ raise AttributeError(attr)
289
+ self.ensureDecompiled()
290
+ return getattr(self, attr)
291
+
292
+ def decompileHeader(self, data, ttFont):
293
+ format, length, language = struct.unpack(">HHH", data[:6])
294
+ assert (
295
+ len(data) == length
296
+ ), "corrupt cmap table format %d (data length: %d, header length: %d)" % (
297
+ format,
298
+ len(data),
299
+ length,
300
+ )
301
+ self.format = int(format)
302
+ self.length = int(length)
303
+ self.language = int(language)
304
+ self.data = data[6:]
305
+ self.ttFont = ttFont
306
+
307
+ def toXML(self, writer, ttFont):
308
+ writer.begintag(
309
+ self.__class__.__name__,
310
+ [
311
+ ("platformID", self.platformID),
312
+ ("platEncID", self.platEncID),
313
+ ("language", self.language),
314
+ ],
315
+ )
316
+ writer.newline()
317
+ codes = sorted(self.cmap.items())
318
+ self._writeCodes(codes, writer)
319
+ writer.endtag(self.__class__.__name__)
320
+ writer.newline()
321
+
322
+ def getEncoding(self, default=None):
323
+ """Returns the Python encoding name for this cmap subtable based on its platformID,
324
+ platEncID, and language. If encoding for these values is not known, by default
325
+ ``None`` is returned. That can be overridden by passing a value to the ``default``
326
+ argument.
327
+
328
+ Note that if you want to choose a "preferred" cmap subtable, most of the time
329
+ ``self.isUnicode()`` is what you want as that one only returns true for the modern,
330
+ commonly used, Unicode-compatible triplets, not the legacy ones.
331
+ """
332
+ return getEncoding(self.platformID, self.platEncID, self.language, default)
333
+
334
+ def isUnicode(self):
335
+ """Returns true if the characters are interpreted as Unicode codepoints."""
336
+ return self.platformID == 0 or (
337
+ self.platformID == 3 and self.platEncID in [0, 1, 10]
338
+ )
339
+
340
+ def isSymbol(self):
341
+ """Returns true if the subtable is for the Symbol encoding (3,0)"""
342
+ return self.platformID == 3 and self.platEncID == 0
343
+
344
+ def _writeCodes(self, codes, writer):
345
+ isUnicode = self.isUnicode()
346
+ for code, name in codes:
347
+ writer.simpletag("map", code=hex(code), name=name)
348
+ if isUnicode:
349
+ writer.comment(Unicode[code])
350
+ writer.newline()
351
+
352
+ def __lt__(self, other):
353
+ if not isinstance(other, CmapSubtable):
354
+ return NotImplemented
355
+
356
+ # implemented so that list.sort() sorts according to the spec.
357
+ selfTuple = (
358
+ getattr(self, "platformID", None),
359
+ getattr(self, "platEncID", None),
360
+ getattr(self, "language", None),
361
+ self.__dict__,
362
+ )
363
+ otherTuple = (
364
+ getattr(other, "platformID", None),
365
+ getattr(other, "platEncID", None),
366
+ getattr(other, "language", None),
367
+ other.__dict__,
368
+ )
369
+ return selfTuple < otherTuple
370
+
371
+
372
+ class cmap_format_0(CmapSubtable):
373
+ def decompile(self, data, ttFont):
374
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
375
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
376
+ if data is not None and ttFont is not None:
377
+ self.decompileHeader(data, ttFont)
378
+ else:
379
+ assert (
380
+ data is None and ttFont is None
381
+ ), "Need both data and ttFont arguments"
382
+ data = (
383
+ self.data
384
+ ) # decompileHeader assigns the data after the header to self.data
385
+ assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
386
+ gids = array.array("B")
387
+ gids.frombytes(self.data)
388
+ charCodes = list(range(len(gids)))
389
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
390
+
391
+ def compile(self, ttFont):
392
+ if self.data:
393
+ return struct.pack(">HHH", 0, 262, self.language) + self.data
394
+
395
+ cmap = self.cmap
396
+ assert set(cmap.keys()).issubset(range(256))
397
+ getGlyphID = ttFont.getGlyphID
398
+ valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)]
399
+
400
+ gids = array.array("B", valueList)
401
+ data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes()
402
+ assert len(data) == 262
403
+ return data
404
+
405
+ def fromXML(self, name, attrs, content, ttFont):
406
+ self.language = safeEval(attrs["language"])
407
+ if not hasattr(self, "cmap"):
408
+ self.cmap = {}
409
+ cmap = self.cmap
410
+ for element in content:
411
+ if not isinstance(element, tuple):
412
+ continue
413
+ name, attrs, content = element
414
+ if name != "map":
415
+ continue
416
+ cmap[safeEval(attrs["code"])] = attrs["name"]
417
+
418
+
419
+ subHeaderFormat = ">HHhH"
420
+
421
+
422
+ class SubHeader(object):
423
+ def __init__(self):
424
+ self.firstCode = None
425
+ self.entryCount = None
426
+ self.idDelta = None
427
+ self.idRangeOffset = None
428
+ self.glyphIndexArray = []
429
+
430
+
431
+ class cmap_format_2(CmapSubtable):
432
+ def setIDDelta(self, subHeader):
433
+ subHeader.idDelta = 0
434
+ # find the minGI which is not zero.
435
+ minGI = subHeader.glyphIndexArray[0]
436
+ for gid in subHeader.glyphIndexArray:
437
+ if (gid != 0) and (gid < minGI):
438
+ minGI = gid
439
+ # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
440
+ # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
441
+ # We would like to pick an idDelta such that the first glyphArray GID is 1,
442
+ # so that we are more likely to be able to combine glypharray GID subranges.
443
+ # This means that we have a problem when minGI is > 32K
444
+ # Since the final gi is reconstructed from the glyphArray GID by:
445
+ # (short)finalGID = (gid + idDelta) % 0x10000),
446
+ # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
447
+ # negative number to an unsigned short.
448
+
449
+ if minGI > 1:
450
+ if minGI > 0x7FFF:
451
+ subHeader.idDelta = -(0x10000 - minGI) - 1
452
+ else:
453
+ subHeader.idDelta = minGI - 1
454
+ idDelta = subHeader.idDelta
455
+ for i in range(subHeader.entryCount):
456
+ gid = subHeader.glyphIndexArray[i]
457
+ if gid > 0:
458
+ subHeader.glyphIndexArray[i] = gid - idDelta
459
+
460
+ def decompile(self, data, ttFont):
461
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
462
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
463
+ if data is not None and ttFont is not None:
464
+ self.decompileHeader(data, ttFont)
465
+ else:
466
+ assert (
467
+ data is None and ttFont is None
468
+ ), "Need both data and ttFont arguments"
469
+
470
+ data = (
471
+ self.data
472
+ ) # decompileHeader assigns the data after the header to self.data
473
+ subHeaderKeys = []
474
+ maxSubHeaderindex = 0
475
+ # get the key array, and determine the number of subHeaders.
476
+ allKeys = array.array("H")
477
+ allKeys.frombytes(data[:512])
478
+ data = data[512:]
479
+ if sys.byteorder != "big":
480
+ allKeys.byteswap()
481
+ subHeaderKeys = [key // 8 for key in allKeys]
482
+ maxSubHeaderindex = max(subHeaderKeys)
483
+
484
+ # Load subHeaders
485
+ subHeaderList = []
486
+ pos = 0
487
+ for i in range(maxSubHeaderindex + 1):
488
+ subHeader = SubHeader()
489
+ (
490
+ subHeader.firstCode,
491
+ subHeader.entryCount,
492
+ subHeader.idDelta,
493
+ subHeader.idRangeOffset,
494
+ ) = struct.unpack(subHeaderFormat, data[pos : pos + 8])
495
+ pos += 8
496
+ giDataPos = pos + subHeader.idRangeOffset - 2
497
+ giList = array.array("H")
498
+ giList.frombytes(data[giDataPos : giDataPos + subHeader.entryCount * 2])
499
+ if sys.byteorder != "big":
500
+ giList.byteswap()
501
+ subHeader.glyphIndexArray = giList
502
+ subHeaderList.append(subHeader)
503
+ # How this gets processed.
504
+ # Charcodes may be one or two bytes.
505
+ # The first byte of a charcode is mapped through the subHeaderKeys, to select
506
+ # a subHeader. For any subheader but 0, the next byte is then mapped through the
507
+ # selected subheader. If subheader Index 0 is selected, then the byte itself is
508
+ # mapped through the subheader, and there is no second byte.
509
+ # Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
510
+ #
511
+ # Each subheader references a range in the glyphIndexArray whose length is entryCount.
512
+ # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray
513
+ # referenced by another subheader.
514
+ # The only subheader that will be referenced by more than one first-byte value is the subheader
515
+ # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
516
+ # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
517
+ # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
518
+ # A subheader specifies a subrange within (0...256) by the
519
+ # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
520
+ # (e.g. glyph not in font).
521
+ # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
522
+ # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
523
+ # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
524
+ # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
525
+ # Example for Logocut-Medium
526
+ # first byte of charcode = 129; selects subheader 1.
527
+ # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
528
+ # second byte of charCode = 66
529
+ # the index offset = 66-64 = 2.
530
+ # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
531
+ # [glyphIndexArray index], [subrange array index] = glyphIndex
532
+ # [256], [0]=1 from charcode [129, 64]
533
+ # [257], [1]=2 from charcode [129, 65]
534
+ # [258], [2]=3 from charcode [129, 66]
535
+ # [259], [3]=4 from charcode [129, 67]
536
+ # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
537
+ # add it to the glyphID to get the final glyphIndex
538
+ # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
539
+
540
+ self.data = b""
541
+ cmap = {}
542
+ notdefGI = 0
543
+ for firstByte in range(256):
544
+ subHeadindex = subHeaderKeys[firstByte]
545
+ subHeader = subHeaderList[subHeadindex]
546
+ if subHeadindex == 0:
547
+ if (firstByte < subHeader.firstCode) or (
548
+ firstByte >= subHeader.firstCode + subHeader.entryCount
549
+ ):
550
+ continue # gi is notdef.
551
+ else:
552
+ charCode = firstByte
553
+ offsetIndex = firstByte - subHeader.firstCode
554
+ gi = subHeader.glyphIndexArray[offsetIndex]
555
+ if gi != 0:
556
+ gi = (gi + subHeader.idDelta) % 0x10000
557
+ else:
558
+ continue # gi is notdef.
559
+ cmap[charCode] = gi
560
+ else:
561
+ if subHeader.entryCount:
562
+ charCodeOffset = firstByte * 256 + subHeader.firstCode
563
+ for offsetIndex in range(subHeader.entryCount):
564
+ charCode = charCodeOffset + offsetIndex
565
+ gi = subHeader.glyphIndexArray[offsetIndex]
566
+ if gi != 0:
567
+ gi = (gi + subHeader.idDelta) % 0x10000
568
+ else:
569
+ continue
570
+ cmap[charCode] = gi
571
+ # If not subHeader.entryCount, then all char codes with this first byte are
572
+ # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
573
+ # same as mapping it to .notdef.
574
+
575
+ gids = list(cmap.values())
576
+ charCodes = list(cmap.keys())
577
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
578
+
579
+ def compile(self, ttFont):
580
+ if self.data:
581
+ return (
582
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
583
+ )
584
+ kEmptyTwoCharCodeRange = -1
585
+ notdefGI = 0
586
+
587
+ items = sorted(self.cmap.items())
588
+ charCodes = [item[0] for item in items]
589
+ names = [item[1] for item in items]
590
+ nameMap = ttFont.getReverseGlyphMap()
591
+ try:
592
+ gids = [nameMap[name] for name in names]
593
+ except KeyError:
594
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
595
+ try:
596
+ gids = [nameMap[name] for name in names]
597
+ except KeyError:
598
+ # allow virtual GIDs in format 2 tables
599
+ gids = []
600
+ for name in names:
601
+ try:
602
+ gid = nameMap[name]
603
+ except KeyError:
604
+ try:
605
+ if name[:3] == "gid":
606
+ gid = int(name[3:])
607
+ else:
608
+ gid = ttFont.getGlyphID(name)
609
+ except:
610
+ raise KeyError(name)
611
+
612
+ gids.append(gid)
613
+
614
+ # Process the (char code to gid) item list in char code order.
615
+ # By definition, all one byte char codes map to subheader 0.
616
+ # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0,
617
+ # which defines all char codes in its range to map to notdef) unless proven otherwise.
618
+ # Note that since the char code items are processed in char code order, all the char codes with the
619
+ # same first byte are in sequential order.
620
+
621
+ subHeaderKeys = [
622
+ kEmptyTwoCharCodeRange for x in range(256)
623
+ ] # list of indices into subHeaderList.
624
+ subHeaderList = []
625
+
626
+ # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
627
+ # with a cmap where all the one byte char codes map to notdef,
628
+ # with the result that the subhead 0 would not get created just by processing the item list.
629
+ charCode = charCodes[0]
630
+ if charCode > 255:
631
+ subHeader = SubHeader()
632
+ subHeader.firstCode = 0
633
+ subHeader.entryCount = 0
634
+ subHeader.idDelta = 0
635
+ subHeader.idRangeOffset = 0
636
+ subHeaderList.append(subHeader)
637
+
638
+ lastFirstByte = -1
639
+ items = zip(charCodes, gids)
640
+ for charCode, gid in items:
641
+ if gid == 0:
642
+ continue
643
+ firstbyte = charCode >> 8
644
+ secondByte = charCode & 0x00FF
645
+
646
+ if (
647
+ firstbyte != lastFirstByte
648
+ ): # Need to update the current subhead, and start a new one.
649
+ if lastFirstByte > -1:
650
+ # fix GI's and iDelta of current subheader.
651
+ self.setIDDelta(subHeader)
652
+
653
+ # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
654
+ # for the indices matching the char codes.
655
+ if lastFirstByte == 0:
656
+ for index in range(subHeader.entryCount):
657
+ charCode = subHeader.firstCode + index
658
+ subHeaderKeys[charCode] = 0
659
+
660
+ assert subHeader.entryCount == len(
661
+ subHeader.glyphIndexArray
662
+ ), "Error - subhead entry count does not match len of glyphID subrange."
663
+ # init new subheader
664
+ subHeader = SubHeader()
665
+ subHeader.firstCode = secondByte
666
+ subHeader.entryCount = 1
667
+ subHeader.glyphIndexArray.append(gid)
668
+ subHeaderList.append(subHeader)
669
+ subHeaderKeys[firstbyte] = len(subHeaderList) - 1
670
+ lastFirstByte = firstbyte
671
+ else:
672
+ # need to fill in with notdefs all the code points between the last charCode and the current charCode.
673
+ codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
674
+ for i in range(codeDiff):
675
+ subHeader.glyphIndexArray.append(notdefGI)
676
+ subHeader.glyphIndexArray.append(gid)
677
+ subHeader.entryCount = subHeader.entryCount + codeDiff + 1
678
+
679
+ # fix GI's and iDelta of last subheader that we we added to the subheader array.
680
+ self.setIDDelta(subHeader)
681
+
682
+ # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
683
+ subHeader = SubHeader()
684
+ subHeader.firstCode = 0
685
+ subHeader.entryCount = 0
686
+ subHeader.idDelta = 0
687
+ subHeader.idRangeOffset = 2
688
+ subHeaderList.append(subHeader)
689
+ emptySubheadIndex = len(subHeaderList) - 1
690
+ for index in range(256):
691
+ if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
692
+ subHeaderKeys[index] = emptySubheadIndex
693
+ # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
694
+ # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
695
+ # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
696
+ # charcode 0 and GID 0.
697
+
698
+ idRangeOffset = (
699
+ len(subHeaderList) - 1
700
+ ) * 8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
701
+ subheadRangeLen = (
702
+ len(subHeaderList) - 1
703
+ ) # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2.
704
+ for index in range(subheadRangeLen):
705
+ subHeader = subHeaderList[index]
706
+ subHeader.idRangeOffset = 0
707
+ for j in range(index):
708
+ prevSubhead = subHeaderList[j]
709
+ if (
710
+ prevSubhead.glyphIndexArray == subHeader.glyphIndexArray
711
+ ): # use the glyphIndexArray subarray
712
+ subHeader.idRangeOffset = (
713
+ prevSubhead.idRangeOffset - (index - j) * 8
714
+ )
715
+ subHeader.glyphIndexArray = []
716
+ break
717
+ if subHeader.idRangeOffset == 0: # didn't find one.
718
+ subHeader.idRangeOffset = idRangeOffset
719
+ idRangeOffset = (
720
+ idRangeOffset - 8
721
+ ) + subHeader.entryCount * 2 # one less subheader, one more subArray.
722
+ else:
723
+ idRangeOffset = idRangeOffset - 8 # one less subheader
724
+
725
+ # Now we can write out the data!
726
+ length = (
727
+ 6 + 512 + 8 * len(subHeaderList)
728
+ ) # header, 256 subHeaderKeys, and subheader array.
729
+ for subhead in subHeaderList[:-1]:
730
+ length = (
731
+ length + len(subhead.glyphIndexArray) * 2
732
+ ) # We can't use subhead.entryCount, as some of the subhead may share subArrays.
733
+ dataList = [struct.pack(">HHH", 2, length, self.language)]
734
+ for index in subHeaderKeys:
735
+ dataList.append(struct.pack(">H", index * 8))
736
+ for subhead in subHeaderList:
737
+ dataList.append(
738
+ struct.pack(
739
+ subHeaderFormat,
740
+ subhead.firstCode,
741
+ subhead.entryCount,
742
+ subhead.idDelta,
743
+ subhead.idRangeOffset,
744
+ )
745
+ )
746
+ for subhead in subHeaderList[:-1]:
747
+ for gi in subhead.glyphIndexArray:
748
+ dataList.append(struct.pack(">H", gi))
749
+ data = bytesjoin(dataList)
750
+ assert len(data) == length, (
751
+ "Error: cmap format 2 is not same length as calculated! actual: "
752
+ + str(len(data))
753
+ + " calc : "
754
+ + str(length)
755
+ )
756
+ return data
757
+
758
+ def fromXML(self, name, attrs, content, ttFont):
759
+ self.language = safeEval(attrs["language"])
760
+ if not hasattr(self, "cmap"):
761
+ self.cmap = {}
762
+ cmap = self.cmap
763
+
764
+ for element in content:
765
+ if not isinstance(element, tuple):
766
+ continue
767
+ name, attrs, content = element
768
+ if name != "map":
769
+ continue
770
+ cmap[safeEval(attrs["code"])] = attrs["name"]
771
+
772
+
773
# Struct layout of the fixed-size part of a format 4 subtable:
# format, length, language, segCountX2, searchRange, entrySelector, rangeShift.
cmap_format_4_format = ">7H"

# The variable-length tail that follows the fixed header:
# uint16 endCode[segCount]         # Ending character code for each segment, last = 0xFFFF.
# uint16 reservedPad               # This value should be zero
# uint16 startCode[segCount]       # Starting character code for each segment
# uint16 idDelta[segCount]         # Delta for all character codes in segment
# uint16 idRangeOffset[segCount]   # Offset in bytes to glyph indexArray, or 0
# uint16 glyphIndexArray[variable] # Glyph index array
781
+
782
+
783
def splitRange(startCode, endCode, cmap):
    """Split [startCode, endCode] into segments for a cmap format 4 subtable.

    Tries to carve the range into subranges whose glyph IDs are consecutive,
    so they compile compactly (an idDelta segment instead of glyphIndexArray
    entries).  Returns (start, end) lists where ``end`` has one more entry
    than ``start``: the first segment implicitly begins at ``startCode``.
    Heuristic, not provably optimal, but in practice never makes the table
    bigger and often makes it smaller.
    """
    if startCode == endCode:
        return [], [endCode]

    previousGID = cmap[startCode]
    previousCode = startCode
    inOrder = None  # None = undecided yet, 1 = inside an ordered run, 0 = outside
    runStart = None
    orderedRuns = []

    # Collect maximal runs in which glyph IDs increase by exactly one.
    for code in range(startCode + 1, endCode + 1):
        gid = cmap[code]
        if gid - 1 == previousGID:
            if not inOrder:  # covers both None and 0
                inOrder = 1
                runStart = previousCode
        elif inOrder:
            inOrder = 0
            orderedRuns.append((runStart, previousCode))
            runStart = None
        previousGID = gid
        previousCode = code
    if inOrder:
        orderedRuns.append((runStart, previousCode))
    assert previousCode == endCode

    # Drop runs too short to pay for themselves: a new segment costs 8 bytes,
    # while staying in a glyphIndexArray segment costs 2 bytes per character.
    kept = []
    for b, e in orderedRuns:
        if b == startCode and e == endCode:
            break  # the whole range is one ordered run: no split needed
        # A run touching either edge adds one segment; an interior run adds two.
        threshold = 4 if (b == startCode or e == endCode) else 8
        if (e - b + 1) > threshold:
            kept.append((b, e))
    orderedRuns = kept

    if not orderedRuns:
        return [], [endCode]

    # Pad out to the full range, then fill the interior "holes" -- the
    # leftover stretches whose glyph IDs are _not_ consecutive.
    if orderedRuns[0][0] != startCode:
        orderedRuns.insert(0, (startCode, orderedRuns[0][0] - 1))
    if orderedRuns[-1][1] != endCode:
        orderedRuns.append((orderedRuns[-1][1] + 1, endCode))
    i = 1
    while i < len(orderedRuns):
        if orderedRuns[i - 1][1] + 1 != orderedRuns[i][0]:
            orderedRuns.insert(i, (orderedRuns[i - 1][1] + 1, orderedRuns[i][0] - 1))
            i += 1
        i += 1

    # Flatten into parallel start/end lists; the caller supplies startCode,
    # so the first start is dropped.
    start = [b for b, e in orderedRuns]
    end = [e for b, e in orderedRuns]
    start.pop(0)
    assert len(start) + 1 == len(end)
    return start, end
860
+
861
+
862
+ class cmap_format_4(CmapSubtable):
863
+ def decompile(self, data, ttFont):
864
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
865
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
866
+ if data is not None and ttFont is not None:
867
+ self.decompileHeader(data, ttFont)
868
+ else:
869
+ assert (
870
+ data is None and ttFont is None
871
+ ), "Need both data and ttFont arguments"
872
+
873
+ data = (
874
+ self.data
875
+ ) # decompileHeader assigns the data after the header to self.data
876
+ (segCountX2, searchRange, entrySelector, rangeShift) = struct.unpack(
877
+ ">4H", data[:8]
878
+ )
879
+ data = data[8:]
880
+ segCount = segCountX2 // 2
881
+
882
+ allCodes = array.array("H")
883
+ allCodes.frombytes(data)
884
+ self.data = data = None
885
+
886
+ if sys.byteorder != "big":
887
+ allCodes.byteswap()
888
+
889
+ # divide the data
890
+ endCode = allCodes[:segCount]
891
+ allCodes = allCodes[segCount + 1 :] # the +1 is skipping the reservedPad field
892
+ startCode = allCodes[:segCount]
893
+ allCodes = allCodes[segCount:]
894
+ idDelta = allCodes[:segCount]
895
+ allCodes = allCodes[segCount:]
896
+ idRangeOffset = allCodes[:segCount]
897
+ glyphIndexArray = allCodes[segCount:]
898
+ lenGIArray = len(glyphIndexArray)
899
+
900
+ # build 2-byte character mapping
901
+ charCodes = []
902
+ gids = []
903
+ for i in range(len(startCode) - 1): # don't do 0xffff!
904
+ start = startCode[i]
905
+ delta = idDelta[i]
906
+ rangeOffset = idRangeOffset[i]
907
+ partial = rangeOffset // 2 - start + i - len(idRangeOffset)
908
+
909
+ rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
910
+ charCodes.extend(rangeCharCodes)
911
+ if rangeOffset == 0:
912
+ gids.extend(
913
+ [(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]
914
+ )
915
+ else:
916
+ for charCode in rangeCharCodes:
917
+ index = charCode + partial
918
+ assert index < lenGIArray, (
919
+ "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !"
920
+ % (i, index, lenGIArray)
921
+ )
922
+ if glyphIndexArray[index] != 0: # if not missing glyph
923
+ glyphID = glyphIndexArray[index] + delta
924
+ else:
925
+ glyphID = 0 # missing glyph
926
+ gids.append(glyphID & 0xFFFF)
927
+
928
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
929
+
930
+ def compile(self, ttFont):
931
+ if self.data:
932
+ return (
933
+ struct.pack(">HHH", self.format, self.length, self.language) + self.data
934
+ )
935
+
936
+ charCodes = list(self.cmap.keys())
937
+ if not charCodes:
938
+ startCode = [0xFFFF]
939
+ endCode = [0xFFFF]
940
+ else:
941
+ charCodes.sort()
942
+ names = [self.cmap[code] for code in charCodes]
943
+ nameMap = ttFont.getReverseGlyphMap()
944
+ try:
945
+ gids = [nameMap[name] for name in names]
946
+ except KeyError:
947
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
948
+ try:
949
+ gids = [nameMap[name] for name in names]
950
+ except KeyError:
951
+ # allow virtual GIDs in format 4 tables
952
+ gids = []
953
+ for name in names:
954
+ try:
955
+ gid = nameMap[name]
956
+ except KeyError:
957
+ try:
958
+ if name[:3] == "gid":
959
+ gid = int(name[3:])
960
+ else:
961
+ gid = ttFont.getGlyphID(name)
962
+ except:
963
+ raise KeyError(name)
964
+
965
+ gids.append(gid)
966
+ cmap = {} # code:glyphID mapping
967
+ for code, gid in zip(charCodes, gids):
968
+ cmap[code] = gid
969
+
970
+ # Build startCode and endCode lists.
971
+ # Split the char codes in ranges of consecutive char codes, then split
972
+ # each range in more ranges of consecutive/not consecutive glyph IDs.
973
+ # See splitRange().
974
+ lastCode = charCodes[0]
975
+ endCode = []
976
+ startCode = [lastCode]
977
+ for charCode in charCodes[
978
+ 1:
979
+ ]: # skip the first code, it's the first start code
980
+ if charCode == lastCode + 1:
981
+ lastCode = charCode
982
+ continue
983
+ start, end = splitRange(startCode[-1], lastCode, cmap)
984
+ startCode.extend(start)
985
+ endCode.extend(end)
986
+ startCode.append(charCode)
987
+ lastCode = charCode
988
+ start, end = splitRange(startCode[-1], lastCode, cmap)
989
+ startCode.extend(start)
990
+ endCode.extend(end)
991
+ startCode.append(0xFFFF)
992
+ endCode.append(0xFFFF)
993
+
994
+ # build up rest of cruft
995
+ idDelta = []
996
+ idRangeOffset = []
997
+ glyphIndexArray = []
998
+ for i in range(len(endCode) - 1): # skip the closing codes (0xffff)
999
+ indices = []
1000
+ for charCode in range(startCode[i], endCode[i] + 1):
1001
+ indices.append(cmap[charCode])
1002
+ if indices == list(range(indices[0], indices[0] + len(indices))):
1003
+ idDelta.append((indices[0] - startCode[i]) % 0x10000)
1004
+ idRangeOffset.append(0)
1005
+ else:
1006
+ idDelta.append(0)
1007
+ idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
1008
+ glyphIndexArray.extend(indices)
1009
+ idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
1010
+ idRangeOffset.append(0)
1011
+
1012
+ # Insane.
1013
+ segCount = len(endCode)
1014
+ segCountX2 = segCount * 2
1015
+ searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
1016
+
1017
+ charCodeArray = array.array("H", endCode + [0] + startCode)
1018
+ idDeltaArray = array.array("H", idDelta)
1019
+ restArray = array.array("H", idRangeOffset + glyphIndexArray)
1020
+ if sys.byteorder != "big":
1021
+ charCodeArray.byteswap()
1022
+ if sys.byteorder != "big":
1023
+ idDeltaArray.byteswap()
1024
+ if sys.byteorder != "big":
1025
+ restArray.byteswap()
1026
+ data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()
1027
+
1028
+ length = struct.calcsize(cmap_format_4_format) + len(data)
1029
+ header = struct.pack(
1030
+ cmap_format_4_format,
1031
+ self.format,
1032
+ length,
1033
+ self.language,
1034
+ segCountX2,
1035
+ searchRange,
1036
+ entrySelector,
1037
+ rangeShift,
1038
+ )
1039
+ return header + data
1040
+
1041
+ def fromXML(self, name, attrs, content, ttFont):
1042
+ self.language = safeEval(attrs["language"])
1043
+ if not hasattr(self, "cmap"):
1044
+ self.cmap = {}
1045
+ cmap = self.cmap
1046
+
1047
+ for element in content:
1048
+ if not isinstance(element, tuple):
1049
+ continue
1050
+ nameMap, attrsMap, dummyContent = element
1051
+ if nameMap != "map":
1052
+ assert 0, "Unrecognized keyword in cmap subtable"
1053
+ cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
1054
+
1055
+
1056
class cmap_format_6(CmapSubtable):
    """Trimmed table mapping: one dense run of codes starting at firstCode."""

    def decompile(self, data, ttFont):
        # Usually reached lazily via __getattr__ with both args None; a direct
        # call must provide both.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (
                data is None and ttFont is None
            ), "Need both data and ttFont arguments"

        data = self.data  # decompileHeader left the post-header bytes here
        firstCode, entryCount = struct.unpack(">HH", data[:4])
        firstCode = int(firstCode)
        data = data[4:]
        # assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
        gids = array.array("H")
        gids.frombytes(data[: 2 * int(entryCount)])
        if sys.byteorder != "big":
            gids.byteswap()
        self.data = data = None

        charCodes = list(range(firstCode, firstCode + len(gids)))
        self.cmap = _make_map(self.ttFont, charCodes, gids)

    def compile(self, ttFont):
        if self.data:
            # Original bytes still present: re-emit them untouched.
            return (
                struct.pack(">HHH", self.format, self.length, self.language) + self.data
            )
        mapping = self.cmap
        codes = sorted(mapping.keys())
        if codes:  # yes, there are empty cmap tables.
            # Densify: cover the whole span, filling gaps with glyph 0.
            codes = list(range(codes[0], codes[-1] + 1))
            firstCode = codes[0]
            gids = array.array(
                "H",
                [
                    ttFont.getGlyphID(mapping[code]) if code in mapping else 0
                    for code in codes
                ],
            )
            if sys.byteorder != "big":
                gids.byteswap()
            data = gids.tobytes()
        else:
            data = b""
            firstCode = 0
        header = struct.pack(
            ">HHHHH", 6, len(data) + 10, self.language, firstCode, len(codes)
        )
        return header + data

    def fromXML(self, name, attrs, content, ttFont):
        self.language = safeEval(attrs["language"])
        if not hasattr(self, "cmap"):
            self.cmap = {}
        mapping = self.cmap
        for child in content:
            if not isinstance(child, tuple):
                continue
            childName, childAttrs, _ = child
            if childName == "map":
                mapping[safeEval(childAttrs["code"])] = childAttrs["name"]
1121
+
1122
+
1123
+ class cmap_format_12_or_13(CmapSubtable):
1124
+ def __init__(self, format):
1125
+ self.format = format
1126
+ self.reserved = 0
1127
+ self.data = None
1128
+ self.ttFont = None
1129
+
1130
+ def decompileHeader(self, data, ttFont):
1131
+ format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
1132
+ assert (
1133
+ len(data) == (16 + nGroups * 12) == (length)
1134
+ ), "corrupt cmap table format %d (data length: %d, header length: %d)" % (
1135
+ self.format,
1136
+ len(data),
1137
+ length,
1138
+ )
1139
+ self.format = format
1140
+ self.reserved = reserved
1141
+ self.length = length
1142
+ self.language = language
1143
+ self.nGroups = nGroups
1144
+ self.data = data[16:]
1145
+ self.ttFont = ttFont
1146
+
1147
+ def decompile(self, data, ttFont):
1148
+ # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
1149
+ # If not, someone is calling the subtable decompile() directly, and must provide both args.
1150
+ if data is not None and ttFont is not None:
1151
+ self.decompileHeader(data, ttFont)
1152
+ else:
1153
+ assert (
1154
+ data is None and ttFont is None
1155
+ ), "Need both data and ttFont arguments"
1156
+
1157
+ data = (
1158
+ self.data
1159
+ ) # decompileHeader assigns the data after the header to self.data
1160
+ charCodes = []
1161
+ gids = []
1162
+ pos = 0
1163
+ for i in range(self.nGroups):
1164
+ startCharCode, endCharCode, glyphID = struct.unpack(
1165
+ ">LLL", data[pos : pos + 12]
1166
+ )
1167
+ pos += 12
1168
+ lenGroup = 1 + endCharCode - startCharCode
1169
+ charCodes.extend(list(range(startCharCode, endCharCode + 1)))
1170
+ gids.extend(self._computeGIDs(glyphID, lenGroup))
1171
+ self.data = data = None
1172
+ self.cmap = _make_map(self.ttFont, charCodes, gids)
1173
+
1174
+ def compile(self, ttFont):
1175
+ if self.data:
1176
+ return (
1177
+ struct.pack(
1178
+ ">HHLLL",
1179
+ self.format,
1180
+ self.reserved,
1181
+ self.length,
1182
+ self.language,
1183
+ self.nGroups,
1184
+ )
1185
+ + self.data
1186
+ )
1187
+ charCodes = list(self.cmap.keys())
1188
+ names = list(self.cmap.values())
1189
+ nameMap = ttFont.getReverseGlyphMap()
1190
+ try:
1191
+ gids = [nameMap[name] for name in names]
1192
+ except KeyError:
1193
+ nameMap = ttFont.getReverseGlyphMap(rebuild=True)
1194
+ try:
1195
+ gids = [nameMap[name] for name in names]
1196
+ except KeyError:
1197
+ # allow virtual GIDs in format 12 tables
1198
+ gids = []
1199
+ for name in names:
1200
+ try:
1201
+ gid = nameMap[name]
1202
+ except KeyError:
1203
+ try:
1204
+ if name[:3] == "gid":
1205
+ gid = int(name[3:])
1206
+ else:
1207
+ gid = ttFont.getGlyphID(name)
1208
+ except:
1209
+ raise KeyError(name)
1210
+
1211
+ gids.append(gid)
1212
+
1213
+ cmap = {} # code:glyphID mapping
1214
+ for code, gid in zip(charCodes, gids):
1215
+ cmap[code] = gid
1216
+
1217
+ charCodes.sort()
1218
+ index = 0
1219
+ startCharCode = charCodes[0]
1220
+ startGlyphID = cmap[startCharCode]
1221
+ lastGlyphID = startGlyphID - self._format_step
1222
+ lastCharCode = startCharCode - 1
1223
+ nGroups = 0
1224
+ dataList = []
1225
+ maxIndex = len(charCodes)
1226
+ for index in range(maxIndex):
1227
+ charCode = charCodes[index]
1228
+ glyphID = cmap[charCode]
1229
+ if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
1230
+ dataList.append(
1231
+ struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)
1232
+ )
1233
+ startCharCode = charCode
1234
+ startGlyphID = glyphID
1235
+ nGroups = nGroups + 1
1236
+ lastGlyphID = glyphID
1237
+ lastCharCode = charCode
1238
+ dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
1239
+ nGroups = nGroups + 1
1240
+ data = bytesjoin(dataList)
1241
+ lengthSubtable = len(data) + 16
1242
+ assert len(data) == (nGroups * 12) == (lengthSubtable - 16)
1243
+ return (
1244
+ struct.pack(
1245
+ ">HHLLL",
1246
+ self.format,
1247
+ self.reserved,
1248
+ lengthSubtable,
1249
+ self.language,
1250
+ nGroups,
1251
+ )
1252
+ + data
1253
+ )
1254
+
1255
+ def toXML(self, writer, ttFont):
1256
+ writer.begintag(
1257
+ self.__class__.__name__,
1258
+ [
1259
+ ("platformID", self.platformID),
1260
+ ("platEncID", self.platEncID),
1261
+ ("format", self.format),
1262
+ ("reserved", self.reserved),
1263
+ ("length", self.length),
1264
+ ("language", self.language),
1265
+ ("nGroups", self.nGroups),
1266
+ ],
1267
+ )
1268
+ writer.newline()
1269
+ codes = sorted(self.cmap.items())
1270
+ self._writeCodes(codes, writer)
1271
+ writer.endtag(self.__class__.__name__)
1272
+ writer.newline()
1273
+
1274
+ def fromXML(self, name, attrs, content, ttFont):
1275
+ self.format = safeEval(attrs["format"])
1276
+ self.reserved = safeEval(attrs["reserved"])
1277
+ self.length = safeEval(attrs["length"])
1278
+ self.language = safeEval(attrs["language"])
1279
+ self.nGroups = safeEval(attrs["nGroups"])
1280
+ if not hasattr(self, "cmap"):
1281
+ self.cmap = {}
1282
+ cmap = self.cmap
1283
+
1284
+ for element in content:
1285
+ if not isinstance(element, tuple):
1286
+ continue
1287
+ name, attrs, content = element
1288
+ if name != "map":
1289
+ continue
1290
+ cmap[safeEval(attrs["code"])] = attrs["name"]
1291
+
1292
+
1293
class cmap_format_12(cmap_format_12_or_13):
    """Segmented coverage: each group maps a code run to consecutive GIDs."""

    _format_step = 1

    def __init__(self, format=12):
        cmap_format_12_or_13.__init__(self, format)

    def _computeGIDs(self, startingGlyph, numberOfGlyphs):
        # Glyph IDs count up along with the character codes.
        return list(range(startingGlyph, startingGlyph + numberOfGlyphs))

    def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
        # Both the code and the glyph must advance by exactly one.
        return (charCode == 1 + lastCharCode) and (glyphID == 1 + lastGlyphID)
1304
+
1305
+
1306
class cmap_format_13(cmap_format_12_or_13):
    """Many-to-one range mappings: every code in a group gets the same GID."""

    _format_step = 0

    def __init__(self, format=13):
        cmap_format_12_or_13.__init__(self, format)

    def _computeGIDs(self, startingGlyph, numberOfGlyphs):
        # Every code in the group maps to the same glyph.
        return [startingGlyph] * numberOfGlyphs

    def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
        # The code advances by one while the glyph stays constant.
        return (charCode == 1 + lastCharCode) and (glyphID == lastGlyphID)
1317
+
1318
+
1319
def cvtToUVS(threeByteString):
    """Decode a 3-byte big-endian string as a 24-bit unsigned integer."""
    (value,) = struct.unpack(">L", b"\0" + threeByteString)
    return value
1323
+
1324
+
1325
def cvtFromUVS(val):
    """Encode an unsigned integer < 2**24 as 3 big-endian bytes."""
    assert 0 <= val < 0x1000000
    return struct.pack(">L", val)[1:]
1329
+
1330
+
1331
+ class cmap_format_14(CmapSubtable):
1332
+ def decompileHeader(self, data, ttFont):
1333
+ format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
1334
+ self.data = data[10:]
1335
+ self.length = length
1336
+ self.numVarSelectorRecords = numVarSelectorRecords
1337
+ self.ttFont = ttFont
1338
+ self.language = 0xFF # has no language.
1339
+
1340
+ def decompile(self, data, ttFont):
1341
+ if data is not None and ttFont is not None:
1342
+ self.decompileHeader(data, ttFont)
1343
+ else:
1344
+ assert (
1345
+ data is None and ttFont is None
1346
+ ), "Need both data and ttFont arguments"
1347
+ data = self.data
1348
+
1349
+ self.cmap = (
1350
+ {}
1351
+ ) # so that clients that expect this to exist in a cmap table won't fail.
1352
+ uvsDict = {}
1353
+ recOffset = 0
1354
+ for n in range(self.numVarSelectorRecords):
1355
+ uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(
1356
+ ">3sLL", data[recOffset : recOffset + 11]
1357
+ )
1358
+ recOffset += 11
1359
+ varUVS = cvtToUVS(uvs)
1360
+ if defOVSOffset:
1361
+ startOffset = defOVSOffset - 10
1362
+ (numValues,) = struct.unpack(">L", data[startOffset : startOffset + 4])
1363
+ startOffset += 4
1364
+ for r in range(numValues):
1365
+ uv, addtlCnt = struct.unpack(
1366
+ ">3sB", data[startOffset : startOffset + 4]
1367
+ )
1368
+ startOffset += 4
1369
+ firstBaseUV = cvtToUVS(uv)
1370
+ cnt = addtlCnt + 1
1371
+ baseUVList = list(range(firstBaseUV, firstBaseUV + cnt))
1372
+ glyphList = [None] * cnt
1373
+ localUVList = zip(baseUVList, glyphList)
1374
+ try:
1375
+ uvsDict[varUVS].extend(localUVList)
1376
+ except KeyError:
1377
+ uvsDict[varUVS] = list(localUVList)
1378
+
1379
+ if nonDefUVSOffset:
1380
+ startOffset = nonDefUVSOffset - 10
1381
+ (numRecs,) = struct.unpack(">L", data[startOffset : startOffset + 4])
1382
+ startOffset += 4
1383
+ localUVList = []
1384
+ for r in range(numRecs):
1385
+ uv, gid = struct.unpack(">3sH", data[startOffset : startOffset + 5])
1386
+ startOffset += 5
1387
+ uv = cvtToUVS(uv)
1388
+ glyphName = self.ttFont.getGlyphName(gid)
1389
+ localUVList.append((uv, glyphName))
1390
+ try:
1391
+ uvsDict[varUVS].extend(localUVList)
1392
+ except KeyError:
1393
+ uvsDict[varUVS] = localUVList
1394
+
1395
+ self.uvsDict = uvsDict
1396
+
1397
+ def toXML(self, writer, ttFont):
1398
+ writer.begintag(
1399
+ self.__class__.__name__,
1400
+ [
1401
+ ("platformID", self.platformID),
1402
+ ("platEncID", self.platEncID),
1403
+ ],
1404
+ )
1405
+ writer.newline()
1406
+ uvsDict = self.uvsDict
1407
+ uvsList = sorted(uvsDict.keys())
1408
+ for uvs in uvsList:
1409
+ uvList = uvsDict[uvs]
1410
+ uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
1411
+ for uv, gname in uvList:
1412
+ attrs = [("uv", hex(uv)), ("uvs", hex(uvs))]
1413
+ if gname is not None:
1414
+ attrs.append(("name", gname))
1415
+ writer.simpletag("map", attrs)
1416
+ writer.newline()
1417
+ writer.endtag(self.__class__.__name__)
1418
+ writer.newline()
1419
+
1420
+ def fromXML(self, name, attrs, content, ttFont):
1421
+ self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
1422
+ if not hasattr(self, "cmap"):
1423
+ self.cmap = (
1424
+ {}
1425
+ ) # so that clients that expect this to exist in a cmap table won't fail.
1426
+ if not hasattr(self, "uvsDict"):
1427
+ self.uvsDict = {}
1428
+ uvsDict = self.uvsDict
1429
+
1430
+ # For backwards compatibility reasons we accept "None" as an indicator
1431
+ # for "default mapping", unless the font actually has a glyph named
1432
+ # "None".
1433
+ _hasGlyphNamedNone = None
1434
+
1435
+ for element in content:
1436
+ if not isinstance(element, tuple):
1437
+ continue
1438
+ name, attrs, content = element
1439
+ if name != "map":
1440
+ continue
1441
+ uvs = safeEval(attrs["uvs"])
1442
+ uv = safeEval(attrs["uv"])
1443
+ gname = attrs.get("name")
1444
+ if gname == "None":
1445
+ if _hasGlyphNamedNone is None:
1446
+ _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder()
1447
+ if not _hasGlyphNamedNone:
1448
+ gname = None
1449
+ try:
1450
+ uvsDict[uvs].append((uv, gname))
1451
+ except KeyError:
1452
+ uvsDict[uvs] = [(uv, gname)]
1453
+
1454
+ def compile(self, ttFont):
1455
+ if self.data:
1456
+ return (
1457
+ struct.pack(
1458
+ ">HLL", self.format, self.length, self.numVarSelectorRecords
1459
+ )
1460
+ + self.data
1461
+ )
1462
+
1463
+ uvsDict = self.uvsDict
1464
+ uvsList = sorted(uvsDict.keys())
1465
+ self.numVarSelectorRecords = len(uvsList)
1466
+ offset = (
1467
+ 10 + self.numVarSelectorRecords * 11
1468
+ ) # current value is end of VarSelectorRecords block.
1469
+ data = []
1470
+ varSelectorRecords = []
1471
+ for uvs in uvsList:
1472
+ entryList = uvsDict[uvs]
1473
+
1474
+ defList = [entry for entry in entryList if entry[1] is None]
1475
+ if defList:
1476
+ defList = [entry[0] for entry in defList]
1477
+ defOVSOffset = offset
1478
+ defList.sort()
1479
+
1480
+ lastUV = defList[0]
1481
+ cnt = -1
1482
+ defRecs = []
1483
+ for defEntry in defList:
1484
+ cnt += 1
1485
+ if (lastUV + cnt) != defEntry:
1486
+ rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt - 1)
1487
+ lastUV = defEntry
1488
+ defRecs.append(rec)
1489
+ cnt = 0
1490
+
1491
+ rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
1492
+ defRecs.append(rec)
1493
+
1494
+ numDefRecs = len(defRecs)
1495
+ data.append(struct.pack(">L", numDefRecs))
1496
+ data.extend(defRecs)
1497
+ offset += 4 + numDefRecs * 4
1498
+ else:
1499
+ defOVSOffset = 0
1500
+
1501
+ ndefList = [entry for entry in entryList if entry[1] is not None]
1502
+ if ndefList:
1503
+ nonDefUVSOffset = offset
1504
+ ndefList.sort()
1505
+ numNonDefRecs = len(ndefList)
1506
+ data.append(struct.pack(">L", numNonDefRecs))
1507
+ offset += 4 + numNonDefRecs * 5
1508
+
1509
+ for uv, gname in ndefList:
1510
+ gid = ttFont.getGlyphID(gname)
1511
+ ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
1512
+ data.append(ndrec)
1513
+ else:
1514
+ nonDefUVSOffset = 0
1515
+
1516
+ vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
1517
+ varSelectorRecords.append(vrec)
1518
+
1519
+ data = bytesjoin(varSelectorRecords) + bytesjoin(data)
1520
+ self.length = 10 + len(data)
1521
+ headerdata = struct.pack(
1522
+ ">HLL", self.format, self.length, self.numVarSelectorRecords
1523
+ )
1524
+
1525
+ return headerdata + data
1526
+
1527
+
1528
class cmap_format_unknown(CmapSubtable):
    """Pass-through container for subtable formats we don't parse."""

    def toXML(self, writer, ttFont):
        cmapName = self.__class__.__name__[:12] + str(self.format)
        writer.begintag(
            cmapName,
            [
                ("platformID", self.platformID),
                ("platEncID", self.platEncID),
            ],
        )
        writer.newline()
        writer.dumphex(self.data)  # raw bytes only; we can't interpret them
        writer.endtag(cmapName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.data = readHex(content)
        self.cmap = {}

    def decompileHeader(self, data, ttFont):
        self.language = 0  # dummy value
        self.data = data

    def decompile(self, data, ttFont):
        # Usually reached lazily via __getattr__ with both args None; a direct
        # call must provide both.  Nothing beyond the header is parsed.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (
                data is None and ttFont is None
            ), "Need both data and ttFont arguments"

    def compile(self, ttFont):
        # Re-emit the raw bytes, or nothing if we never had any.
        return self.data if self.data else None
1566
+
1567
+
1568
# Mapping from cmap subtable format number to the class that implements it.
cmap_classes = {
    0: cmap_format_0,
    2: cmap_format_2,
    4: cmap_format_4,
    6: cmap_format_6,
    12: cmap_format_12,
    13: cmap_format_13,
    14: cmap_format_14,
}
valley/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_v_a_r.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import DefaultTable
2
+ from fontTools.misc import sstruct
3
+ from fontTools.misc.textTools import bytesjoin
4
+ from fontTools.ttLib.tables.TupleVariation import (
5
+ compileTupleVariationStore,
6
+ decompileTupleVariationStore,
7
+ TupleVariation,
8
+ )
9
+
10
+
11
+ # https://www.microsoft.com/typography/otspec/cvar.htm
12
+ # https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
13
+ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html
14
+
15
+ CVAR_HEADER_FORMAT = """
16
+ > # big endian
17
+ majorVersion: H
18
+ minorVersion: H
19
+ tupleVariationCount: H
20
+ offsetToData: H
21
+ """
22
+
23
+ CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT)
24
+
25
+
26
class table__c_v_a_r(DefaultTable.DefaultTable):
    """The 'cvar' table: CVT value variations for TrueType variable fonts."""

    dependencies = ["cvt ", "fvar"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.majorVersion, self.minorVersion = 1, 0
        self.variations = []

    def compile(self, ttFont, useSharedPoints=False):
        # Variations whose deltas have no effect are dropped from the output.
        tupleVariationCount, tuples, data = compileTupleVariationStore(
            variations=[v for v in self.variations if v.hasImpact()],
            pointCount=len(ttFont["cvt "].values),
            axisTags=[axis.axisTag for axis in ttFont["fvar"].axes],
            sharedTupleIndices={},
            useSharedPoints=useSharedPoints,
        )
        header = {
            "majorVersion": self.majorVersion,
            "minorVersion": self.minorVersion,
            "tupleVariationCount": tupleVariationCount,
            "offsetToData": CVAR_HEADER_SIZE + len(tuples),
        }
        return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data])

    def decompile(self, data, ttFont):
        header = {}
        sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header)
        self.majorVersion = header["majorVersion"]
        self.minorVersion = header["minorVersion"]
        # Only major version 1 is understood.
        assert self.majorVersion == 1, self.majorVersion
        self.variations = decompileTupleVariationStore(
            tableTag=self.tableTag,
            axisTags=[axis.axisTag for axis in ttFont["fvar"].axes],
            tupleVariationCount=header["tupleVariationCount"],
            pointCount=len(ttFont["cvt "].values),
            sharedTuples=None,
            data=data,
            pos=CVAR_HEADER_SIZE,
            dataPos=header["offsetToData"],
        )

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.majorVersion = int(attrs.get("major", "1"))
            self.minorVersion = int(attrs.get("minor", "0"))
        elif name == "tuple":
            # One delta slot per CVT value; None means "no delta here".
            var = TupleVariation({}, [None] * len(ttFont["cvt "].values))
            self.variations.append(var)
            for tupleElement in content:
                if isinstance(tupleElement, tuple):
                    tupleName, tupleAttrs, tupleContent = tupleElement
                    var.fromXML(tupleName, tupleAttrs, tupleContent)

    def toXML(self, writer, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion)
        writer.newline()
        for var in self.variations:
            var.toXML(writer, axisTags)